code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/ChangePasswordDialog.ui'
#
# Created: Sun Jun 28 08:12:25 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui

# Qt4 compatibility shims: older PyQt4 builds expose QString.fromUtf8 and
# QApplication.UnicodeUTF8; newer API-v2 builds do not, so fall back to
# plain-string equivalents when the attributes are missing.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_dlgChangepassword(object):
    """Auto-generated (pyuic4) layout for the change-password dialog."""

    def setupUi(self, dlgChangepassword):
        """Create widgets, wire the button box, and set the tab order."""
        dlgChangepassword.setObjectName(_fromUtf8("dlgChangepassword"))
        dlgChangepassword.setWindowModality(QtCore.Qt.ApplicationModal)
        dlgChangepassword.resize(289, 166)
        dlgChangepassword.setModal(True)

        self.gridLayoutWidget = QtGui.QWidget(dlgChangepassword)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 0, 271, 158))
        self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))

        # Labels for the three password rows.
        self.label = QtGui.QLabel(self.gridLayoutWidget)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
        self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)

        # Old password, new password and confirmation line edits.
        self.edlOldpass = QtGui.QLineEdit(self.gridLayoutWidget)
        self.edlOldpass.setInputMask(_fromUtf8(""))
        self.edlOldpass.setObjectName(_fromUtf8("edlOldpass"))
        self.gridLayout.addWidget(self.edlOldpass, 1, 1, 1, 1)
        self.edlNewpass2 = QtGui.QLineEdit(self.gridLayoutWidget)
        self.edlNewpass2.setInputMask(_fromUtf8(""))
        self.edlNewpass2.setObjectName(_fromUtf8("edlNewpass2"))
        self.gridLayout.addWidget(self.edlNewpass2, 3, 1, 1, 1)
        self.edlNewpass1 = QtGui.QLineEdit(self.gridLayoutWidget)
        self.edlNewpass1.setInputMask(_fromUtf8(""))
        self.edlNewpass1.setObjectName(_fromUtf8("edlNewpass1"))
        self.gridLayout.addWidget(self.edlNewpass1, 2, 1, 1, 1)

        self.buttonBox = QtGui.QDialogButtonBox(self.gridLayoutWidget)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(
            QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout.addWidget(self.buttonBox, 6, 0, 1, 2)

        self.retranslateUi(dlgChangepassword)
        # Old-style signal wiring kept for PyQt4 compatibility.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), dlgChangepassword.reject)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), dlgChangepassword.accept)
        QtCore.QMetaObject.connectSlotsByName(dlgChangepassword)
        dlgChangepassword.setTabOrder(self.edlOldpass, self.edlNewpass1)
        dlgChangepassword.setTabOrder(self.edlNewpass1, self.edlNewpass2)
        dlgChangepassword.setTabOrder(self.edlNewpass2, self.buttonBox)

    def retranslateUi(self, dlgChangepassword):
        """Apply the translatable strings to the dialog widgets."""
        dlgChangepassword.setWindowTitle(_translate("dlgChangepassword", "Dialog", None))
        self.label.setText(_translate("dlgChangepassword", "Old Password", None))
        self.label_3.setText(_translate("dlgChangepassword", "Enter again", None))
        self.label_2.setText(_translate("dlgChangepassword", "New Password", None))
from __future__ import unicode_literals
import logging
from django.apps import apps
from django.dispatch import receiver
from django.db import models, connection
from django.db.models.signals import post_save
from django.contrib.contenttypes.models import ContentType
from psycopg2._psycopg import AsIs
from addons.base.models import BaseNodeSettings, BaseStorageAddon, BaseUserSettings
from osf.utils.fields import EncryptedJSONField
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.exceptions import InvalidTagError, NodeStateError, TagNotFoundError
from framework.auth.core import Auth
from osf.models.mixins import Loggable
from osf.models import AbstractNode
from osf.models.files import File, FileVersion, Folder, TrashedFileNode, BaseFileNode, BaseFileNodeManager
from osf.models.metaschema import FileMetadataSchema
from osf.utils import permissions
from website.files import exceptions
from website.files import utils as files_utils
from website.util import api_url_for
from website import settings as website_settings
from addons.osfstorage.settings import DEFAULT_REGION_ID
from website.util import api_v2_url
settings = apps.get_app_config('addons_osfstorage')
logger = logging.getLogger(__name__)
class OsfStorageFolderManager(BaseFileNodeManager):
    """Manager helpers specific to OSF storage folders."""

    def get_root(self, target):
        """Return the root folder that ``target``'s file tree hangs off."""
        ctype = ContentType.objects.get_for_model(target)
        return self.get(
            target_object_id=target.id,
            target_content_type=ctype,
            is_root=True,
        )
class OsfStorageFileNode(BaseFileNode):
    """A file or folder stored on OSF storage.

    Shared base for OsfStorageFile and OsfStorageFolder. Paths are computed
    from the parent chain rather than stored (see `path` / `materialized_path`).
    """

    _provider = 'osfstorage'

    @property
    def materialized_path(self):
        """Return the '/'-joined path from the root folder to this node.

        Walks the parent chain with one recursive CTE. Folders get a trailing
        '/'; '/' is returned when the query yields no row.
        """
        # The table-name %s placeholders are interpolated unquoted via AsIs;
        # only self.pk travels as a real query parameter.
        sql = """
            WITH RECURSIVE materialized_path_cte(parent_id, GEN_PATH) AS (
                SELECT
                    T.parent_id,
                    T.name :: TEXT AS GEN_PATH
                FROM %s AS T
                WHERE T.id = %s
                UNION ALL
                SELECT
                    T.parent_id,
                    (T.name || '/' || R.GEN_PATH) AS GEN_PATH
                FROM materialized_path_cte AS R
                JOIN %s AS T ON T.id = R.parent_id
                WHERE R.parent_id IS NOT NULL
            )
            SELECT gen_path
            FROM materialized_path_cte AS N
            WHERE parent_id IS NULL
            LIMIT 1;
        """
        with connection.cursor() as cursor:
            cursor.execute(sql, [AsIs(self._meta.db_table), self.pk, AsIs(self._meta.db_table)])
            row = cursor.fetchone()
        if not row:
            return '/'
        path = row[0]
        if not self.is_file:
            path = path + '/'
        return path

    @materialized_path.setter
    def materialized_path(self, val):
        # The path is derived from the folder tree, so writes are ignored.
        # raise Exception('Cannot set materialized path on OSFStorage as it is computed.')
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning('Cannot set materialized path on OSFStorage because it\'s computed.')

    @classmethod
    def get(cls, _id, target):
        """Fetch the node with ``_id`` that belongs to ``target``."""
        return cls.objects.get(_id=_id, target_object_id=target.id, target_content_type=ContentType.objects.get_for_model(target))

    @classmethod
    def get_or_create(cls, target, path):
        """Override get or create for osfstorage
        Path is always the _id of the osfstorage filenode.
        Use load here as its way faster than find.
        Just manually assert that node is equal to node.
        """
        inst = cls.load(path.strip('/'))
        if inst and inst.target.id == target.id:
            return inst
        # Dont raise anything a 404 will be raised later
        return cls(target=target, path=path)

    @classmethod
    def get_file_guids(cls, materialized_path, provider, target=None):
        """Return the sorted GUID ids of every file under ``materialized_path``."""
        guids = []
        path = materialized_path.strip('/')
        file_obj = cls.load(path)
        if not file_obj:
            file_obj = TrashedFileNode.load(path)

        # At this point, file_obj may be an OsfStorageFile, an OsfStorageFolder, or a
        # TrashedFileNode. TrashedFileNodes do not have *File and *Folder subclasses, since
        # only osfstorage trashes folders. To search for children of TrashFileNodes
        # representing ex-OsfStorageFolders, we will reimplement the `children` method of the
        # Folder class here.
        if not file_obj.is_file:
            children = []
            if isinstance(file_obj, TrashedFileNode):
                children = file_obj.trashed_children.all()
            else:
                children = file_obj.children
            for item in children:
                guids.extend(cls.get_file_guids(item.path, provider, target=target))
        else:
            guid = file_obj.get_guid()
            if guid:
                guids.append(guid._id)
        return sorted(guids)

    @property
    def kind(self):
        """'file' or 'folder'."""
        return 'file' if self.is_file else 'folder'

    @property
    def path(self):
        """Path is dynamically computed as storedobject.path is stored
        as an empty string to make the unique index work properly for osfstorage
        """
        return '/' + self._id + ('' if self.is_file else '/')

    @property
    def is_checked_out(self):
        return self.checkout is not None

    # overrides BaseFileNode
    @property
    def current_version_number(self):
        return self.versions.count() or 1

    def _check_delete_allowed(self):
        # Deletion is blocked for preprint primary files and checked-out nodes.
        if self.is_preprint_primary:
            raise exceptions.FileNodeIsPrimaryFile()
        if self.is_checked_out:
            raise exceptions.FileNodeCheckedOutError()
        return True

    @property
    def is_preprint_primary(self):
        return (
            getattr(self.target, 'primary_file', None) == self and
            not getattr(self.target, 'is_deleted', None)
        )

    def delete(self, user=None, parent=None, **kwargs):
        # Snapshot the computed paths so trash records keep them post-delete.
        self._path = self.path
        self._materialized_path = self.materialized_path
        # The conditional's test runs first, so _check_delete_allowed() can
        # raise before super().delete() is evaluated.
        return super(OsfStorageFileNode, self).delete(user=user, parent=parent) if self._check_delete_allowed() else None

    def update_region_from_latest_version(self, destination_parent):
        # Implemented by OsfStorageFile / OsfStorageFolder.
        raise NotImplementedError

    def move_under(self, destination_parent, name=None):
        if self.is_preprint_primary:
            # Primary preprint files may only move within the same target/provider.
            if self.target != destination_parent.target or self.provider != destination_parent.provider:
                raise exceptions.FileNodeIsPrimaryFile()
        if self.is_checked_out:
            raise exceptions.FileNodeCheckedOutError()
        self.update_region_from_latest_version(destination_parent)
        return super(OsfStorageFileNode, self).move_under(destination_parent, name)

    def check_in_or_out(self, user, checkout, save=False):
        """
        Updates self.checkout with the requesting user or None,
        iff user has permission to check out file or folder.
        Adds log to self.target if target is a node.

        :param user: User making the request
        :param checkout: Either the same user or None, depending on in/out-checking
        :param save: Whether or not to save the user
        """
        from osf.models import NodeLog  # Avoid circular import

        target = self.target
        if isinstance(target, AbstractNode) and self.is_checked_out and self.checkout != user:
            # Allow project admins to force check in
            if target.has_permission(user, permissions.ADMIN):
                # But don't allow force check in for prereg admin checked out files
                if self.checkout.has_perm('osf.view_prereg') and target.draft_registrations_active.filter(
                        registration_schema__name='Prereg Challenge').exists():
                    raise exceptions.FileNodeCheckedOutError()
            else:
                raise exceptions.FileNodeCheckedOutError()

        if not target.has_permission(user, permissions.WRITE):
            raise exceptions.FileNodeCheckedOutError()

        action = NodeLog.CHECKED_OUT if checkout else NodeLog.CHECKED_IN
        # Only flip state (and log) on a real in->out or out->in transition.
        if self.is_checked_out and action == NodeLog.CHECKED_IN or not self.is_checked_out and action == NodeLog.CHECKED_OUT:
            self.checkout = checkout
            if isinstance(target, Loggable):
                target.add_log(
                    action=action,
                    params={
                        'kind': self.kind,
                        'project': target.parent_id,
                        'node': target._id,
                        'urls': {
                            # web_url_for unavailable -- called from within the API, so no flask app
                            'download': '/project/{}/files/{}/{}/?action=download'.format(target._id,
                                                                                         self.provider,
                                                                                         self._id),
                            'view': '/project/{}/files/{}/{}'.format(target._id, self.provider, self._id)},
                        'path': self.materialized_path
                    },
                    auth=Auth(user),
                )
            if save:
                self.save()

    def save(self):
        # Stored paths stay empty so the unique index works; see `path` above.
        self._path = ''
        self._materialized_path = ''
        return super(OsfStorageFileNode, self).save()
class OsfStorageFile(OsfStorageFileNode, File):
    """A versioned file stored on OSF storage."""

    @property
    def _hashes(self):
        # Hashes of the most recent version, or None when unversioned.
        last_version = self.versions.last()
        if not last_version:
            return None
        return {
            'sha1': last_version.metadata['sha1'],
            'sha256': last_version.metadata['sha256'],
            'md5': last_version.metadata['md5']
        }

    @property
    def last_known_metadata(self):
        """Snapshot of path/hashes/size/mtime for externally tracked state."""
        last_version = self.versions.last()
        if not last_version:
            size = None
        else:
            size = last_version.size
        return {
            'path': self.materialized_path,
            'hashes': self._hashes,
            'size': size,
            'last_seen': self.modified
        }

    def touch(self, bearer, version=None, revision=None, **kwargs):
        """Return the requested version (``revision`` wins over ``version``)."""
        try:
            return self.get_version(revision or version)
        except ValueError:
            return None

    @property
    def history(self):
        # Metadata dicts of all versions, in the relation's default order.
        return list(self.versions.values_list('metadata', flat=True))

    @history.setter
    def history(self, value):
        # History is derived from the versions relation; writes are ignored.
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning('Tried to set history on OsfStorageFile/Folder')

    def serialize(self, include_full=None, version=None):
        ret = super(OsfStorageFile, self).serialize()
        if include_full:
            ret['fullPath'] = self.materialized_path

        version = self.get_version(version)
        earliest_version = self.versions.order_by('created').first()
        # NOTE: 'created' guards on `version` (not earliest_version); whenever
        # a version exists, an earliest version necessarily exists as well.
        ret.update({
            'version': self.versions.count(),
            'md5': version.metadata.get('md5') if version else None,
            'sha256': version.metadata.get('sha256') if version else None,
            'modified': version.created.isoformat() if version else None,
            'created': earliest_version.created.isoformat() if version else None,
        })
        return ret

    def update_region_from_latest_version(self, destination_parent):
        # Re-home the newest version to the destination target's region.
        most_recent_fileversion = self.versions.select_related('region').order_by('-created').first()
        if most_recent_fileversion and most_recent_fileversion.region != destination_parent.target.osfstorage_region:
            most_recent_fileversion.region = destination_parent.target.osfstorage_region
            most_recent_fileversion.save()

    def create_version(self, creator, location, metadata=None):
        """Append a new FileVersion unless it duplicates the latest one."""
        latest_version = self.get_version()
        version = FileVersion(identifier=self.versions.count() + 1, creator=creator, location=location)

        if latest_version and latest_version.is_duplicate(version):
            return latest_version

        if metadata:
            version.update_metadata(metadata, save=False)

        version.region = self.target.osfstorage_region
        version._find_matching_archive(save=False)
        version.save()
        self.versions.add(version)
        self.save()

        return version

    def get_version(self, version=None, required=False):
        """Return the FileVersion whose identifier equals ``version``.

        With no argument, return the first version under the relation's
        default ordering (treated as the latest by create_version), or None.
        When ``required`` is true, a missing identifier raises
        VersionNotFoundError instead of returning None.
        """
        if version is None:
            if self.versions.exists():
                return self.versions.first()
            return None

        try:
            return self.versions.get(identifier=version)
        except FileVersion.DoesNotExist:
            if required:
                raise exceptions.VersionNotFoundError(version)
            return None

    def add_tag_log(self, action, tag, auth):
        """Record a tag add/remove in the target's log (Loggable targets only)."""
        if isinstance(self.target, Loggable):
            target = self.target
            params = {
                'urls': {
                    'download': '/{}/files/osfstorage/{}/?action=download'.format(target._id, self._id),
                    'view': '/{}/files/osfstorage/{}/'.format(target._id, self._id)},
                'path': self.materialized_path,
                'tag': tag,
            }
            if isinstance(target, AbstractNode):
                params['parent_node'] = target.parent_id
                params['node'] = target._id
            target.add_log(
                action=action,
                params=params,
                auth=auth,
            )
        else:
            raise NotImplementedError('Cannot add a tag log to a {}'.format(self.target.__class__.__name__))

    def add_tag(self, tag, auth, save=True, log=True):
        from osf.models import Tag, NodeLog  # Prevent import error
        # No-op (returns False) on registrations or when the tag already exists.
        if not self.tags.filter(system=False, name=tag).exists() and not getattr(self.target, 'is_registration', False):
            new_tag = Tag.load(tag)
            if not new_tag:
                new_tag = Tag(name=tag)
            new_tag.save()
            self.tags.add(new_tag)
            if log:
                self.add_tag_log(NodeLog.FILE_TAG_ADDED, tag, auth)
            if save:
                self.save()
            return True
        return False

    def remove_tag(self, tag, auth, save=True, log=True):
        from osf.models import Tag, NodeLog  # Prevent import error
        if getattr(self.target, 'is_registration', False):
            # Can't perform edits on a registration
            raise NodeStateError
        tag_instance = Tag.objects.filter(system=False, name=tag).first()
        if not tag_instance:
            raise InvalidTagError
        elif not self.tags.filter(id=tag_instance.id).exists():
            raise TagNotFoundError
        else:
            self.tags.remove(tag_instance)
            if log:
                self.add_tag_log(NodeLog.FILE_TAG_REMOVED, tag_instance._id, auth)
            if save:
                self.save()
            return True

    def delete(self, user=None, parent=None, **kwargs):
        # Drop the file from the search index before deleting the record.
        from website.search import search
        search.update_file(self, delete=True)
        return super(OsfStorageFile, self).delete(user, parent, **kwargs)

    def save(self, skip_search=False, *args, **kwargs):
        from website.search import search
        ret = super(OsfStorageFile, self).save()
        if not skip_search:
            search.update_file(self)
        return ret
class OsfStorageFolder(OsfStorageFileNode, Folder):
    """A folder on OSF storage; the tree root carries ``is_root=True``."""

    is_root = models.NullBooleanField()
    objects = OsfStorageFolderManager()

    @property
    def is_checked_out(self):
        """True when this folder or any descendant node is checked out."""
        # Recursive CTE over the subtree rooted at this folder. The table
        # name is interpolated unquoted via AsIs; self.pk is a parameter.
        sql = """
            WITH RECURSIVE is_checked_out_cte(id, parent_id, checkout_id) AS (
                SELECT
                    T.id,
                    T.parent_id,
                    T.checkout_id
                FROM %s AS T
                WHERE T.id = %s
                UNION ALL
                SELECT
                    T.id,
                    T.parent_id,
                    T.checkout_id
                FROM is_checked_out_cte AS R
                JOIN %s AS T ON T.parent_id = R.id
            )
            SELECT N.checkout_id
            FROM is_checked_out_cte as N
            WHERE N.checkout_id IS NOT NULL
            LIMIT 1;
        """
        with connection.cursor() as cursor:
            cursor.execute(sql, [AsIs(self._meta.db_table), self.pk, AsIs(self._meta.db_table)])
            hit = cursor.fetchone()
        return bool(hit and hit[0])

    @property
    def is_preprint_primary(self):
        """True when any child of this folder is a preprint's primary file."""
        if hasattr(self.target, 'primary_file') and self.target.primary_file:
            for child in self.children.all():
                if getattr(child.target, 'primary_file', None) and child.is_preprint_primary:
                    return True
        return False

    def serialize(self, include_full=False, version=None):
        # Versions just for compatibility
        ret = super(OsfStorageFolder, self).serialize()
        if include_full:
            ret['fullPath'] = self.materialized_path
        return ret

    def update_region_from_latest_version(self, destination_parent):
        """Recursively update the storage region of every descendant file."""
        for child in self.children.all().prefetch_related('versions'):
            child.update_region_from_latest_version(destination_parent)
class Region(models.Model):
    """A storage region: a named WaterButler/MFR endpoint plus credentials."""

    _id = models.CharField(max_length=255, db_index=True)
    name = models.CharField(max_length=200)
    waterbutler_credentials = EncryptedJSONField(default=dict)
    waterbutler_url = models.URLField(default=website_settings.WATERBUTLER_URL)
    mfr_url = models.URLField(default=website_settings.MFR_SERVER_URL)
    waterbutler_settings = DateTimeAwareJSONField(default=dict)

    class Meta:
        unique_together = ('_id', 'name')

    def __unicode__(self):
        return '{}'.format(self.name)

    def get_absolute_url(self):
        return '{}regions/{}'.format(self.absolute_api_v2_url, self._id)

    @property
    def absolute_api_v2_url(self):
        return api_v2_url('/regions/{}/'.format(self._id))
class UserSettings(BaseUserSettings):
    """Per-user osfstorage settings; tracks the user's default region."""

    default_region = models.ForeignKey(Region, null=True, on_delete=models.CASCADE)

    def on_add(self):
        """Assign the platform default region when the addon is enabled."""
        self.default_region = Region.objects.get(_id=DEFAULT_REGION_ID)

    def merge(self, user_settings):
        """Merge `user_settings` into this instance"""
        NodeSettings.objects.filter(user_settings=user_settings).update(user_settings=self)

    def set_region(self, region_id):
        """Set the default region by its ``_id``; ValueError when unknown."""
        try:
            region = Region.objects.get(_id=region_id)
        except Region.DoesNotExist:
            raise ValueError('Region cannot be found.')
        self.default_region = region
        self.save()
        return
class NodeSettings(BaseNodeSettings, BaseStorageAddon):
    """osfstorage settings attached to a node; owns the node's root folder."""

    # Required overrides
    complete = True
    has_auth = True

    root_node = models.ForeignKey(OsfStorageFolder, null=True, blank=True, on_delete=models.CASCADE)
    region = models.ForeignKey(Region, null=True, on_delete=models.CASCADE)
    user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)

    @property
    def folder_name(self):
        return self.root_node.name

    def get_root(self):
        return self.root_node

    def on_add(self):
        """Create and attach the root folder the first time the addon is added."""
        if self.root_node:
            return
        creator_user_settings = UserSettings.objects.get(owner=self.owner.creator)
        self.user_settings = creator_user_settings
        self.region_id = creator_user_settings.default_region_id
        # A save is required here to both create and attach the root_node
        # When on_add is called the model that self refers to does not yet exist
        # in the database and thus odm cannot attach foreign fields to it
        self.save(clean=False)
        # Note: The "root" node will always be "named" empty string
        root = OsfStorageFolder(name='', target=self.owner, is_root=True)
        root.save()
        self.root_node = root
        self.save(clean=False)

    def before_fork(self, node, user):
        pass

    def after_fork(self, node, fork, user, save=True):
        """Clone the settings onto a fork and deep-copy the file tree."""
        clone = self.clone()
        clone.owner = fork
        user_settings = user.get_addon('osfstorage')
        clone.user_settings = user_settings
        clone.region_id = user_settings.default_region_id
        clone.save()
        if not self.root_node:
            self.on_add()
        clone.root_node = files_utils.copy_files(self.get_root(), clone.owner)
        clone.save()
        return clone, None

    def after_register(self, node, registration, user, save=True):
        """Clone the settings onto a registration, keeping the same region."""
        clone = self.clone()
        clone.owner = registration
        clone.on_add()
        clone.region_id = self.region_id
        clone.save()
        return clone, None

    def serialize_waterbutler_settings(self):
        region_settings = Region.objects.get(id=self.region_id).waterbutler_settings
        return dict(region_settings, **{
            'nid': self.owner._id,
            'rootId': self.root_node._id,
            'baseUrl': api_url_for(
                'osfstorage_get_metadata',
                guid=self.owner._id,
                _absolute=True,
                _internal=True
            ),
        })

    def serialize_waterbutler_credentials(self):
        return Region.objects.get(id=self.region_id).waterbutler_credentials

    def create_waterbutler_log(self, auth, action, metadata):
        """Add an 'osf_storage_<action>' log entry; files also get view URLs."""
        params = {
            'node': self.owner._id,
            'project': self.owner.parent_id,
            'path': metadata['materialized'],
        }
        if metadata['kind'] != 'folder':
            url = self.owner.web_url_for(
                'addon_view_or_download_file',
                guid=self.owner._id,
                path=metadata['path'],
                provider='osfstorage'
            )
            params['urls'] = {'view': url, 'download': url + '?action=download'}
        self.owner.add_log(
            'osf_storage_{0}'.format(action),
            auth=auth,
            params=params
        )
@receiver(post_save, sender=OsfStorageFile)
def create_metadata_records(sender, instance, created, **kwargs):
    """On first save of a file, create one metadata record per known schema."""
    if not created:
        return
    from osf.models.metadata import FileMetadataRecord
    for schema in FileMetadataSchema.objects.all():
        FileMetadataRecord.objects.create(file=instance, schema=schema)
# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Config Encoder Filters (CEF)
#
# CEF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CEF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CEF. If not, see <http://www.gnu.org/licenses/>.
"""
Config Encoder Filters
More information: https://github.com/jtyr/ansible-config_encoder_filters
"""
from __future__ import (absolute_import, division, print_function)
from ansible import errors
from copy import copy
import re
def _str_is_bool(data):
"""Verify if data is boolean."""
return re.match(r"^(true|false)$", str(data), flags=re.IGNORECASE)
def _str_is_int(data):
"""Verify if data is integer."""
return re.match(r"^[-+]?(0|[1-9][0-9]*)$", str(data))
def _str_is_float(data):
"""Verify if data is float."""
return re.match(
r"^[-+]?(0|[1-9][0-9]*)(\.[0-9]*)?(e[-+]?[0-9]+)?$",
str(data), flags=re.IGNORECASE)
def _str_is_num(data):
    """Return a truthy match when *data* reads as an integer or a float."""
    # Keep the int check first so its match object is returned when both hit.
    return _str_is_int(data) or _str_is_float(data)
def _is_num(data):
"""Verify if data is either int or float.
Could be replaced by:
from numbers import Number as number
isinstance(data, number)
but that requires Python v2.6+.
"""
return isinstance(data, int) or isinstance(data, float)
def _escape(data, quote='"', format=None):
"""Escape special characters in a string."""
if format == 'xml':
return (
str(data).
replace('&', '&').
replace('<', '<').
replace('>', '>'))
elif format == 'control':
return (
str(data).
replace('\b', '\\b').
replace('\f', '\\f').
replace('\n', '\\n').
replace('\r', '\\r').
replace('\t', '\\t'))
elif quote is not None and len(quote):
return str(data).replace('\\', '\\\\').replace(quote, "\\%s" % quote)
else:
return data
def encode_apache(
data, convert_bools=False, convert_nums=False, indent=" ", level=0,
quote_all_nums=False, quote_all_strings=False, block_type='sections'):
"""Convert Python data structure to Apache format."""
# Return value
rv = ""
# block_type selects which shape of node is being rendered:
#   'sections' - dict with a 'content' list => <Name ...>...</Name> blocks
#   'options'  - list of single-key dicts  => "Key value" lines
#   'value'    - scalar or list rendered as a (possibly quoted) value
# NOTE: this function is written in the Python 2 dialect (iteritems,
# basestring) used throughout this module.
if block_type == 'sections':
for c in data['content']:
# First check if this section has options
if 'options' in c:
rv += encode_apache(
c['options'],
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
quote_all_nums=quote_all_nums,
quote_all_strings=quote_all_strings,
block_type='options')
# NOTE(review): despite its name, is_empty is set True when a
# sub-section has *some* content - confirm the naming upstream.
is_empty = False
# Check if this section has some sub-sections
if 'sections' in c:
for s in c['sections']:
# Check for empty sub-sections
for i in s['content']:
if (
('options' in i and len(i['options']) > 0) or
('sections' in i and len(i['sections']) > 0)):
is_empty = True
if is_empty:
# Open the section tag: <name [operator ][param]>
rv += "%s<%s " % (indent * level, s['name'])
if 'operator' in s:
rv += "%s " % s['operator']
if 'param' in s:
rv += encode_apache(
s['param'],
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
quote_all_nums=quote_all_nums,
quote_all_strings=quote_all_strings,
block_type='value')
rv += ">\n"
rv += encode_apache(
s,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
quote_all_nums=quote_all_nums,
quote_all_strings=quote_all_strings,
block_type='sections')
rv += "%s</%s>\n" % (indent * level, s['name'])
# If not last item of the loop
if c['sections'][-1] != s:
rv += "\n"
# Blank line between top-level content items that produced output.
if (
data['content'][-1] != c and (
'options' in c and len(c['options']) > 0 or (
'sections' in c and
len(c['sections']) > 0 and
is_empty))):
rv += "\n"
elif block_type == 'options':
# Each option is a single-key dict rendered as "Key value".
for o in data:
for key, val in sorted(o.iteritems()):
rv += "%s%s " % (indent * (level-1), key)
rv += encode_apache(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
quote_all_nums=quote_all_nums,
quote_all_strings=quote_all_strings,
block_type='value')
rv += "\n"
elif block_type == 'value':
if isinstance(data, bool) or convert_bools and _str_is_bool(data):
# Value is a boolean
rv += str(data).lower()
elif (
_is_num(data) or
(convert_nums and _str_is_num(data))):
# Value is a number
if quote_all_nums:
rv += '"%s"' % data
else:
rv += str(data)
elif isinstance(data, basestring):
# Value is a string
# Quote when requested, when it contains whitespace, or when empty.
if (
quote_all_strings or
" " in data or
"\t" in data or
"\n" in data or
"\r" in data or
data == ""):
rv += '"%s"' % _escape(data)
else:
rv += data
elif isinstance(data, list):
# Value is a list
# List values are emitted space-separated.
for v in data:
rv += encode_apache(
v,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
quote_all_nums=quote_all_nums,
quote_all_strings=quote_all_strings,
block_type='value')
# If not last item of the loop
if data[-1] != v:
rv += " "
return rv
def encode_erlang(
data, atom_value_indicator=":", convert_bools=False,
convert_nums=False, indent=" ", level=0):
"""Convert Python data structure to Erlang format."""
# Return value
rv = ""
if isinstance(data, dict):
# It's a dict
# Each key/value pair becomes an Erlang tuple: {key, value}.
# NOTE(review): atom_value_indicator is not forwarded to recursive
# calls, so nested atoms fall back to the default ":" - confirm.
rv += "\n"
for key, val in sorted(data.iteritems()):
rv += "%s{%s," % (indent*level, key)
if not isinstance(val, dict):
rv += " "
rv += encode_erlang(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1)
rv += "}"
elif (
data == "null" or
_is_num(data) or
isinstance(data, bool) or
(convert_nums and _str_is_num(data)) or
(convert_bools and _str_is_bool(data))):
# It's null, number or boolean
rv += str(data).lower()
elif isinstance(data, basestring):
# It's a string
# A leading atom_value_indicator marks an Erlang atom: the marker is
# stripped and the remainder is emitted unquoted.
atom_len = len(atom_value_indicator)
if (
len(data) > atom_len and
data[0:atom_len] == atom_value_indicator):
# Atom configuration value
rv += data[atom_len:]
else:
rv += '"%s"' % _escape(data)
else:
# It's a list
rv += "["
for val in data:
# Scalars start on their own indented line; dicts embed their
# own leading newline.
if (
isinstance(val, basestring) or
_is_num(val)):
rv += "\n%s" % (indent*level)
rv += encode_erlang(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1)
if data[-1] == val:
# Last item of the loop
rv += "\n"
else:
rv += ","
if len(data) > 0:
rv += "%s]" % (indent * (level-1))
else:
rv += "]"
# Terminate the top-level Erlang term with a period.
if level == 0:
rv += ".\n"
return rv
def encode_haproxy(data, indent=" "):
    """Convert Python data structure to HAProxy format."""
    out = ""
    is_first = True
    # Whether the previously emitted item was a comment; comments stick to
    # the section that follows them, so no blank separator is inserted.
    after_comment = False
    for entry in data:
        if is_first:
            is_first = False
        elif after_comment:
            after_comment = False
        else:
            # Blank line between sections.
            out += "\n"
        if isinstance(entry, dict):
            # A section: the single key is the header, its value the params.
            # (Python 2 dict indexing, consistent with the rest of this module.)
            out += "%s\n" % entry.keys()[0]
            for line in entry.values()[0]:
                out += "%s%s\n" % (indent, line)
        else:
            # A bare string is a comment attached to the next section.
            out += "%s\n" % entry
            after_comment = True
    return out
def encode_ini(
        data, comment="#", delimiter=" = ", quote="", section_is_comment=False,
        ucase_prop=False):
    """Convert Python data structure to INI format.

    :param data: dict; non-dict values become top-level properties, dict
        values become sections whose items are encoded recursively
    :param comment: comment prefix used when section_is_comment is set
    :param delimiter: string placed between property name and value
    :param quote: optional quote character wrapped around each value
    :param section_is_comment: emit "<comment> <section>" instead of "[section]"
    :param ucase_prop: upper-case property names
    """
    # Return value
    rv = ""

    # First process all standalone properties.
    # (.items() replaces the Python-2-only .iteritems(); sorted() output is
    # identical under both interpreters.)
    for prop, val in sorted(data.items()):
        if ucase_prop:
            prop = prop.upper()
        vals = []
        if isinstance(val, list):
            vals = val
        elif not isinstance(val, dict):
            vals = [val]
        for item in vals:
            # None suppresses the property entirely.
            if item is not None:
                rv += "%s%s%s%s%s\n" % (
                    prop, delimiter, quote, _escape(item, quote), quote)

    # Then process all sections
    for section, props in sorted(data.items()):
        if isinstance(props, dict):
            if rv != "":
                rv += "\n"
            if section_is_comment:
                rv += "%s %s\n" % (comment, section)
            else:
                rv += "[%s]\n" % (section)
            # Let process all section options as standalone properties.
            # Fix: forward `comment` too - it was previously dropped, so a
            # custom comment character reverted to '#' inside sections.
            rv += encode_ini(
                props,
                comment=comment,
                delimiter=delimiter,
                quote=quote,
                section_is_comment=section_is_comment,
                ucase_prop=ucase_prop)
    return rv
def encode_json(
data, convert_bools=False, convert_nums=False, indent=" ", level=0):
"""Convert Python data structure to JSON format."""
# Return value
rv = ""
if isinstance(data, dict):
# It's a dict
# Keys are emitted sorted (Python 2 iteritems()); the last pair gets
# no trailing comma.
rv += "{"
if len(data) > 0:
rv += "\n"
items = sorted(data.iteritems())
for key, val in items:
rv += '%s"%s": ' % (indent * (level+1), key)
rv += encode_json(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1)
# Last item of the loop
if items[-1] == (key, val):
rv += "\n"
else:
rv += ",\n"
if len(data) > 0:
rv += "%s}" % (indent * level)
else:
rv += "}"
# The top-level document ends with a newline.
if level == 0:
rv += "\n"
elif (
data == "null" or
_is_num(data) or
(convert_nums and _str_is_num(data)) or
(convert_bools and _str_is_bool(data))):
# It's a number, null or boolean
# (bools are ints, so _is_num covers True/False as well.)
rv += str(data).lower()
elif isinstance(data, basestring):
# It's a string
# Escape backslashes/quotes first, then control characters.
rv += '"%s"' % _escape(_escape(data), format='control')
else:
# It's a list
rv += "["
if len(data) > 0:
rv += "\n"
for val in data:
rv += indent * (level+1)
rv += encode_json(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1)
# Last item of the loop
if data[-1] == val:
rv += "\n"
else:
rv += ",\n"
if len(data) > 0:
rv += "%s]" % (indent * level)
else:
rv += "]"
return rv
def encode_logstash(
data, convert_bools=False, convert_nums=False, indent=" ", level=0,
prevtype="", section_prefix=":"):
"""Convert Python data structure to Logstash format."""
# Return value
rv = ""
# prevtype records what kind of construct the caller was rendering
# ('block', 'value', 'value_hash' or 'array') and drives brace placement.
# NOTE(review): section_prefix is not forwarded to recursive calls, so
# nested lookups fall back to the default ":" - confirm upstream.
if isinstance(data, dict):
# The item is a dict
if prevtype in ('value', 'value_hash', 'array'):
rv += "{\n"
items = sorted(data.iteritems())
for key, val in items:
# A key whose first character is section_prefix marks a block
# ("<name> { ... }"); the prefix character is stripped.
if key[0] == section_prefix:
rv += "%s%s {\n" % (indent * level, key[1:])
rv += encode_logstash(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
prevtype='block')
# Last item of the loop
if items[-1] == (key, val):
if (
isinstance(val, basestring) or
_is_num(val) or
isinstance(val, bool) or (
isinstance(val, dict) and
val.keys()[0][0] != section_prefix)):
rv += "\n%s}\n" % (indent * level)
else:
rv += "%s}\n" % (indent * level)
else:
# Ordinary "key => value" assignment; hash keys are quoted.
rv += indent * level
if prevtype == 'value_hash':
rv += '"%s" => ' % key
else:
rv += "%s => " % key
rv += encode_logstash(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
prevtype=(
'value_hash' if isinstance(val, dict) else 'value'))
if (
items[-1] != (key, val) and (
isinstance(val, basestring) or
_is_num(val) or
isinstance(val, bool))):
rv += "\n"
if prevtype in ('value', 'value_hash', 'array'):
rv += "\n%s}" % (indent * (level-1))
if prevtype in ('value', 'value_array'):
rv += "\n"
elif (
_is_num(data) or
isinstance(data, bool) or
(convert_nums and _str_is_num(data)) or
(convert_bools and _str_is_bool(data))):
# It's number or boolean
rv += str(data).lower()
elif isinstance(data, basestring):
# It's a string
rv += '"%s"' % _escape(data)
else:
# It's a list
for val in data:
if isinstance(val, dict) and val.keys()[0][0] == section_prefix:
# Value is a block
rv += encode_logstash(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level,
prevtype='block')
else:
# First item of the loop
if data[0] == val:
rv += "[\n"
rv += indent * level
rv += encode_logstash(
val,
convert_bools=convert_bools,
convert_nums=convert_nums,
indent=indent,
level=level+1,
prevtype='array')
# Last item of the loop
if data[-1] == val:
rv += "\n%s]" % (indent * (level-1))
else:
rv += ",\n"
return rv
def encode_nginx(data, indent=" ", level=0, block_semicolon=False):
    """Convert Python data structure to Nginx format.

    *data* is a list whose items are either strings (plain directive lines)
    or single-key dicts ``{section_name: [items...]}`` rendered as nested
    ``name { ... }`` blocks.  *block_semicolon* appends ``;`` after closing
    braces for Nginx modules that require it.
    """
    # Return value
    rv = ""
    # Indicates the item type [section|line]; used to insert a blank line
    # between a section and whatever follows it
    item_type = ""

    for item in data:
        if isinstance(item, dict):
            # Section: render the single key as the block name and encode
            # its value one indentation level deeper
            if item_type in ('section', 'line'):
                rv += "\n"
            rv += "%s%s {\n" % (level*indent, item.keys()[0])
            rv += encode_nginx(
                item.values()[0],
                level=level+1,
                block_semicolon=block_semicolon)
            rv += "%s}%s\n" % (level*indent, ';' if block_semicolon else '')
            item_type = 'section'
        elif isinstance(item, basestring):
            # Normal line
            if item_type == 'section':
                rv += "\n"
            item_type = 'line'
            rv += "%s%s" % (level*indent, item)
            # Do not finish comments (lines starting with "# ") with semicolon
            if item.startswith("# "):
                rv += "\n"
            else:
                rv += ";\n"
        else:
            raise errors.AnsibleFilterError(
                "Unexpected data type: %s" % (type(item)))

    return rv
def encode_pam(
        data, print_label=False, separate_types=True, separator=" "):
    """Convert Python data structure to PAM format.

    *data* maps a rule label to a rule dict with keys ``type``, ``control``
    and ``path``, plus optional ``service``, ``silent`` and ``args``.
    Rules are emitted sorted by label.  ``control`` may be a list of
    single-item dicts, rendered as a bracketed ``[key=value ...]`` group.
    """
    # Return value
    rv = ""
    # Remember previous type so a newline can be inserted on type change
    prev_type = None

    for label, rule in sorted(data.iteritems()):
        if separate_types:
            # Add an extra newline when the rule type changes
            if prev_type is not None and prev_type != rule['type']:
                rv += "\n"
            prev_type = rule['type']
        if print_label:
            # Emit the rule label as a comment above the rule
            rv += "# %s\n" % label
        if 'service' in rule:
            rv += "%s%s" % (rule['service'], separator)
        if 'silent' in rule and rule['silent']:
            # "-" prefix marks the rule as silent (per the 'silent' flag)
            rv += '-'
        rv += "%s%s" % (rule['type'], separator)
        if isinstance(rule['control'], list):
            # Compound control: list of {key: value} dicts -> [key=value ...]
            rv += "[%s]%s" % (
                " ".join(
                    map(
                        lambda k: "=".join(map(str, k)),
                        map(lambda x: x.items()[0], rule['control']))),
                separator)
        else:
            rv += "%s%s" % (rule['control'], separator)
        rv += rule['path']
        if 'args' in rule and rule['args']:
            rv += separator
            for i, arg in enumerate(rule['args']):
                if i > 0:
                    rv += ' '
                if isinstance(arg, dict):
                    # {key: value} argument -> key=value
                    rv += "=".join(map(str, arg.items()[0]))
                else:
                    rv += arg
        rv += "\n"

    return rv
def encode_toml(
        data, convert_bools=False, convert_nums=False, first=True,
        indent=" ", level=0, prevkey="", prevtype="", quote='"'):
    """Convert Python data structure to TOML format.

    Dicts become ``key = value`` lines followed by ``[table]`` sections;
    lists of dicts become ``[[array-of-tables]]`` sections; scalars are
    rendered inline.  *prevkey* carries the dotted table path built so far
    and *prevtype* tells a scalar whether it is inside an inline list
    (which suppresses the trailing newline).
    """
    # Return value
    rv = ""

    if isinstance(data, dict):
        # It's a dict
        # First process all standalone strings, numbers, booleans and lists
        # (so they appear before any [table] header at this level)
        for key, val in sorted(data.iteritems()):
            if (
                    isinstance(val, basestring) or
                    _is_num(val) or
                    isinstance(val, bool) or (
                        isinstance(val, list) and
                        len(val) > 0 and
                        not isinstance(val[0], dict))):
                # The value is string, number, boolean or list
                rv += "%s%s = " % (indent * level, key)
                rv += encode_toml(
                    val,
                    convert_bools=convert_bools,
                    convert_nums=convert_nums,
                    first=first,
                    indent=indent,
                    level=level,
                    prevkey=prevkey)
                first = False
        # Then process all data structures (tables and arrays of tables)
        for key, val in sorted(data.iteritems()):
            # NB: "or" binds looser than "and", so this matches dicts, or
            # lists whose first element is a dict
            if (
                    isinstance(val, dict) or
                    isinstance(val, list) and isinstance(val[0], dict)):
                # Values for the next recursive call
                tmp_prevkey = prevkey
                tmp_level = level
                if isinstance(val, dict):
                    # The val is a dict -> [table]
                    if prevkey != "" and prevkey != key:
                        tmp_level += 1
                    # Quote keys containing characters outside the bare-key set
                    if re.match(r'^[a-zA-Z0-9_-]+$', key) is None:
                        key = '"%s"' % key
                    if prevkey == "":
                        tmp_prevkey = key
                    else:
                        tmp_prevkey = "%s.%s" % (prevkey, key)
                    # Blank line between tables, but not before the first item
                    if not first:
                        rv += "\n"
                    rv += "%s[%s]\n" % (indent * tmp_level, tmp_prevkey)
                elif isinstance(val[0], dict):
                    # The val is an array of tables
                    if re.match(r'^[a-zA-Z0-9_-]+$', key) is None:
                        key = '"%s"' % key
                    if prevkey == "":
                        tmp_prevkey = key
                    else:
                        tmp_prevkey = "%s.%s" % (prevkey, key)
                    tmp_level += 1
                rv += encode_toml(
                    val,
                    convert_bools=convert_bools,
                    convert_nums=convert_nums,
                    first=first,
                    indent=indent,
                    level=tmp_level,
                    prevkey=tmp_prevkey)
                first = False
    elif (
            _is_num(data) or
            isinstance(data, bool) or
            (convert_nums and _str_is_num(data)) or
            (convert_bools and _str_is_bool(data))):
        # It's number or boolean; lower() maps True/False -> true/false
        rv += str(data).lower()
        if prevtype != 'list':
            rv += "\n"
    elif isinstance(data, basestring):
        # It's a string
        rv += "%s%s%s" % (
            quote, _escape(data, quote), quote)
        if prevtype != 'list':
            rv += "\n"
    else:
        # It's a list
        if isinstance(data[0], dict):
            # List of dicts -> one [[prevkey]] header per element
            for d in data:
                rv += "\n%s[[%s]]\n" % (indent * level, prevkey)
                rv += encode_toml(
                    d,
                    convert_bools=convert_bools,
                    convert_nums=convert_nums,
                    first=first,
                    indent=indent,
                    level=level)
        else:
            # Plain list -> inline TOML array
            rv += "["
            for d in data:
                rv += encode_toml(
                    d,
                    convert_bools=convert_bools,
                    convert_nums=convert_nums,
                    first=first,
                    indent=indent,
                    level=level,
                    prevtype='list')
                # Last item of the loop gets no trailing separator
                if data[-1] != d:
                    rv += ", "
            rv += "]"
            if prevtype != 'list':
                rv += "\n"
    return rv
def encode_xml(
        data, attribute_sign="^", escape_xml=True, indent=" ", level=0):
    """Convert Python data structure to XML format.

    Dicts with a key starting with *attribute_sign* become attributes of the
    enclosing element; other single-pair dicts become elements; lists hold an
    element's children; plain strings become (optionally escaped) text.
    """
    # Return value
    rv = ""

    if isinstance(data, list):
        # Process anything that's not an attribute (attributes are handled
        # by the parent element below)
        for item in data:
            if (
                    not (
                        isinstance(item, dict) and
                        item.keys()[0].startswith(attribute_sign))):
                rv += encode_xml(
                    item,
                    attribute_sign=attribute_sign,
                    indent=indent,
                    level=level,
                    escape_xml=escape_xml)
    elif isinstance(data, dict):
        # It's either an attribute or an element (single key/value pair)
        key, val = data.items()[0]
        if key.startswith(attribute_sign):
            # Process attribute: strip the sign and emit key="value"
            rv += ' %s="%s"' % (key[1:], _escape(val))
        else:
            # Process element
            rv = '%s<%s' % (level*indent, key)
            # Check if there are any attributes among the children
            if isinstance(val, list):
                num_attrs = 0
                for item in val:
                    if (
                            isinstance(item, dict) and
                            item.keys()[0].startswith(attribute_sign)):
                        num_attrs += 1
                        rv += encode_xml(
                            item,
                            attribute_sign=attribute_sign,
                            indent=indent,
                            level=level)
            # NB: num_attrs is only read when val is a list, so the
            # short-circuit keeps this safe for non-list values
            if val == '' or (isinstance(val, list) and num_attrs == len(val)):
                # Close the element as empty
                rv += " />\n"
            else:
                # Close the element as normal
                rv += ">"
                # Check if the value is text (text content keeps the closing
                # tag on the same line; child elements get newlines)
                val_not_text = False
                if isinstance(val, list):
                    # Check if it contains only attributes and a text value
                    for item in val:
                        if (
                                isinstance(item, dict) and
                                not item.keys()[0].startswith(attribute_sign)):
                            val_not_text = True
                            break
                elif isinstance(val, dict):
                    val_not_text = True
                if val_not_text:
                    rv += "\n"
                # Process inner content of the element
                rv += encode_xml(
                    val,
                    attribute_sign=attribute_sign,
                    indent=indent,
                    level=level+1,
                    escape_xml=escape_xml)
                if val_not_text:
                    rv += level*indent
                rv += "</%s>\n" % key
    else:
        # It's a string (text content); escape unless escape_xml is disabled
        rv += "%s" % _escape(data, format=('xml' if escape_xml else None))

    return rv
def encode_yaml(
        data, convert_bools=False, convert_nums=False, indent=" ", level=0,
        quote='"', skip_indent=False):
    """Convert Python data structure to YAML format.

    *skip_indent* suppresses indentation for the first key of a mapping that
    follows a ``- `` list marker, so the key sits on the same line.
    """
    # Return value
    rv = ""

    if isinstance(data, dict):
        # It's a dictionary
        if len(data.keys()) == 0:
            rv += "{}\n"
        else:
            for i, (key, val) in enumerate(sorted(data.iteritems())):
                # Skip indentation only for the first pair
                rv += "%s%s:" % ("" if i == 0 and skip_indent else level*indent, key)
                if isinstance(val, dict) and len(val.keys()) == 0:
                    rv += " {}\n"
                else:
                    # Nested structures start on the next line; scalars stay
                    # on the same line after a single space
                    if (
                            isinstance(val, dict) or (
                                isinstance(val, list) and
                                len(val) != 0)):
                        rv += "\n"
                    else:
                        rv += " "
                    rv += encode_yaml(
                        val,
                        convert_bools=convert_bools,
                        convert_nums=convert_nums,
                        indent=indent,
                        level=level+1,
                        quote=quote)
    elif isinstance(data, list):
        # It's a list
        if len(data) == 0:
            rv += "[]\n"
        else:
            for item in data:
                list_indent = "%s- " % (level*indent)
                rv += "%s%s" % (list_indent, encode_yaml(
                    item,
                    convert_bools=convert_bools,
                    convert_nums=convert_nums,
                    indent=indent,
                    level=level+1,
                    quote=quote,
                    skip_indent=True))
    elif (
            data == "null" or
            isinstance(data, bool) or
            (convert_bools and _str_is_bool(data))):
        # It's a boolean (the literal string "null" is also emitted bare,
        # lowercased, like booleans)
        rv += "%s\n" % str(data).lower()
    elif (
            _is_num(data) or
            (convert_nums and _str_is_num(data))):
        # It's a number
        rv += "%s\n" % str(data)
    else:
        # It's a string; always quoted and escaped
        rv += "%s%s%s\n" % (quote, _escape(data, quote), quote)
    return rv
def __eval_replace(match):
    """Evaluate the real value of the variable specified as a string.

    Rewrites the matched template expression to start with the module-global
    ``__item`` (set by template_replace) followed by any accessor text from
    the match (group 2 onward), then evaluates it.
    """
    ret = '__item'
    ret += ''.join(match.groups()[1:])
    # Try to evaluate the value of the special string
    # NOTE(review): eval() executes accessor text taken from template data;
    # do not feed untrusted templates through this filter.
    try:
        ret = eval(ret)
    except Exception:
        # Return empty string if something went wrong
        ret = ''
    return str(ret)
def template_replace(data, replacement):
    """Replace special template decorated variable with its real value.

    Recursively walks *data* and substitutes every ``{[{ name... }]}``
    occurrence inside strings with a value evaluated from *replacement*.
    """
    # Make the replacement variable visible for the __eval_replace function
    global __item
    __item = replacement
    # Clone the data to keep the original untouched (shallow copy; nested
    # containers are rebuilt by the recursive calls below)
    local_data = copy(data)
    # Walk through the data structure and try to replace all special strings
    if isinstance(local_data, list):
        local_data = map(
            lambda x: template_replace(x, replacement), local_data)
    elif isinstance(local_data, dict):
        for key, val in local_data.iteritems():
            local_data[key] = template_replace(val, replacement)
    elif isinstance(local_data, basestring):
        # Replace the special string by its evaluated value
        p = re.compile(r'\{\[\{\s*(\w+)([^}\s]+|)\s*\}\]\}')
        local_data = p.sub(__eval_replace, local_data)
    return local_data
class FilterModule(object):
    """Expose the config-encoder functions as Ansible Jinja2 filters."""

    def filters(self):
        """Return the mapping of filter names to encoder callables."""
        exported = dict(
            encode_apache=encode_apache,
            encode_erlang=encode_erlang,
            encode_haproxy=encode_haproxy,
            encode_ini=encode_ini,
            encode_json=encode_json,
            encode_logstash=encode_logstash,
            encode_nginx=encode_nginx,
            encode_pam=encode_pam,
            encode_toml=encode_toml,
            encode_xml=encode_xml,
            encode_yaml=encode_yaml,
            template_replace=template_replace,
        )
        return exported
"""
Xylem - Phylogenetic Pipelines with MPI
Delete.py contains tasks that delete taxa from datasets.
Copyright (C) 2015 Pranjal Vachaspati
pr@nj.al
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xylem.Task
import cPickle
import numpy as np
import subprocess
import StringIO
import uuid
import dendropy
import sys
class DeleteTaxaUniform(xylem.Task):
    """Delete the same ndelete taxa, chosen uniformly at random from the
    shared taxon namespace, from the species tree, every gene tree and
    every alignment (whichever of those inputs are enabled)."""

    def setup(self, ndelete, gtrees=True, alignments=True, stree=True, seed=None, *args, **kwargs):
        # ndelete: how many taxa to remove; the boolean flags select which
        # inputs/outputs this task operates on; seed makes the draw repeatable
        self.ndelete = ndelete
        self.gtrees = gtrees
        self.seqs = alignments
        self.stree = stree
        self.seed = seed

    def inputs(self):
        # Input spec depends on which data kinds were enabled in setup()
        inp = []
        if self.gtrees:
            inp.append(("genetrees", dendropy.TreeList))
        if self.seqs:
            inp.append(("alignments", (dendropy.DnaCharacterMatrix,)))
        if self.stree:
            inp.append(("speciestree", dendropy.Tree))
        return inp

    def outputs(self):
        # Outputs mirror the inputs: same kinds, taxa removed
        return self.inputs()

    def desc(self):
        return str(self.ndelete)

    def run(self):
        # Clone every input and migrate everything onto one shared taxon
        # namespace (tn) so a single deletion list applies to all of them.
        tn = None
        if self.seqs:
            dna = [i.clone() for i in self.input_data["alignments"]]
            tn = dna[0].taxon_namespace
        if self.gtrees:
            gt = self.input_data["genetrees"].clone()
            if tn:
                gt.migrate_taxon_namespace(tn)
            else:
                tn = gt.taxon_namespace
        if self.stree:
            st = self.input_data["speciestree"].clone()
            if tn:
                st.migrate_taxon_namespace(tn)
            else:
                tn = st.taxon_namespace
        # NOTE(review): prefer "is not None" over "!= None"
        if self.seed != None:
            print "using seed", self.seed
            np.random.seed(self.seed)
        # Sort by label before sampling so a fixed seed gives a
        # reproducible choice regardless of namespace iteration order
        deletion_list = np.random.choice(sorted(list(tn), key=lambda x:x.label), size=self.ndelete, replace=False)
        print "deleting", [i.label for i in deletion_list]
        self.result = {}
        if self.gtrees:
            for g in gt:
                g.prune_taxa(deletion_list)
            self.result['genetrees'] = gt
        if self.seqs:
            for seq in dna:
                seq.discard_sequences(deletion_list)
            self.result['alignments'] = dna
        if self.stree:
            st.prune_taxa(deletion_list)
            self.result['speciestree'] = st
        # Drop the deleted taxa from the shared namespace as well
        for t in deletion_list:
            tn.remove_taxon(t)
        return self.result
class DeleteTaxaRandom(xylem.Task):
def setup(self, ndelete, sigma=0, *args, **kwargs):
self.ndelete = ndelete
self.sigma = sigma
def desc(self):
return str(self.ndelete) + ' +/- ' + str(self.sigma)
def inputs(self):
return [("genetrees", dendropy.TreeList), ("alignments", (dendropy.DnaCharacterMatrix,))]
def outputs(self):
return [("genetrees", dendropy.TreeList), ("alignments", (dendropy.DnaCharacterMatrix,))]
def run(self):
debug = False
if '--debug' in sys.argv:
debug = True
dna = [i.clone() for i in self.input_data["alignments"]]
gt = self.input_data["genetrees"].clone()
gt.migrate_taxon_namespace(dna[0].taxon_namespace)
for seq, g in zip(dna, gt):
if debug:
print g
taxon_list = [i.taxon for i in g.leaf_nodes()]
nd = min(self.ndelete + np.random.randn() * self.sigma, len(taxon_list) - 4)
if debug:
print nd
print taxon_list
deletion_list = np.random.choice(taxon_list, size=nd, replace=False)
if debug:
print deletion_list
g.prune_taxa(deletion_list)
if debug:
print g
print
seq.discard_sequences(deletion_list)
self.result = {"alignments":dna, "genetrees":gt}
return self.result
class DeleteTaxaRandomList(xylem.Task):
    """Delete a per-gene number of taxa: ndelete is a sequence giving, for
    each gene tree/alignment pair in order, how many uniformly chosen taxa
    to remove."""

    def setup(self, ndelete, *args, **kwargs):
        # ndelete: sequence of deletion counts, one per gene
        self.ndelete = ndelete

    def inputs(self):
        return [("genetrees", dendropy.TreeList), ("alignments", (dendropy.DnaCharacterMatrix,))]

    def outputs(self):
        return [("genetrees", dendropy.TreeList), ("alignments", (dendropy.DnaCharacterMatrix,))]

    def run(self):
        debug = False
        if '--debug' in sys.argv:
            debug = True
        # Clone the inputs so the upstream task's data stays untouched
        dna = [i.clone() for i in self.input_data["alignments"]]
        gt = self.input_data["genetrees"].clone()
        gt.migrate_taxon_namespace(dna[0].taxon_namespace)
        # zip pairs each alignment/tree with its own deletion count
        for seq, g, nd in zip(dna, gt, self.ndelete):
            if debug:
                print g
            taxon_list = [i.taxon for i in g.leaf_nodes()]
            deletion_list = np.random.choice(taxon_list, size=nd, replace=False)
            if debug:
                print deletion_list
            # Remove the chosen taxa from both the tree and the alignment
            g.prune_taxa(deletion_list)
            if debug:
                print g
                print
            seq.discard_sequences(deletion_list)
        self.result = {"alignments":dna, "genetrees":gt}
        return self.result
class LimitSeqLength(xylem.Task):
    """Truncate every sequence in every alignment to at most maxlen sites."""

    def setup(self, maxlen, *args, **kwargs):
        # Maximum number of characters kept per sequence
        self.maxlen = maxlen

    def inputs(self):
        return [("alignments", (dendropy.DnaCharacterMatrix,))]

    def outputs(self):
        return [("alignments", (dendropy.DnaCharacterMatrix,))]

    def desc(self):
        return str(self.maxlen)

    def run(self):
        # Work on clones so the upstream task's matrices stay intact
        truncated = [matrix.clone() for matrix in self.input_data["alignments"]]
        for matrix in truncated:
            for taxon in matrix:
                # Chop everything past the cutoff in place
                del matrix[taxon][self.maxlen:]
        self.result = {"alignments": truncated}
        return self.result
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop LGDT_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2'
wrbase tsg, t2
wrlimit tsg, t1
};
def macroop LGDT_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2'
wrbase tsg, t2
wrlimit tsg, t1
};
#
# These versions are for when the original data size was 16 bits. The base is
# still 32 bits, but the top byte is zeroed before being used.
#
def macroop LGDT_16_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase tsg, t2
wrlimit tsg, t1
};
def macroop LGDT_16_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase tsg, t2
wrlimit tsg, t1
};
def macroop LIDT_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2'
wrbase idtr, t2
wrlimit idtr, t1
};
def macroop LIDT_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2'
wrbase idtr, t2
wrlimit idtr, t1
};
#
# These versions are for when the original data size was 16 bits. The base is
# still 32 bits, but the top byte is zeroed before being used.
#
def macroop LIDT_16_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase idtr, t2
wrlimit idtr, t1
};
def macroop LIDT_16_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase idtr, t2
wrlimit idtr, t1
};
def macroop LTR_R
{
.serializing
chks reg, t0, TRCheck
limm t4, 0, dataSize=8
srli t4, reg, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks reg, t1, TSSCheck
wrdh t3, t1, t2
wrdl tr, t1, reg
wrbase tr, t3, dataSize=8
limm t5, (1 << 9)
or t1, t1, t5
st t1, tsg, [8, t4, t0], dataSize=8
};
def macroop LTR_M
{
.serializing
ld t5, seg, sib, disp, dataSize=2
chks t5, t0, TRCheck
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, TSSCheck
wrdh t3, t1, t2
wrdl tr, t1, t5
wrbase tr, t3, dataSize=8
limm t5, (1 << 9)
or t1, t1, t5
st t1, tsg, [8, t4, t0], dataSize=8
};
def macroop LTR_P
{
.serializing
rdip t7
ld t5, seg, riprel, disp, dataSize=2
chks t5, t0, TRCheck
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, TSSCheck
wrdh t3, t1, t2
wrdl tr, t1, t5
wrbase tr, t3, dataSize=8
limm t5, (1 << 9)
or t1, t1, t5
st t1, tsg, [8, t4, t0], dataSize=8
};
def macroop LLDT_R
{
.serializing
chks reg, t0, InGDTCheck, flags=(EZF,)
br label("end"), flags=(CEZF,)
limm t4, 0, dataSize=8
srli t4, reg, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks reg, t1, LDTCheck
wrdh t3, t1, t2
wrdl tsl, t1, reg
wrbase tsl, t3, dataSize=8
end:
fault "NoFault"
};
def macroop LLDT_M
{
.serializing
ld t5, seg, sib, disp, dataSize=2
chks t5, t0, InGDTCheck, flags=(EZF,)
br label("end"), flags=(CEZF,)
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, LDTCheck
wrdh t3, t1, t2
wrdl tsl, t1, t5
wrbase tsl, t3, dataSize=8
end:
fault "NoFault"
};
def macroop LLDT_P
{
.serializing
rdip t7
ld t5, seg, riprel, disp, dataSize=2
chks t5, t0, InGDTCheck, flags=(EZF,)
br label("end"), flags=(CEZF,)
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, LDTCheck
wrdh t3, t1, t2
wrdl tsl, t1, t5
wrbase tsl, t3, dataSize=8
end:
fault "NoFault"
};
def macroop SWAPGS
{
rdval t1, kernel_gs_base, dataSize=8
rdbase t2, gs, dataSize=8
wrbase gs, t1, dataSize=8
wrval kernel_gs_base, t2, dataSize=8
};
''' | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package netutil implements network-related utility functions.
package netutil
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/system_parameter.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/system_parameter.proto',
package='google.api',
syntax='proto3',
serialized_pb=_b('\n!google/api/system_parameter.proto\x12\ngoogle.api\"B\n\x10SystemParameters\x12.\n\x05rules\x18\x01 \x03(\x0b\x32\x1f.google.api.SystemParameterRule\"X\n\x13SystemParameterRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12/\n\nparameters\x18\x02 \x03(\x0b\x32\x1b.google.api.SystemParameter\"Q\n\x0fSystemParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bhttp_header\x18\x02 \x01(\t\x12\x1b\n\x13url_query_parameter\x18\x03 \x01(\tB/\n\x0e\x63om.google.apiB\x14SystemParameterProtoP\x01\xa2\x02\x04GAPIb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SYSTEMPARAMETERS = _descriptor.Descriptor(
name='SystemParameters',
full_name='google.api.SystemParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.api.SystemParameters.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=49,
serialized_end=115,
)
_SYSTEMPARAMETERRULE = _descriptor.Descriptor(
name='SystemParameterRule',
full_name='google.api.SystemParameterRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='selector', full_name='google.api.SystemParameterRule.selector', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameters', full_name='google.api.SystemParameterRule.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=205,
)
_SYSTEMPARAMETER = _descriptor.Descriptor(
name='SystemParameter',
full_name='google.api.SystemParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.api.SystemParameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='http_header', full_name='google.api.SystemParameter.http_header', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='url_query_parameter', full_name='google.api.SystemParameter.url_query_parameter', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=207,
serialized_end=288,
)
_SYSTEMPARAMETERS.fields_by_name['rules'].message_type = _SYSTEMPARAMETERRULE
_SYSTEMPARAMETERRULE.fields_by_name['parameters'].message_type = _SYSTEMPARAMETER
DESCRIPTOR.message_types_by_name['SystemParameters'] = _SYSTEMPARAMETERS
DESCRIPTOR.message_types_by_name['SystemParameterRule'] = _SYSTEMPARAMETERRULE
DESCRIPTOR.message_types_by_name['SystemParameter'] = _SYSTEMPARAMETER
SystemParameters = _reflection.GeneratedProtocolMessageType('SystemParameters', (_message.Message,), dict(
DESCRIPTOR = _SYSTEMPARAMETERS,
__module__ = 'google.api.system_parameter_pb2'
# @@protoc_insertion_point(class_scope:google.api.SystemParameters)
))
_sym_db.RegisterMessage(SystemParameters)
SystemParameterRule = _reflection.GeneratedProtocolMessageType('SystemParameterRule', (_message.Message,), dict(
DESCRIPTOR = _SYSTEMPARAMETERRULE,
__module__ = 'google.api.system_parameter_pb2'
# @@protoc_insertion_point(class_scope:google.api.SystemParameterRule)
))
_sym_db.RegisterMessage(SystemParameterRule)
SystemParameter = _reflection.GeneratedProtocolMessageType('SystemParameter', (_message.Message,), dict(
DESCRIPTOR = _SYSTEMPARAMETER,
__module__ = 'google.api.system_parameter_pb2'
# @@protoc_insertion_point(class_scope:google.api.SystemParameter)
))
_sym_db.RegisterMessage(SystemParameter)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\024SystemParameterProtoP\001\242\002\004GAPI'))
# @@protoc_insertion_point(module_scope) | unknown | codeparrot/codeparrot-clean | ||
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation tooling
# (metadata schema version 1.1).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = """
---
module: script
version_added: "0.9"
short_description: Runs a local script on a remote node after transferring it
description:
- "The C(script) module takes the script name followed by a list of
space-delimited arguments. "
- "The local script at path will be transferred to the remote node and then executed. "
- "The given script will be processed through the shell environment on the remote node. "
- "This module does not require python on the remote system, much like
the M(raw) module. "
- This module is also supported for Windows targets.
options:
free_form:
description:
- path to the local script file followed by optional arguments. There is no parameter actually named 'free form'; see the examples!
required: true
default: null
aliases: []
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
version_added: "1.5"
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
required: no
default: null
version_added: "1.5"
chdir:
description:
- cd into this directory on the remote node before running the script
version_added: "2.4"
required: false
default: null
notes:
- It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points!
- The ssh connection plugin will force pseudo-tty allocation via -tt when scripts are executed. pseudo-ttys do not have a stderr channel and all
stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
- This module is also supported for Windows targets.
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- decrypt
"""
# Usage examples shown by ansible-doc; the string body is parsed at runtime,
# so only this surrounding comment is free-form documentation.
EXAMPLES = '''
# Example from Ansible Playbooks
- script: /some/local/script.sh --some-arguments 1234

# Run a script that creates a file, but only if the file is not yet created
- script: /some/local/create_file.sh --some-arguments 1234
  args:
    creates: /the/created/file.txt

# Run a script that removes a file, but only if the file is not yet removed
- script: /some/local/remove_file.sh --some-arguments 1234
  args:
    removes: /the/removed/file.txt
'''
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef ROTATION_CONVERTERS_HPP
#define ROTATION_CONVERTERS_HPP

#include <opencv2/core.hpp>

namespace calib
{
// Unit flags for the argType parameter of the converters below.
#define CALIB_RADIANS 0
#define CALIB_DEGREES 1

    // Convert the rotation in src to Euler angles written to dst.
    // argType selects the angle unit (CALIB_RADIANS or CALIB_DEGREES).
    // NOTE(review): the exact input representation (matrix vs vector) is
    // defined in the implementation file -- confirm there before relying on it.
    void Euler(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);

    // Convert a Rodrigues rotation vector (src) to Euler angles (dst).
    void RodriguesToEuler(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);

    // Convert Euler angles (src) to a Rodrigues rotation vector (dst).
    void EulerToRodrigues(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);
}
#endif
"""pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.
"""
import pytest
import textwrap
import difflib
import re
import sys
import contextlib
import platform
import gc
# Normalization patterns for Python 2 output: u'...' unicode literal
# prefixes, trailing L on long integers, and hexadecimal ids/addresses.
_unicode_marker = re.compile(r'u(\'[^\']*\')')
_long_marker = re.compile(r'([0-9])L')
_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip('\n').rstrip())
def _split_and_sort(s):
    """Split a triple-quoted string into sorted lines, for comparing output
    whose line order is not guaranteed."""
    normalized = textwrap.dedent(s.lstrip('\n').rstrip())
    return sorted(normalized.splitlines())
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)]
class Output(object):
    """Captured-output wrapper with post-processing and fuzzy comparison.

    Equality ignores any line prefixed with "###" (constructor/destructor
    chatter); on mismatch, a diff explanation is stored for pytest.
    """

    def __init__(self, string):
        self.string = string
        self.explanation = []

    def __str__(self):
        return self.string

    def __eq__(self, other):
        raw_lines = self.string.strip().splitlines()
        # Drop constructor/destructor chatter marked with "###"
        actual = [ln for ln in raw_lines if not ln.startswith("###")]
        expected = _strip_and_dedent(other).splitlines()
        if actual == expected:
            return True
        self.explanation = _make_explanation(actual, expected)
        return False
class Unordered(Output):
    """Output variant whose comparison ignores line ordering."""

    def __eq__(self, other):
        actual = _split_and_sort(self.string)
        expected = _split_and_sort(other)
        if actual == expected:
            return True
        self.explanation = _make_explanation(actual, expected)
        return False
class Capture(object):
    """Context manager capturing stdout/stderr through a pytest capture fixture."""

    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ""
        self.err = ""

    def __enter__(self):
        # Discard anything captured before the `with` block starts.
        self.capfd.readouterr()
        return self

    def __exit__(self, *args):
        self.out, self.err = self.capfd.readouterr()

    def __eq__(self, other):
        result = Output(self.out)
        if result == other:
            return True
        self.explanation = result.explanation
        return False

    def __str__(self):
        return self.out

    def __contains__(self, item):
        return item in self.out

    @property
    def unordered(self):
        """Captured stdout compared without regard to line order."""
        return Unordered(self.out)

    @property
    def stderr(self):
        """Captured stderr wrapped for comparison."""
        return Output(self.err)
@pytest.fixture
def capture(capsys):
    """Extended `capsys` with context manager and custom equality operators"""
    # Usage: `with capture: ...` then `assert capture == expected_output`.
    return Capture(capsys)
class SanitizedString(object):
    """Callable that sanitizes a value and compares it against expected text."""

    def __init__(self, sanitizer):
        self.sanitizer = sanitizer
        self.string = ""
        self.explanation = []

    def __call__(self, thing):
        # Sanitize eagerly and return self so the fixture reads naturally:
        # assert doc(m.func) == "..."
        self.string = self.sanitizer(thing)
        return self

    def __eq__(self, other):
        expected = _strip_and_dedent(other)
        if self.string == expected:
            return True
        self.explanation = _make_explanation(self.string.splitlines(),
                                             expected.splitlines())
        return False
def _sanitize_general(s):
    """Erase module-name and Python 2 vs 3 repr differences from *s*."""
    s = s.strip().replace("pybind11_tests.", "m.").replace("unicode", "str")
    s = _long_marker.sub(r"\1", s)
    return _unicode_marker.sub(r"\1", s)
def _sanitize_docstring(thing):
    """Return the sanitized docstring of *thing*."""
    return _sanitize_general(thing.__doc__)
@pytest.fixture
def doc():
    """Sanitize docstrings and add custom failure explanation"""
    # Usage: assert doc(m.func) == "expected docstring"
    return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
    """Return str(thing) sanitized, with hex addresses collapsed to 0."""
    return _hexadecimal.sub("0", _sanitize_general(str(thing)))
@pytest.fixture
def msg():
    """Sanitize messages and add custom failure explanation"""
    # Usage: assert msg(excinfo.value) == "expected message"
    return SanitizedString(_sanitize_message)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
    """Hook to insert custom failure explanation"""
    # The comparison helpers above stash a line-diff in `.explanation`;
    # surface it so pytest shows a readable diff instead of a huge repr.
    explanation = getattr(left, 'explanation', None)
    if explanation is not None:
        return explanation
class suppress(object):
    """Suppress the desired exception"""

    def __init__(self, exception):
        self._exception = exception

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        # Returning True tells Python to swallow a matching exception;
        # anything else (or a clean exit) propagates normally.
        return exc_type is not None and issubclass(exc_type, self._exception)
def gc_collect():
    """Run the garbage collector twice (needed when running
    reference counting tests with PyPy)."""
    for _ in range(2):
        gc.collect()
def pytest_configure():
    """Add import suppression and test requirements to `pytest` namespace.

    Each ``requires_*`` attribute is a ``skipif`` marker that skips a test
    when the corresponding optional dependency is missing; the
    ``unsupported_*`` attributes skip on PyPy / Python 2.
    """
    try:
        import numpy as np
    except ImportError:
        np = None
    try:
        import scipy
    except ImportError:
        scipy = None
    try:
        from pybind11_tests.eigen import have_eigen
    except ImportError:
        have_eigen = False
    pypy = platform.python_implementation() == "PyPy"

    skipif = pytest.mark.skipif
    pytest.suppress = suppress
    pytest.requires_numpy = skipif(not np, reason="numpy is not installed")
    # BUG FIX: this previously tested `not np`, so scipy-only tests were
    # skipped (or run) based on numpy's availability rather than scipy's.
    pytest.requires_scipy = skipif(not scipy, reason="scipy is not installed")
    pytest.requires_eigen_and_numpy = skipif(not have_eigen or not np,
                                             reason="eigen and/or numpy are not installed")
    pytest.requires_eigen_and_scipy = skipif(
        not have_eigen or not scipy, reason="eigen and/or scipy are not installed")
    pytest.unsupported_on_pypy = skipif(pypy, reason="unsupported on PyPy")
    pytest.unsupported_on_py2 = skipif(sys.version_info.major < 3,
                                       reason="unsupported on Python 2.x")
    pytest.gc_collect = gc_collect
def _test_import_pybind11():
    """Early diagnostic for test module initialization errors
    When there is an error during initialization, the first import will report the
    real error while all subsequent imports will report nonsense. This import test
    is done early (in the pytest configuration file, before any tests) in order to
    avoid the noise of having all tests fail with identical error messages.
    Any possible exception is caught here and reported manually *without* the stack
    trace. This further reduces noise since the trace would only show pytest internals
    which are not useful for debugging pybind11 module issues.
    """
    # noinspection PyBroadException
    try:
        import pybind11_tests  # noqa: F401 imported but unused
    except Exception as e:
        # Print a concise one-line diagnosis and abort the whole pytest run.
        print("Failed to import pybind11_tests from pytest:")
        print("  {}: {}".format(type(e).__name__, e))
        sys.exit(1)
_test_import_pybind11() | unknown | codeparrot/codeparrot-clean | ||
from test.support import import_helper, threading_helper
syslog = import_helper.import_module("syslog") #skip if not supported
from test import support
import sys
import threading
import time
import unittest
from textwrap import dedent
# XXX(nnorwitz): This test sucks. I don't know of a platform independent way
# to verify that the messages were really logged.
# The only purpose of this test is to verify the code doesn't crash or leak.
class Test(unittest.TestCase):
    # Smoke tests for the syslog module: there is no platform-independent way
    # to read back what was logged, so these only verify no crash / no leak.

    def tearDown(self):
        # Always close the log so one test's openlog() cannot leak state
        # into another test.
        syslog.closelog()

    def test_openlog(self):
        syslog.openlog('python')
        # Issue #6697: a lone surrogate cannot be encoded for the ident.
        self.assertRaises(UnicodeEncodeError, syslog.openlog, '\uD800')

    def test_syslog(self):
        syslog.openlog('python')
        syslog.syslog('test message from python test_syslog')
        syslog.syslog(syslog.LOG_ERR, 'test error from python test_syslog')

    def test_syslog_implicit_open(self):
        # syslog() without a prior openlog() must open the log implicitly.
        syslog.closelog()  # Make sure log is closed
        syslog.syslog('test message from python test_syslog')
        syslog.syslog(syslog.LOG_ERR, 'test error from python test_syslog')

    def test_closelog(self):
        syslog.openlog('python')
        syslog.closelog()
        syslog.closelog()  # idempotent operation

    def test_setlogmask(self):
        mask = syslog.LOG_UPTO(syslog.LOG_WARNING)
        oldmask = syslog.setlogmask(mask)
        # setlogmask(0) queries the current mask without changing it.
        self.assertEqual(syslog.setlogmask(0), mask)
        self.assertEqual(syslog.setlogmask(oldmask), mask)

    def test_log_mask(self):
        # LOG_UPTO includes the given priority and everything more severe.
        mask = syslog.LOG_UPTO(syslog.LOG_WARNING)
        self.assertTrue(mask & syslog.LOG_MASK(syslog.LOG_WARNING))
        self.assertTrue(mask & syslog.LOG_MASK(syslog.LOG_ERR))
        self.assertFalse(mask & syslog.LOG_MASK(syslog.LOG_INFO))

    def test_openlog_noargs(self):
        syslog.openlog()
        syslog.syslog('test message from python test_syslog')

    @threading_helper.requires_working_threading()
    def test_syslog_threaded(self):
        # Hammer openlog()/syslog() concurrently with a tiny thread switch
        # interval to shake out races on the ident string ownership.
        start = threading.Event()
        stop = False

        def opener():
            start.wait(10)
            i = 1
            while not stop:
                syslog.openlog(f'python-test-{i}')  # new string object
                i += 1

        def logger():
            start.wait(10)
            while not stop:
                syslog.syslog('test message from python test_syslog')

        orig_si = sys.getswitchinterval()
        support.setswitchinterval(1e-9)
        try:
            threads = [threading.Thread(target=opener)]
            threads += [threading.Thread(target=logger) for k in range(10)]
            with threading_helper.start_threads(threads):
                start.set()
                time.sleep(0.1)
                stop = True
        finally:
            sys.setswitchinterval(orig_si)

    def test_subinterpreter_syslog(self):
        # syslog.syslog() is not allowed in subinterpreters, but only if
        # syslog.openlog() hasn't been called in the main interpreter yet.
        with self.subTest('before openlog()'):
            code = dedent('''
                import syslog
                caught_error = False
                try:
                    syslog.syslog('foo')
                except RuntimeError:
                    caught_error = True
                assert(caught_error)
                ''')
            res = support.run_in_subinterp(code)
            self.assertEqual(res, 0)

        syslog.openlog()
        try:
            with self.subTest('after openlog()'):
                code = dedent('''
                    import syslog
                    syslog.syslog('foo')
                    ''')
                res = support.run_in_subinterp(code)
                self.assertEqual(res, 0)
        finally:
            syslog.closelog()

    def test_subinterpreter_openlog(self):
        # openlog() itself must always be rejected in a subinterpreter.
        try:
            code = dedent('''
                import syslog
                caught_error = False
                try:
                    syslog.openlog()
                except RuntimeError:
                    caught_error = True
                assert(caught_error)
                ''')
            res = support.run_in_subinterp(code)
            self.assertEqual(res, 0)
        finally:
            syslog.closelog()

    def test_subinterpreter_closelog(self):
        # closelog() in a subinterpreter must not close the main
        # interpreter's log.
        syslog.openlog('python')
        try:
            code = dedent('''
                import syslog
                caught_error = False
                try:
                    syslog.closelog()
                except RuntimeError:
                    caught_error = True
                assert(caught_error)
                ''')
            res = support.run_in_subinterp(code)
            self.assertEqual(res, 0)
        finally:
            syslog.closelog()
if __name__ == "__main__":
unittest.main() | python | github | https://github.com/python/cpython | Lib/test/test_syslog.py |
# Copyright 2013 Sascha Peilicke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
import shutil
import subprocess
import sys
from distutils.core import Command
class CleanupCommand(Command):
    """Delete build artifacts and caches from the project directory."""

    patterns = [".coverage", ".tox", ".venv", "build", "dist", "*.egg", "*.egg-info"]
    description = "Clean up project directory"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Expand each glob pattern relative to the current directory and
        # remove whatever matches; directories are removed recursively.
        for pattern in CleanupCommand.patterns:
            for match in glob.glob(pattern):
                if os.path.isdir(match):
                    shutil.rmtree(match, ignore_errors=True)
                else:
                    os.remove(match)
class DocCommand(Command):
    """Generate manpage and HTML documentation from the DocBook sources."""

    description = "Generate manpage, HTML and PDF documentation"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Documentation generation is best-effort: it needs xsltproc and the
        # DocBook stylesheets, which are not installed everywhere.
        try:
            subprocess.call(["xsltproc", "--output", "doc/py2pack.html", "/usr/share/xml/docbook/stylesheet/nwalsh/current/html/docbook.xsl", "doc/src/py2pack.xml.in"])
            subprocess.call(["xsltproc", "--output", "doc/py2pack.1", "/usr/share/xml/docbook/stylesheet/nwalsh/current/manpages/docbook.xsl", "doc/src/py2pack.xml.in"])
        except OSError:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. Only a missing xsltproc binary
            # (OSError from subprocess) should be ignored.
            pass
class SPDXUpdateCommand(Command):
    """Regenerate the pickled SPDX license map from the upstream spreadsheet."""

    # BUG FIX: description read "SDPX" -- user-facing typo.
    description = "Update SPDX license map"
    user_options = []
    LICENSE_FILE = 'py2pack/spdx_license_map.p'

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Not part of any requirements, could happen through setup(setup_requires=...)
        import pickle
        import lxml.html
        import requests
        response = requests.get('https://docs.google.com/spreadsheet/pub?key=0AqPp4y2wyQsbdGQ1V3pRRDg5NEpGVWpubzdRZ0tjUWc')
        html = lxml.html.fromstring(response.text)
        licenses = {}
        for i, tr in enumerate(html.cssselect('table#tblMain > tr[class!="rShim"]')):
            if i == 0:
                continue  # Skip the first tr, only contains row descriptions
            _, td_new, td_old = tr.getchildren()
            # Map the old (free-form) license name to the new SPDX identifier.
            licenses[td_old.text] = td_new.text
        # BUG FIX: the file object returned by open() was never closed; use a
        # context manager so the pickle is flushed and the handle released.
        with open(SPDXUpdateCommand.LICENSE_FILE, 'wb') as license_file:
            pickle.dump(licenses, license_file)
def get_cmdclass():
    """Mapping of command names to the distutils command classes exposed
    by this module, suitable for ``setup(cmdclass=get_cmdclass())``.

    NOTE(review): ``DocCommand`` is defined above but not registered here
    (the old docstring claimed "all" commands were) -- confirm whether it
    should be exposed as ``"doc"``.
    """
    return {
        "cleanup": CleanupCommand,
        "spdx_update": SPDXUpdateCommand,
    }
def parse_requirements(requirements_file='requirements.txt'):
    """Parse a pip requirements file into a list of requirement strings.

    Handles the pip-specific line forms:
      * ``-e <vcs-url>#egg=<name>``   -> ``<name>``
      * ``http(s)://...#egg=<name>``  -> ``<name>``
      * ``-f`` (index location) and ``-r`` (include) lines are skipped,
        as are blank lines and ``#`` comments.

    Args:
        requirements_file: path of the requirements file to read.

    Returns:
        List of requirement specifier strings (no empty entries).
    """
    requirements = []
    with open(requirements_file, 'r') as f:
        for line in f:
            # For the requirements list, we need to inject only the portion
            # after egg= so that distutils knows the package it's looking for
            # such as:
            # -e git://github.com/openstack/nova/master#egg=nova
            if re.match(r'\s*-e\s+', line):
                # BUG FIX: strip() the result -- the raw line keeps its
                # trailing newline after the substitution.
                requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
                                           line).strip())
            # such as:
            # http://github.com/openstack/nova/zipball/master#egg=nova
            elif re.match(r'\s*https?:', line):
                requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
                                           line).strip())
            # -f lines are for index locations, and don't get used here
            elif re.match(r'\s*-f\s+', line):
                pass
            # -r lines are for including other files, and don't get used here
            elif re.match(r'\s*-r\s+', line):
                pass
            # BUG FIX: blank lines and comments used to fall through to the
            # final branch and be appended as (empty) requirements.
            elif not line.strip() or line.strip().startswith('#'):
                pass
            # argparse is part of the standard library starting with 2.7
            # adding it to the requirements list screws distro installs
            # BUG FIX: the comparison used the raw line (with its trailing
            # newline), so 'argparse' was never actually filtered out.
            elif line.strip() == 'argparse' and sys.version_info >= (2, 7):
                pass
            else:
                requirements.append(line.strip())
    return requirements
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import status_params
# server configurations
# This module is flat Ambari "params" glue: it reads the cluster command JSON
# once and exposes plain module-level names for the service scripts to import.
config = Script.get_config()

# --- Hive metastore JDBC connection settings (from hive-site) ---
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_server_conf_dir = "/etc/hive/conf.server"
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
#users
hive_user = config['configurations']['global']['hive_user']
hive_lib = '/usr/lib/hive/lib/'
#JDBC driver jar name
hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
# NOTE(review): jdbc_jar_name is only bound for the MySQL and Oracle drivers;
# any other hive_jdbc_driver value leaves it undefined and the format() calls
# below will raise NameError -- confirm whether other drivers are possible.
if hive_jdbc_driver == "com.mysql.jdbc.Driver":
    jdbc_jar_name = "mysql-connector-java.jar"
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
    jdbc_jar_name = "ojdbc6.jar"
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
#common
hive_metastore_port = config['configurations']['global']['hive_metastore_port']
hive_var_lib = '/var/lib/hive'
hive_server_host = config['clusterHostInfo']['hive_server_host']
hive_url = format("jdbc:hive2://{hive_server_host}:10000")

# --- Smoke-test settings ---
smokeuser = config['configurations']['global']['smokeuser']
smoke_test_sql = "/tmp/hiveserver2.sql"
smoke_test_path = "/tmp/hiveserver2Smoke.sh"
smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']

# --- Kerberos / security ---
security_enabled = config['configurations']['global']['security_enabled']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
#hive_env
hive_conf_dir = "/etc/hive/conf"
hive_dbroot = config['configurations']['global']['hive_dbroot']
hive_log_dir = config['configurations']['global']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
#hive-site
hive_database_name = config['configurations']['global']['hive_database_name']
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh'
hadoop_home = '/usr'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid

# --- JDBC driver download locations ---
java_share_dir = '/usr/share/java'
driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
hdfs_user = config['configurations']['global']['hdfs_user']
user_group = config['configurations']['global']['user_group']
artifact_dir = "/tmp/HDP-artifacts/"
target = format("{hive_lib}/{jdbc_jar_name}")
jdk_location = config['ambariLevelParams']['jdk_location']
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
start_hiveserver2_path = "/tmp/start_hiveserver2_script"
start_metastore_path = "/tmp/start_metastore_script"
hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
java64_home = config['ambariLevelParams']['java_home']
##### MYSQL
db_name = config['configurations']['global']['hive_database_name']
mysql_user = "mysql"
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
mysql_adduser_path = "/tmp/addMysqlUser.sh"
########## HCAT
hcat_conf_dir = '/etc/hcatalog/conf'
metastore_port = 9933
hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['global']['hcat_user']
webhcat_user = config['configurations']['global']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['global']['hcat_log_dir'] #hcat_log_dir
hadoop_conf_dir = '/etc/hadoop/conf'
# -*- coding: utf-8 -*-
"""
Module containing classes with common behaviour for consoles of both VMs and Instances of all types.
"""
import base64
import re
import tempfile
import time
from cfme.exceptions import ItemNotFound
from PIL import Image, ImageFilter
from pytesseract import image_to_string
from selenium.webdriver.common.keys import Keys
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from wait_for import wait_for, TimedOutError
class VMConsole(Pretty):
    """Class to manage the VM Console. Presently, only support HTML5/WebMKS Console."""
    pretty_attrs = ['appliance_handle', 'browser', 'console_handle', 'name']

    def __init__(self, vm, console_handle, appliance_handle):
        # console_handle / appliance_handle are selenium window handles used
        # to switch focus between the console tab and the appliance tab.
        self.name = vm.name
        self.selenium = vm.appliance.browser.widgetastic.selenium
        self.console_handle = console_handle
        self.appliance_handle = appliance_handle
        self.provider = vm.provider

    ###
    # Methods
    #
    def get_banner(self):
        """Get the text of the banner above the console screen."""
        self.switch_to_console()
        # We know the widget may or may not be available right away
        # so we do this in a try-catch to ensure the code is not stopped
        # due to an exception being thrown.
        try:
            text = self.provider.get_console_connection_status()
        except ItemNotFound:
            logger.exception('Could not find banner element.')
            return None
        finally:
            # Always give focus back to the appliance tab.
            self.switch_to_appliance()
        logger.info('Read following text from console banner: %s', text)
        return text

    def get_screen(self, timeout=15):
        """
        Retrieve the bit map from the canvas widget that represents the console screen.
        Returns it as a binary string.
        Implementation:
            The canvas tag has a method toDataURL() which one can use in javascript to
            obtain the canvas image base64 encoded.   Examples of how to do this can be
            seen here:
                https://qxf2.com/blog/selenium-html5-canvas-verify-what-was-drawn/
                https://stackoverflow.com/questions/38316402/how-to-save-a-canvas-as-png-in-selenium
        """
        # Internal function to use in wait_for().   We need to try to get the
        # canvas element within a try-catch, that is in within a wait_for() so
        # we can handle it not showing up right away as it is wont to do on
        # at least RHV providers.
        def _get_canvas_element(provider):
            try:
                canvas = provider.get_remote_console_canvas()
            except ItemNotFound:
                logger.exception('Could not find canvas element.')
                return False
            return canvas

        self.switch_to_console()
        # Get the canvas element
        canvas, wait = wait_for(func=_get_canvas_element, func_args=[self.provider],
                                delay=1, handle_exceptions=True,
                                num_sec=timeout)
        logger.info("canvas: {}\n".format(canvas))
        # Now run some java script to get the contents of the canvas element
        # base 64 encoded.
        image_base64_url = self.selenium.execute_script(
            "return arguments[0].toDataURL('image/jpeg',1);",
            canvas
        )
        # The results will look like:
        #
        #   data:image/jpeg;base64,iVBORw0KGgoAAAANSUhEUgAAAfQAAABkCAYAAABwx8J9AA...
        #
        # So parse out the data from the non image data from the URL:
        image_base64 = image_base64_url.split(",")[1]
        # Now convert to binary:
        image_jpeg = base64.b64decode(image_base64)
        self.switch_to_appliance()
        return image_jpeg

    def get_screen_text(self):
        """
        Return the text from a text console.
        Uses OCR to scrape the text from the console image taken at the time of the call.
        """
        image_str = self.get_screen()
        # Write the image string to a file as pytesseract requires
        # a file, and doesn't take a string.
        tmp_file = tempfile.NamedTemporaryFile(suffix='.jpeg')
        tmp_file.write(image_str)
        tmp_file.flush()
        tmp_file_name = tmp_file.name
        # Open Image file, resize it to high resolution, sharpen it for clearer text
        # and then run image_to_string operation which returns unicode that needs to
        # be converted to utf-8 which gives us text [typr(text) == 'str']
        # higher resolution allows tesseract to recognize text correctly
        text = (image_to_string(((Image.open(tmp_file_name)).resize((7680, 4320),
            Image.ANTIALIAS)).filter(ImageFilter.SHARPEN), lang='eng',
            config='--user-words eng.user-words')).encode('utf-8')
        tmp_file.close()
        logger.info('screen text:{}'.format(text))
        return text

    def is_connected(self):
        """Wait for the banner on the console to say the console is connected."""
        banner = self.get_banner()
        if banner is None:
            return False
        return re.match('Connected', banner) is not None

    def send_keys(self, text):
        """Send text to the console."""
        self.switch_to_console()
        canvas = self.provider.get_remote_console_canvas()
        logger.info("Sending following Keys to Console {}".format(text))
        for character in text:
            canvas.send_keys(character)
            # time.sleep() is used as a short delay between two keystrokes.
            # If keys are sent to canvas any faster, canvas fails to receive them.
            time.sleep(0.3)
        canvas.send_keys(Keys.ENTER)
        self.switch_to_appliance()

    def send_ctrl_alt_delete(self):
        """Press the ctrl-alt-delete button in the console tab."""
        self.switch_to_console()
        ctrl_alt_del_btn = self.provider.get_console_ctrl_alt_del_btn()
        logger.info("Sending following Keys to Console CTRL+ALT+DEL")
        ctrl_alt_del_btn.click()
        self.switch_to_appliance()

    def send_fullscreen(self):
        """Press the fullscreen button in the console tab.

        Returns True if toggling fullscreen increased the window height
        (i.e. fullscreen actually engaged), False otherwise.
        """
        self.switch_to_console()
        fullscreen_btn = self.provider.get_console_fullscreen_btn()
        logger.info("Sending following Keys to Console Toggle Fullscreen")
        before_height = self.selenium.get_window_size()['height']
        fullscreen_btn.click()
        after_height = self.selenium.get_window_size()['height']
        # Toggle back so the console is left in its original state.
        fullscreen_btn.click()
        self.switch_to_console()
        logger.info("Height before fullscreen: {}\n Height after fullscreen:{}\n".format(
            before_height, after_height))
        if after_height > before_height:
            return True
        return False

    def switch_to_appliance(self):
        """Switch focus to appliance tab/window."""
        logger.info("Switching to appliance: window handle = {}".format(self.appliance_handle))
        self.selenium.switch_to_window(self.appliance_handle)

    def switch_to_console(self):
        """Switch focus to console tab/window."""
        logger.info("Switching to console: window handle = {}".format(self.console_handle))
        self.selenium.switch_to_window(self.console_handle)

    def wait_for_connect(self, timeout=30):
        """Wait for as long as the specified/default timeout for the console to be connected."""
        try:
            logger.info('Waiting for console connection (timeout={})'.format(timeout))
            wait_for(func=lambda: self.is_connected(),
                     delay=1, handle_exceptions=True,
                     num_sec=timeout)
            return True
        except TimedOutError:
            return False

    def close_console_window(self):
        """Attempt to close Console window at the end of test."""
        if self.console_handle is not None:
            self.switch_to_console()
            self.selenium.close()
            logger.info("Browser window/tab containing Console was closed.")
            self.switch_to_appliance()

    def find_text_on_screen(self, text_to_find, current_line=False):
        """Find particular text is present on Screen.
        This function uses get_screen_text function to get string containing
        the text on the screen and then tries to match it against the 'text_to_find'.
        Args:
            text_to_find: This is what re.search will try to search for on screen.
        Returns:
            If the match is found returns True else False.
        """
        # With provider RHOS7-GA, VMs spawned from Cirros template goes into screensaver mode
        # sometimes, and shows a blank black screen, which causes test failures. To avoid that,
        # and wake Cirros up from screensaver, following check is applied ,"\n" is sent if required.
        if not self.get_screen_text():
            self.send_keys("\n")
        if current_line:
            return re.search(text_to_find, self.get_screen_text().split('\n')[-1]) is not None
        return re.search(text_to_find, self.get_screen_text()) is not None

    def wait_for_text(self, timeout=45, text_to_find="", to_disappear=False):
        """Wait for as long as the specified/default timeout for the 'text' to show up on screen.
        Args:
            timeout: Wait Time before wait_for function times out.
            text_to_find: value passed to find_text_on_screen function
            to_disappear: if set to True, function will wait for text_to_find to disappear
                          from screen.
        """
        if not text_to_find:
            return None
        try:
            if to_disappear:
                logger.info("Waiting for {} to disappear from screen".format(text_to_find))
            # `to_disappear != found` is True exactly when the desired state
            # (present, or absent when to_disappear) has been reached.
            result = wait_for(func=lambda: to_disappear != self.find_text_on_screen(text_to_find),
                              delay=5,
                              num_sec=timeout)
            return result.out
        except TimedOutError:
            return None
"""This module provides the Units class for simplification of units.
It should be rolled into SymPy. It can perform simplification of
units, e.g., volts / amperes -> ohms.
Copyright 2020--2021 Michael Hayes, UCECE
"""
import sympy.physics.units as u
from sympy.physics.units.systems.si import dimsys_SI
from sympy.physics.units.systems import SI
from sympy.physics.units import UnitSystem, Quantity
from sympy import S
dB = Quantity('dB', 'dB')
class Units(object):
    """Simplify compound SymPy unit expressions, e.g. volts / amperes -> ohms.

    Builds a lookup table mapping a unit's base-dimension signature (a tuple
    of exponents over the unit system's base dimensions) to a canonical unit.
    """

    def __init__(self, unit_system="SI"):
        self.unit_system = UnitSystem.get_unit_system(unit_system)
        self.dim_sys = self.unit_system.get_dimension_system()
        # Map dimension-signature tuple -> canonical Quantity.
        self._mapping = {}
        for i in u.__dict__:
            unit = getattr(u, i)
            if not isinstance(unit, u.Quantity):
                continue
            key = self._makekey(unit)
            # Use earlier defined units
            if key not in self._mapping:
                self._mapping[key] = unit
        # Remove entry for no units.
        self._mapping.pop(self._makekey(1))
        # Add entry for S * ohm, etc.
        # A dimensionless signature is all-None, so map it to sympy's 1.
        key = (None, ) * len(key)
        self._mapping[key] = S.One

    def _get_dependencies(self, unit):
        # Exponents of each base dimension in `unit`, as a dict.
        dim = self.unit_system.get_dimensional_expr(unit)
        return self.dim_sys.get_dimensional_dependencies(dim)

    def _makekey(self, unit):
        # Hashable signature: one exponent slot per base dimension
        # (None where the dimension does not appear).
        deps = self._get_dependencies(unit)
        key = tuple([deps.get(str(dim.name)) for dim in self.dim_sys.base_dims])
        return key

    def simplify_units(self, unit):
        """Return the canonical unit with the same dimensions as `unit`."""
        key = self._makekey(unit)
        if not key in self._mapping:
            return unit
        result = self._mapping[key]
        # V s or Wb?   In the context of Laplace transforms, V s makes more
        # sense since the Laplace domain voltage has units (V / rad / s).
        # However, for magnetic field strength, Wb makes more sense.  Since
        # this is for circuit analysis we plump for V s.
        if result.has(u.webers):
            result = result.replace(u.webers, u.volt * u.s)
        # There are probably many more special cases like this.
        if result == u.hbar:
            result = u.joule / u.Hz
        if not unit.has(u.rad):
            return result
        # If original expression has rad (or 1 / rad) then this will
        # get lost in the mapping, so need to reapply it.
        factors = unit.as_ordered_factors()
        if u.rad in factors:
            return result * u.rad
        return result / u.rad

    def simplify(self, expr):
        """Simplify the units of `expr`, keeping its numeric value."""
        value, unit = self.as_value_unit(expr)
        return value * self.simplify_units(unit)

    def as_value_unit(self, expr):
        # Thin wrapper around the module-level helper.
        return as_value_unit(expr)
def as_value_unit(expr):
    """Split `expr` into a (value, unit) pair.

    Args:
        expr: a SymPy expression of the form ``value * units`` (or a bare
            Quantity, or a unit-free expression).

    Returns:
        Tuple ``(value, unit)`` whose product equals `expr`.

    Raises:
        ValueError: if `expr` mixes several value/unit products, e.g.
            ``4 * amp * 2 * ohm + 42 * volt``.
    """
    if isinstance(expr, u.Quantity):
        return 1, expr
    if not expr.has(u.Quantity):
        return expr, 1
    if expr.is_Pow and expr.args[1] == -1:
        # Reciprocal: recurse on the base and invert both parts.
        value, unit = as_value_unit(expr.args[0])
        # BUG FIX: SymPy's singleton registry spells this ``S.One``;
        # ``S.one`` does not exist and raised AttributeError here.
        return S.One / value, S.One / unit
    # Substitute 1 for every unit-free factor to isolate the unit part.
    defs = {x: 1 for x in expr.args if not x.has(u.Quantity)}
    unit = expr.subs(defs)
    value = expr / unit
    if value.has(u.Quantity):
        # FIXME: This function only works for something like 42 * volt or 42 * amp * ohm.
        # It fails for 4 * amp * 2 * ohm + 42 * volt.
        raise ValueError('Expression not of form value * units: %s' % expr)
    return value, unit
units = Units() | unknown | codeparrot/codeparrot-clean | ||
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef SRC_TCP_WRAP_H_
#define SRC_TCP_WRAP_H_
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#include "async_wrap.h"
#include "connection_wrap.h"
namespace node {
class ExternalReferenceRegistry;
class Environment;
// JS-facing wrapper around a libuv TCP handle; used for both client sockets
// and listening servers (distinguished by provider_type()).
class TCPWrap : public ConnectionWrap<TCPWrap, uv_tcp_t> {
 public:
  enum SocketType {
    SOCKET,
    SERVER
  };

  // Create a new JS wrapper object of the given socket type.
  static v8::MaybeLocal<v8::Object> Instantiate(Environment* env,
                                                AsyncWrap* parent,
                                                SocketType type);
  static void Initialize(v8::Local<v8::Object> target,
                         v8::Local<v8::Value> unused,
                         v8::Local<v8::Context> context,
                         void* priv);
  static void RegisterExternalReferences(ExternalReferenceRegistry* registry);

  SET_NO_MEMORY_INFO()
  SET_SELF_SIZE(TCPWrap)

  // Heap-snapshot name depends on whether this wrap is a socket or a server.
  const char* MemoryInfoName() const override {
    switch (provider_type()) {
      case ProviderType::PROVIDER_TCPWRAP:
        return "TCPSocketWrap";
      case ProviderType::PROVIDER_TCPSERVERWRAP:
        return "TCPServerWrap";
      default:
        UNREACHABLE();
    }
  }

 private:
  typedef uv_tcp_t HandleType;

  template <typename T,
            int (*F)(const typename T::HandleType*, sockaddr*, int*)>
  friend void GetSockOrPeerName(const v8::FunctionCallbackInfo<v8::Value>&);

  TCPWrap(Environment* env, v8::Local<v8::Object> object,
          ProviderType provider);

  // The static methods below are the JS bindings; IPv4 and IPv6 variants
  // funnel into the templated Connect/Bind helpers.
  static void New(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void SetNoDelay(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void SetKeepAlive(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void SetTypeOfService(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void GetTypeOfService(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void Bind(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void Bind6(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void Listen(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void Connect(const v8::FunctionCallbackInfo<v8::Value>& args);
  static void Connect6(const v8::FunctionCallbackInfo<v8::Value>& args);
  template <typename T>
  static void Connect(const v8::FunctionCallbackInfo<v8::Value>& args,
      std::function<int(const char* ip_address, T* addr)> uv_ip_addr);
  static void Open(const v8::FunctionCallbackInfo<v8::Value>& args);
  template <typename T>
  static void Bind(
      const v8::FunctionCallbackInfo<v8::Value>& args,
      int family,
      std::function<int(const char* ip_address, int port, T* addr)> uv_ip_addr);
  static void Reset(const v8::FunctionCallbackInfo<v8::Value>& args);
  int Reset(v8::Local<v8::Value> close_callback = v8::Local<v8::Value>());

#ifdef _WIN32
  static void SetSimultaneousAccepts(
      const v8::FunctionCallbackInfo<v8::Value>& args);
#endif
};
} // namespace node
#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#endif // SRC_TCP_WRAP_H_ | c | github | https://github.com/nodejs/node | src/tcp_wrap.h |
// Copyright 2018 Ulf Adams
//
// The contents of this file may be used under the terms of the Apache License,
// Version 2.0.
//
// Alternatively, the contents of this file may be used under the terms of
// the Boost Software License, Version 1.0.
//
// Unless required by applicable law or agreed to in writing, this software
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
//
// ---
//
// Apache License
// Version 2.0, January 2004
// http://www.apache.org/licenses/
//
// TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
//
// 1. Definitions.
//
// "License" shall mean the terms and conditions for use, reproduction,
// and distribution as defined by Sections 1 through 9 of this document.
//
// "Licensor" shall mean the copyright owner or entity authorized by
// the copyright owner that is granting the License.
//
// "Legal Entity" shall mean the union of the acting entity and all
// other entities that control, are controlled by, or are under common
// control with that entity. For the purposes of this definition,
// "control" means (i) the power, direct or indirect, to cause the
// direction or management of such entity, whether by contract or
// otherwise, or (ii) ownership of fifty percent (50%) or more of the
// outstanding shares, or (iii) beneficial ownership of such entity.
//
// "You" (or "Your") shall mean an individual or Legal Entity
// exercising permissions granted by this License.
//
// "Source" form shall mean the preferred form for making modifications,
// including but not limited to software source code, documentation
// source, and configuration files.
//
// "Object" form shall mean any form resulting from mechanical
// transformation or translation of a Source form, including but
// not limited to compiled object code, generated documentation,
// and conversions to other media types.
//
// "Work" shall mean the work of authorship, whether in Source or
// Object form, made available under the License, as indicated by a
// copyright notice that is included in or attached to the work
// (an example is provided in the Appendix below).
//
// "Derivative Works" shall mean any work, whether in Source or Object
// form, that is based on (or derived from) the Work and for which the
// editorial revisions, annotations, elaborations, or other modifications
// represent, as a whole, an original work of authorship. For the purposes
// of this License, Derivative Works shall not include works that remain
// separable from, or merely link (or bind by name) to the interfaces of,
// the Work and Derivative Works thereof.
//
// "Contribution" shall mean any work of authorship, including
// the original version of the Work and any modifications or additions
// to that Work or Derivative Works thereof, that is intentionally
// submitted to Licensor for inclusion in the Work by the copyright owner
// or by an individual or Legal Entity authorized to submit on behalf of
// the copyright owner. For the purposes of this definition, "submitted"
// means any form of electronic, verbal, or written communication sent
// to the Licensor or its representatives, including but not limited to
// communication on electronic mailing lists, source code control systems,
// and issue tracking systems that are managed by, or on behalf of, the
// Licensor for the purpose of discussing and improving the Work, but
// excluding communication that is conspicuously marked or otherwise
// designated in writing by the copyright owner as "Not a Contribution."
//
// "Contributor" shall mean Licensor and any individual or Legal Entity
// on behalf of whom a Contribution has been received by Licensor and
// subsequently incorporated within the Work.
//
// 2. Grant of Copyright License. Subject to the terms and conditions of
// this License, each Contributor hereby grants to You a perpetual,
// worldwide, non-exclusive, no-charge, royalty-free, irrevocable
// copyright license to reproduce, prepare Derivative Works of,
// publicly display, publicly perform, sublicense, and distribute the
// Work and such Derivative Works in Source or Object form.
//
// 3. Grant of Patent License. Subject to the terms and conditions of
// this License, each Contributor hereby grants to You a perpetual,
// worldwide, non-exclusive, no-charge, royalty-free, irrevocable
// (except as stated in this section) patent license to make, have made,
// use, offer to sell, sell, import, and otherwise transfer the Work,
// where such license applies only to those patent claims licensable
// by such Contributor that are necessarily infringed by their
// Contribution(s) alone or by combination of their Contribution(s)
// with the Work to which such Contribution(s) was submitted. If You
// institute patent litigation against any entity (including a
// cross-claim or counterclaim in a lawsuit) alleging that the Work
// or a Contribution incorporated within the Work constitutes direct
// or contributory patent infringement, then any patent licenses
// granted to You under this License for that Work shall terminate
// as of the date such litigation is filed.
//
// 4. Redistribution. You may reproduce and distribute copies of the
// Work or Derivative Works thereof in any medium, with or without
// modifications, and in Source or Object form, provided that You
// meet the following conditions:
//
// (a) You must give any other recipients of the Work or
// Derivative Works a copy of this License; and
//
// (b) You must cause any modified files to carry prominent notices
// stating that You changed the files; and
//
// (c) You must retain, in the Source form of any Derivative Works
// that You distribute, all copyright, patent, trademark, and
// attribution notices from the Source form of the Work,
// excluding those notices that do not pertain to any part of
// the Derivative Works; and
//
// (d) If the Work includes a "NOTICE" text file as part of its
// distribution, then any Derivative Works that You distribute must
// include a readable copy of the attribution notices contained
// within such NOTICE file, excluding those notices that do not
// pertain to any part of the Derivative Works, in at least one
// of the following places: within a NOTICE text file distributed
// as part of the Derivative Works; within the Source form or
// documentation, if provided along with the Derivative Works; or,
// within a display generated by the Derivative Works, if and
// wherever such third-party notices normally appear. The contents
// of the NOTICE file are for informational purposes only and
// do not modify the License. You may add Your own attribution
// notices within Derivative Works that You distribute, alongside
// or as an addendum to the NOTICE text from the Work, provided
// that such additional attribution notices cannot be construed
// as modifying the License.
//
// You may add Your own copyright statement to Your modifications and
// may provide additional or different license terms and conditions
// for use, reproduction, or distribution of Your modifications, or
// for any such Derivative Works as a whole, provided Your use,
// reproduction, and distribution of the Work otherwise complies with
// the conditions stated in this License.
//
// 5. Submission of Contributions. Unless You explicitly state otherwise,
// any Contribution intentionally submitted for inclusion in the Work
// by You to the Licensor shall be under the terms and conditions of
// this License, without any additional terms or conditions.
// Notwithstanding the above, nothing herein shall supersede or modify
// the terms of any separate license agreement you may have executed
// with Licensor regarding such Contributions.
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor,
// except as required for reasonable and customary use in describing the
// origin of the Work and reproducing the content of the NOTICE file.
//
// 7. Disclaimer of Warranty. Unless required by applicable law or
// agreed to in writing, Licensor provides the Work (and each
// Contributor provides its Contributions) on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied, including, without limitation, any warranties or conditions
// of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
// PARTICULAR PURPOSE. You are solely responsible for determining the
// appropriateness of using or redistributing the Work and assume any
// risks associated with Your exercise of permissions under this License.
//
// 8. Limitation of Liability. In no event and under no legal theory,
// whether in tort (including negligence), contract, or otherwise,
// unless required by applicable law (such as deliberate and grossly
// negligent acts) or agreed to in writing, shall any Contributor be
// liable to You for damages, including any direct, indirect, special,
// incidental, or consequential damages of any character arising as a
// result of this License or out of the use or inability to use the
// Work (including but not limited to damages for loss of goodwill,
// work stoppage, computer failure or malfunction, or any and all
// other commercial damages or losses), even if such Contributor
// has been advised of the possibility of such damages.
//
// 9. Accepting Warranty or Additional Liability. While redistributing
// the Work or Derivative Works thereof, You may choose to offer,
// and charge a fee for, acceptance of support, warranty, indemnity,
// or other liability obligations and/or rights consistent with this
// License. However, in accepting such obligations, You may act only
// on Your own behalf and on Your sole responsibility, not on behalf
// of any other Contributor, and only if You agree to indemnify,
// defend, and hold each Contributor harmless for any liability
// incurred by, or claims asserted against, such Contributor by reason
// of your accepting any such warranty or additional liability.
//
// END OF TERMS AND CONDITIONS
//
// APPENDIX: How to apply the Apache License to your work.
//
// To apply the Apache License to your work, attach the following
// boilerplate notice, with the fields enclosed by brackets "[]"
// replaced with your own identifying information. (Don't include
// the brackets!) The text should be enclosed in the appropriate
// comment syntax for the file format. We also recommend that a
// file or class name and description of purpose be included on the
// same "printed page" as the copyright notice for easier
// identification within third-party archives.
//
// Copyright [yyyy] [name of copyright owner]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ---
//
// Boost Software License - Version 1.0 - August 17th, 2003
//
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
//
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// ---
// Minimal Ryu implementation adapted for Ruby JSON gem by Josef Šimánek
// Optimized for pre-extracted mantissa/exponent from JSON parsing
// This is a stripped-down version containing only what's needed for
// converting decimal mantissa+exponent to IEEE 754 double precision.
#ifndef RYU_H
#define RYU_H
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
// Detect __builtin_clzll availability (for floor_log2)
// Note: MSVC doesn't have __builtin_clzll, so we provide a fallback
#ifdef __clang__
// Clang: ask the compiler directly whether the builtin exists.
#if __has_builtin(__builtin_clzll)
#define RYU_HAVE_BUILTIN_CLZLL 1
#else
#define RYU_HAVE_BUILTIN_CLZLL 0
#endif
// GCC: __has_builtin is not available on older GCC releases, so gate on
// the version number instead (4.3 is used here as a safe lower bound).
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define RYU_HAVE_BUILTIN_CLZLL 1
#else
// Any other compiler (e.g. MSVC): fall back to the portable bit search.
#define RYU_HAVE_BUILTIN_CLZLL 0
#endif
// Count leading zeros (for floor_log2)
static inline uint32_t ryu_leading_zeros64(uint64_t input)
{
#if RYU_HAVE_BUILTIN_CLZLL
return __builtin_clzll(input);
#else
// Fallback: binary search for the highest set bit
// This works on MSVC and other compilers without __builtin_clzll
if (input == 0) return 64;
uint32_t n = 0;
if (input <= 0x00000000FFFFFFFFULL) { n += 32; input <<= 32; }
if (input <= 0x0000FFFFFFFFFFFFULL) { n += 16; input <<= 16; }
if (input <= 0x00FFFFFFFFFFFFFFULL) { n += 8; input <<= 8; }
if (input <= 0x0FFFFFFFFFFFFFFFULL) { n += 4; input <<= 4; }
if (input <= 0x3FFFFFFFFFFFFFFFULL) { n += 2; input <<= 2; }
if (input <= 0x7FFFFFFFFFFFFFFFULL) { n += 1; }
return n;
#endif
}
// These tables are generated by PrintDoubleLookupTable.
// Bit counts of the fixed-point table entries below; both tables use the
// same 125-bit precision.
#define DOUBLE_POW5_INV_BITCOUNT 125
#define DOUBLE_POW5_BITCOUNT 125
// Entry counts — must match the array dimensions of DOUBLE_POW5_INV_SPLIT
// and DOUBLE_POW5_SPLIT declared below.
#define DOUBLE_POW5_INV_TABLE_SIZE 342
#define DOUBLE_POW5_TABLE_SIZE 326
// Inverse powers of 5 as 128-bit fixed-point constants, each split into
// two 64-bit words (presumably {low, high} — confirm against the Ryu
// reference implementation before relying on word order).
// Machine-generated by PrintDoubleLookupTable; do not edit by hand.
static const uint64_t DOUBLE_POW5_INV_SPLIT[DOUBLE_POW5_INV_TABLE_SIZE][2] = {
{ 1u, 2305843009213693952u }, { 11068046444225730970u, 1844674407370955161u },
{ 5165088340638674453u, 1475739525896764129u }, { 7821419487252849886u, 1180591620717411303u },
{ 8824922364862649494u, 1888946593147858085u }, { 7059937891890119595u, 1511157274518286468u },
{ 13026647942995916322u, 1208925819614629174u }, { 9774590264567735146u, 1934281311383406679u },
{ 11509021026396098440u, 1547425049106725343u }, { 16585914450600699399u, 1237940039285380274u },
{ 15469416676735388068u, 1980704062856608439u }, { 16064882156130220778u, 1584563250285286751u },
{ 9162556910162266299u, 1267650600228229401u }, { 7281393426775805432u, 2028240960365167042u },
{ 16893161185646375315u, 1622592768292133633u }, { 2446482504291369283u, 1298074214633706907u },
{ 7603720821608101175u, 2076918743413931051u }, { 2393627842544570617u, 1661534994731144841u },
{ 16672297533003297786u, 1329227995784915872u }, { 11918280793837635165u, 2126764793255865396u },
{ 5845275820328197809u, 1701411834604692317u }, { 15744267100488289217u, 1361129467683753853u },
{ 3054734472329800808u, 2177807148294006166u }, { 17201182836831481939u, 1742245718635204932u },
{ 6382248639981364905u, 1393796574908163946u }, { 2832900194486363201u, 2230074519853062314u },
{ 5955668970331000884u, 1784059615882449851u }, { 1075186361522890384u, 1427247692705959881u },
{ 12788344622662355584u, 2283596308329535809u }, { 13920024512871794791u, 1826877046663628647u },
{ 3757321980813615186u, 1461501637330902918u }, { 10384555214134712795u, 1169201309864722334u },
{ 5547241898389809503u, 1870722095783555735u }, { 4437793518711847602u, 1496577676626844588u },
{ 10928932444453298728u, 1197262141301475670u }, { 17486291911125277965u, 1915619426082361072u },
{ 6610335899416401726u, 1532495540865888858u }, { 12666966349016942027u, 1225996432692711086u },
{ 12888448528943286597u, 1961594292308337738u }, { 17689456452638449924u, 1569275433846670190u },
{ 14151565162110759939u, 1255420347077336152u }, { 7885109000409574610u, 2008672555323737844u },
{ 9997436015069570011u, 1606938044258990275u }, { 7997948812055656009u, 1285550435407192220u },
{ 12796718099289049614u, 2056880696651507552u }, { 2858676849947419045u, 1645504557321206042u },
{ 13354987924183666206u, 1316403645856964833u }, { 17678631863951955605u, 2106245833371143733u },
{ 3074859046935833515u, 1684996666696914987u }, { 13527933681774397782u, 1347997333357531989u },
{ 10576647446613305481u, 2156795733372051183u }, { 15840015586774465031u, 1725436586697640946u },
{ 8982663654677661702u, 1380349269358112757u }, { 18061610662226169046u, 2208558830972980411u },
{ 10759939715039024913u, 1766847064778384329u }, { 12297300586773130254u, 1413477651822707463u },
{ 15986332124095098083u, 2261564242916331941u }, { 9099716884534168143u, 1809251394333065553u },
{ 14658471137111155161u, 1447401115466452442u }, { 4348079280205103483u, 1157920892373161954u },
{ 14335624477811986218u, 1852673427797059126u }, { 7779150767507678651u, 1482138742237647301u },
{ 2533971799264232598u, 1185710993790117841u }, { 15122401323048503126u, 1897137590064188545u },
{ 12097921058438802501u, 1517710072051350836u }, { 5988988032009131678u, 1214168057641080669u },
{ 16961078480698431330u, 1942668892225729070u }, { 13568862784558745064u, 1554135113780583256u },
{ 7165741412905085728u, 1243308091024466605u }, { 11465186260648137165u, 1989292945639146568u },
{ 16550846638002330379u, 1591434356511317254u }, { 16930026125143774626u, 1273147485209053803u },
{ 4951948911778577463u, 2037035976334486086u }, { 272210314680951647u, 1629628781067588869u },
{ 3907117066486671641u, 1303703024854071095u }, { 6251387306378674625u, 2085924839766513752u },
{ 16069156289328670670u, 1668739871813211001u }, { 9165976216721026213u, 1334991897450568801u },
{ 7286864317269821294u, 2135987035920910082u }, { 16897537898041588005u, 1708789628736728065u },
{ 13518030318433270404u, 1367031702989382452u }, { 6871453250525591353u, 2187250724783011924u },
{ 9186511415162383406u, 1749800579826409539u }, { 11038557946871817048u, 1399840463861127631u },
{ 10282995085511086630u, 2239744742177804210u }, { 8226396068408869304u, 1791795793742243368u },
{ 13959814484210916090u, 1433436634993794694u }, { 11267656730511734774u, 2293498615990071511u },
{ 5324776569667477496u, 1834798892792057209u }, { 7949170070475892320u, 1467839114233645767u },
{ 17427382500606444826u, 1174271291386916613u }, { 5747719112518849781u, 1878834066219066582u },
{ 15666221734240810795u, 1503067252975253265u }, { 12532977387392648636u, 1202453802380202612u },
{ 5295368560860596524u, 1923926083808324180u }, { 4236294848688477220u, 1539140867046659344u },
{ 7078384693692692099u, 1231312693637327475u }, { 11325415509908307358u, 1970100309819723960u },
{ 9060332407926645887u, 1576080247855779168u }, { 14626963555825137356u, 1260864198284623334u },
{ 12335095245094488799u, 2017382717255397335u }, { 9868076196075591040u, 1613906173804317868u },
{ 15273158586344293478u, 1291124939043454294u }, { 13369007293925138595u, 2065799902469526871u },
{ 7005857020398200553u, 1652639921975621497u }, { 16672732060544291412u, 1322111937580497197u },
{ 11918976037903224966u, 2115379100128795516u }, { 5845832015580669650u, 1692303280103036413u },
{ 12055363241948356366u, 1353842624082429130u }, { 841837113407818570u, 2166148198531886609u },
{ 4362818505468165179u, 1732918558825509287u }, { 14558301248600263113u, 1386334847060407429u },
{ 12225235553534690011u, 2218135755296651887u }, { 2401490813343931363u, 1774508604237321510u },
{ 1921192650675145090u, 1419606883389857208u }, { 17831303500047873437u, 2271371013423771532u },
{ 6886345170554478103u, 1817096810739017226u }, { 1819727321701672159u, 1453677448591213781u },
{ 16213177116328979020u, 1162941958872971024u }, { 14873036941900635463u, 1860707134196753639u },
{ 15587778368262418694u, 1488565707357402911u }, { 8780873879868024632u, 1190852565885922329u },
{ 2981351763563108441u, 1905364105417475727u }, { 13453127855076217722u, 1524291284333980581u },
{ 7073153469319063855u, 1219433027467184465u }, { 11317045550910502167u, 1951092843947495144u },
{ 12742985255470312057u, 1560874275157996115u }, { 10194388204376249646u, 1248699420126396892u },
{ 1553625868034358140u, 1997919072202235028u }, { 8621598323911307159u, 1598335257761788022u },
{ 17965325103354776697u, 1278668206209430417u }, { 13987124906400001422u, 2045869129935088668u },
{ 121653480894270168u, 1636695303948070935u }, { 97322784715416134u, 1309356243158456748u },
{ 14913111714512307107u, 2094969989053530796u }, { 8241140556867935363u, 1675975991242824637u },
{ 17660958889720079260u, 1340780792994259709u }, { 17189487779326395846u, 2145249268790815535u },
{ 13751590223461116677u, 1716199415032652428u }, { 18379969808252713988u, 1372959532026121942u },
{ 14650556434236701088u, 2196735251241795108u }, { 652398703163629901u, 1757388200993436087u },
{ 11589965406756634890u, 1405910560794748869u }, { 7475898206584884855u, 2249456897271598191u },
{ 2291369750525997561u, 1799565517817278553u }, { 9211793429904618695u, 1439652414253822842u },
{ 18428218302589300235u, 2303443862806116547u }, { 7363877012587619542u, 1842755090244893238u },
{ 13269799239553916280u, 1474204072195914590u }, { 10615839391643133024u, 1179363257756731672u },
{ 2227947767661371545u, 1886981212410770676u }, { 16539753473096738529u, 1509584969928616540u },
{ 13231802778477390823u, 1207667975942893232u }, { 6413489186596184024u, 1932268761508629172u },
{ 16198837793502678189u, 1545815009206903337u }, { 5580372605318321905u, 1236652007365522670u },
{ 8928596168509315048u, 1978643211784836272u }, { 18210923379033183008u, 1582914569427869017u },
{ 7190041073742725760u, 1266331655542295214u }, { 436019273762630246u, 2026130648867672343u },
{ 7727513048493924843u, 1620904519094137874u }, { 9871359253537050198u, 1296723615275310299u },
{ 4726128361433549347u, 2074757784440496479u }, { 7470251503888749801u, 1659806227552397183u },
{ 13354898832594820487u, 1327844982041917746u }, { 13989140502667892133u, 2124551971267068394u },
{ 14880661216876224029u, 1699641577013654715u }, { 11904528973500979224u, 1359713261610923772u },
{ 4289851098633925465u, 2175541218577478036u }, { 18189276137874781665u, 1740432974861982428u },
{ 3483374466074094362u, 1392346379889585943u }, { 1884050330976640656u, 2227754207823337509u },
{ 5196589079523222848u, 1782203366258670007u }, { 15225317707844309248u, 1425762693006936005u },
{ 5913764258841343181u, 2281220308811097609u }, { 8420360221814984868u, 1824976247048878087u },
{ 17804334621677718864u, 1459980997639102469u }, { 17932816512084085415u, 1167984798111281975u },
{ 10245762345624985047u, 1868775676978051161u }, { 4507261061758077715u, 1495020541582440929u },
{ 7295157664148372495u, 1196016433265952743u }, { 7982903447895485668u, 1913626293225524389u },
{ 10075671573058298858u, 1530901034580419511u }, { 4371188443704728763u, 1224720827664335609u },
{ 14372599139411386667u, 1959553324262936974u }, { 15187428126271019657u, 1567642659410349579u },
{ 15839291315758726049u, 1254114127528279663u }, { 3206773216762499739u, 2006582604045247462u },
{ 13633465017635730761u, 1605266083236197969u }, { 14596120828850494932u, 1284212866588958375u },
{ 4907049252451240275u, 2054740586542333401u }, { 236290587219081897u, 1643792469233866721u },
{ 14946427728742906810u, 1315033975387093376u }, { 16535586736504830250u, 2104054360619349402u },
{ 5849771759720043554u, 1683243488495479522u }, { 15747863852001765813u, 1346594790796383617u },
{ 10439186904235184007u, 2154551665274213788u }, { 15730047152871967852u, 1723641332219371030u },
{ 12584037722297574282u, 1378913065775496824u }, { 9066413911450387881u, 2206260905240794919u },
{ 10942479943902220628u, 1765008724192635935u }, { 8753983955121776503u, 1412006979354108748u },
{ 10317025513452932081u, 2259211166966573997u }, { 874922781278525018u, 1807368933573259198u },
{ 8078635854506640661u, 1445895146858607358u }, { 13841606313089133175u, 1156716117486885886u },
{ 14767872471458792434u, 1850745787979017418u }, { 746251532941302978u, 1480596630383213935u },
{ 597001226353042382u, 1184477304306571148u }, { 15712597221132509104u, 1895163686890513836u },
{ 8880728962164096960u, 1516130949512411069u }, { 10793931984473187891u, 1212904759609928855u },
{ 17270291175157100626u, 1940647615375886168u }, { 2748186495899949531u, 1552518092300708935u },
{ 2198549196719959625u, 1242014473840567148u }, { 18275073973719576693u, 1987223158144907436u },
{ 10930710364233751031u, 1589778526515925949u }, { 12433917106128911148u, 1271822821212740759u },
{ 8826220925580526867u, 2034916513940385215u }, { 7060976740464421494u, 1627933211152308172u },
{ 16716827836597268165u, 1302346568921846537u }, { 11989529279587987770u, 2083754510274954460u },
{ 9591623423670390216u, 1667003608219963568u }, { 15051996368420132820u, 1333602886575970854u },
{ 13015147745246481542u, 2133764618521553367u }, { 3033420566713364587u, 1707011694817242694u },
{ 6116085268112601993u, 1365609355853794155u }, { 9785736428980163188u, 2184974969366070648u },
{ 15207286772667951197u, 1747979975492856518u }, { 1097782973908629988u, 1398383980394285215u },
{ 1756452758253807981u, 2237414368630856344u }, { 5094511021344956708u, 1789931494904685075u },
{ 4075608817075965366u, 1431945195923748060u }, { 6520974107321544586u, 2291112313477996896u },
{ 1527430471115325346u, 1832889850782397517u }, { 12289990821117991246u, 1466311880625918013u },
{ 17210690286378213644u, 1173049504500734410u }, { 9090360384495590213u, 1876879207201175057u },
{ 18340334751822203140u, 1501503365760940045u }, { 14672267801457762512u, 1201202692608752036u },
{ 16096930852848599373u, 1921924308174003258u }, { 1809498238053148529u, 1537539446539202607u },
{ 12515645034668249793u, 1230031557231362085u }, { 1578287981759648052u, 1968050491570179337u },
{ 12330676829633449412u, 1574440393256143469u }, { 13553890278448669853u, 1259552314604914775u },
{ 3239480371808320148u, 2015283703367863641u }, { 17348979556414297411u, 1612226962694290912u },
{ 6500486015647617283u, 1289781570155432730u }, { 10400777625036187652u, 2063650512248692368u },
{ 15699319729512770768u, 1650920409798953894u }, { 16248804598352126938u, 1320736327839163115u },
{ 7551343283653851484u, 2113178124542660985u }, { 6041074626923081187u, 1690542499634128788u },
{ 12211557331022285596u, 1352433999707303030u }, { 1091747655926105338u, 2163894399531684849u },
{ 4562746939482794594u, 1731115519625347879u }, { 7339546366328145998u, 1384892415700278303u },
{ 8053925371383123274u, 2215827865120445285u }, { 6443140297106498619u, 1772662292096356228u },
{ 12533209867169019542u, 1418129833677084982u }, { 5295740528502789974u, 2269007733883335972u },
{ 15304638867027962949u, 1815206187106668777u }, { 4865013464138549713u, 1452164949685335022u },
{ 14960057215536570740u, 1161731959748268017u }, { 9178696285890871890u, 1858771135597228828u },
{ 14721654658196518159u, 1487016908477783062u }, { 4398626097073393881u, 1189613526782226450u },
{ 7037801755317430209u, 1903381642851562320u }, { 5630241404253944167u, 1522705314281249856u },
{ 814844308661245011u, 1218164251424999885u }, { 1303750893857992017u, 1949062802279999816u },
{ 15800395974054034906u, 1559250241823999852u }, { 5261619149759407279u, 1247400193459199882u },
{ 12107939454356961969u, 1995840309534719811u }, { 5997002748743659252u, 1596672247627775849u },
{ 8486951013736837725u, 1277337798102220679u }, { 2511075177753209390u, 2043740476963553087u },
{ 13076906586428298482u, 1634992381570842469u }, { 14150874083884549109u, 1307993905256673975u },
{ 4194654460505726958u, 2092790248410678361u }, { 18113118827372222859u, 1674232198728542688u },
{ 3422448617672047318u, 1339385758982834151u }, { 16543964232501006678u, 2143017214372534641u },
{ 9545822571258895019u, 1714413771498027713u }, { 15015355686490936662u, 1371531017198422170u },
{ 5577825024675947042u, 2194449627517475473u }, { 11840957649224578280u, 1755559702013980378u },
{ 16851463748863483271u, 1404447761611184302u }, { 12204946739213931940u, 2247116418577894884u },
{ 13453306206113055875u, 1797693134862315907u }, { 3383947335406624054u, 1438154507889852726u },
{ 16482362180876329456u, 2301047212623764361u }, { 9496540929959153242u, 1840837770099011489u },
{ 11286581558709232917u, 1472670216079209191u }, { 5339916432225476010u, 1178136172863367353u },
{ 4854517476818851293u, 1885017876581387765u }, { 3883613981455081034u, 1508014301265110212u },
{ 14174937629389795797u, 1206411441012088169u }, { 11611853762797942306u, 1930258305619341071u },
{ 5600134195496443521u, 1544206644495472857u }, { 15548153800622885787u, 1235365315596378285u },
{ 6430302007287065643u, 1976584504954205257u }, { 16212288050055383484u, 1581267603963364205u },
{ 12969830440044306787u, 1265014083170691364u }, { 9683682259845159889u, 2024022533073106183u },
{ 15125643437359948558u, 1619218026458484946u }, { 8411165935146048523u, 1295374421166787957u },
{ 17147214310975587960u, 2072599073866860731u }, { 10028422634038560045u, 1658079259093488585u },
{ 8022738107230848036u, 1326463407274790868u }, { 9147032156827446534u, 2122341451639665389u },
{ 11006974540203867551u, 1697873161311732311u }, { 5116230817421183718u, 1358298529049385849u },
{ 15564666937357714594u, 2173277646479017358u }, { 1383687105660440706u, 1738622117183213887u },
{ 12174996128754083534u, 1390897693746571109u }, { 8411947361780802685u, 2225436309994513775u },
{ 6729557889424642148u, 1780349047995611020u }, { 5383646311539713719u, 1424279238396488816u },
{ 1235136468979721303u, 2278846781434382106u }, { 15745504434151418335u, 1823077425147505684u },
{ 16285752362063044992u, 1458461940118004547u }, { 5649904260166615347u, 1166769552094403638u },
{ 5350498001524674232u, 1866831283351045821u }, { 591049586477829062u, 1493465026680836657u },
{ 11540886113407994219u, 1194772021344669325u }, { 18673707743239135u, 1911635234151470921u },
{ 14772334225162232601u, 1529308187321176736u }, { 8128518565387875758u, 1223446549856941389u },
{ 1937583260394870242u, 1957514479771106223u }, { 8928764237799716840u, 1566011583816884978u },
{ 14521709019723594119u, 1252809267053507982u }, { 8477339172590109297u, 2004494827285612772u },
{ 17849917782297818407u, 1603595861828490217u }, { 6901236596354434079u, 1282876689462792174u },
{ 18420676183650915173u, 2052602703140467478u }, { 3668494502695001169u, 1642082162512373983u },
{ 10313493231639821582u, 1313665730009899186u }, { 9122891541139893884u, 2101865168015838698u },
{ 14677010862395735754u, 1681492134412670958u }, { 673562245690857633u, 1345193707530136767u }
};
static const uint64_t DOUBLE_POW5_SPLIT[DOUBLE_POW5_TABLE_SIZE][2] = {
{ 0u, 1152921504606846976u }, { 0u, 1441151880758558720u },
{ 0u, 1801439850948198400u }, { 0u, 2251799813685248000u },
{ 0u, 1407374883553280000u }, { 0u, 1759218604441600000u },
{ 0u, 2199023255552000000u }, { 0u, 1374389534720000000u },
{ 0u, 1717986918400000000u }, { 0u, 2147483648000000000u },
{ 0u, 1342177280000000000u }, { 0u, 1677721600000000000u },
{ 0u, 2097152000000000000u }, { 0u, 1310720000000000000u },
{ 0u, 1638400000000000000u }, { 0u, 2048000000000000000u },
{ 0u, 1280000000000000000u }, { 0u, 1600000000000000000u },
{ 0u, 2000000000000000000u }, { 0u, 1250000000000000000u },
{ 0u, 1562500000000000000u }, { 0u, 1953125000000000000u },
{ 0u, 1220703125000000000u }, { 0u, 1525878906250000000u },
{ 0u, 1907348632812500000u }, { 0u, 1192092895507812500u },
{ 0u, 1490116119384765625u }, { 4611686018427387904u, 1862645149230957031u },
{ 9799832789158199296u, 1164153218269348144u }, { 12249790986447749120u, 1455191522836685180u },
{ 15312238733059686400u, 1818989403545856475u }, { 14528612397897220096u, 2273736754432320594u },
{ 13692068767113150464u, 1421085471520200371u }, { 12503399940464050176u, 1776356839400250464u },
{ 15629249925580062720u, 2220446049250313080u }, { 9768281203487539200u, 1387778780781445675u },
{ 7598665485932036096u, 1734723475976807094u }, { 274959820560269312u, 2168404344971008868u },
{ 9395221924704944128u, 1355252715606880542u }, { 2520655369026404352u, 1694065894508600678u },
{ 12374191248137781248u, 2117582368135750847u }, { 14651398557727195136u, 1323488980084844279u },
{ 13702562178731606016u, 1654361225106055349u }, { 3293144668132343808u, 2067951531382569187u },
{ 18199116482078572544u, 1292469707114105741u }, { 8913837547316051968u, 1615587133892632177u },
{ 15753982952572452864u, 2019483917365790221u }, { 12152082354571476992u, 1262177448353618888u },
{ 15190102943214346240u, 1577721810442023610u }, { 9764256642163156992u, 1972152263052529513u },
{ 17631875447420442880u, 1232595164407830945u }, { 8204786253993389888u, 1540743955509788682u },
{ 1032610780636961552u, 1925929944387235853u }, { 2951224747111794922u, 1203706215242022408u },
{ 3689030933889743652u, 1504632769052528010u }, { 13834660704216955373u, 1880790961315660012u },
{ 17870034976990372916u, 1175494350822287507u }, { 17725857702810578241u, 1469367938527859384u },
{ 3710578054803671186u, 1836709923159824231u }, { 26536550077201078u, 2295887403949780289u },
{ 11545800389866720434u, 1434929627468612680u }, { 14432250487333400542u, 1793662034335765850u },
{ 8816941072311974870u, 2242077542919707313u }, { 17039803216263454053u, 1401298464324817070u },
{ 12076381983474541759u, 1751623080406021338u }, { 5872105442488401391u, 2189528850507526673u },
{ 15199280947623720629u, 1368455531567204170u }, { 9775729147674874978u, 1710569414459005213u },
{ 16831347453020981627u, 2138211768073756516u }, { 1296220121283337709u, 1336382355046097823u },
{ 15455333206886335848u, 1670477943807622278u }, { 10095794471753144002u, 2088097429759527848u },
{ 6309871544845715001u, 1305060893599704905u }, { 12499025449484531656u, 1631326116999631131u },
{ 11012095793428276666u, 2039157646249538914u }, { 11494245889320060820u, 1274473528905961821u },
{ 532749306367912313u, 1593091911132452277u }, { 5277622651387278295u, 1991364888915565346u },
{ 7910200175544436838u, 1244603055572228341u }, { 14499436237857933952u, 1555753819465285426u },
{ 8900923260467641632u, 1944692274331606783u }, { 12480606065433357876u, 1215432671457254239u },
{ 10989071563364309441u, 1519290839321567799u }, { 9124653435777998898u, 1899113549151959749u },
{ 8008751406574943263u, 1186945968219974843u }, { 5399253239791291175u, 1483682460274968554u },
{ 15972438586593889776u, 1854603075343710692u }, { 759402079766405302u, 1159126922089819183u },
{ 14784310654990170340u, 1448908652612273978u }, { 9257016281882937117u, 1811135815765342473u },
{ 16182956370781059300u, 2263919769706678091u }, { 7808504722524468110u, 1414949856066673807u },
{ 5148944884728197234u, 1768687320083342259u }, { 1824495087482858639u, 2210859150104177824u },
{ 1140309429676786649u, 1381786968815111140u }, { 1425386787095983311u, 1727233711018888925u },
{ 6393419502297367043u, 2159042138773611156u }, { 13219259225790630210u, 1349401336733506972u },
{ 16524074032238287762u, 1686751670916883715u }, { 16043406521870471799u, 2108439588646104644u },
{ 803757039314269066u, 1317774742903815403u }, { 14839754354425000045u, 1647218428629769253u },
{ 4714634887749086344u, 2059023035787211567u }, { 9864175832484260821u, 1286889397367007229u },
{ 16941905809032713930u, 1608611746708759036u }, { 2730638187581340797u, 2010764683385948796u },
{ 10930020904093113806u, 1256727927116217997u }, { 18274212148543780162u, 1570909908895272496u },
{ 4396021111970173586u, 1963637386119090621u }, { 5053356204195052443u, 1227273366324431638u },
{ 15540067292098591362u, 1534091707905539547u }, { 14813398096695851299u, 1917614634881924434u },
{ 13870059828862294966u, 1198509146801202771u }, { 12725888767650480803u, 1498136433501503464u },
{ 15907360959563101004u, 1872670541876879330u }, { 14553786618154326031u, 1170419088673049581u },
{ 4357175217410743827u, 1463023860841311977u }, { 10058155040190817688u, 1828779826051639971u },
{ 7961007781811134206u, 2285974782564549964u }, { 14199001900486734687u, 1428734239102843727u },
{ 13137066357181030455u, 1785917798878554659u }, { 11809646928048900164u, 2232397248598193324u },
{ 16604401366885338411u, 1395248280373870827u }, { 16143815690179285109u, 1744060350467338534u },
{ 10956397575869330579u, 2180075438084173168u }, { 6847748484918331612u, 1362547148802608230u },
{ 17783057643002690323u, 1703183936003260287u }, { 17617136035325974999u, 2128979920004075359u },
{ 17928239049719816230u, 1330612450002547099u }, { 17798612793722382384u, 1663265562503183874u },
{ 13024893955298202172u, 2079081953128979843u }, { 5834715712847682405u, 1299426220705612402u },
{ 16516766677914378815u, 1624282775882015502u }, { 11422586310538197711u, 2030353469852519378u },
{ 11750802462513761473u, 1268970918657824611u }, { 10076817059714813937u, 1586213648322280764u },
{ 12596021324643517422u, 1982767060402850955u }, { 5566670318688504437u, 1239229412751781847u },
{ 2346651879933242642u, 1549036765939727309u }, { 7545000868343941206u, 1936295957424659136u },
{ 4715625542714963254u, 1210184973390411960u }, { 5894531928393704067u, 1512731216738014950u },
{ 16591536947346905892u, 1890914020922518687u }, { 17287239619732898039u, 1181821263076574179u },
{ 16997363506238734644u, 1477276578845717724u }, { 2799960309088866689u, 1846595723557147156u },
{ 10973347230035317489u, 1154122327223216972u }, { 13716684037544146861u, 1442652909029021215u },
{ 12534169028502795672u, 1803316136286276519u }, { 11056025267201106687u, 2254145170357845649u },
{ 18439230838069161439u, 1408840731473653530u }, { 13825666510731675991u, 1761050914342066913u },
{ 3447025083132431277u, 2201313642927583642u }, { 6766076695385157452u, 1375821026829739776u },
{ 8457595869231446815u, 1719776283537174720u }, { 10571994836539308519u, 2149720354421468400u },
{ 6607496772837067824u, 1343575221513417750u }, { 17482743002901110588u, 1679469026891772187u },
{ 17241742735199000331u, 2099336283614715234u }, { 15387775227926763111u, 1312085177259197021u },
{ 5399660979626290177u, 1640106471573996277u }, { 11361262242960250625u, 2050133089467495346u },
{ 11712474920277544544u, 1281333180917184591u }, { 10028907631919542777u, 1601666476146480739u },
{ 7924448521472040567u, 2002083095183100924u }, { 14176152362774801162u, 1251301934489438077u },
{ 3885132398186337741u, 1564127418111797597u }, { 9468101516160310080u, 1955159272639746996u },
{ 15140935484454969608u, 1221974545399841872u }, { 479425281859160394u, 1527468181749802341u },
{ 5210967620751338397u, 1909335227187252926u }, { 17091912818251750210u, 1193334516992033078u },
{ 12141518985959911954u, 1491668146240041348u }, { 15176898732449889943u, 1864585182800051685u },
{ 11791404716994875166u, 1165365739250032303u }, { 10127569877816206054u, 1456707174062540379u },
{ 8047776328842869663u, 1820883967578175474u }, { 836348374198811271u, 2276104959472719343u },
{ 7440246761515338900u, 1422565599670449589u }, { 13911994470321561530u, 1778206999588061986u },
{ 8166621051047176104u, 2222758749485077483u }, { 2798295147690791113u, 1389224218428173427u },
{ 17332926989895652603u, 1736530273035216783u }, { 17054472718942177850u, 2170662841294020979u },
{ 8353202440125167204u, 1356664275808763112u }, { 10441503050156459005u, 1695830344760953890u },
{ 3828506775840797949u, 2119787930951192363u }, { 86973725686804766u, 1324867456844495227u },
{ 13943775212390669669u, 1656084321055619033u }, { 3594660960206173375u, 2070105401319523792u },
{ 2246663100128858359u, 1293815875824702370u }, { 12031700912015848757u, 1617269844780877962u },
{ 5816254103165035138u, 2021587305976097453u }, { 5941001823691840913u, 1263492066235060908u },
{ 7426252279614801142u, 1579365082793826135u }, { 4671129331091113523u, 1974206353492282669u },
{ 5225298841145639904u, 1233878970932676668u }, { 6531623551432049880u, 1542348713665845835u },
{ 3552843420862674446u, 1927935892082307294u }, { 16055585193321335241u, 1204959932551442058u },
{ 10846109454796893243u, 1506199915689302573u }, { 18169322836923504458u, 1882749894611628216u },
{ 11355826773077190286u, 1176718684132267635u }, { 9583097447919099954u, 1470898355165334544u },
{ 11978871809898874942u, 1838622943956668180u }, { 14973589762373593678u, 2298278679945835225u },
{ 2440964573842414192u, 1436424174966147016u }, { 3051205717303017741u, 1795530218707683770u },
{ 13037379183483547984u, 2244412773384604712u }, { 8148361989677217490u, 1402757983365377945u },
{ 14797138505523909766u, 1753447479206722431u }, { 13884737113477499304u, 2191809349008403039u },
{ 15595489723564518921u, 1369880843130251899u }, { 14882676136028260747u, 1712351053912814874u },
{ 9379973133180550126u, 2140438817391018593u }, { 17391698254306313589u, 1337774260869386620u },
{ 3292878744173340370u, 1672217826086733276u }, { 4116098430216675462u, 2090272282608416595u },
{ 266718509671728212u, 1306420176630260372u }, { 333398137089660265u, 1633025220787825465u },
{ 5028433689789463235u, 2041281525984781831u }, { 10060300083759496378u, 1275800953740488644u },
{ 12575375104699370472u, 1594751192175610805u }, { 1884160825592049379u, 1993438990219513507u },
{ 17318501580490888525u, 1245899368887195941u }, { 7813068920331446945u, 1557374211108994927u },
{ 5154650131986920777u, 1946717763886243659u }, { 915813323278131534u, 1216698602428902287u },
{ 14979824709379828129u, 1520873253036127858u }, { 9501408849870009354u, 1901091566295159823u },
{ 12855909558809837702u, 1188182228934474889u }, { 2234828893230133415u, 1485227786168093612u },
{ 2793536116537666769u, 1856534732710117015u }, { 8663489100477123587u, 1160334207943823134u },
{ 1605989338741628675u, 1450417759929778918u }, { 11230858710281811652u, 1813022199912223647u },
{ 9426887369424876662u, 2266277749890279559u }, { 12809333633531629769u, 1416423593681424724u },
{ 16011667041914537212u, 1770529492101780905u }, { 6179525747111007803u, 2213161865127226132u },
{ 13085575628799155685u, 1383226165704516332u }, { 16356969535998944606u, 1729032707130645415u },
{ 15834525901571292854u, 2161290883913306769u }, { 2979049660840976177u, 1350806802445816731u },
{ 17558870131333383934u, 1688508503057270913u }, { 8113529608884566205u, 2110635628821588642u },
{ 9682642023980241782u, 1319147268013492901u }, { 16714988548402690132u, 1648934085016866126u },
{ 11670363648648586857u, 2061167606271082658u }, { 11905663298832754689u, 1288229753919426661u },
{ 1047021068258779650u, 1610287192399283327u }, { 15143834390605638274u, 2012858990499104158u },
{ 4853210475701136017u, 1258036869061940099u }, { 1454827076199032118u, 1572546086327425124u },
{ 1818533845248790147u, 1965682607909281405u }, { 3442426662494187794u, 1228551629943300878u },
{ 13526405364972510550u, 1535689537429126097u }, { 3072948650933474476u, 1919611921786407622u },
{ 15755650962115585259u, 1199757451116504763u }, { 15082877684217093670u, 1499696813895630954u },
{ 9630225068416591280u, 1874621017369538693u }, { 8324733676974063502u, 1171638135855961683u },
{ 5794231077790191473u, 1464547669819952104u }, { 7242788847237739342u, 1830684587274940130u },
{ 18276858095901949986u, 2288355734093675162u }, { 16034722328366106645u, 1430222333808546976u },
{ 1596658836748081690u, 1787777917260683721u }, { 6607509564362490017u, 2234722396575854651u },
{ 1823850468512862308u, 1396701497859909157u }, { 6891499104068465790u, 1745876872324886446u },
{ 17837745916940358045u, 2182346090406108057u }, { 4231062170446641922u, 1363966306503817536u },
{ 5288827713058302403u, 1704957883129771920u }, { 6611034641322878003u, 2131197353912214900u },
{ 13355268687681574560u, 1331998346195134312u }, { 16694085859601968200u, 1664997932743917890u },
{ 11644235287647684442u, 2081247415929897363u }, { 4971804045566108824u, 1300779634956185852u },
{ 6214755056957636030u, 1625974543695232315u }, { 3156757802769657134u, 2032468179619040394u },
{ 6584659645158423613u, 1270292612261900246u }, { 17454196593302805324u, 1587865765327375307u },
{ 17206059723201118751u, 1984832206659219134u }, { 6142101308573311315u, 1240520129162011959u },
{ 3065940617289251240u, 1550650161452514949u }, { 8444111790038951954u, 1938312701815643686u },
{ 665883850346957067u, 1211445438634777304u }, { 832354812933696334u, 1514306798293471630u },
{ 10263815553021896226u, 1892883497866839537u }, { 17944099766707154901u, 1183052186166774710u },
{ 13206752671529167818u, 1478815232708468388u }, { 16508440839411459773u, 1848519040885585485u },
{ 12623618533845856310u, 1155324400553490928u }, { 15779523167307320387u, 1444155500691863660u },
{ 1277659885424598868u, 1805194375864829576u }, { 1597074856780748586u, 2256492969831036970u },
{ 5609857803915355770u, 1410308106144398106u }, { 16235694291748970521u, 1762885132680497632u },
{ 1847873790976661535u, 2203606415850622041u }, { 12684136165428883219u, 1377254009906638775u },
{ 11243484188358716120u, 1721567512383298469u }, { 219297180166231438u, 2151959390479123087u },
{ 7054589765244976505u, 1344974619049451929u }, { 13429923224983608535u, 1681218273811814911u },
{ 12175718012802122765u, 2101522842264768639u }, { 14527352785642408584u, 1313451776415480399u },
{ 13547504963625622826u, 1641814720519350499u }, { 12322695186104640628u, 2052268400649188124u },
{ 16925056528170176201u, 1282667750405742577u }, { 7321262604930556539u, 1603334688007178222u },
{ 18374950293017971482u, 2004168360008972777u }, { 4566814905495150320u, 1252605225005607986u },
{ 14931890668723713708u, 1565756531257009982u }, { 9441491299049866327u, 1957195664071262478u },
{ 1289246043478778550u, 1223247290044539049u }, { 6223243572775861092u, 1529059112555673811u },
{ 3167368447542438461u, 1911323890694592264u }, { 1979605279714024038u, 1194577431684120165u },
{ 7086192618069917952u, 1493221789605150206u }, { 18081112809442173248u, 1866527237006437757u },
{ 13606538515115052232u, 1166579523129023598u }, { 7784801107039039482u, 1458224403911279498u },
{ 507629346944023544u, 1822780504889099373u }, { 5246222702107417334u, 2278475631111374216u },
{ 3278889188817135834u, 1424047269444608885u }, { 8710297504448807696u, 1780059086805761106u }
};
// IEEE 754 double precision constants
#define DOUBLE_MANTISSA_BITS 52   // explicit mantissa bits (53rd bit is implicit)
#define DOUBLE_EXPONENT_BITS 11   // width of the biased exponent field
#define DOUBLE_EXPONENT_BIAS 1023 // bias subtracted from the stored exponent
// Helper: floor(log2(value)) using ryu_leading_zeros64
// Returns the index of the highest set bit; requires value != 0
// (ryu_leading_zeros64(0) would make the subtraction wrap).
static inline uint32_t floor_log2(const uint64_t value) {
  return 63 - ryu_leading_zeros64(value);
}
// Returns floor(log2(5^e)) via a fixed-point multiply: the constant is
// floor(2^19 * log2(5)), so the product, shifted back down by 19 bits,
// approximates e * log2(5) from below.
static inline int32_t log2pow5(const int32_t e) {
  const uint32_t log2_5_q19 = 1217359; // floor(log2(5) * 2^19)
  const uint32_t scaled = ((uint32_t) e) * log2_5_q19;
  return (int32_t) (scaled >> 19);
}
// Helper: ceil(log2(5^e))
// 5^e is never an exact power of two for e > 0, so ceil == floor + 1.
static inline int32_t ceil_log2pow5(const int32_t e) {
  return log2pow5(e) + 1;
}
// Returns the larger of two signed 32-bit integers.
static inline int32_t max32(int32_t a, int32_t b) {
  return a > b ? a : b;
}
// Reinterprets 64 raw bits as an IEEE 754 double.
// memcpy is the strict-aliasing-safe way to type-pun in C; compilers
// lower it to a single register move.
static inline double int64Bits2Double(uint64_t bits) {
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result;
}
// True iff value is divisible by 2^p, i.e. its low p bits are all zero.
// Requires p < 64 (a 64-bit shift would be undefined behavior).
static inline bool multipleOfPowerOf2(const uint64_t value, const uint32_t p) {
  // Shifting the low p bits out and back in reconstructs value exactly
  // when (and only when) those bits were zero.
  return ((value >> p) << p) == value;
}
// Count how many times value is divisible by 5.
// Uses modular inverse to avoid expensive division: multiplying by the
// inverse of 5 (mod 2^64) maps exact multiples of 5 to their quotient,
// while non-multiples land above floor(2^64 / 5).
static inline uint32_t pow5Factor(uint64_t value) {
  const uint64_t inv5 = 14757395258967641293u;  // 5 * inv5 == 1 (mod 2^64)
  const uint64_t limit = 3689348814741910323u;  // 2^64 / 5
  uint32_t count = 0;
  while ((value *= inv5) <= limit) {
    ++count;
  }
  return count;
}
// Check if value is multiple of 5^p
// Optimized: uses modular inverse instead of division
// (value only needs to contain at least p factors of five, not exactly p)
static inline bool multipleOfPowerOf5(const uint64_t value, const uint32_t p) {
  return pow5Factor(value) >= p;
}
// 128-bit multiplication with shift
// This is the core operation for converting decimal to binary
#if defined(__SIZEOF_INT128__)
// Use native 128-bit integers if available (GCC/Clang)
// mul points at a 128-bit constant stored as { low, high } 64-bit halves;
// the result is the top bits of the 64x128 -> 192 bit product shifted
// right by j. NOTE(review): assumes 64 < j < 128 so both shift amounts
// are in range -- confirm against the callers' j computation.
static inline uint64_t mulShift64(const uint64_t m, const uint64_t* const mul, const int32_t j) {
  const unsigned __int128 b0 = ((unsigned __int128) m) * mul[0]; // m * low half
  const unsigned __int128 b2 = ((unsigned __int128) m) * mul[1]; // m * high half
  // Only the upper 128 bits of the 192-bit product are needed.
  return (uint64_t) (((b0 >> 64) + b2) >> (j - 64));
}
#else
// Fallback for systems without 128-bit integers
// Portable 64x64 -> 128 bit multiply: splits both operands into 32-bit
// halves and accumulates the four partial products with explicit carries.
// Returns the low 64 bits; the high 64 bits go to *productHi.
static inline uint64_t umul128(const uint64_t a, const uint64_t b, uint64_t* const productHi) {
  const uint32_t aLo = (uint32_t)a;
  const uint32_t aHi = (uint32_t)(a >> 32);
  const uint32_t bLo = (uint32_t)b;
  const uint32_t bHi = (uint32_t)(b >> 32);
  // Four 32x32 -> 64 partial products: b00/b11 are the low/high words,
  // b01/b10 overlap in the middle.
  const uint64_t b00 = (uint64_t)aLo * bLo;
  const uint64_t b01 = (uint64_t)aLo * bHi;
  const uint64_t b10 = (uint64_t)aHi * bLo;
  const uint64_t b11 = (uint64_t)aHi * bHi;
  const uint32_t b00Lo = (uint32_t)b00;
  const uint32_t b00Hi = (uint32_t)(b00 >> 32);
  // Fold the middle products in; the high halves of mid1/mid2 carry the
  // overflow into the final high word.
  const uint64_t mid1 = b10 + b00Hi;
  const uint32_t mid1Lo = (uint32_t)(mid1);
  const uint32_t mid1Hi = (uint32_t)(mid1 >> 32);
  const uint64_t mid2 = b01 + mid1Lo;
  const uint32_t mid2Lo = (uint32_t)(mid2);
  const uint32_t mid2Hi = (uint32_t)(mid2 >> 32);
  const uint64_t pHi = b11 + mid1Hi + mid2Hi;
  const uint64_t pLo = ((uint64_t)mid2Lo << 32) | b00Lo;
  *productHi = pHi;
  return pLo;
}
// Shifts the 128-bit value (hi:lo) right by dist bits and returns the low
// 64 bits of the result. Requires 0 < dist < 64 (either shift would
// otherwise be undefined).
static inline uint64_t shiftright128(const uint64_t lo, const uint64_t hi, const uint32_t dist) {
  return (hi << (64 - dist)) | (lo >> dist);
}
// Same contract as the __int128 version above.
static inline uint64_t mulShift64(const uint64_t m, const uint64_t* const mul, const int32_t j) {
  uint64_t high1;
  const uint64_t low1 = umul128(m, mul[1], &high1);
  uint64_t high0;
  umul128(m, mul[0], &high0); // low 64 bits of m * mul[0] are discarded
  const uint64_t sum = high0 + low1;
  if (sum < high0) {
    // Unsigned wraparound in the addition carries into the high word.
    ++high1;
  }
  return shiftright128(sum, high1, j - 64);
}
#endif
// Main conversion function: decimal mantissa+exponent to IEEE 754 double
// Optimized for JSON parsing with fast paths for edge cases
//
// Parameters:
//   m10       - decimal significand (the parsed digits as one integer)
//   m10digits - number of decimal digits in m10
//   e10       - decimal exponent: the parsed value is m10 * 10^e10
//   signedM   - true if the parsed value carried a minus sign
static inline double ryu_s2d_from_parts(uint64_t m10, int m10digits, int32_t e10, bool signedM) {
  // Fast path: handle zero explicitly (e.g., "0.0", "0e0")
  if (m10 == 0) {
    // Sign bit only: yields +0.0 or -0.0.
    return int64Bits2Double(((uint64_t) signedM) << 63);
  }
  // Fast path: handle overflow/underflow early
  if (m10digits + e10 <= -324) {
    // Underflow to (signed) zero
    return int64Bits2Double(((uint64_t) signedM) << 63);
  }
  if (m10digits + e10 >= 310) {
    // Overflow to infinity (all-ones exponent, zero mantissa)
    return int64Bits2Double((((uint64_t) signedM) << 63) | 0x7ff0000000000000ULL);
  }
  // Convert decimal to binary: m10 * 10^e10 = m2 * 2^e2
  int32_t e2;
  uint64_t m2;
  bool trailingZeros; // true iff m2 * 2^e2 represents the input exactly
  if (e10 >= 0) {
    // Positive exponent: multiply by 5^e10 and adjust binary exponent
    e2 = floor_log2(m10) + e10 + log2pow5(e10) - (DOUBLE_MANTISSA_BITS + 1);
    int j = e2 - e10 - ceil_log2pow5(e10) + DOUBLE_POW5_BITCOUNT;
    m2 = mulShift64(m10, DOUBLE_POW5_SPLIT[e10], j);
    trailingZeros = e2 < e10 || (e2 - e10 < 64 && multipleOfPowerOf2(m10, e2 - e10));
  } else {
    // Negative exponent: divide by 5^(-e10)
    e2 = floor_log2(m10) + e10 - ceil_log2pow5(-e10) - (DOUBLE_MANTISSA_BITS + 1);
    int j = e2 - e10 + ceil_log2pow5(-e10) - 1 + DOUBLE_POW5_INV_BITCOUNT;
    m2 = mulShift64(m10, DOUBLE_POW5_INV_SPLIT[-e10], j);
    // The division is exact iff m10 contains enough factors of five.
    trailingZeros = multipleOfPowerOf5(m10, -e10);
  }
  // Compute IEEE 754 exponent (clamped at 0 for subnormals)
  uint32_t ieee_e2 = (uint32_t) max32(0, e2 + DOUBLE_EXPONENT_BIAS + floor_log2(m2));
  if (ieee_e2 > 0x7fe) {
    // Overflow to infinity (0x7ff is reserved for inf/NaN)
    return int64Bits2Double((((uint64_t) signedM) << 63) | 0x7ff0000000000000ULL);
  }
  // Compute shift amount for rounding
  int32_t shift = (ieee_e2 == 0 ? 1 : ieee_e2) - e2 - DOUBLE_EXPONENT_BIAS - DOUBLE_MANTISSA_BITS;
  // IEEE 754 round-to-even (banker's rounding)
  trailingZeros &= (m2 & ((1ull << (shift - 1)) - 1)) == 0;
  uint64_t lastRemovedBit = (m2 >> (shift - 1)) & 1;
  // Round up if the removed bits exceed one half, or equal exactly one
  // half while the retained mantissa is odd (ties-to-even).
  bool roundUp = (lastRemovedBit != 0) && (!trailingZeros || (((m2 >> shift) & 1) != 0));
  uint64_t ieee_m2 = (m2 >> shift) + roundUp;
  ieee_m2 &= (1ull << DOUBLE_MANTISSA_BITS) - 1; // drop the implicit leading bit
  if (ieee_m2 == 0 && roundUp) {
    // Rounding carried through the whole mantissa; bump the exponent.
    ieee_e2++;
  }
  // Pack sign, exponent, and mantissa into IEEE 754 format
  // Match original Ryu: group sign+exponent, then shift and add mantissa
  uint64_t ieee = (((((uint64_t) signedM) << DOUBLE_EXPONENT_BITS) | (uint64_t)ieee_e2) << DOUBLE_MANTISSA_BITS) | ieee_m2;
  return int64Bits2Double(ieee);
}
#endif // RYU_H | c | github | https://github.com/ruby/ruby | ext/json/vendor/ryu.h |
# -*- coding: utf-8 -*-
import unittest
from pynes import sprite
class SpriteTest(unittest.TestCase):

    """Round-trip tests for pynes.sprite decode/encode/lookup helpers,
    driven by the NerdyNights mario.chr fixture."""

    def setUp(self):
        # setUp is the unittest hook for per-test fixtures; the original
        # overrode __init__ (not the sanctioned hook) and also leaked the
        # open fixture file handle.
        with open('fixtures/nerdynights/scrolling/mario.chr', 'rb') as chr_file:
            content = chr_file.read()
        # list(bytearray(...)) yields a list of ints on both Python 2 and 3;
        # the old `ord(c) for c in content` breaks on Python 3 bytes.
        self.bin = list(bytearray(content))
        # First two 8x8 sprites of the fixture, decoded by hand.
        self.mario1 = [
            [0, 0, 0, 0, 0, 0, 1, 1],
            [0, 0, 0, 0, 1, 1, 1, 1],
            [0, 0, 0, 1, 1, 1, 1, 1],
            [0, 0, 0, 1, 1, 1, 1, 1],
            [0, 0, 0, 3, 3, 3, 2, 2],
            [0, 0, 3, 2, 2, 3, 2, 2],
            [0, 0, 3, 2, 2, 3, 3, 2],
            [0, 3, 3, 2, 2, 3, 3, 2]
        ]
        self.mario2 = [
            [1, 1, 1, 0, 0, 0, 0, 0],
            [1, 1, 2, 0, 0, 0, 0, 0],
            [1, 2, 2, 0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1, 1, 0, 0],
            [3, 2, 2, 2, 0, 0, 0, 0],
            [3, 3, 2, 2, 2, 2, 0, 0],
            [2, 2, 2, 2, 2, 2, 2, 0],
            [2, 2, 3, 2, 2, 2, 2, 0]
        ]
        # An all-background (pixel value 0) sprite.
        self.blank = [[0] * 8 for _ in range(8)]

    @staticmethod
    def _test_pattern():
        """8x8 pattern cycling through all four pixel values, used by the
        put_sprite round-trip tests (was duplicated inline twice before)."""
        return [
            [0, 1, 2, 3, 0, 1, 2, 3],
            [1, 0, 1, 2, 3, 0, 1, 2],
            [2, 1, 0, 1, 2, 3, 0, 1],
            [3, 2, 1, 0, 1, 2, 3, 0],
            [0, 3, 2, 1, 0, 1, 2, 3],
            [1, 0, 3, 2, 1, 0, 1, 2],
            [2, 1, 0, 3, 2, 1, 0, 1],
            [3, 2, 1, 0, 3, 2, 1, 0]
        ]

    def test_load_sprites(self):
        sprites = sprite.load_sprites(
            'fixtures/nerdynights/scrolling/mario.chr')
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12)
        self.assertEqual(self.bin, sprites)

    def test_decode_first_sprite(self):
        channelA = self.bin[0:8]
        channelB = self.bin[8:16]
        s1 = sprite.decode_sprite(channelA, channelB)
        self.assertEqual(self.mario1, s1)

    def test_decode_second_sprite(self):
        channelA = self.bin[16:24]
        channelB = self.bin[24:32]
        s2 = sprite.decode_sprite(channelA, channelB)
        self.assertEqual(self.mario2, s2)

    def test_get_first_sprite(self):
        s1 = sprite.get_sprite(0, self.bin)
        self.assertEqual(self.mario1, s1)

    def test_get_second_sprite(self):
        s2 = sprite.get_sprite(1, self.bin)
        self.assertEqual(self.mario2, s2)

    def test_sprite_length(self):
        length = sprite.length(self.bin)
        self.assertEqual(512, length)

    def test_encode_first_sprite(self):
        encoded = sprite.encode_sprite(self.mario1)
        expected = self.bin[0:16]
        self.assertEqual(expected, encoded)

    def test_encode_second_sprite(self):
        encoded = sprite.encode_sprite(self.mario2)
        expected = self.bin[16:32]
        self.assertEqual(expected, encoded)

    def test_put_first_sprite(self):
        expected = self._test_pattern()
        sprite.put_sprite(0, self.bin, expected)
        s1 = sprite.get_sprite(0, self.bin)
        self.assertEqual(expected, s1)

    def test_put_second_sprite(self):
        expected = self._test_pattern()
        sprite.put_sprite(1, self.bin, expected)
        s2 = sprite.get_sprite(1, self.bin)
        self.assertEqual(expected, s2)

    def test_find_sprite_1(self):
        index = sprite.find_sprite(self.bin, self.mario1)
        self.assertEqual(0, index)

    def test_find_sprite_2(self):
        index = sprite.find_sprite(self.bin, self.mario2)
        self.assertEqual(1, index)

    def test_find_sprite_3(self):
        # Search starts at sprite 256; the first blank sprite occurs at 292,
        # so the returned index is relative to the start offset.
        index = sprite.find_sprite(self.bin, self.blank, 256)
        self.assertEqual(292 - 256, index)
# Bokeh tutorial exercise: three separate line plots, then the same three
# renderers combined on a single figure, written to a static HTML file.
from bokeh.plotting import figure, HBox, output_file, show, VBox
from bokeh.models import Range1d  # NOTE(review): imported but never used
# create some data using python lists
x1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y1 = [0, 8, 2, 4, 6, 9, 15, 18, 19, 25, 28]
# EXERCISE: create two more data sets, x2, y2 and x3, y3, however
# you want. Make sure the corresponding x and y data are the same length
from math import sin
# x2/y2: a sine wave sampled at 0.05 intervals over [0, 10)
x2 = [i/20.0 for i in range(200)]
y2 = [sin(x) for x in x2]
# x3/y3: a simple parabola on integer x
x3 = list(range(11))
y3 = [x**2 for x in x3]
# specify and output static HTML file
output_file("scatter.html")
# EXERCISE: Plot all the sets of points on different plots p1, p2, p3.
# Try setting `color` (or `line_color`) and `alpha` (or `line_alpha`).
# You can also set `line_dash` and `line_width`. One example is given.
# NOTE(review): `size` is not a Line glyph property (line thickness is
# `line_width`); newer Bokeh versions reject unknown glyph keyword
# arguments -- confirm against the Bokeh version this tutorial targets.
p1 = figure(plot_width=300, plot_height=300)
p1.line(x1, y1, size=12, color="red", alpha=0.5)
p2 = figure(plot_width=300, plot_height=300)
p2.line(x2, y2, size=12, color="blue", line_dash=[2, 4])
p3 = figure(plot_width=300, plot_height=300)
p3.line(x3, y3, size=12, line_color="orange", line_width=3)
# create a figure
p4 = figure()
# EXERCISE: add all the same renderers above, on this one plot
p4.line(x1, y1, size=12, color="red", alpha=0.5)
p4.line(x2, y2, size=12, color="blue", line_dash=[2, 4])
p4.line(x3, y3, size=12, line_color="orange", line_width=2)
# show the plots arrayed in a VBox
# (HBox/VBox layouts come from the legacy bokeh.plotting API used here)
show(VBox(HBox(p1, p2, p3), p4))
"""
This file is part of Gnotero.
Gnotero is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Gnotero is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Gnotero. If not, see <http://www.gnu.org/licenses/>.
"""
import sqlite3
import os
import os.path
import sys
import shutil
import shlex
import sys
import time
from pygnotero.zotero_item import zotero_item
class libzotero:
"""
Libzotero provides access to the zotero database.
This is an object oriented reimplementation of the
original zoterotools.
"""
attachment_query = """
select items.itemID, itemAttachments.path, itemAttachments.itemID
from items, itemAttachments
where items.itemID = itemAttachments.sourceItemID
"""
info_query = """
select items.itemID, fields.fieldName, itemDataValues.value, items.key
from items, itemData, fields, itemDataValues
where
items.itemID = itemData.itemID
and itemData.fieldID = fields.fieldID
and itemData.valueID = itemDataValues.valueID
and (fields.fieldName = "date" or fields.fieldName = "publicationTitle" or fields.fieldName = "volume" or fields.fieldName = "issue" or fields.fieldName = "title")
"""
author_query = """
select items.itemID, creatorData.lastName
from items, itemCreators, creators, creatorData
where
items.itemID = itemCreators.itemID
and itemCreators.creatorID = creators.creatorID
and creators.creatorDataID = creatorData.creatorDataID
order by itemCreators.orderIndex
"""
collection_query = """
select items.itemID, collections.collectionName
from items, collections, collectionItems
where
items.itemID = collectionItems.itemID
and collections.collectionID = collectionItems.collectionID
order by collections.collectionName != "To Read", collections.collectionName
"""
deleted_query = "select itemID from deletedItems"
def __init__(self, zotero_path):
"""
Intialize libzotero
"""
# Set paths
self.zotero_path = zotero_path
self.storage_path = os.path.join(self.zotero_path, "storage")
self.zotero_database = os.path.join(self.zotero_path, "zotero.sqlite")
if os.name == "nt":
home_folder = os.environ["USERPROFILE"].decode('iso8859-15')
elif os.name == "posix":
home_folder = os.environ["HOME"].decode('iso8859-15')
else:
print "libzotero.__init__(): you appear to be running an unsupported OS"
self.gnotero_database = os.path.join(home_folder, ".gnotero.sqlite")
# Remember search results so results speed up over time
self.search_cache = {}
# Check whether verbosity is turned on
self.verbose = "-v" in sys.argv
# These dates are treated as special and are not parsed into a year representation
self.special_dates = "in press", "submitted", "in preparation", "unpublished"
# These extensions are recognized as fulltext attachments
self.attachment_ext = ".pdf", ".epub"
self.index = {}
self.collection_index = []
self.last_update = None
# The notry parameter can be used to show errors which would
# otherwise be obscured by the try clause
if "--notry" in sys.argv:
self.search("dummy")
# Start by updating the database
try:
self.search("dummy")
self.error = False
except:
self.error = True
def log(self, msg):
"""
Print a message to the output if verbosity is on.
"""
if self.verbose:
print "zoterotools2: " + msg
def update(self, force = False):
"""
This function checks if the local copy of the zotero
database is up to date. If not, the data is also indexed.
"""
stats = os.stat(self.zotero_database)
# Only update if necessary
if not force and stats[8] > self.last_update:
t = time.time()
self.last_update = stats[8]
self.index = {}
self.collection_index = []
self.search_cache = {}
# Copy the zotero database to the gnotero copy
shutil.copyfile(self.zotero_database, self.gnotero_database)
self.conn = sqlite3.connect(self.gnotero_database)
self.cur = self.conn.cursor()
# First create a list of deleted items, so we can ignore those later
deleted = []
self.cur.execute(self.deleted_query)
for item in self.cur.fetchall():
deleted.append(item[0])
# Retrieve information about date, publication, volume, issue and title
self.cur.execute(self.info_query)
for item in self.cur.fetchall():
item_id = item[0]
key = item[3]
if item_id not in deleted:
item_name = item[1]
# Parse date fields, because t = time.time() we only want a year or a 'special' date
if item_name == "date":
item_value = None
for sd in self.special_dates:
if sd in item[2].lower():
item_value = sd
break
if item_value == None:
item_value = item[2][-4:]
else:
item_value = item[2]
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.index[item_id].key = key
if item_name == "publicationTitle":
self.index[item_id].publication = item_value
elif item_name == "date":
self.index[item_id].date = item_value
elif item_name == "volume":
self.index[item_id].volume = item_value
elif item_name == "issue":
self.index[item_id].issue = item_value
elif item_name == "title":
self.index[item_id].title = item_value
# Retrieve author information
self.cur.execute(self.author_query)
for item in self.cur.fetchall():
item_id = item[0]
if item_id not in deleted:
item_author = item[1].capitalize()
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.index[item_id].authors.append(item_author)
# Retrieve collection information
self.cur.execute(self.collection_query)
for item in self.cur.fetchall():
item_id = item[0]
if item_id not in deleted:
item_collection = item[1]
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.index[item_id].collections.append(item_collection)
if item_collection not in self.collection_index:
self.collection_index.append(item_collection)
# Retrieve attachments
self.cur.execute(self.attachment_query)
for item in self.cur.fetchall():
item_id = item[0]
if item_id not in deleted:
if item[1] != None:
att = item[1].encode("latin-1")
# If the attachment is stored in the Zotero folder, it is preceded
# by "storage:"
if att[:8] == "storage:":
item_attachment = att[8:]
attachment_id = item[2]
if item_attachment[-4:].lower() in self.attachment_ext:
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.cur.execute("select items.key from items where itemID = %d" % attachment_id)
key = self.cur.fetchone()[0]
self.index[item_id].fulltext = os.path.join(self.storage_path, key, item_attachment)
# If the attachment is linked, it is simply the full path to the attachment
else:
self.index[item_id].fulltext = att
self.cur.close()
print "libzotero.update(): indexing completed in %.3fs" % (time.time() - t)
def parse_query(self, query):
"""
Parses a text search query into a list of tuples,
which are acceptable for zotero_item.match()
"""
# Make sure that spaces are handled correctly after
# semicolons. E.g., Author: Mathot
while ": " in query:
query = query.replace(": ", ":")
# Parse the terms into a suitable format
terms = []
# Check if the criterium is type-specified, like "author: doe"
for term in shlex.split(query.strip().lower()):
s = term.split(":")
if len(s) == 2:
terms.append( (s[0].strip(), s[1].strip()) )
else:
terms.append( (None, term.strip()) )
return terms
def search(self, query):
"""
Search the zotero database
"""
self.update()
if query in self.search_cache:
print "libzotero.search(): retrieving results for '%s' from cache" % query
return self.search_cache[query]
t = time.time()
terms = self.parse_query(query)
results = []
for item_id, item in self.index.items():
if item.match(terms):
results.append(item)
self.search_cache[query] = results
print "libzotero.search(): search for '%s' completed in %.3fs" % (query, time.time() - t)
return results
def valid_location(path):
"""
Checks if a given path is a valid Zotero folder,
i.e., if it it contains zotero.sqlite
"""
return os.path.exists(os.path.join(path, "zotero.sqlite")) | unknown | codeparrot/codeparrot-clean | ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v34.multiple_stats_cloudwatch.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "red",
"name": "CloudWatch Annotation Single Statistic",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-123456"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Average"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "blue",
"name": "CloudWatch Annotation Multiple Statistics - Maximum",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-789012"
},
"namespace": "AWS/RDS",
"prefixMatching": false,
"region": "us-west-2",
"statistic": "Maximum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "green",
"name": "CloudWatch Annotation Empty Statistics",
"legacyOptions": {
"dimensions": {
"LoadBalancer": "my-lb"
},
"namespace": "AWS/ApplicationELB",
"prefixMatching": false,
"region": "us-west-1"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - InvalidStat",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "InvalidStat"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "orange",
"name": "CloudWatch Annotation with Null in Statistics - null",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-null-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "pink",
"name": "CloudWatch Annotation Only Invalid Statistics - 123",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-invalid-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": 123
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "purple",
"name": "Non-CloudWatch Annotation"
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "blue",
"name": "CloudWatch Annotation Multiple Statistics - Minimum",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-789012"
},
"namespace": "AWS/RDS",
"prefixMatching": false,
"region": "us-west-2",
"statistic": "Minimum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "blue",
"name": "CloudWatch Annotation Multiple Statistics - Sum",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-789012"
},
"namespace": "AWS/RDS",
"prefixMatching": false,
"region": "us-west-2",
"statistic": "Sum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - Sum",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Sum"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - null",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "CloudWatch Annotation Invalid Statistics - Average",
"legacyOptions": {
"dimensions": {
"TableName": "my-table"
},
"namespace": "AWS/DynamoDB",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Average"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "orange",
"name": "CloudWatch Annotation with Null in Statistics - Average",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-null-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": "Average"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "orange",
"name": "CloudWatch Annotation with Null in Statistics - ",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-null-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": ""
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "pink",
"name": "CloudWatch Annotation Only Invalid Statistics - true",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-invalid-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": true
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"query": {
"kind": "prometheus",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "pink",
"name": "CloudWatch Annotation Only Invalid Statistics - [object Object]",
"legacyOptions": {
"dimensions": {
"InstanceId": "i-invalid-annotation"
},
"namespace": "AWS/EC2",
"prefixMatching": false,
"region": "us-east-1",
"statistic": {}
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "CloudWatch Single Query Multiple Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-123456"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"period": "300",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-123456"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"period": "300",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-123456"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"period": "300",
"region": "us-east-1",
"statistic": "Minimum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-10": {
"kind": "Panel",
"spec": {
"id": 10,
"title": "CloudWatch Query Missing Editor Fields",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-missing-fields"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-11": {
"kind": "Panel",
"spec": {
"id": 11,
"title": "CloudWatch Query with Expression (Code Mode)",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-with-expression"
},
"expression": "SEARCH('{AWS/EC2,InstanceId} MetricName=\"CPUUtilization\"', 'Average', 300)",
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-with-expression"
},
"expression": "SEARCH('{AWS/EC2,InstanceId} MetricName=\"CPUUtilization\"', 'Average', 300)",
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-12": {
"kind": "Panel",
"spec": {
"id": 12,
"title": "CloudWatch Insights Query Missing Editor Mode",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-insights"
},
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 1,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-13": {
"kind": "Panel",
"spec": {
"id": 13,
"title": "CloudWatch Query with Null Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": ""
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-null-stats"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-14": {
"kind": "Panel",
"spec": {
"id": 14,
"title": "CloudWatch Query with Only Invalid Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": 123
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": true
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": {}
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"InstanceId": "i-invalid-only"
},
"metricEditorMode": 0,
"metricName": "CPUUtilization",
"metricQueryType": 0,
"namespace": "AWS/EC2",
"region": "us-east-1",
"statistic": []
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-15": {
"kind": "Panel",
"spec": {
"id": 15,
"title": "Non-CloudWatch Panel",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"expr": "cpu_usage"
}
},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "CloudWatch Single Query Single Statistic",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"LoadBalancer": "my-load-balancer"
},
"metricEditorMode": 0,
"metricName": "RequestCount",
"metricQueryType": 0,
"namespace": "AWS/ApplicationELB",
"region": "us-west-2",
"statistic": "Sum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "CloudWatch Query No Statistics Array",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"DBInstanceIdentifier": "my-db"
},
"metricEditorMode": 0,
"metricName": "DatabaseConnections",
"metricQueryType": 0,
"namespace": "AWS/RDS",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Mixed CloudWatch and Non-CloudWatch Queries",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"QueueName": "my-queue"
},
"metricEditorMode": 0,
"metricName": "ApproximateNumberOfMessages",
"metricQueryType": 0,
"namespace": "AWS/SQS",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"expr": "up"
}
},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"TopicName": "my-topic"
},
"metricEditorMode": 0,
"metricName": "NumberOfMessagesPublished",
"metricQueryType": 0,
"namespace": "AWS/SNS",
"region": "us-west-1",
"statistic": "Sum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"QueueName": "my-queue"
},
"metricEditorMode": 0,
"metricName": "ApproximateNumberOfMessages",
"metricQueryType": 0,
"namespace": "AWS/SQS",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "D",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "CloudWatch Query Empty Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"BucketName": "my-bucket"
},
"metricEditorMode": 0,
"metricName": "BucketSizeBytes",
"metricQueryType": 0,
"namespace": "AWS/S3",
"region": "us-east-1"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-6": {
"kind": "Panel",
"spec": {
"id": 6,
"title": "CloudWatch Query Invalid Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": "InvalidStat"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "D",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"FunctionName": "my-function"
},
"metricEditorMode": 0,
"metricName": "Duration",
"metricQueryType": 0,
"namespace": "AWS/Lambda",
"region": "us-west-2",
"statistic": ""
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "E",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-8": {
"kind": "Panel",
"spec": {
"id": 8,
"title": "Nested CloudWatch Query Multiple Statistics",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"StreamName": "my-stream"
},
"metricEditorMode": 0,
"metricName": "IncomingRecords",
"metricQueryType": 0,
"namespace": "AWS/Kinesis",
"region": "us-east-1",
"statistic": "Sum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"StreamName": "my-stream"
},
"metricEditorMode": 0,
"metricName": "IncomingRecords",
"metricQueryType": 0,
"namespace": "AWS/Kinesis",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"StreamName": "my-stream"
},
"metricEditorMode": 0,
"metricName": "IncomingRecords",
"metricQueryType": 0,
"namespace": "AWS/Kinesis",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "C",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-9": {
"kind": "Panel",
"spec": {
"id": 9,
"title": "CloudWatch Query with Existing Editor Mode",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"ClusterName": "my-cluster"
},
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 1,
"namespace": "AWS/ECS",
"period": "300",
"region": "us-east-1",
"statistic": "Average"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {
"dimensions": {
"ClusterName": "my-cluster"
},
"metricEditorMode": 1,
"metricName": "CPUUtilization",
"metricQueryType": 1,
"namespace": "AWS/ECS",
"period": "300",
"region": "us-east-1",
"statistic": "Maximum"
}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-6"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Collapsed Row with CloudWatch",
"collapse": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-8"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-9"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-10"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-11"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-12"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-13"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-14"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-15"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "CloudWatch Multiple Statistics Test Dashboard",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v2beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dashboards_from_v0_to_v2/v2beta1.v34.multiple_stats_cloudwatch.v2alpha1.json |
import {useRef} from 'react';
// Reads `ref.current` (via optional chaining) directly during render.
// NOTE(review): presumably a compiler fixture exercising ref access in
// render — confirm against the fixture's expected output before editing.
function Component(props) {
  const ref = useRef();
  return ref?.current;
}
// Fixture entry point: the component under test and the params it is
// invoked with (none here).
export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [],
};
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.test;
import org.elasticsearch.gradle.LoggedExec;
import org.gradle.api.DefaultTask;
import org.gradle.api.file.ConfigurableFileCollection;
import org.gradle.api.file.FileCollection;
import org.gradle.api.file.RegularFileProperty;
import org.gradle.api.tasks.CacheableTask;
import org.gradle.api.tasks.Classpath;
import org.gradle.api.tasks.OutputFile;
import org.gradle.api.tasks.TaskAction;
import org.gradle.process.ExecOperations;
import org.gradle.workers.WorkAction;
import org.gradle.workers.WorkParameters;
import org.gradle.workers.WorkerExecutor;
import javax.inject.Inject;
/**
 * Gradle task that extracts cluster features metadata from a classpath and
 * writes it to a single output file.
 *
 * The actual extraction is delegated to
 * {@code org.elasticsearch.extractor.features.ClusterFeaturesMetadataExtractor},
 * which is run as a forked {@code javaexec} process from a no-isolation worker.
 */
@CacheableTask
public abstract class ClusterFeaturesMetadataTask extends DefaultTask {
    // Classes to scan for features; also supplies the extractor tool itself.
    private FileCollection classpath;
    /** Destination file the extractor writes its metadata to. */
    @OutputFile
    public abstract RegularFileProperty getOutputFile();
    /** Classpath input; tracked as {@code @Classpath} for build caching. */
    @Classpath
    public FileCollection getClasspath() {
        return classpath;
    }
    public void setClasspath(FileCollection classpath) {
        this.classpath = classpath;
    }
    @Inject
    public abstract WorkerExecutor getWorkerExecutor();
    /**
     * Submits the extraction work to a no-isolation worker, forwarding the
     * task's classpath and output file as work parameters.
     */
    @TaskAction
    public void execute() {
        getWorkerExecutor().noIsolation().submit(ClusterFeaturesMetadataWorkAction.class, params -> {
            params.getClasspath().setFrom(getClasspath());
            params.getOutputFile().set(getOutputFile());
        });
    }
    /** Parameters handed from the task to the work action. */
    public interface ClusterFeaturesWorkParameters extends WorkParameters {
        ConfigurableFileCollection getClasspath();
        RegularFileProperty getOutputFile();
    }
    /** Work action that forks a JVM to run the metadata extractor. */
    public abstract static class ClusterFeaturesMetadataWorkAction implements WorkAction<ClusterFeaturesWorkParameters> {
        private final ExecOperations execOperations;
        @Inject
        public ClusterFeaturesMetadataWorkAction(ExecOperations execOperations) {
            this.execOperations = execOperations;
        }
        @Override
        public void execute() {
            // Fork a JVM: the extractor main class is resolved from the
            // supplied classpath and the output path is passed as the single
            // program argument.
            LoggedExec.javaexec(execOperations, spec -> {
                spec.getMainClass().set("org.elasticsearch.extractor.features.ClusterFeaturesMetadataExtractor");
                spec.classpath(getParameters().getClasspath());
                spec.args(getParameters().getOutputFile().get().getAsFile().getAbsolutePath());
            });
        }
    }
}
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .response_input_file import ResponseInputFile
from .response_input_text import ResponseInputText
from .response_input_image import ResponseInputImage
__all__ = ["ResponseFunctionToolCallOutputItem", "OutputOutputContentList"]
OutputOutputContentList: TypeAlias = Annotated[
Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type")
]
class ResponseFunctionToolCallOutputItem(BaseModel):
id: str
"""The unique ID of the function call tool output."""
call_id: str
"""The unique ID of the function tool call generated by the model."""
output: Union[str, List[OutputOutputContentList]]
"""
The output from the function call generated by your code. Can be a string or an
list of output content.
"""
type: Literal["function_call_output"]
"""The type of the function tool call output. Always `function_call_output`."""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
""" | python | github | https://github.com/openai/openai-python | src/openai/types/responses/response_function_tool_call_output_item.py |
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import cache
class source:
def __init__(self):
self.base_link = 'http://pidtv.com'
self.moviesearch_link = '/%s-%s-full-hd-pidtv-free.html'
self.moviesearch_link_2 = '/%s-%s-pidtv-free.html'
self.tvsearch_link = '/wp-admin/admin-ajax.php'
self.tvsearch_link_2 = '/?s=%s'
def get_movie(self, imdb, title, year):
try:
title = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
query = self.moviesearch_link % (title, year)
query = urlparse.urljoin(self.base_link, query)
result = client.request(query, limit='5')
if result == None:
query = self.moviesearch_link_2 % (title, year)
query = urlparse.urljoin(self.base_link, query)
result = client.request(query, limit='5')
if result == None:
raise Exception()
url = re.findall('(?://.+?|)(/.+)', query)[0]
url = url.encode('utf-8')
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def pubfilm_tvcache(self):
try:
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
year = re.findall('(\d{4})', premiered)[0]
season = '%01d' % int(season) ; episode = '%01d' % int(episode)
tvshowtitle = '%s %s: Season %s' % (data['tvshowtitle'], year, season)
url = cache.get(self.pidtv_tvcache, 120, tvshowtitle)
if url == None: raise Exception()
url += '?episode=%01d' % int(episode)
url = url.encode('utf-8')
return url
except:
return
def pidtv_tvcache(self, tvshowtitle):
try:
headers = {'X-Requested-With': 'XMLHttpRequest'}
post = urllib.urlencode({'aspp': tvshowtitle, 'action': 'ajaxsearchpro_search', 'options': 'qtranslate_lang=0&set_exactonly=checked&set_intitle=None&customset%5B%5D=post', 'asid': '1', 'asp_inst_id': '1_1'})
url = urlparse.urljoin(self.base_link, self.tvsearch_link)
url = client.request(url, post=post, headers=headers)
url = zip(client.parseDOM(url, 'a', ret='href', attrs={'class': 'asp_res_url'}), client.parseDOM(url, 'a', attrs={'class': 'asp_res_url'}))
url = [(i[0], re.findall('(.+?: Season \d+)', i[1].strip())) for i in url]
url = [i[0] for i in url if len(i[1]) > 0 and tvshowtitle == i[1][0]][0]
'''
url = urlparse.urljoin(self.base_link, self.tvsearch_link_2)
url = url % urllib.quote_plus(tvshowtitle)
url = client.request(url)
url = zip(client.parseDOM(url, 'a', ret='href', attrs={'rel': '.+?'}), client.parseDOM(url, 'a', attrs={'rel': '.+?'}))
url = [i[0] for i in url if i[1] == tvshowtitle][0]
'''
url = urlparse.urljoin(self.base_link, url)
url = re.findall('(?://.+?|)(/.+)', url)[0]
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
year = re.findall('(\d{4})', premiered)[0]
season = '%01d' % int(season) ; episode = '%01d' % int(episode)
tvshowtitle = '%s %s: Season %s' % (data['tvshowtitle'], year, season)
url = cache.get(self.pidtv_tvcache, 120, tvshowtitle)
if url == None: raise Exception()
url += '?episode=%01d' % int(episode)
url = url.encode('utf-8')
#return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
content = re.compile('(.+?)\?episode=\d*$').findall(url)
content = 'movie' if len(content) == 0 else 'episode'
try:
url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
except:
pass
result = client.request(url)
url = zip(client.parseDOM(result, 'a', ret='href', attrs={'target': 'EZWebPlayer'}),
client.parseDOM(result, 'a', attrs={'target': 'EZWebPlayer'}))
url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]
if content == 'episode':
url = [i for i in url if i[1] == '%01d' % int(episode)]
links = [client.replaceHTMLCodes(i[0]) for i in url]
for u in links:
try:
result = client.request(u)
result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
result = re.findall('"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(.+?)"', result)
url = [{'url': i[0], 'quality': '1080p'} for i in result if '1080' in i[1]]
url += [{'url': i[0], 'quality': 'HD'} for i in result if '720' in i[1]]
for i in url:
sources.append(
{'source': 'gvideo', 'quality': i['quality'], 'provider': 'Pubfilm', 'url': i['url']})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
url = client.request(url, output='geturl')
if 'requiressl=yes' in url:
url = url.replace('http://', 'https://')
else:
url = url.replace('https://', 'http://')
return url
except:
return | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\Database\Console\Seeds;
use Illuminate\Console\Command;
use Illuminate\Console\ConfirmableTrait;
use Illuminate\Console\Prohibitable;
use Illuminate\Database\ConnectionResolverInterface as Resolver;
use Illuminate\Database\Eloquent\Model;
use Symfony\Component\Console\Attribute\AsCommand;
use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputOption;
#[AsCommand(name: 'db:seed')]
class SeedCommand extends Command
{
use ConfirmableTrait, Prohibitable;
/**
* The console command name.
*
* @var string
*/
protected $name = 'db:seed';
/**
* The console command description.
*
* @var string
*/
protected $description = 'Seed the database with records';
/**
* The connection resolver instance.
*
* @var \Illuminate\Database\ConnectionResolverInterface
*/
protected $resolver;
/**
* Create a new database seed command instance.
*
* @param \Illuminate\Database\ConnectionResolverInterface $resolver
*/
public function __construct(Resolver $resolver)
{
parent::__construct();
$this->resolver = $resolver;
}
/**
* Execute the console command.
*
* @return int
*/
public function handle()
{
if ($this->isProhibited() ||
! $this->confirmToProceed()) {
return Command::FAILURE;
}
$this->components->info('Seeding database.');
$previousConnection = $this->resolver->getDefaultConnection();
$this->resolver->setDefaultConnection($this->getDatabase());
Model::unguarded(function () {
$this->getSeeder()->__invoke();
});
if ($previousConnection) {
$this->resolver->setDefaultConnection($previousConnection);
}
return 0;
}
/**
* Get a seeder instance from the container.
*
* @return \Illuminate\Database\Seeder
*/
protected function getSeeder()
{
$class = $this->input->getArgument('class') ?? $this->input->getOption('class');
if (! str_contains($class, '\\')) {
$class = 'Database\\Seeders\\'.$class;
}
if ($class === 'Database\\Seeders\\DatabaseSeeder' &&
! class_exists($class)) {
$class = 'DatabaseSeeder';
}
return $this->laravel->make($class)
->setContainer($this->laravel)
->setCommand($this);
}
/**
* Get the name of the database connection to use.
*
* @return string
*/
protected function getDatabase()
{
$database = $this->input->getOption('database');
return $database ?: $this->laravel['config']['database.default'];
}
/**
* Get the console command arguments.
*
* @return array
*/
protected function getArguments()
{
return [
['class', InputArgument::OPTIONAL, 'The class name of the root seeder', null],
];
}
/**
* Get the console command options.
*
* @return array
*/
protected function getOptions()
{
return [
['class', null, InputOption::VALUE_OPTIONAL, 'The class name of the root seeder', 'Database\\Seeders\\DatabaseSeeder'],
['database', null, InputOption::VALUE_OPTIONAL, 'The database connection to seed'],
['force', null, InputOption::VALUE_NONE, 'Force the operation to run when in production'],
];
}
} | php | github | https://github.com/laravel/framework | src/Illuminate/Database/Console/Seeds/SeedCommand.php |
# -*- coding: utf8 -*-
###########################################################################
# This is part of the module phystricks
#
# phystricks is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# phystricks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phystricks.py. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# copyright (c) Laurent Claessens, 2010-2017
# email: laurent@claessens-donadello.eu
from ObjectGraph import ObjectGraph
from Constructors import *
def sudoku_substitution(tableau,symbol_list=[ str(k) for k in range(-4,5) ]):
"""
From a string representing a sudoku grid,
1. remove empty lines
2. remove spaces
3. substitute 1..9 to the symbol_list
"""
import string
lines = tableau.split("\n")[1:]
n_lines=[ l.replace(" ","") for l in lines if len(l)!=0 ]
nn_lines=[]
for l in n_lines :
a=[]
for c in l.split(","):
if c in string.digits:
a.append( symbol_list[int(c)-1])
else :
a.append(c)
nn_lines.append(",".join(a))
n_tableau="\n".join(nn_lines)
return n_tableau
class SudokuGridGraph(ObjectGraph):
def __init__(self,question,length=1):
ObjectGraph.__init__(self,self)
self.question=sudoku_substitution(question)
self.length=length # length of a cell
def action_on_pspict(self,pspict):
import string
vlines=[]
hlines=[]
content=[]
numbering=[]
# Numbering (1,2,3, ... and A,B,C ...)
for i in range(0,9):
A=Point( (i+1)*self.length-self.length/2,self.length/2 )
A.parameters.symbol=""
A.put_mark(0,0,string.uppercase[i],pspict=pspict)
B=Point(-self.length/2,-i*self.length-self.length/2)
B.parameters.symbol=""
B.put_mark(0,0,string.digits[i+1],pspict=pspict)
numbering.append(A)
numbering.append(B)
# Grid
for i in range(0,10):
v=Segment(Point(i*self.length,0),Point(i*self.length,-9*self.length))
h=Segment(Point(0,-i*self.length),Point(9*self.length,-i*self.length))
# for the subgrid
if i%3==0 :
v.parameters.linewidth=2
h.parameters.linewidth=2
vlines.append(v)
hlines.append(h)
# Content of the cells
lines = self.question.split("\n")
for i,li in enumerate(lines):
for j,c in enumerate(li.split(",")):
A=Point(j*self.length+self.length/2,-i*self.length-self.length/2)
A.parameters.symbol=""
if c=="i":
A.put_mark(3*self.length/9,text="\ldots",pspict=pspict,position="N")
if c in [ str(k) for k in range(-9,10) ] :
A.put_mark(0,0,c,pspict=pspict)
content.append(A)
pspict.DrawGraphs(vlines,hlines,content,numbering)
def _math_bounding_box(self,pspict):
return BoundingBox()
def _bounding_box(self,pspict):
return BoundingBox()
def latex_code(self,language=None,pspict=None):
return "" | unknown | codeparrot/codeparrot-clean | ||
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR" | unknown | codeparrot/codeparrot-clean | ||
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2011, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
#ifndef INCLUDED_IMF_DEEP_SCAN_LINE_INPUT_FILE_H
#define INCLUDED_IMF_DEEP_SCAN_LINE_INPUT_FILE_H
//-----------------------------------------------------------------------------
//
// class DeepScanLineInputFile
//
//-----------------------------------------------------------------------------
#include "ImfThreading.h"
#include "ImfGenericInputFile.h"
#include "ImfNamespace.h"
#include "ImfForward.h"
#include "ImfExport.h"
#include "ImfDeepScanLineOutputFile.h"
OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_ENTER
class DeepScanLineInputFile : public GenericInputFile
{
public:
//------------
// Constructor
//------------
IMF_EXPORT
DeepScanLineInputFile (const char fileName[],
int numThreads = globalThreadCount());
IMF_EXPORT
DeepScanLineInputFile (const Header &header, OPENEXR_IMF_INTERNAL_NAMESPACE::IStream *is,
int version, /*version field from file*/
int numThreads = globalThreadCount());
//-----------------------------------------
// Destructor -- deallocates internal data
// structures, but does not close the file.
//-----------------------------------------
IMF_EXPORT
virtual ~DeepScanLineInputFile ();
//------------------------
// Access to the file name
//------------------------
IMF_EXPORT
const char * fileName () const;
//--------------------------
// Access to the file header
//--------------------------
IMF_EXPORT
const Header & header () const;
//----------------------------------
// Access to the file format version
//----------------------------------
IMF_EXPORT
int version () const;
//-----------------------------------------------------------
// Set the current frame buffer -- copies the FrameBuffer
// object into the InputFile object.
//
// The current frame buffer is the destination for the pixel
// data read from the file. The current frame buffer must be
// set at least once before readPixels() is called.
// The current frame buffer can be changed after each call
// to readPixels().
//-----------------------------------------------------------
IMF_EXPORT
void setFrameBuffer (const DeepFrameBuffer &frameBuffer);
//-----------------------------------
// Access to the current frame buffer
//-----------------------------------
IMF_EXPORT
const DeepFrameBuffer & frameBuffer () const;
//---------------------------------------------------------------
// Check if the file is complete:
//
// isComplete() returns true if all pixels in the data window are
// present in the input file, or false if any pixels are missing.
// (Another program may still be busy writing the file, or file
// writing may have been aborted prematurely.)
//---------------------------------------------------------------
IMF_EXPORT
bool isComplete () const;
//---------------------------------------------------------------
// Read pixel data:
//
// readPixels(s1,s2) reads all scan lines with y coordinates
// in the interval [min (s1, s2), max (s1, s2)] from the file,
// and stores them in the current frame buffer.
//
// Both s1 and s2 must be within the interval
// [header().dataWindow().min.y, header.dataWindow().max.y]
//
// The scan lines can be read from the file in random order, and
// individual scan lines may be skipped or read multiple times.
// For maximum efficiency, the scan lines should be read in the
// order in which they were written to the file.
//
// readPixels(s) calls readPixels(s,s).
//
// If threading is enabled, readPixels (s1, s2) tries to perform
// decopmression of multiple scanlines in parallel.
//
//---------------------------------------------------------------
IMF_EXPORT
void readPixels (int scanLine1, int scanLine2);
IMF_EXPORT
void readPixels (int scanLine);
//---------------------------------------------------------------
// Extract pixel data from pre-read block
//
// readPixels(rawPixelData,frameBuffer,s1,s2) reads all scan lines with y coordinates
// in the interval [min (s1, s2), max (s1, s2)] from the data provided and
// stores them in the provided frameBuffer.
// the data can be obtained from a call to rawPixelData()
//
//
// Both s1 and s2 must be within the data specified
//
// you must provide a frameBuffer with a samplecountslice, which must have been read
// and the data valid - readPixels uses your sample count buffer to compute
// offsets to the data it needs
//
// This call does not block, and is thread safe for clients with an existing
// threading model. The InputFile's frameBuffer is not used in this call.
//
// This call is only provided for clients which have an existing threading model in place
// and unpredictable access patterns to the data.
// The fastest way to read an entire image is to enable threading,use setFrameBuffer then
// readPixels(header().dataWindow().min.y, header.dataWindow().max.y)
//
//---------------------------------------------------------------
IMF_EXPORT
void readPixels (const char * rawPixelData,
const DeepFrameBuffer & frameBuffer,
int scanLine1,
int scanLine2) const;
//----------------------------------------------
// Read a block of raw pixel data from the file,
// without uncompressing it (this function is
// used to implement OutputFile::copyPixels()).
// note: returns the entire payload of the relevant chunk of data, not including part number
// including compressed and uncompressed sizes
// on entry, if pixelDataSize is insufficiently large, no bytes are read (pixelData can safely be NULL)
// on exit, pixelDataSize is the number of bytes required to read the chunk
//
//----------------------------------------------
IMF_EXPORT
void rawPixelData (int firstScanLine,
char * pixelData,
Int64 &pixelDataSize);
//-------------------------------------------------
// firstScanLineInChunk() returns the row number of the first row that's stored in the
// same chunk as scanline y. Depending on the compression mode, this may not be the same as y
//
// lastScanLineInChunk() returns the row number of the last row that's stored in the same
// chunk as scanline y. Depending on the compression mode, this may not be the same as y.
// The last chunk in the file may be smaller than all the others
//
//------------------------------------------------
IMF_EXPORT
int firstScanLineInChunk(int y) const;
IMF_EXPORT
int lastScanLineInChunk (int y) const;
//-----------------------------------------------------------
// Read pixel sample counts into a slice in the frame buffer.
//
// readPixelSampleCounts(s1, s2) reads all the counts of
// pixel samples with y coordinates in the interval
// [min (s1, s2), max (s1, s2)] from the file, and stores
// them in the slice naming "sample count".
//
// Both s1 and s2 must be within the interval
// [header().dataWindow().min.y, header.dataWindow().max.y]
//
// readPixelSampleCounts(s) calls readPixelSampleCounts(s,s).
//
//-----------------------------------------------------------
IMF_EXPORT
void readPixelSampleCounts (int scanline1,
int scanline2);
IMF_EXPORT
void readPixelSampleCounts (int scanline);
//----------------------------------------------------------
// Read pixel sample counts into the provided frameBuffer
// using a block read of data read by rawPixelData
// for multi-scanline compression schemes, you must decode the entire block
// so scanline1=firstScanLineInChunk(y) and scanline2=lastScanLineInChunk(y)
//
// This call does not block, and is thread safe for clients with an existing
// threading model. The InputFile's frameBuffer is not used in this call.
//
// The fastest way to read an entire image is to enable threading in OpenEXR, use setFrameBuffer then
// readPixelSampleCounts(header().dataWindow().min.y, header.dataWindow().max.y)
//
//----------------------------------------------------------
IMF_EXPORT
void readPixelSampleCounts (const char * rawdata ,
const DeepFrameBuffer & frameBuffer,
int scanLine1 ,
int scanLine2) const;
struct Data;
private:
Data * _data;
DeepScanLineInputFile (InputPartData* part);
void initialize(const Header& header);
void compatibilityInitialize(OPENEXR_IMF_INTERNAL_NAMESPACE::IStream & is);
void multiPartInitialize(InputPartData* part);
friend class InputFile;
friend class MultiPartInputFile;
friend void DeepScanLineOutputFile::copyPixels(DeepScanLineInputFile &);
};
OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_EXIT
#endif | c | github | https://github.com/opencv/opencv | 3rdparty/openexr/IlmImf/ImfDeepScanLineInputFile.h |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# utils/update_confusables.py - Utility to update definitions of unicode
# confusables
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import os.path
import re
import sys
def _usage(program_name):
return 'usage: {}'.format(
program_name)
def _help(program_name):
return '{}\n\n'.format(_usage(program_name)) + \
'This script generates include/swift/Parse/Confusables.def from ' \
'utils/UnicodeData/confusables.txt.\n' \
'The latest version of the data file can be found at ' \
'ftp://ftp.unicode.org/Public/security/latest/confusables.txt.'
def main(args=sys.argv):
program_name = os.path.basename(args.pop(0))
if len(args) == 1 and args[0] in ['-h', '--help']:
print(_help(program_name))
return 0
charactersToCheck = [
u"(", u")", u"{",
u"}", u"[", u"]",
u".", u",", u":",
u";", u"=", u"@",
u"#", u"&", u"/",
u"|", u"\\", u"-",
u"*", u"+", u">",
u"<", u"!", u"?"
]
modifiedHex = [
hex(ord(char))[2:].zfill(4).upper() for char in charactersToCheck
]
basepath = os.path.dirname(__file__)
confusablesFilePath = os.path.abspath(
os.path.join(basepath, "UnicodeData/confusables.txt")
)
pairs = []
regex = r"(.+)\W+;\W+(.+)\W+;\W+MA*.[#*]*.[(].*[)](.+)\W→(.+)\W#.*"
with open(confusablesFilePath, 'r') as f:
pattern = re.compile(regex)
for line in f:
match = pattern.match(line)
if match is not None:
confusedString = match.group(1).replace(" ", "")
normalString = match.group(2).replace(" ", "")
confusedName = match.group(3).strip().title()
normalName = match.group(4).strip().replace("-", " ").title()
for hexValue in modifiedHex:
if hexValue == normalString:
confused = hex(int(confusedString, 16))
normal = hex(int(normalString, 16))
pairs.append((confused, confusedName,
normal, normalName))
defFilePath = os.path.abspath(
os.path.join(basepath, "..", "include/swift/Parse/Confusables.def")
)
with open(defFilePath, 'w') as f:
f.write("//===--- Confusables.def - Confusable unicode characters")
f.write(" ------------------===//")
header = '''
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2020 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
////////////////////////////////////////////////////////////////////////////////
// WARNING: This file is manually generated from
// utils/UnicodeData/confusables.txt and should not be directly modified.
// Run utils/generate_confusables.py to regenerate this file.
////////////////////////////////////////////////////////////////////////////////
'''
f.write(header)
f.write("// CONFUSABLE(CONFUSABLE_POINT, CONFUSABLE_NAME, " +
"BASE_POINT, BASE_NAME)\n\n")
for (confused, confusedName, expected, expectedName) in pairs:
# Ad-hoc substitutions for clarity
mappings = {"Solidus": "Forward Slash",
"Reverse Solidus": "Back Slash"}
newExpectedName = expectedName
if expectedName in mappings:
newExpectedName = mappings[expectedName]
f.write("CONFUSABLE(" + confused + ", " + '"' +
confusedName + '"' + ", " + expected + ", " +
'"' + newExpectedName + '"' + ")\n")
f.write("\n#undef CONFUSABLE\n")
if __name__ == '__main__':
main() | python | github | https://github.com/apple/swift | utils/generate_confusables.py |
# Django settings for frontend project.
import os
import common
from autotest_lib.client.common_lib import global_config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
FULL_ADMIN = False
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql',
# 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_PORT = '' # Set to empty string for default.
# Not used with sqlite3.
c = global_config.global_config
_section = 'AUTOTEST_WEB'
DATABASE_HOST = c.get_config_value(_section, "host")
# Or path to database file if using sqlite3.
DATABASE_NAME = c.get_config_value(_section, "database")
# The following not used with sqlite3.
DATABASE_USER = c.get_config_value(_section, "user")
DATABASE_PASSWORD = c.get_config_value(_section, "password", default='')
DATABASE_READONLY_HOST = c.get_config_value(_section, "readonly_host",
default=DATABASE_HOST)
DATABASE_READONLY_USER = c.get_config_value(_section, "readonly_user",
default=DATABASE_USER)
if DATABASE_READONLY_USER != DATABASE_USER:
DATABASE_READONLY_PASSWORD = c.get_config_value(_section,
"readonly_password",
default='')
else:
DATABASE_READONLY_PASSWORD = DATABASE_PASSWORD
# prefix applied to all URLs - useful if requests are coming through apache,
# and you need this app to coexist with others
URL_PREFIX = 'afe/server/'
TKO_URL_PREFIX = 'new_tko/server/'
PLANNER_URL_PREFIX = 'planner/server/'
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'pn-t15u(epetamdflb%dqaaxw+5u&2#0u-jah70w1l*_9*)=n7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'frontend.apache_auth.ApacheAuthMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'frontend.shared.json_html_formatter.JsonToHtmlMiddleware',
)
ROOT_URLCONF = 'frontend.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.abspath(os.path.dirname(__file__) + '/templates')
)
INSTALLED_APPS = (
'frontend.afe',
'frontend.tko',
'frontend.planner',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
)
AUTHENTICATION_BACKENDS = (
'frontend.apache_auth.SimpleAuthBackend',
) | unknown | codeparrot/codeparrot-clean | ||
"""
HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21).
Based on wsgiref.simple_server which is part of the standard library since 2.5.
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE!
"""
from __future__ import unicode_literals
import os
import socket
import sys
import traceback
try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django.utils.six.moves import socketserver
from wsgiref import simple_server
from wsgiref.util import FileWrapper # for backwards compatibility
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.core.wsgi import get_wsgi_application
from django.utils.importlib import import_module
__all__ = ['WSGIServer', 'WSGIRequestHandler']
def get_internal_wsgi_application():
    """
    Load and return the WSGI application configured by the user via
    ``settings.WSGI_APPLICATION``.  With the default ``startproject``
    layout this is the ``application`` object in ``projectname/wsgi.py``.

    Only Django's internal servers (runserver, runfcgi) use this function
    and the ``WSGI_APPLICATION`` setting; external WSGI servers should be
    pointed at the application object directly.

    Falls back to ``django.core.wsgi.get_wsgi_application()`` when the
    setting is ``None``.
    """
    from django.conf import settings
    app_path = getattr(settings, 'WSGI_APPLICATION')
    if app_path is None:
        return get_wsgi_application()
    module_name, attr = app_path.rsplit('.', 1)
    try:
        mod = import_module(module_name)
    except ImportError as e:
        raise ImproperlyConfigured(
            "WSGI application '%s' could not be loaded; "
            "could not import module '%s': %s" % (app_path, module_name, e))
    try:
        return getattr(mod, attr)
    except AttributeError as e:
        raise ImproperlyConfigured(
            "WSGI application '%s' could not be loaded; "
            "can't find '%s' in module '%s': %s"
            % (app_path, attr, module_name, e))
class WSGIServerException(Exception):
    """Wraps any error raised while binding the WSGI server socket."""
class ServerHandler(simple_server.ServerHandler, object):
    """WSGI handler that writes large response bodies in bounded chunks."""

    error_status = str("500 INTERNAL SERVER ERROR")

    def write(self, data):
        """'write()' callable as specified by PEP 3333.

        *data* must be a bytestring.  The stored headers are sent before
        the first chunk of the body goes out, and bytes_sent is tracked
        so the content length is known.
        """
        assert isinstance(data, bytes), "write() argument must be bytestring"
        if not self.status:
            raise AssertionError("write() before start_response()")
        elif not self.headers_sent:
            # Before the first output, send the stored headers
            self.bytes_sent = len(data)  # make sure we know content-length
            self.send_headers()
        else:
            self.bytes_sent += len(data)
        # XXX check Content-Length and truncate if too many bytes written?

        # If data is too large, socket will choke, so write chunks no larger
        # than 32MB at a time.
        length = len(data)
        if length > 33554432:
            offset = 0
            while offset < length:
                # Bug fix: cap each chunk by the bytes actually *remaining*.
                # The old ``min(33554432, length)`` ignored the offset and
                # only worked by accident because slicing clamps at the end.
                chunk_size = min(33554432, length - offset)
                self._write(data[offset:offset + chunk_size])
                self._flush()
                offset += chunk_size
        else:
            self._write(data)
            self._flush()

    def error_output(self, environ, start_response):
        # Emit the standard error body, then append a traceback of the
        # active exception to aid debugging.
        super(ServerHandler, self).error_output(environ, start_response)
        return ['\n'.join(traceback.format_exception(*sys.exc_info()))]

    # Backport of http://hg.python.org/cpython/rev/d5af1b235dab. See #16241.
    # This can be removed when support for Python <= 2.7.3 is deprecated.
    def finish_response(self):
        try:
            if not self.result_is_file() or not self.sendfile():
                for data in self.result:
                    self.write(data)
            self.finish_content()
        finally:
            self.close()
class WSGIServer(simple_server.WSGIServer, object):
    """BaseHTTPServer that implements the Python WSGI protocol."""

    def __init__(self, *args, **kwargs):
        # An ``ipv6=True`` keyword (consumed here, not passed through)
        # switches the listening socket family before binding.
        wants_ipv6 = kwargs.pop('ipv6', False)
        if wants_ipv6:
            self.address_family = socket.AF_INET6
        super(WSGIServer, self).__init__(*args, **kwargs)

    def server_bind(self):
        """Bind the socket, then record the server name in the environ."""
        try:
            super(WSGIServer, self).server_bind()
        except Exception as e:
            # Re-wrap so callers only need to catch one exception type.
            raise WSGIServerException(e)
        self.setup_environ()
class WSGIRequestHandler(simple_server.WSGIRequestHandler, object):
    """Request handler that colorizes log output and mutes static-asset noise."""

    def __init__(self, *args, **kwargs):
        from django.conf import settings
        self.admin_static_prefix = urljoin(settings.STATIC_URL, 'admin/')
        # We set self.path to avoid crashes in log_message() on unsupported
        # requests (like "OPTIONS").
        self.path = ''
        self.style = color_style()
        super(WSGIRequestHandler, self).__init__(*args, **kwargs)

    def address_string(self):
        # Short-circuit parent method to not call socket.getfqdn
        return self.client_address[0]

    def log_message(self, format, *args):
        """Log a message to stderr, color-coded by HTTP status when known."""
        # Don't bother logging requests for admin images or the favicon.
        if (self.path.startswith(self.admin_static_prefix)
                or self.path == '/favicon.ico'):
            return
        msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)
        # Utilize terminal colors, if available.  Bug fix: log_error() also
        # funnels through here with arbitrary *args*, so only treat args[1]
        # as a status code when it exists -- the old code raised IndexError
        # on shorter args tuples.
        status = str(args[1]) if len(args) >= 2 else ''
        if status[:1] == '2':
            # Put 2XX first, since it should be the common case
            msg = self.style.HTTP_SUCCESS(msg)
        elif status[:1] == '1':
            msg = self.style.HTTP_INFO(msg)
        elif status == '304':
            msg = self.style.HTTP_NOT_MODIFIED(msg)
        elif status[:1] == '3':
            msg = self.style.HTTP_REDIRECT(msg)
        elif status == '404':
            msg = self.style.HTTP_NOT_FOUND(msg)
        elif status[:1] == '4':
            msg = self.style.HTTP_BAD_REQUEST(msg)
        else:
            # Any 5XX, or any other response
            msg = self.style.HTTP_SERVER_ERROR(msg)
        sys.stderr.write(msg)
def run(addr, port, wsgi_handler, ipv6=False, threading=False):
    """Serve *wsgi_handler* forever on (*addr*, *port*).

    When *threading* is true each request is handled in its own thread;
    *ipv6* selects an AF_INET6 listening socket.
    """
    if threading:
        # Mix ThreadingMixIn in dynamically so the non-threaded class
        # stays free of the mixin.
        server_cls = type(str('WSGIServer'),
                          (socketserver.ThreadingMixIn, WSGIServer), {})
    else:
        server_cls = WSGIServer
    server = server_cls((addr, port), WSGIRequestHandler, ipv6=ipv6)
    server.set_app(wsgi_handler)
    server.serve_forever()
"""
Classes for including text in a figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import math
import warnings
import contextlib
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, maxdict
from matplotlib import docstring
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch
from matplotlib.patches import FancyArrowPatch, Rectangle
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Affine2D, Bbox, Transform
from matplotlib.transforms import BboxBase, BboxTransformTo
from matplotlib.lines import Line2D
from matplotlib.path import Path
from matplotlib.artist import allow_rasterization
from matplotlib.backend_bases import RendererBase
from matplotlib.textpath import TextPath
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
@contextlib.contextmanager
def _wrap_text(textobj):
"""
Temporarily inserts newlines to the text if the wrap option is enabled.
"""
if textobj.get_wrap():
old_text = textobj.get_text()
try:
textobj.set_text(textobj._get_wrapped_text())
yield textobj
finally:
textobj.set_text(old_text)
else:
yield textobj
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
    """
    Return the text angle as float. The returned
    angle is between 0 and 360 deg.

    *rotation* may be 'horizontal' (0), 'vertical' (90), ``None`` (0),
    or a numeric value in degrees (normalized into [0, 360)).

    Raises ValueError for any other input.
    """
    try:
        angle = float(rotation)
    except (ValueError, TypeError):
        isString = isinstance(rotation, six.string_types)
        if ((isString and rotation == 'horizontal') or rotation is None):
            angle = 0.
        elif (isString and rotation == 'vertical'):
            angle = 90.
        else:
            # Bug fix: the old concatenated literal rendered as
            # "...'horizontal' 'vertical', numeric value orNone" --
            # missing separators ran the words together.
            raise ValueError("rotation is {0} expected either 'horizontal',"
                             " 'vertical', a numeric value, or"
                             " None".format(rotation))
    return angle % 360
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Text="""
========================== ================================================
Property Value
========================== ================================================
alpha float or None
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a
pad in points; if a boxstyle is supplied as
a string, then pad is instead a fraction
of the font size
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family ['serif' | 'sans-serif' | 'cursive' |
'fantasy' | 'monospace']
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties
instance
horizontalalignment or ha ['center' | 'right' | 'left']
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string e.g.,
['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
rotation [ angle in degrees 'vertical' | 'horizontal'
rotation_mode [ None | 'anchor']
size or fontsize [size in points | relative size e.g., 'smaller',
'x-large']
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
usetex [True | False | None]
variant ['normal' | 'small-caps']
verticalalignment or va ['center' | 'top' | 'bottom' | 'baseline' |
'center_baseline' ]
visible [True | False]
weight or fontweight ['normal' | 'bold' | 'heavy' | 'light' |
'ultrabold' | 'ultralight']
wrap [True | False]
x float
y float
zorder any number
========================== ===============================================
""")
# TODO : This function may move into the Text class as a method. As a
# matter of fact, the information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would be better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
    """
    Compute the bounding box of *text* before rotation.

    Unlike :meth:`matplotlib.text.Text.get_extents`, the box size is
    measured in the text's own (unrotated) frame; the returned origin is
    then mapped back into display coordinates.  Returns
    ``(x_box, y_box, w_box, h_box)``.
    """
    theta = np.deg2rad(text.get_rotation())
    unrotate = mtransforms.Affine2D().rotate(-theta)

    _, parts, d = text._get_layout(renderer)
    xs = []
    ys = []
    for _line, (w, h), x, y in parts:
        x0, y0 = unrotate.transform_point((x, y))
        y0 -= d  # measure from the baseline, not the descent line
        xs.extend((x0, x0 + w))
        ys.extend((y0, y0 + h))

    xt_box = min(xs)
    yt_box = min(ys)
    w_box = max(xs) - xt_box
    h_box = max(ys) - yt_box

    # Map the unrotated box origin back into display coordinates.
    x_box, y_box = mtransforms.Affine2D().rotate(theta).transform_point(
        (xt_box, yt_box))
    return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
_cached = maxdict(50)
    def __str__(self):
        # Debug representation: Text(x,y,'text').
        return "Text(%g,%g,%s)" % (self._x, self._y, repr(self._text))
    def __init__(self,
                 x=0, y=0, text='',
                 color=None,  # defaults to rc params
                 verticalalignment='baseline',
                 horizontalalignment='left',
                 multialignment=None,
                 fontproperties=None,  # defaults to FontProperties()
                 rotation=None,
                 linespacing=None,
                 rotation_mode=None,
                 usetex=None,  # defaults to rcParams['text.usetex']
                 wrap=False,
                 **kwargs
                 ):
        """
        Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
        with string *text*.

        Valid kwargs are
        %(Text)s
        """
        Artist.__init__(self)
        # Raw (possibly unitized) position; conversion is deferred to
        # get_unitless_position().
        self._x, self._y = x, y
        if color is None:
            color = rcParams['text.color']
        if fontproperties is None:
            fontproperties = FontProperties()
        elif is_string_like(fontproperties):
            # A bare string is interpreted as a fontconfig pattern.
            fontproperties = FontProperties(fontproperties)
        self.set_text(text)
        self.set_color(color)
        self.set_usetex(usetex)
        self.set_wrap(wrap)
        self._verticalalignment = verticalalignment
        self._horizontalalignment = horizontalalignment
        self._multialignment = multialignment
        self._rotation = rotation
        self._fontproperties = fontproperties
        self._bbox_patch = None  # a FancyBboxPatch instance
        self._renderer = None
        if linespacing is None:
            linespacing = 1.2  # Maybe use rcParam later.
        self._linespacing = linespacing
        self.set_rotation_mode(rotation_mode)
        # Remaining kwargs applied last so they can override the defaults
        # above; update() gives 'bbox' special treatment.
        self.update(kwargs)
def update(self, kwargs):
"""
Update properties from a dictionary.
"""
bbox = kwargs.pop('bbox', None)
super(Text, self).update(kwargs)
if bbox:
self.set_bbox(bbox) # depends on font properties
def __getstate__(self):
d = super(Text, self).__getstate__()
# remove the cached _renderer (if it exists)
d['_renderer'] = None
return d
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
if not self.get_visible() or self._renderer is None:
return False, {}
l, b, w, h = self.get_window_extent().bounds
r, t = l + w, b + h
x, y = mouseevent.x, mouseevent.y
inside = (l <= x <= r and b <= y <= t)
cattr = {}
# if the text has a surrounding patch, also check containment for it,
# and merge the results with the results for the text.
if self._bbox_patch:
patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)
inside = inside or patch_inside
cattr["bbox_patch"] = patch_cattr
return inside, cattr
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_unitless_position()
return self.get_transform().transform_point((x, y))
def _get_multialignment(self):
if self._multialignment is not None:
return self._multialignment
else:
return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def set_rotation_mode(self, m):
"""
set text rotation mode. If "anchor", the un-rotated text
will first aligned according to their *ha* and
*va*, and then will be rotated with the alignement
reference point as a origin. If None (default), the text will be
rotated first then will be aligned.
"""
if m is None or m in ["anchor", "default"]:
self._rotation_mode = m
else:
raise ValueError("Unknown rotation_mode : %s" % repr(m))
self.stale = True
def get_rotation_mode(self):
"get text rotation mode"
return self._rotation_mode
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
self.stale = True
    def _get_layout(self, renderer):
        """
        return the extent (bbox) of the text together with
        multiple-alignment information. Note that it returns an extent
        of a rotated text when necessary.

        Returns ``(bbox, [(line, (w, h), x, y), ...], descent)`` where the
        per-line offsets are in display coordinates relative to the
        anchor point.
        """
        # Layouts are memoized on the full property tuple; any property
        # change produces a fresh cache key.
        key = self.get_prop_tup(renderer=renderer)
        if key in self._cached:
            return self._cached[key]
        horizLayout = []  # NOTE: rebound to an ndarray a few lines below
        thisx, thisy = 0.0, 0.0
        xmin, ymin = 0.0, 0.0
        width, height = 0.0, 0.0
        lines = self.get_text().split('\n')
        whs = np.zeros((len(lines), 2))
        horizLayout = np.zeros((len(lines), 4))
        # Find full vertical extent of font,
        # including ascenders and descenders:
        tmp, lp_h, lp_bl = renderer.get_text_width_height_descent('lp',
                                                         self._fontproperties,
                                                         ismath=False)
        offsety = (lp_h - lp_bl) * self._linespacing
        baseline = 0
        # Lay out each line top-to-bottom, accumulating per-line
        # (x, y, w, h) in the unrotated frame.
        for i, line in enumerate(lines):
            clean_line, ismath = self.is_math_text(line, self.get_usetex())
            if clean_line:
                w, h, d = renderer.get_text_width_height_descent(clean_line,
                                                        self._fontproperties,
                                                        ismath=ismath)
            else:
                w, h, d = 0, 0, 0
            # For multiline text, increase the line spacing when the
            # text net-height(excluding baseline) is larger than that
            # of a "l" (e.g., use of superscripts), which seems
            # what TeX does.
            h = max(h, lp_h)
            d = max(d, lp_bl)
            whs[i] = w, h
            baseline = (h - d) - thisy
            thisy -= max(offsety, (h - d) * self._linespacing)
            horizLayout[i] = thisx, thisy, w, h
            thisy -= d
            width = max(width, w)
            descent = d  # descent of the *last* line is the text's descent
        ymin = horizLayout[-1][1]
        ymax = horizLayout[0][1] + horizLayout[0][3]
        height = ymax - ymin
        xmax = xmin + width
        # get the rotation matrix
        M = Affine2D().rotate_deg(self.get_rotation())
        offsetLayout = np.zeros((len(lines), 2))
        offsetLayout[:] = horizLayout[:, 0:2]
        # now offset the individual text lines within the box
        if len(lines) > 1:  # do the multiline aligment
            malign = self._get_multialignment()
            if malign == 'center':
                offsetLayout[:, 0] += width / 2.0 - horizLayout[:, 2] / 2.0
            elif malign == 'right':
                offsetLayout[:, 0] += width - horizLayout[:, 2]
        # the corners of the unrotated bounding box
        cornersHoriz = np.array(
            [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
            np.float_)
        cornersHoriz[:, 1] -= descent
        # now rotate the bbox
        cornersRotated = M.transform(cornersHoriz)
        txs = cornersRotated[:, 0]
        tys = cornersRotated[:, 1]
        # compute the bounds of the rotated box
        xmin, xmax = txs.min(), txs.max()
        ymin, ymax = tys.min(), tys.max()
        width = xmax - xmin
        height = ymax - ymin
        # Now move the box to the target position offset the display
        # bbox by alignment
        halign = self._horizontalalignment
        valign = self._verticalalignment
        rotation_mode = self.get_rotation_mode()
        if rotation_mode != "anchor":
            # compute the text location in display coords and the offsets
            # necessary to align the bbox with that location
            if halign == 'center':
                offsetx = (xmin + width / 2.0)
            elif halign == 'right':
                offsetx = (xmin + width)
            else:
                offsetx = xmin
            if valign == 'center':
                offsety = (ymin + height / 2.0)
            elif valign == 'top':
                offsety = (ymin + height)
            elif valign == 'baseline':
                offsety = (ymin + height) - baseline
            elif valign == 'center_baseline':
                offsety = ymin + height - baseline / 2.0
            else:
                offsety = ymin
        else:
            # "anchor" mode: align in the unrotated frame, then rotate the
            # anchor point itself.
            xmin1, ymin1 = cornersHoriz[0]
            xmax1, ymax1 = cornersHoriz[2]
            if halign == 'center':
                offsetx = (xmin1 + xmax1) / 2.0
            elif halign == 'right':
                offsetx = xmax1
            else:
                offsetx = xmin1
            if valign == 'center':
                offsety = (ymin1 + ymax1) / 2.0
            elif valign == 'top':
                offsety = ymax1
            elif valign == 'baseline':
                offsety = ymax1 - baseline
            elif valign == 'center_baseline':
                offsety = (ymin1 + ymax1 - baseline) / 2.0
            else:
                offsety = ymin1
        offsetx, offsety = M.transform_point((offsetx, offsety))
        xmin -= offsetx
        ymin -= offsety
        bbox = Bbox.from_bounds(xmin, ymin, width, height)
        # now rotate the positions around the first x,y position
        xys = M.transform(offsetLayout)
        xys -= (offsetx, offsety)
        xs, ys = xys[:, 0], xys[:, 1]
        ret = bbox, list(zip(lines, whs, xs, ys)), descent
        self._cached[key] = ret
        return ret
    def set_bbox(self, rectprops):
        """
        Draw a bounding box around self. rectprops are any settable
        properties for a FancyBboxPatch, e.g., facecolor='red', alpha=0.5.

          t.set_bbox(dict(facecolor='red', alpha=0.5))

        The default boxstyle is 'square'. The mutation
        scale of the FancyBboxPatch is set to the fontsize.

        ACCEPTS: FancyBboxPatch prop dict
        """
        if rectprops is not None:
            props = rectprops.copy()
            boxstyle = props.pop("boxstyle", None)
            pad = props.pop("pad", None)
            if boxstyle is None:
                boxstyle = "square"
                # With the default square style, 'pad' is given in points
                # and converted to a fraction of the font size below.
                if pad is None:
                    pad = 4  # points
                pad /= self.get_size()  # to fraction of font size
            else:
                # With an explicit boxstyle, 'pad' is already a fraction
                # of the font size.
                if pad is None:
                    pad = 0.3
            # boxstyle could be a callable or a string
            if is_string_like(boxstyle) and "pad" not in boxstyle:
                boxstyle += ",pad=%0.2f" % pad
            bbox_transmuter = props.pop("bbox_transmuter", None)
            self._bbox_patch = FancyBboxPatch(
                                    (0., 0.),
                                    1., 1.,
                                    boxstyle=boxstyle,
                                    bbox_transmuter=bbox_transmuter,
                                    transform=mtransforms.IdentityTransform(),
                                    **props)
        else:
            self._bbox_patch = None
        self._update_clip_properties()
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_unitless_position here, which refers to text
# position in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBboxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def _update_clip_properties(self):
clipprops = dict(clip_box=self.clipbox,
clip_path=self._clippath,
clip_on=self._clipon)
if self._bbox_patch:
bbox = self._bbox_patch.update(clipprops)
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
super(Text, self).set_clip_box(clipbox)
self._update_clip_properties()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
super(Text, self).set_clip_path(path, transform)
self._update_clip_properties()
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
When False artists will be visible out side of the axes which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
super(Text, self).set_clip_on(b)
self._update_clip_properties()
def get_wrap(self):
"""
Returns the wrapping state for the text.
"""
return self._wrap
def set_wrap(self, wrap):
"""
Sets the wrapping state for the text.
"""
self._wrap = wrap
    def _get_wrap_line_width(self):
        """
        Return the maximum line width (pixels) for wrapping text, based
        on the current position, rotation, and horizontal alignment
        relative to the parent figure's extent.
        """
        x0, y0 = self.get_transform().transform(self.get_position())
        figure_box = self.get_figure().get_window_extent()
        # Calculate available width based on text alignment
        alignment = self.get_horizontalalignment()
        # NOTE(review): mutating rotation_mode inside a width query looks
        # like an unintended side effect -- confirm before relying on it.
        self.set_rotation_mode('anchor')
        rotation = self.get_rotation()
        # Distance to the figure edge along the text direction and its
        # opposite direction.
        left = self._get_dist_to_box(rotation, x0, y0, figure_box)
        right = self._get_dist_to_box(
            (180 + rotation) % 360,
            x0,
            y0,
            figure_box)
        if alignment == 'left':
            line_width = left
        elif alignment == 'right':
            line_width = right
        else:
            # Centered text can grow symmetrically by the smaller side.
            line_width = 2 * min(left, right)
        return line_width
def _get_dist_to_box(self, rotation, x0, y0, figure_box):
"""
Returns the distance from the given points, to the boundaries
of a rotated box in pixels.
"""
if rotation > 270:
quad = rotation - 270
h1 = y0 / math.cos(math.radians(quad))
h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))
elif rotation > 180:
quad = rotation - 180
h1 = x0 / math.cos(math.radians(quad))
h2 = y0 / math.cos(math.radians(90 - quad))
elif rotation > 90:
quad = rotation - 90
h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))
h2 = x0 / math.cos(math.radians(90 - quad))
else:
h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))
h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))
return min(h1, h2)
def _get_rendered_text_width(self, text):
"""
Returns the width of a given text string, in pixels.
"""
w, h, d = self._renderer.get_text_width_height_descent(
text,
self.get_fontproperties(),
False)
return math.ceil(w)
    def _get_wrapped_text(self):
        """
        Return a copy of the text with new lines added, so that
        the text is wrapped relative to the parent figure.
        """
        # Not fit to handle breaking up latex syntax correctly, so
        # ignore latex for now.
        if self.get_usetex():
            return self.get_text()
        # Build the line incrementally, for a more accurate measure of length
        line_width = self._get_wrap_line_width()
        wrapped_str = ""
        line = ""
        for word in self.get_text().split(' '):
            # New lines in the user's text need to force a split, so that it's
            # not using the longest current line width in the line being built
            sub_words = word.split('\n')
            for i in range(len(sub_words)):
                # NOTE: the candidate width includes a leading space even
                # when `line` is empty -- presumably a deliberate slack;
                # confirm before changing.
                current_width = self._get_rendered_text_width(
                    line + ' ' + sub_words[i])
                # Split long lines, and each newline found in the current word
                # (i > 0 means sub_words[i] followed an embedded '\n').
                if current_width > line_width or i > 0:
                    wrapped_str += line + '\n'
                    line = ""
                if line == "":
                    line = sub_words[i]
                else:
                    line += ' ' + sub_words[i]
        return wrapped_str + line
    @allow_rasterization
    def draw(self, renderer):
        """
        Draws the :class:`Text` object to the given *renderer*.

        Invisible or empty text is skipped entirely.  When wrapping is
        enabled the text is temporarily rewrapped for the duration of the
        draw.
        """
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return
        if self.get_text().strip() == '':
            return
        renderer.open_group('text', self.get_gid())
        with _wrap_text(self) as textobj:
            bbox, info, descent = textobj._get_layout(renderer)
            trans = textobj.get_transform()
            # don't use textobj.get_position here, which refers to text
            # position in Text, and dash position in TextWithDash:
            posx = float(textobj.convert_xunits(textobj._x))
            posy = float(textobj.convert_yunits(textobj._y))
            if not np.isfinite(posx) or not np.isfinite(posy):
                raise ValueError("posx and posy should be finite values")
            posx, posy = trans.transform_point((posx, posy))
            canvasw, canvash = renderer.get_canvas_width_height()
            # draw the FancyBboxPatch
            if textobj._bbox_patch:
                textobj._draw_bbox(renderer, posx, posy)
            gc = renderer.new_gc()
            gc.set_foreground(textobj.get_color())
            gc.set_alpha(textobj.get_alpha())
            gc.set_url(textobj._url)
            textobj._set_gc_clip(gc)
            angle = textobj.get_rotation()
            # Each laid-out line carries its own offset from the anchor.
            for line, wh, x, y in info:
                # Pass the Text object through only for single-line text;
                # backends use it for whole-text metadata.
                mtext = textobj if len(info) == 1 else None
                x = x + posx
                y = y + posy
                if renderer.flipy():
                    # Renderer origin is top-left; flip the y coordinate.
                    y = canvash - y
                clean_line, ismath = textobj.is_math_text(line,
                                                          self.get_usetex())
                if textobj.get_path_effects():
                    from matplotlib.patheffects import PathEffectRenderer
                    textrenderer = PathEffectRenderer(
                        textobj.get_path_effects(), renderer)
                else:
                    textrenderer = renderer
                if textobj.get_usetex():
                    textrenderer.draw_tex(gc, x, y, clean_line,
                                          textobj._fontproperties, angle,
                                          mtext=mtext)
                else:
                    textrenderer.draw_text(gc, x, y, clean_line,
                                           textobj._fontproperties, angle,
                                           ismath=ismath, mtext=mtext)
            gc.restore()
        renderer.close_group('text')
        self.stale = False
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_unitless_position(self):
"Return the unitless position of the text as a tuple (*x*, *y*)"
# This will get the position with all unit information stripped away.
# This is here for convienience since it is done in several locations.
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
# This should return the same data (possible unitized) as was
# specified with 'set_x' and 'set_y'.
return self._x, self._y
def get_prop_tup(self, renderer=None):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (e.g., layouts) and
need to know if the text has changed.
"""
x, y = self.get_unitless_position()
return (x, y, self.get_text(), self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties),
self._rotation, self._rotation_mode,
self.figure.dpi, id(renderer or self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`getverticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible():
return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self.get_text().strip() == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx, ty, 0, 0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info, descent = self._get_layout(self._renderer)
x, y = self.get_unitless_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
To change the position of the bounding box.
ACCEPTS: any matplotlib color
"""
if self._bbox_patch is None:
self.set_bbox(dict(facecolor=color, edgecolor=color))
else:
self._bbox_patch.update(dict(facecolor=color))
self._update_clip_properties()
self.stale = True
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
self.stale = True
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._horizontalalignment = align
self.stale = True
    def set_ma(self, align):
        """alias for set_multialignment"""
        self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multiple lines layout. The layout of the
bounding box of all the lines is determined bu the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._multialignment = align
self.stale = True
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
self.stale = True
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' |
'monospace' ]
"""
self._fontproperties.set_family(fontname)
self.stale = True
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
self.stale = True
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
self.stale = True
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [size in points | 'xx-small' | 'x-small' | 'small' |
'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
self.stale = True
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' |
'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' |
'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
self.stale = True
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [a numeric value in range 0-1000 | 'ultra-condensed' |
'extra-condensed' | 'condensed' | 'semi-condensed' |
'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' |
'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
self.stale = True
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
self.stale = True
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' %
str(legal))
self._verticalalignment = align
self.stale = True
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
self.stale = True
@staticmethod
def is_math_text(s, usetex=None):
"""
Returns a cleaned string and a boolean flag.
The flag indicates if the given string *s* contains any mathtext,
determined by counting unescaped dollar signs. If no mathtext
is present, the cleaned string has its dollar signs unescaped.
If usetex is on, the flag always has the value "TeX".
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
if usetex is None:
usetex = rcParams['text.usetex']
if usetex:
if s == ' ':
s = r'\ '
return s, 'TeX'
if cbook.is_math_text(s):
return s, True
else:
return s.replace(r'\$', '$'), False
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
self.stale = True
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
def set_usetex(self, usetex):
"""
Set this `Text` object to render using TeX (or not).
If `None` is given, the option will be reset to use the value of
`rcParams['text.usetex']`
"""
if usetex is None:
self._usetex = rcParams['text.usetex']
else:
self._usetex = bool(usetex)
self.stale = True
def get_usetex(self):
"""
Return whether this `Text` object will render using TeX.
If the user has not manually set this value, it will default to
the value of `rcParams['text.usetex']`
"""
if self._usetex is None:
return rcParams['text.usetex']
else:
return self._usetex
# Register Text's kwarg documentation so other docstrings can pull it in
# via %(Text)s interpolation, then dedent/interpolate Text.__init__'s own
# docstring.
docstring.interpd.update(Text=artist.kwdoc(Text))
docstring.dedent_interpd(Text.__init__)
class TextWithDash(Text):
    """
    This is basically a :class:`~matplotlib.text.Text` with a dash
    (drawn with a :class:`~matplotlib.lines.Line2D`) before/after
    it. It is intended to be a drop-in replacement for
    :class:`~matplotlib.text.Text`, and should behave identically to
    it when *dashlength* = 0.0.

    The dash always comes between the point specified by
    :meth:`~matplotlib.text.Text.set_position` and the text. When a
    dash exists, the text alignment arguments (*horizontalalignment*,
    *verticalalignment*) are ignored.

    *dashlength* is the length of the dash in canvas units.
    (default = 0.0).

    *dashdirection* is one of 0 or 1, where 0 draws the dash after the
    text and 1 before. (default = 0).

    *dashrotation* specifies the rotation of the dash, and should
    generally stay *None*. In this case
    :meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
    :meth:`~matplotlib.text.Text.get_rotation`. (i.e., the dash takes
    its rotation from the text's rotation). Because the text center is
    projected onto the dash, major deviations in the rotation cause
    what may be considered visually unappealing results.
    (default = *None*)

    *dashpad* is a padding length to add (or subtract) space
    between the text and the dash, in canvas units.
    (default = 3)

    *dashpush* "pushes" the dash and text away from the point
    specified by :meth:`~matplotlib.text.Text.set_position` by the
    amount in canvas units. (default = 0)

    .. note::

        The alignment of the two objects is based on the bounding box
        of the :class:`~matplotlib.text.Text`, as obtained by
        :meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
        turn, appears to depend on the font metrics as given by the
        rendering backend. Hence the quality of the "centering" of the
        label text with respect to the dash varies depending on the
        backend used.

    .. note::

        I'm not sure that I got the
        :meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
        or whether that's sufficient for providing the object bounding
        box.
    """
    __name__ = 'textwithdash'

    def __str__(self):
        return "TextWithDash(%g,%g,%s)" % (self._x, self._y, repr(self._text))

    def __init__(self,
                 x=0, y=0, text='',
                 color=None,  # defaults to rc params
                 verticalalignment='center',
                 horizontalalignment='center',
                 multialignment=None,
                 fontproperties=None,  # defaults to FontProperties()
                 rotation=None,
                 linespacing=None,
                 dashlength=0.0,
                 dashdirection=0,
                 dashrotation=None,
                 dashpad=3,
                 dashpush=0,
                 ):
        """
        Create a TextWithDash at position (*x*, *y*) with string *text*.
        The dash-specific parameters are documented in the class
        docstring; the remaining parameters are forwarded to
        :class:`~matplotlib.text.Text`.
        """
        Text.__init__(self, x=x, y=y, text=text, color=color,
                      verticalalignment=verticalalignment,
                      horizontalalignment=horizontalalignment,
                      multialignment=multialignment,
                      fontproperties=fontproperties,
                      rotation=rotation,
                      linespacing=linespacing)

        # The position (x,y) values for text and dashline
        # are bogus as given in the instantiation; they will
        # be set correctly by update_coords() in draw()
        self.dashline = Line2D(xdata=(x, x),
                               ydata=(y, y),
                               color='k',
                               linestyle='-')

        self._dashx = float(x)
        self._dashy = float(y)
        self._dashlength = dashlength
        self._dashdirection = dashdirection
        self._dashrotation = dashrotation
        self._dashpad = dashpad
        self._dashpush = dashpush

        #self.set_bbox(dict(pad=0))

    def get_unitless_position(self):
        "Return the unitless position of the text as a tuple (*x*, *y*)"
        # This will get the position with all unit information stripped away.
        # This is here for convienience since it is done in several locations.
        # Note: based on the dash position (_dashx/_dashy), not Text's _x/_y.
        x = float(self.convert_xunits(self._dashx))
        y = float(self.convert_yunits(self._dashy))
        return x, y

    def get_position(self):
        "Return the position of the text as a tuple (*x*, *y*)"
        # This should return the same data (possibly unitized) as was
        # specified with set_x and set_y
        return self._dashx, self._dashy

    def get_prop_tup(self, renderer=None):
        """
        Return a hashable tuple of properties.

        Not intended to be human readable, but useful for backends who
        want to cache derived information about text (e.g., layouts) and
        need to know if the text has changed.
        """
        # Extend Text's property tuple with the dash parameters so a
        # change in any of them invalidates cached layouts.
        props = [p for p in Text.get_prop_tup(self, renderer=renderer)]
        props.extend([self._x, self._y, self._dashlength,
                      self._dashdirection, self._dashrotation, self._dashpad,
                      self._dashpush])
        return tuple(props)

    def draw(self, renderer):
        """
        Draw the :class:`TextWithDash` object to the given *renderer*.
        """
        # update_coords() positions both the text and the dash line.
        self.update_coords(renderer)
        Text.draw(self, renderer)
        if self.get_dashlength() > 0.0:
            self.dashline.draw(renderer)
        self.stale = False

    def update_coords(self, renderer):
        """
        Computes the actual *x*, *y* coordinates for text based on the
        input *x*, *y* and the *dashlength*. Since the rotation is
        with respect to the actual canvas's coordinates we need to map
        back and forth.
        """
        dashx, dashy = self.get_unitless_position()
        dashlength = self.get_dashlength()
        # Shortcircuit this process if we don't have a dash
        if dashlength == 0.0:
            self._x, self._y = dashx, dashy
            return

        dashrotation = self.get_dashrotation()
        dashdirection = self.get_dashdirection()
        dashpad = self.get_dashpad()
        dashpush = self.get_dashpush()

        angle = get_rotation(dashrotation)
        theta = np.pi * (angle / 180.0 + dashdirection - 1)
        cos_theta, sin_theta = np.cos(theta), np.sin(theta)

        transform = self.get_transform()

        # Compute the dash end points
        # The 'c' prefix is for canvas coordinates
        cxy = transform.transform_point((dashx, dashy))
        cd = np.array([cos_theta, sin_theta])
        c1 = cxy + dashpush * cd
        c2 = cxy + (dashpush + dashlength) * cd

        inverse = transform.inverted()
        (x1, y1) = inverse.transform_point(tuple(c1))
        (x2, y2) = inverse.transform_point(tuple(c2))
        self.dashline.set_data((x1, x2), (y1, y2))

        # We now need to extend this vector out to
        # the center of the text area.
        # The basic problem here is that we're "rotating"
        # two separate objects but want it to appear as
        # if they're rotated together.
        # This is made non-trivial because of the
        # interaction between text rotation and alignment -
        # text alignment is based on the bbox after rotation.
        # We reset/force both alignments to 'center'
        # so we can do something relatively reasonable.
        # There's probably a better way to do this by
        # embedding all this in the object's transformations,
        # but I don't grok the transformation stuff
        # well enough yet.
        we = Text.get_window_extent(self, renderer=renderer)
        w, h = we.width, we.height
        # Watch for zeros
        if sin_theta == 0.0:
            dx = w
            dy = 0.0
        elif cos_theta == 0.0:
            dx = 0.0
            dy = h
        else:
            tan_theta = sin_theta / cos_theta
            dx = w
            dy = w * tan_theta
            if dy > h or dy < -h:
                dy = h
                dx = h / tan_theta
        cwd = np.array([dx, dy]) / 2
        cwd *= 1 + dashpad / np.sqrt(np.dot(cwd, cwd))
        cw = c2 + (dashdirection * 2 - 1) * cwd

        newx, newy = inverse.transform_point(tuple(cw))
        self._x, self._y = newx, newy

        # Now set the window extent
        # I'm not at all sure this is the right way to do this.
        we = Text.get_window_extent(self, renderer=renderer)
        self._twd_window_extent = we.frozen()
        self._twd_window_extent.update_from_data_xy(np.array([c1]), False)

        # Finally, make text align center
        Text.set_horizontalalignment(self, 'center')
        Text.set_verticalalignment(self, 'center')

    def get_window_extent(self, renderer=None):
        '''
        Return a :class:`~matplotlib.transforms.Bbox` object bounding
        the text, in display units.

        In addition to being used internally, this is useful for
        specifying clickable regions in a png file on a web page.

        *renderer* defaults to the _renderer attribute of the text
        object. This is not assigned until the first execution of
        :meth:`draw`, so you must use this kwarg if you want
        to call :meth:`get_window_extent` prior to the first
        :meth:`draw`. For getting web page regions, it is
        simpler to call the method after saving the figure.
        '''
        # Recompute positions first; _twd_window_extent is set there.
        self.update_coords(renderer)
        if self.get_dashlength() == 0.0:
            return Text.get_window_extent(self, renderer=renderer)
        else:
            return self._twd_window_extent

    def get_dashlength(self):
        """
        Get the length of the dash.
        """
        return self._dashlength

    def set_dashlength(self, dl):
        """
        Set the length of the dash.

        ACCEPTS: float (canvas units)
        """
        self._dashlength = dl
        self.stale = True

    def get_dashdirection(self):
        """
        Get the direction dash. 1 is before the text and 0 is after.
        """
        return self._dashdirection

    def set_dashdirection(self, dd):
        """
        Set the direction of the dash following the text.
        1 is before the text and 0 is after. The default
        is 0, which is what you'd want for the typical
        case of ticks below and on the left of the figure.

        ACCEPTS: int (1 is before, 0 is after)
        """
        self._dashdirection = dd
        self.stale = True

    def get_dashrotation(self):
        """
        Get the rotation of the dash in degrees.
        """
        # None means "follow the text rotation".
        if self._dashrotation is None:
            return self.get_rotation()
        else:
            return self._dashrotation

    def set_dashrotation(self, dr):
        """
        Set the rotation of the dash, in degrees

        ACCEPTS: float (degrees)
        """
        self._dashrotation = dr
        self.stale = True

    def get_dashpad(self):
        """
        Get the extra spacing between the dash and the text, in canvas units.
        """
        return self._dashpad

    def set_dashpad(self, dp):
        """
        Set the "pad" of the TextWithDash, which is the extra spacing
        between the dash and the text, in canvas units.

        ACCEPTS: float (canvas units)
        """
        self._dashpad = dp
        self.stale = True

    def get_dashpush(self):
        """
        Get the extra spacing between the dash and the specified text
        position, in canvas units.
        """
        return self._dashpush

    def set_dashpush(self, dp):
        """
        Set the "push" of the TextWithDash, which
        is the extra spacing between the beginning
        of the dash and the specified position.

        ACCEPTS: float (canvas units)
        """
        self._dashpush = dp
        self.stale = True

    def set_position(self, xy):
        """
        Set the (*x*, *y*) position of the :class:`TextWithDash`.

        ACCEPTS: (x, y)
        """
        self.set_x(xy[0])
        self.set_y(xy[1])

    def set_x(self, x):
        """
        Set the *x* position of the :class:`TextWithDash`.

        ACCEPTS: float
        """
        self._dashx = float(x)
        self.stale = True

    def set_y(self, y):
        """
        Set the *y* position of the :class:`TextWithDash`.

        ACCEPTS: float
        """
        self._dashy = float(y)
        self.stale = True

    def set_transform(self, t):
        """
        Set the :class:`matplotlib.transforms.Transform` instance used
        by this artist.

        ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
        """
        # Keep the dash line's transform in sync with the text's.
        Text.set_transform(self, t)
        self.dashline.set_transform(t)
        self.stale = True

    def get_figure(self):
        'return the figure instance the artist belongs to'
        return self.figure

    def set_figure(self, fig):
        """
        Set the figure instance the artist belong to.

        ACCEPTS: a :class:`matplotlib.figure.Figure` instance
        """
        # Propagate the figure to the dash line as well.
        Text.set_figure(self, fig)
        self.dashline.set_figure(fig)
# Register TextWithDash's kwarg documentation for %(TextWithDash)s
# interpolation in other docstrings.
docstring.interpd.update(TextWithDash=artist.kwdoc(TextWithDash))
class OffsetFrom(object):
    """Callable helper class for working with `Annotation`."""

    def __init__(self, artist, ref_coord, unit="points"):
        '''
        Parameters
        ----------
        artist : `Artist`, `BboxBase`, or `Transform`
            The object to compute the offset from.

        ref_coord : length 2 sequence
            If `artist` is an `Artist` or `BboxBase`, this values is
            the location to of the offset origin in fractions of the
            `artist` bounding box.

            If `artist` is a transform, the offset origin is the
            transform applied to this value.

        unit : {'points, 'pixels'}
            The screen units to use (pixels or points) for the offset
            input.
        '''
        self._artist = artist
        self._ref_coord = ref_coord
        self.set_unit(unit)

    def set_unit(self, unit):
        '''
        The unit for input to the transform used by ``__call__``

        Parameters
        ----------
        unit : {'points', 'pixels'}
        '''
        if unit not in ("points", "pixels"):
            raise ValueError("'unit' must be one of [ 'points' | 'pixels' ]")
        self._unit = unit

    def get_unit(self):
        'The unit for input to the transform used by ``__call__``'
        return self._unit

    def _get_scale(self, renderer):
        # Scale factor converting one input unit into pixels.
        if self.get_unit() == "pixels":
            return 1.
        return renderer.points_to_pixels(1.)

    def __call__(self, renderer):
        '''
        Return the offset transform.

        Parameters
        ----------
        renderer : `RendererBase`
            The renderer to use to compute the offset

        Returns
        -------
        transform : `Transform`
            Maps (x, y) in pixel or point units to screen units
            relative to the given artist.
        '''
        ref = self._artist
        # Resolve the offset origin (x, y) in display coordinates,
        # dispatching on the type of the reference object.
        if isinstance(ref, Artist):
            l, b, w, h = ref.get_window_extent(renderer).bounds
            xf, yf = self._ref_coord
            x, y = l + w * xf, b + h * yf
        elif isinstance(ref, BboxBase):
            l, b, w, h = ref.bounds
            xf, yf = self._ref_coord
            x, y = l + w * xf, b + h * yf
        elif isinstance(ref, Transform):
            x, y = ref.transform_point(self._ref_coord)
        else:
            raise RuntimeError("unknown type")

        sc = self._get_scale(renderer)
        return Affine2D().scale(sc, sc).translate(x, y)
class _AnnotationBase(object):
    """
    Shared machinery for annotation-like artists: stores the annotated
    point ``xy`` and its coordinate system ``xycoords``, resolves
    coordinate specifications to display space, and handles clipping
    and dragging of the annotation.
    """
    def __init__(self,
                 xy,
                 xycoords='data',
                 annotation_clip=None):
        self.xy = xy
        self.xycoords = xycoords
        self.set_annotation_clip(annotation_clip)

        self._draggable = None

    def _get_xy(self, renderer, x, y, s):
        """
        Convert (*x*, *y*), given in coordinate spec *s*, to display
        coordinates.  *s* may be a (xcoord, ycoord) pair applying
        separately to x and to y.
        """
        if isinstance(s, tuple):
            s1, s2 = s
        else:
            s1, s2 = s, s
        # Only 'data' coordinates carry unit information to strip.
        if s1 == 'data':
            x = float(self.convert_xunits(x))
        if s2 == 'data':
            y = float(self.convert_yunits(y))

        tr = self._get_xy_transform(renderer, s)
        x1, y1 = tr.transform_point((x, y))
        return x1, y1

    def _get_xy_transform(self, renderer, s):
        """
        Return the transform mapping coordinates given in spec *s* to
        display space.  *s* may be a pair of specs (blended x/y), a
        callable, an Artist, a BboxBase, a Transform, or one of the
        recognized coordinate strings ('data', 'polar',
        '<figure|axes|offset> <points|pixels|fontsize|fraction>', ...).
        """
        if isinstance(s, tuple):
            s1, s2 = s
            from matplotlib.transforms import blended_transform_factory
            tr1 = self._get_xy_transform(renderer, s1)
            tr2 = self._get_xy_transform(renderer, s2)
            tr = blended_transform_factory(tr1, tr2)
            return tr

        if six.callable(s):
            # A callable receives the renderer and must return a
            # bbox or a transform.
            tr = s(renderer)
            if isinstance(tr, BboxBase):
                return BboxTransformTo(tr)
            elif isinstance(tr, Transform):
                return tr
            else:
                raise RuntimeError("unknown return type ...")
        if isinstance(s, Artist):
            bbox = s.get_window_extent(renderer)
            return BboxTransformTo(bbox)
        elif isinstance(s, BboxBase):
            return BboxTransformTo(s)
        elif isinstance(s, Transform):
            return s
        elif not is_string_like(s):
            raise RuntimeError("unknown coordinate type : %s" % (s,))

        if s == 'data':
            return self.axes.transData
        elif s == 'polar':
            from matplotlib.projections import PolarAxes
            tr = PolarAxes.PolarTransform()
            trans = tr + self.axes.transData
            return trans

        # Remaining string forms are "<bbox_name> <unit>" pairs.
        s_ = s.split()
        if len(s_) != 2:
            raise ValueError("%s is not a recognized coordinate" % s)

        bbox0, xy0 = None, None

        bbox_name, unit = s_
        # if unit is offset-like
        if bbox_name == "figure":
            bbox0 = self.figure.bbox
        elif bbox_name == "axes":
            bbox0 = self.axes.bbox
        # elif bbox_name == "bbox":
        #     if bbox is None:
        #         raise RuntimeError("bbox is specified as a coordinate but "
        #                            "never set")
        #     bbox0 = self._get_bbox(renderer, bbox)

        if bbox0 is not None:
            xy0 = bbox0.bounds[:2]
        elif bbox_name == "offset":
            xy0 = self._get_ref_xy(renderer)

        if xy0 is not None:
            # reference x, y in display coordinate
            ref_x, ref_y = xy0
            from matplotlib.transforms import Affine2D
            if unit == "points":
                # dots per points
                dpp = self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp, dpp)
            elif unit == "pixels":
                tr = Affine2D()
            elif unit == "fontsize":
                fontsize = self.get_size()
                dpp = fontsize * self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp,
                                      dpp)
            elif unit == "fraction":
                w, h = bbox0.bounds[2:]
                tr = Affine2D().scale(w, h)
            else:
                raise ValueError("%s is not a recognized coordinate" % s)

            return tr.translate(ref_x, ref_y)

        else:
            raise ValueError("%s is not a recognized coordinate" % s)

    def _get_ref_xy(self, renderer):
        """
        return x, y (in display coordinate) that is to be used for a reference
        of any offset coordinate
        """

        if isinstance(self.xycoords, tuple):
            s1, s2 = self.xycoords
            if ((is_string_like(s1) and s1.split()[0] == "offset") or
               (is_string_like(s2) and s2.split()[0] == "offset")):
                raise ValueError("xycoords should not be an offset coordinate")
            x, y = self.xy
            x1, y1 = self._get_xy(renderer, x, y, s1)
            x2, y2 = self._get_xy(renderer, x, y, s2)
            # x comes from the x-coordinate spec (s1), y from the
            # y-coordinate spec (s2) -- this cross-pick is intentional.
            return x1, y2

        elif (is_string_like(self.xycoords) and
              self.xycoords.split()[0] == "offset"):
            raise ValueError("xycoords should not be an offset coordinate")

        else:
            x, y = self.xy
            return self._get_xy(renderer, x, y, self.xycoords)
        #raise RuntimeError("must be defined by the derived class")

    # def _get_bbox(self, renderer):
    #     if hasattr(bbox, "bounds"):
    #         return bbox
    #     elif hasattr(bbox, "get_window_extent"):
    #         bbox = bbox.get_window_extent()
    #         return bbox
    #     else:
    #         raise ValueError("A bbox instance is expected but got %s" %
    #                          str(bbox))

    def set_annotation_clip(self, b):
        """
        set *annotation_clip* attribute.

          * True: the annotation will only be drawn when self.xy is inside
                  the axes.
          * False: the annotation will always be drawn regardless of its
                   position.
          * None: the self.xy will be checked only if *xycoords* is "data"
        """
        self._annotation_clip = b

    def get_annotation_clip(self):
        """
        Return *annotation_clip* attribute.
        See :meth:`set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip

    def _get_position_xy(self, renderer):
        "Return the pixel position of the annotated point."
        x, y = self.xy
        return self._get_xy(renderer, x, y, self.xycoords)

    def _check_xy(self, renderer, xy_pixel):
        """
        given the xy pixel coordinate, check if the annotation need to
        be drawn.
        """
        b = self.get_annotation_clip()
        if b or (b is None and self.xycoords == "data"):
            # check if self.xy is inside the axes.
            if not self.axes.contains_point(xy_pixel):
                return False
        return True

    def draggable(self, state=None, use_blit=False):
        """
        Set the draggable state -- if state is

          * None : toggle the current state

          * True : turn draggable on

          * False : turn draggable off

        If draggable is on, you can drag the annotation on the canvas with
        the mouse. The DraggableAnnotation helper instance is returned if
        draggable is on.
        """
        from matplotlib.offsetbox import DraggableAnnotation
        is_draggable = self._draggable is not None

        # if state is None we'll toggle
        if state is None:
            state = not is_draggable

        if state:
            if self._draggable is None:
                self._draggable = DraggableAnnotation(self, use_blit)
        else:
            if self._draggable is not None:
                self._draggable.disconnect()
            self._draggable = None

        return self._draggable
class Annotation(Text, _AnnotationBase):
def __str__(self):
return "Annotation(%g,%g,%s)" % (self.xy[0],
self.xy[1],
repr(self._text))
    @docstring.dedent_interpd
    def __init__(self, s, xy,
                 xytext=None,
                 xycoords='data',
                 textcoords=None,
                 arrowprops=None,
                 annotation_clip=None,
                 **kwargs):
        '''
        Annotate the point ``xy`` with text ``s``.

        Additional kwargs are passed to `~matplotlib.text.Text`.

        Parameters
        ----------

        s : str
            The text of the annotation

        xy : iterable
            Length 2 sequence specifying the *(x,y)* point to annotate

        xytext : iterable, optional
            Length 2 sequence specifying the *(x,y)* to place the text
            at. If None, defaults to ``xy``.

        xycoords : str, Artist, Transform, callable or tuple, optional

            The coordinate system that ``xy`` is given in.

            For a `str` the allowed values are:

            =================   ===============================================
            Property            Description
            =================   ===============================================
            'figure points'     points from the lower left of the figure
            'figure pixels'     pixels from the lower left of the figure
            'figure fraction'   fraction of figure from lower left
            'axes points'       points from lower left corner of axes
            'axes pixels'       pixels from lower left corner of axes
            'axes fraction'     fraction of axes from lower left
            'data'              use the coordinate system of the object being
                                annotated (default)
            'polar'             *(theta,r)* if not native 'data' coordinates
            =================   ===============================================

            If a `~matplotlib.artist.Artist` object is passed in the units are
            fraction of its bounding box.

            If a `~matplotlib.transforms.Transform` object is passed
            in use that to transform ``xy`` to screen coordinates

            If a callable it must take a
            `~matplotlib.backend_bases.RendererBase` object as input
            and return a `~matplotlib.transforms.Transform` or
            `~matplotlib.transforms.Bbox` object

            If a `tuple` must be length 2 tuple of str, `Artist`,
            `Transform` or callable objects. The first transform is
            used for the *x* coordinate and the second for *y*.

            See :ref:`plotting-guide-annotation` for more details.

            Defaults to ``'data'``

        textcoords : str, `Artist`, `Transform`, callable or tuple, optional
            The coordinate system that ``xytext`` is given, which
            may be different than the coordinate system used for
            ``xy``.

            All ``xycoords`` values are valid as well as the following
            strings:

            =================   =========================================
            Property            Description
            =================   =========================================
            'offset points'     offset (in points) from the *xy* value
            'offset pixels'     offset (in pixels) from the *xy* value
            =================   =========================================

            defaults to the input of ``xycoords``

        arrowprops : dict, optional
            If not None, properties used to draw a
            `~matplotlib.patches.FancyArrowPatch` arrow between ``xy`` and
            ``xytext``.

            If `arrowprops` does not contain the key ``'arrowstyle'`` the
            allowed keys are:

            ==========   ======================================================
            Key          Description
            ==========   ======================================================
            width        the width of the arrow in points
            headwidth    the width of the base of the arrow head in points
            headlength   the length of the arrow head in points
            shrink       fraction of total length to 'shrink' from both ends
            ?            any key to :class:`matplotlib.patches.FancyArrowPatch`
            ==========   ======================================================

            If the `arrowprops` contains the key ``'arrowstyle'`` the
            above keys are forbidden. The allowed values of
            ``'arrowstyle'`` are:

            ============   =============================================
            Name           Attrs
            ============   =============================================
            ``'-'``        None
            ``'->'``       head_length=0.4,head_width=0.2
            ``'-['``       widthB=1.0,lengthB=0.2,angleB=None
            ``'|-|'``      widthA=1.0,widthB=1.0
            ``'-|>'``      head_length=0.4,head_width=0.2
            ``'<-'``       head_length=0.4,head_width=0.2
            ``'<->'``      head_length=0.4,head_width=0.2
            ``'<|-'``      head_length=0.4,head_width=0.2
            ``'<|-|>'``    head_length=0.4,head_width=0.2
            ``'fancy'``    head_length=0.4,head_width=0.4,tail_width=0.4
            ``'simple'``   head_length=0.5,head_width=0.5,tail_width=0.2
            ``'wedge'``    tail_width=0.3,shrink_factor=0.5
            ============   =============================================

            Valid keys for `~matplotlib.patches.FancyArrowPatch` are:

            ===============  ==================================================
            Key              Description
            ===============  ==================================================
            arrowstyle       the arrow style
            connectionstyle  the connection style
            relpos           default is (0.5, 0.5)
            patchA           default is bounding box of the text
            patchB           default is None
            shrinkA          default is 2 points
            shrinkB          default is 2 points
            mutation_scale   default is text size (in points)
            mutation_aspect  default is 1.
            ?                any key for :class:`matplotlib.patches.PathPatch`
            ===============  ==================================================

            Defaults to None

        annotation_clip : bool, optional
            Controls the visibility of the annotation when it goes
            outside the axes area.

            If `True`, the annotation will only be drawn when the
            ``xy`` is inside the axes. If `False`, the annotation will
            always be drawn regardless of its position.

            The default is `None`, which behaves as `True` only if
            *xycoords* is "data".

        Returns
        -------
        Annotation

        '''
        _AnnotationBase.__init__(self,
                                 xy,
                                 xycoords=xycoords,
                                 annotation_clip=annotation_clip)
        # warn about wonky input data
        if (xytext is None and
                textcoords is not None and
                textcoords != xycoords):
            warnings.warn("You have used the `textcoords` kwarg, but not "
                          "the `xytext` kwarg. This can lead to surprising "
                          "results.")

        # clean up textcoords and assign default
        if textcoords is None:
            textcoords = self.xycoords
        self._textcoords = textcoords

        # cleanup xytext defaults
        if xytext is None:
            xytext = self.xy
        x, y = xytext

        Text.__init__(self, x, y, s, **kwargs)

        self.arrowprops = arrowprops

        self.arrow = None

        if arrowprops:
            if "arrowstyle" in arrowprops:
                # Work on a copy so popping 'relpos' below does not
                # mutate the user-supplied dict (kept in self.arrowprops).
                arrowprops = self.arrowprops.copy()
                self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
            else:
                # modified YAArrow API to be used with FancyArrowPatch
                shapekeys = ('width', 'headwidth', 'headlength',
                             'shrink', 'frac')
                arrowprops = dict()
                for key, val in self.arrowprops.items():
                    if key not in shapekeys:
                        arrowprops[key] = val  # basic Patch properties
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
                                               **arrowprops)
        else:
            self.arrow_patch = None
def contains(self, event):
contains, tinfo = Text.contains(self, event)
if self.arrow is not None:
in_arrow, _ = self.arrow.contains(event)
contains = contains or in_arrow
if self.arrow_patch is not None:
in_patch, _ = self.arrow_patch.contains(event)
contains = contains or in_patch
return contains, tinfo
    @property
    def xyann(self):
        """The text position (an alias for :meth:`get_position`)."""
        return self.get_position()
    @xyann.setter
    def xyann(self, xytext):
        self.set_position(xytext)
    @property
    def anncoords(self):
        """The coordinate system of the text (the *textcoords* value)."""
        return self._textcoords
    @anncoords.setter
    def anncoords(self, coords):
        self._textcoords = coords
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def update_positions(self, renderer):
""""Update the pixel positions of the annotated point and the
text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xytext(renderer, xy_pixel)
    def _update_position_xytext(self, renderer, xy_pixel):
        """Update the pixel positions of the annotation text and the arrow
        patch.

        *xy_pixel* is the annotated point in display (pixel) coordinates;
        the text position is taken from ``self.anncoords``.
        """
        # generate transformation,
        self.set_transform(self._get_xy_transform(renderer, self.anncoords))

        # (ox0, oy0): arrow start (text side); (ox1, oy1): arrow end (point).
        ox0, oy0 = self._get_xy_display()
        ox1, oy1 = xy_pixel

        if self.arrowprops:
            x0, y0 = xy_pixel
            # Text bounding box edges/centers in display coords.
            l, b, w, h = Text.get_window_extent(self, renderer).bounds
            r = l + w
            t = b + h
            xc = 0.5 * (l + r)
            yc = 0.5 * (b + t)

            d = self.arrowprops.copy()
            ms = d.pop("mutation_scale", self.get_size())
            self.arrow_patch.set_mutation_scale(ms)

            if "arrowstyle" not in d:
                # Approximately simulate the YAArrow.
                # Pop its kwargs:
                shrink = d.pop('shrink', 0.0)
                width = d.pop('width', 4)
                headwidth = d.pop('headwidth', 12)
                # Ignore frac--it is useless.
                frac = d.pop('frac', None)
                if frac is not None:
                    warnings.warn(
                        "'frac' option in 'arrowprops' is no longer supported;"
                        " use 'headlength' to set the head length in points.")
                headlength = d.pop('headlength', 12)

                # NB: ms is in pts
                stylekw = dict(head_length=headlength / ms,
                               head_width=headwidth / ms,
                               tail_width=width / ms)

                self.arrow_patch.set_arrowstyle('simple', **stylekw)

                # using YAArrow style:
                # pick the x,y corner of the text bbox closest to point
                # annotated
                xpos = ((l, 0), (xc, 0.5), (r, 1))
                ypos = ((b, 0), (yc, 0.5), (t, 1))

                # dsu = decorate-sort-undecorate: sort candidates by distance.
                dsu = [(abs(val[0] - x0), val) for val in xpos]
                dsu.sort()
                _, (x, relposx) = dsu[0]

                dsu = [(abs(val[0] - y0), val) for val in ypos]
                dsu.sort()
                _, (y, relposy) = dsu[0]

                self._arrow_relpos = (relposx, relposy)

                # Convert the 'shrink' fraction of the arrow length to points.
                r = np.hypot((y - y0), (x - x0))
                shrink_pts = shrink * r / renderer.points_to_pixels(1)
                self.arrow_patch.shrinkA = shrink_pts
                self.arrow_patch.shrinkB = shrink_pts

            # adjust the starting point of the arrow relative to
            # the textbox.
            # TODO : Rotation needs to be accounted.
            relpos = self._arrow_relpos
            bbox = Text.get_window_extent(self, renderer)
            ox0 = bbox.x0 + bbox.width * relpos[0]
            oy0 = bbox.y0 + bbox.height * relpos[1]

            # The arrow will be drawn from (ox0, oy0) to (ox1,
            # oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrunk by shrinkA and shrinkB
            # (in points). If patch A is not set, self.bbox_patch
            # is used.
            self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))

            if "patchA" in d:
                self.arrow_patch.set_patchA(d.pop("patchA"))
            else:
                if self._bbox_patch:
                    self.arrow_patch.set_patchA(self._bbox_patch)
                else:
                    # No explicit patchA: clip against a padded rectangle
                    # around the text (or nothing, if the text is empty).
                    pad = renderer.points_to_pixels(4)
                    if self.get_text().strip() == "":
                        self.arrow_patch.set_patchA(None)
                        return

                    bbox = Text.get_window_extent(self, renderer)
                    l, b, w, h = bbox.bounds
                    l -= pad / 2.
                    b -= pad / 2.
                    w += pad
                    h += pad
                    r = Rectangle(xy=(l, b),
                                  width=w,
                                  height=h,
                                  )
                    r.set_transform(mtransforms.IdentityTransform())
                    r.set_clip_on(False)

                    self.arrow_patch.set_patchA(r)
    @allow_rasterization
    def draw(self, renderer):
        """
        Draw the :class:`Annotation` object to the given *renderer*.
        """
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return

        # Skip drawing entirely when the annotated point is clipped out.
        xy_pixel = self._get_position_xy(renderer)
        if not self._check_xy(renderer, xy_pixel):
            return

        self._update_position_xytext(renderer, xy_pixel)
        self.update_bbox_position_size(renderer)

        if self.arrow_patch is not None:   # FancyArrowPatch
            # Lazily attach the arrow to our figure before drawing it.
            if self.arrow_patch.figure is None and self.figure is not None:
                self.arrow_patch.figure = self.figure
            self.arrow_patch.draw(renderer)

        # Draw text, including FancyBboxPatch, after FancyArrowPatch.
        # Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
        Text.draw(self, renderer)
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text and arrow annotation, in display units.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure. The
*dpi* used defaults to self.figure.dpi; the renderer dpi is
irrelevant.
'''
if not self.get_visible():
return Bbox.unit()
arrow = self.arrow
arrow_patch = self.arrow_patch
text_bbox = Text.get_window_extent(self, renderer=renderer)
bboxes = [text_bbox]
if self.arrow is not None:
bboxes.append(arrow.get_window_extent(renderer=renderer))
elif self.arrow_patch is not None:
bboxes.append(arrow_patch.get_window_extent(renderer=renderer))
return Bbox.union(bboxes)
docstring.interpd.update(Annotation=Annotation.__init__.__doc__) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gateway tests - Testing various methods on a Big image when
renderingEngine.load() etc throws MissingPyramidException
Copyright 2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import omero
import pytest
class TestPyramid (object):
    """
    Tests Big-image code paths where the rendering engine's load() raises
    omero.ConcurrencyException (MissingPyramidException).

    NOTE: Python 2 syntax (`except E, e`, print statements) throughout.
    """

    @pytest.fixture(autouse=True)
    def setUp(self, author_testimg_generated):
        # Fixture-injected generated big image used by all tests below.
        self.image = author_testimg_generated

    def testThrowException(self):
        """ test that image._prepareRE() throws MissingPyramidException """
        # Swap in the mock factory so load() raises ConcurrencyException.
        self.image._conn.createRenderingEngine = lambda: MockRenderingEngine()
        try:
            self.image._prepareRE()
            assert False, "_prepareRE should have thrown an exception"
        except omero.ConcurrencyException, ce:
            print "Handling MissingPyramidException with backoff: %s secs" \
                % (ce.backOff/1000)

    def testPrepareRenderingEngine(self):
        """
        We need image._prepareRenderingEngine() to raise
        MissingPyramidException
        """
        self.image._conn.createRenderingEngine = lambda: MockRenderingEngine()
        try:
            self.image._prepareRenderingEngine()
            assert False, \
                "_prepareRenderingEngine() should have thrown an exception"
        except omero.ConcurrencyException, ce:
            print "Handling MissingPyramidException with backoff: %s secs" \
                % (ce.backOff/1000)

    def testGetChannels(self):
        """ Missing Pyramid shouldn't stop us from getting Channel Info """
        self.image._conn.createRenderingEngine = lambda: MockRenderingEngine()
        channels = self.image.getChannels()
        for c in channels:
            print c.getLabel()

    def testGetChannelsNoRe(self):
        """ With noRE, getChannels() shouldn't need rendering Engine """
        self.image._conn.createRenderingEngine = lambda: None
        channels = self.image.getChannels(noRE=True)
        assert len(channels) > 0
        for c in channels:
            print c.getLabel()

    def testGetRdefId(self):
        """ getRenderingDefId() silently returns None with Missing Pyramid """
        self.image._conn.createRenderingEngine = lambda: MockRenderingEngine()
        assert self.image.getRenderingDefId() is None
class MockRenderingEngine(object):
    """ Should throw on re.load() """

    # No-op stubs for the calls the gateway makes before load().
    def lookupPixels(self, id, ctx=None):
        pass

    def lookupRenderingDef(self, id, ctx=None):
        pass

    def loadRenderingDef(self, id, ctx=None):
        pass

    def resetDefaultSettings(self, save=True, ctx=None):
        pass

    def getRenderingDefId(self, ctx=None):
        return 1

    def load(self, ctx=None):
        # Simulate the server's MissingPyramidException for big images.
        e = omero.ConcurrencyException("MOCK MissingPyramidException")
        # backOff = 3 hours, 20 minutes, 45 seconds (in milliseconds)
        e.backOff = (3 * 60 * 60 * 1000) + (20 * 60 * 1000) + (45 * 1000)
        raise e
/*
MIT License http://www.opensource.org/licenses/mit-license.php
*/
"use strict";
const RuntimeGlobals = require("../RuntimeGlobals");
const RuntimeModule = require("../RuntimeModule");
const Template = require("../Template");
/** @typedef {import("../Chunk")} Chunk */
/** @typedef {import("../Chunk").ChunkChildOfTypeInOrder} ChunkChildOfTypeInOrder */
/** @typedef {import("../Compilation")} Compilation */
class ChunkPrefetchStartupRuntimeModule extends RuntimeModule {
	/**
	 * @param {ChunkChildOfTypeInOrder[]} startupChunks chunk ids to trigger when chunks are loaded
	 */
	constructor(startupChunks) {
		super("startup prefetch", RuntimeModule.STAGE_TRIGGER);
		/** @type {ChunkChildOfTypeInOrder[]} */
		this.startupChunks = startupChunks;
	}

	/**
	 * @returns {string | null} runtime code
	 */
	generate() {
		const compilation = /** @type {Compilation} */ (this.compilation);
		const chunk = /** @type {Chunk} */ (this.chunk);
		const { runtimeTemplate } = compilation;
		const statements = this.startupChunks.map(({ onChunks, chunks }) => {
			// Registering on this chunk's own id delays execution until this
			// chunk has been fully loaded.
			const ownIds = JSON.stringify(
				onChunks.filter((c) => c === chunk).map((c) => c.id)
			);
			// For a handful of chunks emit one prefetch call per chunk;
			// otherwise emit a compact array-map form.
			const body =
				chunks.size < 3
					? Array.from(
							chunks,
							(c) =>
								`${RuntimeGlobals.prefetchChunk}(${JSON.stringify(c.id)});`
						)
					: `${JSON.stringify(Array.from(chunks, (c) => c.id))}.map(${
							RuntimeGlobals.prefetchChunk
						});`;
			return `${RuntimeGlobals.onChunksLoaded}(0, ${ownIds}, ${runtimeTemplate.basicFunction(
				"",
				body
			)}, 5);`;
		});
		return Template.asString(statements);
	}
}

module.exports = ChunkPrefetchStartupRuntimeModule;
'use strict';
const { promisify } = require('util');
const { readFile } = require('fs');
const sleep = promisify(setTimeout);
const read = promisify(readFile);
const common = require('../common.js');
const {
createHook,
executionAsyncResource,
executionAsyncId,
AsyncLocalStorage,
} = require('async_hooks');
const { createServer } = require('http');
// Benchmark matrix: three continuation-local-storage strategies, each driven
// by callback-based and async/await-based request handlers.
const bench = common.createBenchmark(main, {
  type: ['async-resource', 'destroy', 'async-local-storage'],
  asyncMethod: ['callbacks', 'async'],
  path: '/',
  connections: 500,
  duration: 5,
  n: [1e6],
});
// CLS strategy 1: attach state to the current execution async resource under
// a private Symbol, and copy it to child resources in the init hook.
function buildCurrentResource(getServe) {
  const server = createServer(getServe(getCLS, setCLS));
  const hook = createHook({ init });
  const cls = Symbol('cls');
  hook.enable();

  return {
    server,
    close,
  };

  function getCLS() {
    const resource = executionAsyncResource();
    if (!resource[cls]) {
      return null;
    }
    return resource[cls].state;
  }

  function setCLS(state) {
    const resource = executionAsyncResource();
    if (!resource[cls]) {
      // First write on this resource: create the state box.
      resource[cls] = { state };
    } else {
      resource[cls].state = state;
    }
  }

  function init(asyncId, type, triggerAsyncId, resource) {
    // Propagate the state box from the creating resource to the new one, so
    // the whole async chain shares one box.
    const cr = executionAsyncResource();
    if (cr !== null) {
      resource[cls] = cr[cls];
    }
  }

  function close() {
    hook.disable();
    server.close();
  }
}
// CLS strategy 2: a Map keyed by asyncId, with entries copied in init and
// removed in the destroy hook to avoid unbounded growth.
function buildDestroy(getServe) {
  const transactions = new Map();
  const server = createServer(getServe(getCLS, setCLS));
  const hook = createHook({ init, destroy });
  hook.enable();

  return {
    server,
    close,
  };

  function getCLS() {
    const asyncId = executionAsyncId();
    return transactions.has(asyncId) ? transactions.get(asyncId) : null;
  }

  function setCLS(value) {
    const asyncId = executionAsyncId();
    transactions.set(asyncId, value);
  }

  function init(asyncId, type, triggerAsyncId, resource) {
    // Inherit the creating context's value for the new async id.
    transactions.set(asyncId, getCLS());
  }

  function destroy(asyncId) {
    transactions.delete(asyncId);
  }

  function close() {
    hook.disable();
    server.close();
  }
}
function buildAsyncLocalStorage(getServe) {
const asyncLocalStorage = new AsyncLocalStorage();
const server = createServer((req, res) => {
asyncLocalStorage.run({}, () => {
getServe(getCLS, setCLS)(req, res);
});
});
return {
server,
close,
};
function getCLS() {
const store = asyncLocalStorage.getStore();
if (store === undefined) {
return null;
}
return store.state;
}
function setCLS(state) {
const store = asyncLocalStorage.getStore();
if (store === undefined) {
return;
}
store.state = state;
}
function close() {
asyncLocalStorage.disable();
server.close();
}
}
// async/await handler: stores a random marker in CLS, crosses two await
// boundaries (timer + file read), then echoes the CLS value as JSON so the
// benchmark exercises context propagation across awaits.
function getServeAwait(getCLS, setCLS) {
  return async function serve(req, res) {
    setCLS(Math.random());
    await sleep(10);
    await read(__filename);
    if (res.destroyed) return;
    res.setHeader('content-type', 'application/json');
    res.end(JSON.stringify({ cls: getCLS() }));
  };
}
// Callback-style handler: same work as getServeAwait (timer + file read),
// but the CLS value must survive nested callbacks instead of awaits.
function getServeCallbacks(getCLS, setCLS) {
  return function serve(req, res) {
    setCLS(Math.random());
    setTimeout(() => {
      readFile(__filename, () => {
        if (res.destroyed) return;
        res.setHeader('content-type', 'application/json');
        res.end(JSON.stringify({ cls: getCLS() }));
      });
    }, 10);
  };
}
// Dispatch tables mapping benchmark parameter values to builder functions.
const types = {
  'async-resource': buildCurrentResource,
  'destroy': buildDestroy,
  'async-local-storage': buildAsyncLocalStorage,
};

const asyncMethods = {
  'callbacks': getServeCallbacks,
  'async': getServeAwait,
};
// Entry point: build the selected CLS variant with the selected handler
// flavour, drive it with the HTTP benchmarker, then tear everything down.
function main({ type, asyncMethod, connections, duration, path }) {
  const build = types[type];
  const makeHandler = asyncMethods[asyncMethod];
  const { server, close } = build(makeHandler);

  server.listen(common.PORT).on('listening', () => {
    const options = { path, connections, duration };
    bench.http(options, () => close());
  });
}
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
invenio.utils.serializers
-------------------------
Implements custom serializers.
"""
import marshal
from six.moves import cPickle as pickle
from zlib import compress, decompress
__all__ = ['ZlibMarshal',
'serialize_via_marshal',
'deserialize_via_marshal',
'serialize_via_pickle',
'deserialize_via_pickle']
class ZlibMarshal(object):

    """Serializer pairing :mod:`marshal` with zlib compression."""

    @staticmethod
    def dumps(obj):
        """Serialize *obj* via marshal and return the zlib-compressed bytes."""
        return compress(marshal.dumps(obj))

    @staticmethod
    def loads(astring):
        """Inverse of :meth:`dumps`: decompress *astring*, then unmarshal."""
        return marshal.loads(decompress(astring))

# Provides legacy API functions.
serialize_via_marshal = ZlibMarshal.dumps
deserialize_via_marshal = ZlibMarshal.loads
class ZlibPickle(object):

    """Serializer pairing :mod:`pickle` with zlib compression."""

    @staticmethod
    def dumps(obj):
        """Serialize *obj* via pickle and return the zlib-compressed bytes."""
        return compress(pickle.dumps(obj))

    @staticmethod
    def loads(astring):
        """Inverse of :meth:`dumps`: decompress *astring*, then unpickle."""
        return pickle.loads(decompress(astring))

# Provides legacy API functions.
serialize_via_pickle = ZlibPickle.dumps
deserialize_via_pickle = ZlibPickle.loads
# -*- coding: utf-8 -*-
"""
Setup Tool
"""
# Controller and function names as resolved by web2py's request object.
module = request.controller
resourcename = request.function

# Bail out early when the Setup module is disabled in deployment settings.
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
def index():
    """ Show the module index page (template provides all content) """
    return {}
# -----------------------------------------------------------------------------
def deployment():
    """
    RESTful CRUD controller for deployments and their server/instance
    components.

    Fix: the custom form previously listed "db_type" and "db_password"
    twice, rendering both fields twice; the duplicates are removed.
    """
    from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink

    #s3db.configure("setup_deployment", onvalidation=validate_deployment)
    crud_form = S3SQLCustomForm("name",
                                "distro",
                                "remote_user",
                                "secret_key",
                                "access_key",
                                "private_key",
                                "webserver_type",
                                "db_type",
                                "db_password",
                                "repo_url",
                                "template",
                                S3SQLInlineComponent("server",
                                                     label = T("Server Role"),
                                                     fields = ["role", "host_ip", "hostname"],
                                                     ),
                                S3SQLInlineComponent("instance",
                                                     label = T("Instance Type"),
                                                     fields = ["type", "url", "prepop_options"],
                                                     #filterby=dict(field = "type",
                                                     #options = ["prod", "demo"]
                                                     #),
                                                     multiple = False,
                                                     ),
                                )

    s3db.configure("setup_deployment", crud_form=crud_form)

    def prep(r):
        # Pre-processor: adjust field requires/insertability per request.
        if r.method in ("create", None):
            s3.scripts.append("/%s/static/scripts/S3/s3.setup.js" % appname)
        if r.interactive:
            if r.component and r.id:
                # Set up the prepop options according to the template
                prepop_options = s3db.setup_get_prepop_options(r.record.template)
                db.setup_instance.prepop_options.requires = IS_IN_SET(prepop_options, multiple=True)

                # No new servers once deployment is created
                s3db.configure("setup_server",
                               insertable = False
                               )

                # Check if no scheduler task is pending
                itable = db.setup_instance
                sctable = db.scheduler_task
                query = (itable.deployment_id == r.id) & \
                        ((sctable.status != "COMPLETED") & \
                        (sctable.status != "FAILED"))
                rows = db(query).select(itable.scheduler_id,
                                        join = itable.on(itable.scheduler_id == sctable.id)
                                        )
                if rows:
                    # Disable creation of new instances
                    s3db.configure("setup_instance",
                                   insertable = False
                                   )
                elif r.component.name == "instance":
                    if r.method in (None, "create"):
                        # Remove deployed instances from drop down
                        itable = db.setup_instance
                        sctable = db.scheduler_task
                        query = (itable.deployment_id == r.id) & \
                                (sctable.status == "COMPLETED")
                        rows = db(query).select(itable.type,
                                                join = itable.on(itable.scheduler_id == sctable.id)
                                                )
                        types = {1: "prod", 2: "test", 3: "demo", 4: "dev"}
                        for row in rows:
                            del types[row.type]
                        itable.type.requires = IS_IN_SET(types)
        return True
    s3.prep = prep

    def postp(r, output):
        # Post-processor: inject scheduler status / action buttons into output.
        if r.component is None:
            if r.method in (None, "read") and r.id:
                # get scheduler status for the last queued task
                itable = db.setup_instance
                sctable = db.scheduler_task
                query = (db.setup_instance.deployment_id == r.id)
                row = db(query).select(sctable.id,
                                       sctable.status,
                                       join = itable.on(itable.scheduler_id==sctable.id),
                                       orderby = itable.scheduler_id
                                       ).last()
                item_append = output["item"][0].append
                item_append(TR(TD(LABEL("Status"), _class="w2p_fl")))
                item_append(TR(TD(row.status)))
                if row.status == "FAILED":
                    # Surface the scheduler run's traceback and output inline
                    resource = s3db.resource("scheduler_run")
                    task = db(resource.table.task_id == row.id).select().first()
                    item_append(TR(TD(LABEL("Traceback"), _class="w2p_fl")))
                    item_append(TR(TD(task.traceback)))
                    item_append(TR(TD(LABEL("Output"), _class="w2p_fl")))
                    item_append(TR(TD(task.run_output)))
        elif r.component.name == "instance":
            if r.method in (None, "read"):
                # Per-row action buttons linking to the management controller
                s3.actions = [{"url": URL(c = module,
                                          f = "management",
                                          vars = {"instance": "[id]",
                                                  "type": "clean",
                                                  "deployment": r.id,
                                                  }
                                          ),
                               "_class": "action-btn",
                               "label": "Clean"
                               },
                              {"url": URL(c = module,
                                          f = "management",
                                          vars = {"instance": "[id]",
                                                  "type": "eden",
                                                  "deployment": r.id
                                                  }
                                          ),
                               "_class": "action-btn",
                               "label": "Upgrade Eden"
                               },
                              ]
        return output
    s3.postp = postp

    return s3_rest_controller(rheader=s3db.setup_rheader)
# -----------------------------------------------------------------------------
def management():
    """
    Queue a management task (clean / upgrade) for a deployed instance.

    Expects "instance", "deployment" and "type" in the request GET vars;
    always ends in a redirect with a flash/error message.
    """
    try:
        _id = get_vars["instance"]
        deployment_id = get_vars["deployment"]
        _type = get_vars["type"]
    except:
        # Any missing GET var -> back to the index with an error
        session.error = T("Record Not Found")
        redirect(URL(c="setup", f="index"))

    # Check if management task already running
    exists = s3db.setup_management_exists(_type, _id, deployment_id)
    if exists:
        current.session.error = T("A management task is running for the instance")
        redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))

    # Check if instance was successfully deployed
    ttable = s3db.scheduler_task
    itable = s3db.setup_instance
    query = (ttable.status == "COMPLETED") & \
            (itable.id == _id)
    success = db(query).select(itable.id,
                               join=ttable.on(ttable.id == itable.scheduler_id),
                               limitby=(0, 1)).first()
    if success:
        # add the task to scheduler
        current.s3task.schedule_task("setup_management",
                                     args = [_type, _id, deployment_id],
                                     timeout = 3600,
                                     repeats = 1,
                                     )
        current.session.flash = T("Task queued in scheduler")
        redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
    else:
        current.session.error = T("The instance was not successfully deployed")
        redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
# -----------------------------------------------------------------------------
def prepop_setting():
    """ AJAX endpoint: JSON prepop options for the posted template """
    if not request.ajax:
        # Non-AJAX requests get no content (None)
        return
    template_name = request.post_vars.get("template")
    options = s3db.setup_get_prepop_options(template_name)
    return json.dumps(options)
# -----------------------------------------------------------------------------
def refresh():
    """ Refresh a deployment's status and redirect with the outcome """
    try:
        record_id = request.args[0]
    except:
        # No record id in the URL
        current.session.error = T("Record Not Found")
        redirect(URL(c="setup", f="index"))

    result = s3db.setup_refresh(record_id)
    destination = URL(c="setup", f=result["f"], args=result["args"])
    if result["success"]:
        current.session.flash = result["msg"]
    else:
        current.session.error = result["msg"]
    redirect(destination)
# -----------------------------------------------------------------------------
def upgrade_status():
    """ AJAX endpoint: JSON upgrade status for the posted instance id """
    if not request.ajax:
        return
    status = s3db.setup_upgrade_status(request.post_vars.get("id"))
    # No status -> no content (None), matching the original behaviour
    return json.dumps(status) if status else None
# -*- coding: utf-8 -*-
"""
***************************************************************************
AlgorithmLocatorFilter.py
-------------------------
Date : May 2017
Copyright : (C) 2017 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'May 2017'
__copyright__ = '(C) 2017, Nyall Dawson'
from qgis.core import (QgsApplication,
QgsProcessingAlgorithm,
QgsProcessingFeatureBasedAlgorithm,
QgsLocatorFilter,
QgsLocatorResult,
QgsProcessing,
QgsWkbTypes,
QgsMapLayerType,
QgsFields,
QgsStringUtils)
from processing.gui.MessageBarProgress import MessageBarProgress
from processing.gui.MessageDialog import MessageDialog
from processing.gui.AlgorithmDialog import AlgorithmDialog
from processing.gui.AlgorithmExecutor import execute_in_place
from qgis.utils import iface
from processing.core.ProcessingConfig import ProcessingConfig
class AlgorithmLocatorFilter(QgsLocatorFilter):
    """Locator filter that finds Processing algorithms by name/tags and
    launches their dialog when triggered."""

    def __init__(self, parent=None):
        super(AlgorithmLocatorFilter, self).__init__(parent)

    def clone(self):
        return AlgorithmLocatorFilter()

    def name(self):
        return 'processing_alg'

    def displayName(self):
        return self.tr('Processing Algorithms')

    def priority(self):
        return QgsLocatorFilter.Low

    def prefix(self):
        # Typing "a <text>" in the locator restricts the search to this filter
        return 'a'

    def flags(self):
        return QgsLocatorFilter.FlagFast

    def fetchResults(self, string, context, feedback):
        # collect results in main thread, since this method is inexpensive and
        # accessing the processing registry is not thread safe
        for a in QgsApplication.processingRegistry().algorithms():
            if a.flags() & QgsProcessingAlgorithm.FlagHideFromToolbox:
                continue
            if not ProcessingConfig.getSetting(ProcessingConfig.SHOW_ALGORITHMS_KNOWN_ISSUES) and \
                    a.flags() & QgsProcessingAlgorithm.FlagKnownIssues:
                continue

            result = QgsLocatorResult()
            result.filter = self
            result.displayString = a.displayName()
            result.icon = a.icon()
            result.userData = a.id()
            result.score = 0

            # With an empty search string and an explicit prefix, list all
            if (context.usingPrefix and not string):
                self.resultFetched.emit(result)

            if not string:
                return

            # Score = fuzzy name match (50%) + tag/provider/group match (50%)
            string = string.lower()
            tagScore = 0
            tags = [*a.tags(), a.provider().name()]
            if a.group():
                tags.append(a.group())
            for t in tags:
                if string in t.lower():
                    tagScore = 1
                    break

            result.score = QgsStringUtils.fuzzyScore(result.displayString, string) * 0.5 + tagScore * 0.5
            if result.score > 0:
                self.resultFetched.emit(result)

    def triggerResult(self, result):
        # userData holds the algorithm id stored in fetchResults()
        alg = QgsApplication.processingRegistry().createAlgorithmById(result.userData)
        if alg:
            ok, message = alg.canExecute()
            if not ok:
                dlg = MessageDialog()
                dlg.setTitle(self.tr('Missing dependency'))
                dlg.setMessage(message)
                dlg.exec_()
                return
            dlg = alg.createCustomParametersWidget(parent=iface.mainWindow())
            if not dlg:
                dlg = AlgorithmDialog(alg, parent=iface.mainWindow())
            canvas = iface.mapCanvas()
            prevMapTool = canvas.mapTool()
            dlg.show()
            dlg.exec_()
            # Restore the map tool the dialog may have replaced
            if canvas.mapTool() != prevMapTool:
                try:
                    canvas.mapTool().reset()
                except:
                    pass
                canvas.setMapTool(prevMapTool)
class InPlaceAlgorithmLocatorFilter(QgsLocatorFilter):
    """Locator filter listing algorithms that can edit the selected features
    of the active vector layer in place."""

    def __init__(self, parent=None):
        super().__init__(parent)

    def clone(self):
        return InPlaceAlgorithmLocatorFilter()

    def name(self):
        return 'edit_features'

    def displayName(self):
        return self.tr('Edit Selected Features')

    def priority(self):
        return QgsLocatorFilter.Low

    def prefix(self):
        return 'ef'

    def flags(self):
        return QgsLocatorFilter.FlagFast

    def fetchResults(self, string, context, feedback):
        # collect results in main thread, since this method is inexpensive and
        # accessing the processing registry/current layer is not thread safe
        if iface.activeLayer() is None or iface.activeLayer().type() != QgsMapLayerType.VectorLayer:
            return

        for a in QgsApplication.processingRegistry().algorithms():
            # Only algorithms that support in-place edits of the active layer
            if not a.flags() & QgsProcessingAlgorithm.FlagSupportsInPlaceEdits:
                continue
            if not a.supportInPlaceEdit(iface.activeLayer()):
                continue

            result = QgsLocatorResult()
            result.filter = self
            result.displayString = a.displayName()
            result.icon = a.icon()
            result.userData = a.id()
            result.score = 0

            if (context.usingPrefix and not string):
                self.resultFetched.emit(result)

            if not string:
                return

            # Same scoring scheme as AlgorithmLocatorFilter
            string = string.lower()
            tagScore = 0
            tags = [*a.tags(), a.provider().name()]
            if a.group():
                tags.append(a.group())
            for t in tags:
                if string in t.lower():
                    tagScore = 1
                    break

            result.score = QgsStringUtils.fuzzyScore(result.displayString, string) * 0.5 + tagScore * 0.5
            if result.score > 0:
                self.resultFetched.emit(result)

    def triggerResult(self, result):
        config = {'IN_PLACE': True}
        alg = QgsApplication.processingRegistry().createAlgorithmById(result.userData, config)
        if alg:
            ok, message = alg.canExecute()
            if not ok:
                dlg = MessageDialog()
                dlg.setTitle(self.tr('Missing dependency'))
                dlg.setMessage(message)
                dlg.exec_()
                return

            in_place_input_parameter_name = 'INPUT'
            if hasattr(alg, 'inputParameterName'):
                in_place_input_parameter_name = alg.inputParameterName()

            # Show a dialog only if the algorithm needs extra parameters
            # besides the in-place input and the output
            if [d for d in alg.parameterDefinitions() if
                    d.name() not in (in_place_input_parameter_name, 'OUTPUT')]:
                dlg = alg.createCustomParametersWidget(parent=iface.mainWindow())
                if not dlg:
                    dlg = AlgorithmDialog(alg, True, parent=iface.mainWindow())
                canvas = iface.mapCanvas()
                prevMapTool = canvas.mapTool()
                dlg.show()
                dlg.exec_()
                # Restore the map tool the dialog may have replaced
                if canvas.mapTool() != prevMapTool:
                    try:
                        canvas.mapTool().reset()
                    except:
                        pass
                    canvas.setMapTool(prevMapTool)
            else:
                # No extra parameters: execute directly with progress feedback
                feedback = MessageBarProgress(algname=alg.displayName())
                parameters = {}
                execute_in_place(alg, parameters, feedback=feedback)
#!/usr/bin/python
# Python modules
import sys, os, shutil, glob, io
# Aion modules
from Aion.utils.graphics import *
from Aion.utils.data import *
from Aion.shared.constants import *
# Third-party libraries
from androguard.session import Session
from androguard.misc import AXMLPrinter
class Garfield():
""" Garfield is a lazy stimulation engine based on fuzzing via Monkey(runner) and Genymotion """
def __init__(self, pathToAPK, APKType="goodware"):
if not os.path.exists(pathToAPK):
prettyPrint("Could not find the APK \"%s\"" % pathToAPK, "warning")
return None
self.APKPath = pathToAPK
self.APK, self.DEX, self.VMAnalysis = None, None, None
self.activitiesInfo, self.servicesInfo, self.receiversInfo = {}, {}, {}
self.runnerScript = ""
self.APKType = APKType
    def analyzeAPK(self):
        """ Uses androguard to retrieve metadata about the app e.g. activities, permissions, intent filters, etc. """
        # Returns True on success, False on any analysis error.
        try:
            prettyPrint("Analyzing app")
            logEvent("Analyzing app: \"%s\"" % self.APKPath)
            # 1. Load the APK using androguard
            # NOTE(review): the file handle from open() is never closed here
            analysisSession = Session()
            analysisSession.add(self.APKPath, open(self.APKPath).read())
            # 2. Retrieve handles to APK and its dex code
            # (Python 2: dict.values() returns an indexable list)
            self.APK = analysisSession.analyzed_apk.values()[0]
            self.DEX = analysisSession.analyzed_dex.values()[0][0]
            self.VMAnalysis = analysisSession.analyzed_dex.values()[0][1]
            # 3. Retrieve information for each activity
            prettyPrint("Analyzing activities")
            self.activitiesInfo = analyzeActivities(self.APK, self.DEX)
            # 4. Do the same for services and broadcast receivers
            prettyPrint("Analyzing services")
            self.servicesInfo = analyzeServices(self.APK, self.DEX)
            prettyPrint("Analyzing broadcast receivers")
            self.receiversInfo = analyzeReceivers(self.APK, self.DEX)
        except Exception as e:
            prettyPrintError(e)
            return False

        prettyPrint("Success")
        return True
def generateRunnerScript(self, scriptPath="", runningTime=60):
"""Generates a python script to be run by Monkeyrunner"""
try:
# Check whether the APK has been analyzed first
if not self.APK:
prettyPrint("APK needs to be analyzed first", "warning")
return False
self.runnerScript = "%s/files/scripts/%s.py" % (getProjectDir(), getRandomAlphaNumeric()) if scriptPath == "" else scriptPath
print self.runnerScript
monkeyScript = open(self.runnerScript, "w")
# Preparation
monkeyScript.write("#!/usr/bin/python\n\n")
monkeyScript.write("from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice\n")
monkeyScript.write("import time, os, random\n\n")
monkeyScript.write("keyEvents = %s\n" % keyEvents)
monkeyScript.write("keyEventTypes = [MonkeyDevice.UP, MonkeyDevice.DOWN, MonkeyDevice.DOWN_AND_UP]\n")
monkeyScript.write("activityActions = %s\n" % activityActions)
monkeyScript.write("activities = %s\n" % self.activitiesInfo)
monkeyScript.write("services = %s\n" % self.servicesInfo)
monkeyScript.write("receivers = %s\n\n" % self.receiversInfo)
# Connect to the current device and install package
monkeyScript.write("print \"[*] Connecting to device.\"\n")
monkeyScript.write("device = MonkeyRunner.waitForConnection(\"[ANDROID_VIRTUAL_DEVICE_ID]\")\n")
monkeyScript.write("package = '%s'\n" % self.APK.package)
monkeyScript.write("print \"[*] Uninstalling package %s (if exists)\"\n" % self.APK.package)
monkeyScript.write("device.removePackage(package)\n")
monkeyScript.write("print \"[*] Installing package %s\"\n" % self.APK.package)
monkeyScript.write("device.installPackage('%s')\n" % self.APKPath)
# Configure introspy for hooking and monitoring
monkeyScript.write("print \"[*] Configuring Introspy\"\n")
monkeyScript.write("device.shell(\"echo 'GENERAL CRYPTO,KEY,HASH,FS,IPC,PREF,URI,WEBVIEW,SSL' > /data/data/%s/introspy.config\" % package)\n")
monkeyScript.write("device.shell(\"chmod 664 /data/data/%s/introspy.config\" % package)\n")
# Get a handle to a file to store the commands issued during runtime
monkeyScript.write("commandsFile = open(\"%s/files/scripts/%s_%s.command\", \"w\")\n" % (getProjectDir(), self.APK.package.replace('.','_'), getRandomAlphaNumeric()))
# Start app
#monkeyScript.write("mainActivity = '%s'\n" % APK.APK.get_main_activity())
#monkeyScript.write("device.startActivity(component=package + '/' + mainActivity)\n")
# Starting the fuzzing phase for [runningTime] seconds<F12>
monkeyScript.write("endTime = time.time() + %s\n" % runningTime)
monkeyScript.write("print \"[*] Fuzzing app for %s seconds\"\n" % runningTime)
monkeyScript.write("while time.time() < endTime:\n")
# 1. Choose a random component
monkeyScript.write("\tcomponentType = [\"activity\", \"service\", \"receiver\"][random.randint(0,2)]\n")
# 2.a. Activities
monkeyScript.write("\tif componentType == \"activity\":\n")
monkeyScript.write("\t\tcurrentActivity = activities.keys()[random.randint(0,len(activities)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Starting activity: %s\" % currentActivity\n")
monkeyScript.write("\t\tdevice.startActivity(component=package + '/' + currentActivity)\n")
monkeyScript.write("\t\tcommandsFile.write(\"device.startActivity('%s/%s')\\n\" % (package, currentActivity))\n")
# Choose an action
monkeyScript.write("\t\tcurrentAction = activityActions[random.randint(0,len(activityActions)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Current action: %s\" % currentAction\n")
# Touch in a random X,Y position on the screen
monkeyScript.write("\t\tif currentAction == \"touch\":\n")
monkeyScript.write("\t\t\twidth, height = int(device.getProperty(\"display.width\")), int(device.getProperty(\"display.height\"))\n")
monkeyScript.write("\t\t\tX, Y = random.randint(0, width-1), random.randint(0, height-1)\n")
monkeyScript.write("\t\t\tprint \"[*] Touching screen at (%s,%s)\" % (X,Y)\n")
monkeyScript.write("\t\t\teventType = keyEventTypes[random.randint(0,2)]\n")
monkeyScript.write("\t\t\tdevice.touch(X, Y, eventType)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.touch(%s, %s, %s)\\n\" % (X, Y, eventType))\n")
# Type something random
monkeyScript.write("\t\telif currentAction == \"type\":\n")
monkeyScript.write("\t\t\ttext = \"%s\"\n" % getRandomString(random.randint(0,100)))
monkeyScript.write("\t\t\tprint \"[*] Typing %s\" % text\n")
monkeyScript.write("\t\t\tdevice.type(text)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.type('%s')\\n\" % text)\n")
# Press a random key up/down
monkeyScript.write("\t\telif currentAction == \"press\":\n")
monkeyScript.write("\t\t\taction = keyEvents[random.randint(0, len(keyEvents)-1)]\n")
monkeyScript.write("\t\t\taType = keyEventTypes[random.randint(0,2)]\n")
monkeyScript.write("\t\t\tprint \"[*] Pressing: %s as %s\" % (action, aType)\n")
monkeyScript.write("\t\t\tdevice.press(action, aType)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.press(%s, %s)\\n\" % (action, aType)) \n")
# Randomly drag the screen
monkeyScript.write("\t\telif currentAction == \"drag\":\n")
monkeyScript.write("\t\t\twidth, height = int(device.getProperty(\"display.width\")), int(device.getProperty(\"display.height\"))\n")
monkeyScript.write("\t\t\tstart = (random.randint(0, width-1), random.randint(0, height-1))\n")
monkeyScript.write("\t\t\tend = (random.randint(0, width-1), random.randint(0, height-1))\n")
monkeyScript.write("\t\t\tprint \"[*] Dragging screen from %s to %s\" % (start, end)\n")
monkeyScript.write("\t\t\tdevice.drag(start, end)\n")
monkeyScript.write("\t\t\tcommandsFile.write(\"device.drag(%s, %s)\\n\" % (start, end))\n")
# 2.b.Services
monkeyScript.write("\telif componentType == \"service\":\n")
monkeyScript.write("\t\tcurrentService = services.keys()[random.randint(0, len(services)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Starting Service: %s\" % currentService\n")
monkeyScript.write("\t\tif \"intent-filters\" in services[currentService].keys():\n")
monkeyScript.write("\t\t\tif \"action\" in services[currentService][\"intent-filters\"].keys():\n")
monkeyScript.write("\t\t\t\tintentAction = services[currentService][\"intent-filters\"][\"action\"][0]\n")
monkeyScript.write("\t\t\t\tprint \"[*] Broadcasting intent: %s\" % intentAction\n")
monkeyScript.write("\t\t\t\tdevice.broadcastIntent(currentService, intentAction)\n")
monkeyScript.write("\t\t\t\tcommandsFile.write(\"device.broadcastIntent('%s', '%s')\\n\" % (currentService, intentAction)) \n")
# 2.c. Broadcast receivers
monkeyScript.write("\telif componentType == \"receiver\":\n")
monkeyScript.write("\t\tcurrentReceiver = receivers.keys()[random.randint(0, len(receivers)-1)]\n")
monkeyScript.write("\t\tprint \"[*] Starting Receiver: %s\" % currentReceiver\n")
monkeyScript.write("\t\tif \"intent-filters\" in receivers[currentReceiver].keys():\n")
monkeyScript.write("\t\t\tif \"action\" in receivers[currentReceiver][\"intent-filters\"].keys():\n")
monkeyScript.write("\t\t\t\tintentAction = receivers[currentReceiver][\"intent-filters\"][\"action\"][0]\n")
monkeyScript.write("\t\t\t\tprint \"[*] Broadcasting intent: %s\" % intentAction\n")
monkeyScript.write("\t\t\t\tdevice.broadcastIntent(currentReceiver, intentAction)\n")
monkeyScript.write("\t\t\t\tcommandsFile.write(\"device.broadcastIntent('%s', '%s')\\n\" % (currentReceiver, intentAction))\n")
# Sleep for 0.5 a second
monkeyScript.write("\ttime.sleep(1)\n")
# Uninstall package (Still need to fetch the introspy.db file from app directory before uninstallation)
#monkeyScript.write("device.removePackage(package)\n")
monkeyScript.write("commandsFile.close()")
except Exception as e:
prettyPrintError(e)
return False
return True
def analyzeActivities(APK, DEX):
    """
    Analyzes the passed APK and DEX objects to retrieve the elements within every activity.

    For each activity the result dict may contain:
      * "intent-filters": the activity's intent filters, as returned by the APK object
      * "elements": UI elements parsed from the activity's layout XML (when found)
    Returns {} if an unexpected error aborts the analysis.
    (APK/DEX are assumed to be androguard-style objects -- TODO confirm.)
    """
    try:
        info = {}
        for activity in APK.get_activities():
            info[activity] = {}
            # 1. Add the intent filters
            info[activity]["intent-filters"] = APK.get_intent_filters("activity", activity)
            # 2. Get all classes belonging to current activity
            allClasses, tempList = DEX.get_classes(), []
            # 2.a. Get all classes that inherit class "Activity" i.e. corresponding to an activity
            for c in allClasses:
                if c.get_superclassname().lower().find("activity") != -1:
                    tempList.append(c)
            # 2.b. Get classes belonging to CURRENT activity
            info[activity]["classes"] = []
            for c in tempList:
                # Class names look like "Lcom/foo/Bar;"; strip the L/; wrapper
                # and convert to dotted form before comparing.
                if c.get_name()[1:-1].replace('/', '.') == activity:
                    info[activity]["classes"].append(c)
                    if loggingON():
                        prettyPrint("Activity: %s, class: %s" % (activity, c), "debug")
            # 3. Get UI elements in every activity
            # 3.a. Identify the layout file's ID in the class' setContentView function call
            if len(info[activity]["classes"]) < 1:
                prettyPrint("Could not retrieve any Activity classes. Skipping", "warning")
                continue
            source = info[activity]["classes"][0].get_source()
            info[activity].pop("classes")  # TODO: Do we really need a reference to the class object?
            index1 = source.find("void onCreate(")
            index2 = source.find("setContentView(", index1)
            if index2 == -1:
                # Fixed: the original blindly added len("setContentView(") to -1
                # and scanned from a bogus offset when the call was absent.
                prettyPrint("Could not find a \"setContentView\" call. Skipping", "warning")
                continue
            index2 += len("setContentView(")
            layoutID = ""
            # Bounds check added so a trailing digit run cannot raise IndexError.
            while index2 < len(source) and str.isdigit(source[index2]):
                layoutID += source[index2]
                index2 += 1
            # layoutID retrieved?
            if len(layoutID) < 1:
                prettyPrint("Could not retrieve layout ID from activity class. Skipping", "warning")
                continue
            # 3.b. Look for the corresponding layout name in the R$layout file
            # Fixed: initialized up front -- previously a failed lookup left
            # layoutName unbound, and the resulting NameError aborted the whole
            # analysis (returning {}) instead of skipping one activity.
            layoutName = ""
            layoutClass = DEX.get_class(str("L%s/R$layout;" % APK.package.replace('.', '/')))
            if layoutClass:
                layoutContent = layoutClass.get_source()
                eIndex = layoutContent.find(layoutID)
                sIndex = layoutContent.rfind("int", 0, eIndex)
                layoutName = layoutContent[sIndex + len("int"):eIndex].replace(' ', '').replace('=', '')
            else:
                # No layout class was found: Check the public.xml file
                prettyPrint("Could not find a \"R$layout\" class. Checking \"public.xml\"", "warning")
                apkResources = APK.get_android_resources()
                publicResources = apkResources.get_public_resources(APK.package).split('\n')
                layoutIDHex = hex(int(layoutID))
                for line in publicResources:
                    if line.find(layoutIDHex) != -1:
                        sIndex = line.find("name=\"") + len("name=\"")
                        eIndex = line.find("\"", sIndex)
                        layoutName = line[sIndex:eIndex]
                        break  # First match wins; no need to scan the rest.
            # 3.c. Retrieve layout file and get XML object
            if len(layoutName) < 1:
                prettyPrint("Could not retrieve a layout file for \"%s\". Skipping" % activity, "warning")
            else:
                if loggingON():
                    prettyPrint("Retrieving UI elements from %s.xml" % layoutName, "debug")
                info[activity]["elements"] = _parseActivityLayout("res/layout/%s.xml" % layoutName, APK)
    except Exception as e:
        prettyPrintError(e)
        return {}
    return info
def analyzeServices(APK, DEX):
    """ Analyzes the passed APK and DEX objects to retrieve information about an app's services """
    try:
        # One entry per declared service, each carrying its intent filters.
        info = {
            svc: {"intent-filters": APK.get_intent_filters("service", svc)}
            for svc in APK.get_services()
        }
    except Exception as e:
        prettyPrintError(e)
        return {}
    return info
def analyzeReceivers(APK, DEX):
    """ Analyzes the passed APK and DEX objects to retrieve information about an app's broadcast receivers """
    try:
        info = {}
        for rcv in APK.get_receivers():
            # Each receiver maps to a dict holding its intent filters.
            filters = APK.get_intent_filters("receiver", rcv)
            info[rcv] = {"intent-filters": filters}
    except Exception as e:
        prettyPrintError(e)
        return {}
    return info
def _getNodeAttributes(node, optionalAttrs):
    """
    Build an attribute dict for an XML *node*.

    Always records the node's tag name under "type"; for every
    (xmlName, key) pair in *optionalAttrs* whose attribute is present
    on the node, stores the attribute's string value under *key*.
    """
    attr = {"type": node.nodeName}
    for xmlName, key in optionalAttrs:
        if xmlName in node.attributes.keys():
            attr[key] = node.attributes[xmlName].value
    return attr


def _parseActivityLayout(layoutFilePath, APK):
    """ Parses an XML layout file of an activity and returns information about the found elements """
    try:
        elements = {}
        # Read the contents of the layout file
        activityXML = AXMLPrinter(APK.get_file(layoutFilePath)).get_xml_obj()
        logEvent("Parsing the XML layout %s" % layoutFilePath)
        # Per-widget attribute maps: (XML attribute name, output key).
        buttonAttrs = [
            ("android:onClick", "onclick"),
            ("android:visibility", "visibility"),
            ("android:clickable", "clickable"),
            ("android:longClickable", "longclickable"),
        ]
        checkboxAttrs = [
            ("android:onClick", "onclick"),
            ("android:visibility", "visibility"),
            ("android:checked", "checked"),
        ]
        datePickerAttrs = [
            ("android:minDate", "mindate"),
            ("android:maxDate", "maxDate"),
        ]
        editTextAttrs = [
            ("android:editable", "editable"),
            ("android:inputType", "inputtype"),
        ]
        # Iterate over the elements and parse them
        for currentNode in activityXML.firstChild.childNodes:
            if currentNode.nodeName in ("Button", "ImageButton", "RadioButton"):
                # Handling buttons
                eID = currentNode.attributes["android:id"].value
                elements[eID] = _getNodeAttributes(currentNode, buttonAttrs)
            elif currentNode.nodeName in ("CheckBox", "CheckedTextView"):
                # Handling checkbox-like elements
                eID = currentNode.attributes["android:id"].value
                elements[eID] = _getNodeAttributes(currentNode, checkboxAttrs)
            elif currentNode.nodeName == "DatePicker":
                # Handling date pickers.
                # Fixed: the original stored the Attr *objects* for min/max
                # date instead of their string values (every other branch
                # stores .value).
                eID = currentNode.attributes["android:id"].value
                elements[eID] = _getNodeAttributes(currentNode, datePickerAttrs)
            elif currentNode.nodeName == "EditText":
                # Handling edit texts (the original stored Attr objects here, too).
                eID = currentNode.attributes["android:id"].value
                elements[eID] = _getNodeAttributes(currentNode, editTextAttrs)
            #elif currentNode.nodeName == "NumberPicker":
            elif currentNode.nodeName == "RadioGroup":
                # Handle radio group: record each child radio button.
                # Fixed: the original read the RadioGroup's own attributes for
                # every child, so all buttons collapsed onto a single (group)
                # entry instead of one entry per button.
                for button in currentNode.childNodes:
                    # Skip text/whitespace nodes and children without an id.
                    if getattr(button, "attributes", None) is None:
                        continue
                    if "android:id" not in button.attributes.keys():
                        continue
                    eID = button.attributes["android:id"].value
                    elements[eID] = _getNodeAttributes(button, buttonAttrs)
            #elif currentNode.nodeName == "Spinner":
    except Exception as e:
        prettyPrintError(e)
        return {}
    return elements
"""
Container page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise, EmptyPromise
from . import BASE_URL
from ..common.utils import click_css, confirm_prompt
from .utils import type_in_codemirror
class ContainerPage(PageObject):
    """
    Container page in Studio.

    Page object for the unit/container editing view of a single xblock,
    identified by ``locator``.
    """
    NAME_SELECTOR = '.page-header-title'
    NAME_INPUT_SELECTOR = '.page-header .xblock-field-input'
    NAME_FIELD_WRAPPER_SELECTOR = '.page-header .wrapper-xblock-field'
    ADD_MISSING_GROUPS_SELECTOR = '.notification-action-button[data-notification-action="add-missing-groups"]'

    def __init__(self, browser, locator):
        """Create a page object for the container identified by ``locator``."""
        super(ContainerPage, self).__init__(browser)
        self.locator = locator

    @property
    def url(self):
        """URL to the container page for an xblock."""
        return "{}/container/{}".format(BASE_URL, self.locator)

    @property
    def name(self):
        """Text of the page-header title, or None if it is not present."""
        titles = self.q(css=self.NAME_SELECTOR).text
        if titles:
            return titles[0]
        else:
            return None

    def is_browser_on_page(self):
        """Return True once the container view and all its xblocks have rendered."""

        def _xblock_count(class_name, request_token):
            # Number of xblock wrappers carrying ``class_name`` for this render pass.
            return len(self.q(css='{body_selector} .xblock.{class_name}[data-request-token="{request_token}"]'.format(
                body_selector=XBlockWrapper.BODY_SELECTOR, class_name=class_name, request_token=request_token
            )).results)

        def _is_finished_loading():
            is_done = False
            # Get the request token of the first xblock rendered on the page and assume it is correct.
            data_request_elements = self.q(css='[data-request-token]')
            if len(data_request_elements) > 0:
                request_token = data_request_elements.first.attrs('data-request-token')[0]
                # Then find the number of Studio xblock wrappers on the page with that request token.
                num_wrappers = len(self.q(css='{} [data-request-token="{}"]'.format(XBlockWrapper.BODY_SELECTOR, request_token)).results)
                # Wait until all components have been loaded and marked as either initialized or failed.
                # See:
                # - common/static/js/xblock/core.js which adds the class "xblock-initialized"
                #   at the end of initializeBlock.
                # - common/static/js/views/xblock.js which adds the class "xblock-initialization-failed"
                #   if the xblock threw an error while initializing.
                num_initialized_xblocks = _xblock_count('xblock-initialized', request_token)
                num_failed_xblocks = _xblock_count('xblock-initialization-failed', request_token)
                is_done = num_wrappers == (num_initialized_xblocks + num_failed_xblocks)
            # Promise check functions return (is_satisfied, result).
            return (is_done, is_done)

        # First make sure that an element with the view-container class is present on the page,
        # and then wait for the loading spinner to go away and all the xblocks to be initialized.
        return (
            self.q(css='body.view-container').present and
            self.q(css='div.ui-loading.is-hidden').present and
            Promise(_is_finished_loading, 'Finished rendering the xblock wrappers.').fulfill()
        )

    def wait_for_component_menu(self):
        """
        Waits until the menu bar of components is present on the page.
        """
        EmptyPromise(
            lambda: self.q(css='div.add-xblock-component').present,
            'Wait for the menu of components to be present'
        ).fulfill()

    @property
    def xblocks(self):
        """
        Return a list of xblocks loaded on the container page.
        """
        return self._get_xblocks()

    @property
    def inactive_xblocks(self):
        """
        Return a list of inactive xblocks loaded on the container page.
        """
        return self._get_xblocks(".is-inactive ")

    @property
    def active_xblocks(self):
        """
        Return a list of active xblocks loaded on the container page.
        """
        return self._get_xblocks(".is-active ")

    @property
    def publish_title(self):
        """
        Returns the title as displayed on the publishing sidebar component.
        """
        return self.q(css='.pub-status').first.text[0]

    @property
    def release_title(self):
        """
        Returns the title before the release date in the publishing sidebar component.
        """
        return self.q(css='.wrapper-release .title').first.text[0]

    @property
    def release_date(self):
        """
        Returns the release date of the unit (with ancestor inherited from), as displayed
        in the publishing sidebar component.
        """
        return self.q(css='.wrapper-release .copy').first.text[0]

    @property
    def last_saved_text(self):
        """
        Returns the last saved message as displayed in the publishing sidebar component.
        """
        return self.q(css='.wrapper-last-draft').first.text[0]

    @property
    def last_published_text(self):
        """
        Returns the last published message as displayed in the sidebar.
        """
        return self.q(css='.wrapper-last-publish').first.text[0]

    @property
    def currently_visible_to_students(self):
        """
        Returns True if the unit is marked as currently visible to students
        (meaning that a warning is being displayed).
        """
        warnings = self.q(css='.container-message .warning')
        if not warnings.is_present():
            return False
        # The warning element exists; only this exact caution text counts.
        warning_text = warnings.first.text[0]
        return warning_text == "Caution: The last published version of this unit is live. By publishing changes you will change the student experience."

    def shows_inherited_staff_lock(self, parent_type=None, parent_name=None):
        """
        Returns True if the unit inherits staff lock from a section or subsection.

        NOTE(review): ``parent_type`` and ``parent_name`` are accepted but
        unused here -- presumably kept for caller compatibility; confirm.
        """
        return self.q(css='.bit-publishing .wrapper-visibility .copy .inherited-from').visible

    @property
    def sidebar_visibility_message(self):
        """
        Returns the text within the sidebar visibility section.
        """
        return self.q(css='.bit-publishing .wrapper-visibility').first.text[0]

    @property
    def publish_action(self):
        """
        Returns the link for publishing a unit.
        """
        return self.q(css='.action-publish').first

    def discard_changes(self):
        """
        Discards draft changes (which will then re-render the page).
        """
        click_css(self, 'a.action-discard', 0, require_notification=False)
        confirm_prompt(self)
        self.wait_for_ajax()

    @property
    def is_staff_locked(self):
        """ Returns True if staff lock is currently enabled, False otherwise """
        # The lock state is conveyed by a checked-checkbox icon class.
        for attr in self.q(css='a.action-staff-lock>i').attrs('class'):
            if 'fa-check-square-o' in attr:
                return True
        return False

    def toggle_staff_lock(self, inherits_staff_lock=False):
        """
        Toggles "hide from students" which enables or disables a staff-only lock.
        Returns True if the lock is now enabled, else False.
        """
        was_locked_initially = self.is_staff_locked
        if not was_locked_initially:
            self.q(css='a.action-staff-lock').first.click()
        else:
            # Unlocking pops a confirmation prompt unless the lock is inherited.
            click_css(self, 'a.action-staff-lock', 0, require_notification=False)
            if not inherits_staff_lock:
                confirm_prompt(self)
        self.wait_for_ajax()
        return not was_locked_initially

    def view_published_version(self):
        """
        Clicks "View Live Version", which will open the published version of the unit page in the LMS.
        Switches the browser to the newly opened LMS window.
        """
        self.q(css='.button-view').first.click()
        self._switch_to_lms()

    def preview(self):
        """
        Clicks "Preview", which will open the draft version of the unit page in the LMS.
        Switches the browser to the newly opened LMS window.
        """
        self.q(css='.button-preview').first.click()
        self._switch_to_lms()

    def _switch_to_lms(self):
        """
        Assumes LMS has opened-- switches to that window.
        """
        browser_window_handles = self.browser.window_handles
        # Switch to browser window that shows HTML Unit in LMS
        # The last handle represents the latest windows opened
        self.browser.switch_to_window(browser_window_handles[-1])

    def _get_xblocks(self, prefix=""):
        # Build an XBlockWrapper for every wrapper element matching ``prefix``.
        return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
            lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results

    def duplicate(self, source_index):
        """
        Duplicate the item with index source_index (based on vertical placement in page).
        """
        click_css(self, 'a.duplicate-button', source_index)

    def delete(self, source_index):
        """
        Delete the item with index source_index (based on vertical placement in page).
        Only visible items are counted in the source_index.
        The index of the first item is 0.
        """
        # Click the delete button
        click_css(self, 'a.delete-button', source_index, require_notification=False)
        # Click the confirmation dialog button
        confirm_prompt(self)

    def edit(self):
        """
        Clicks the "edit" button for the first component on the page.
        """
        return _click_edit(self, '.edit-button', '.xblock-studio_view')

    def add_missing_groups(self):
        """
        Click the "add missing groups" link.
        Note that this does an ajax call.
        """
        self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).first.click()
        self.wait_for_ajax()
        # Wait until all xblocks rendered.
        self.wait_for_page()

    def missing_groups_button_present(self):
        """
        Returns True if the "add missing groups" button is present.
        """
        return self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).present

    def get_xblock_information_message(self):
        """
        Returns an information message for the container page.
        """
        return self.q(css=".xblock-message.information").first.text[0]

    def is_inline_editing_display_name(self):
        """
        Return whether this container's display name is in its editable form.
        """
        return "is-editing" in self.q(css=self.NAME_FIELD_WRAPPER_SELECTOR).first.attrs("class")[0]

    def get_category_tab_names(self, category_type):
        """
        Returns list of tab name in a category.

        Arguments:
            category_type (str): category type

        Returns:
            list
        """
        # Open the category's component picker first, then read its tab labels.
        self.q(css='.add-xblock-component-button[data-type={}]'.format(category_type)).first.click()
        return self.q(css='.{}-type-tabs>li>a'.format(category_type)).text

    def get_category_tab_components(self, category_type, tab_index):
        """
        Return list of component names in a tab in a category.

        Arguments:
            category_type (str): category type
            tab_index (int): tab index in a category

        Returns:
            list
        """
        css = '#tab{tab_index} button[data-category={category_type}] span'.format(
            tab_index=tab_index,
            category_type=category_type
        )
        return self.q(css=css).html
class XBlockWrapper(PageObject):
    """
    A PageObject representing a wrapper around an XBlock child shown on the Studio container page.
    """
    # This page object is addressed by locator, not URL.
    url = None
    BODY_SELECTOR = '.studio-xblock-wrapper'
    NAME_SELECTOR = '.xblock-display-name'
    VALIDATION_SELECTOR = '.xblock-message.validation'
    # Logical button name -> CSS selector within the editor UI.
    COMPONENT_BUTTONS = {
        'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
        'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
        'settings_tab': '.editor-modes .settings-button',
        'save_settings': '.action-save',
    }

    def __init__(self, browser, locator):
        """Create a wrapper for the xblock identified by ``locator``."""
        super(XBlockWrapper, self).__init__(browser)
        self.locator = locator

    def is_browser_on_page(self):
        """Return True when this xblock's wrapper element is present."""
        return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present

    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular `CourseOutlineChild` context
        """
        return '{}[data-locator="{}"] {}'.format(
            self.BODY_SELECTOR,
            self.locator,
            selector
        )

    @property
    def student_content(self):
        """
        Returns the text content of the xblock as displayed on the container page.
        """
        return self.q(css=self._bounded_selector('.xblock-student_view'))[0].text

    @property
    def author_content(self):
        """
        Returns the text content of the xblock as displayed on the container page.
        (For blocks which implement a distinct author_view).
        """
        return self.q(css=self._bounded_selector('.xblock-author_view'))[0].text

    @property
    def name(self):
        """Display name of this xblock, or None if not shown."""
        titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
        if titles:
            return titles[0]
        else:
            return None

    @property
    def children(self):
        """
        Will return any first-generation descendant xblocks of this xblock.
        """
        descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
        # Now remove any non-direct descendants.
        grandkids = []
        for descendant in descendants:
            grandkids.extend(descendant.children)
        grand_locators = [grandkid.locator for grandkid in grandkids]
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]

    @property
    def has_validation_message(self):
        """ Is a validation warning/error/message shown? """
        return self.q(css=self._bounded_selector(self.VALIDATION_SELECTOR)).present

    def _validation_paragraph(self, css_class):
        """ Helper method to return the <p> element of a validation warning """
        return self.q(css=self._bounded_selector('{} p.{}'.format(self.VALIDATION_SELECTOR, css_class)))

    @property
    def has_validation_warning(self):
        """ Is a validation warning shown? """
        return self._validation_paragraph('warning').present

    @property
    def has_validation_error(self):
        """ Is a validation error shown? """
        return self._validation_paragraph('error').present

    @property
    # pylint: disable=invalid-name
    def has_validation_not_configured_warning(self):
        """ Is a validation "not configured" message shown? """
        return self._validation_paragraph('not-configured').present

    @property
    def validation_warning_text(self):
        """ Get the text of the validation warning. """
        return self._validation_paragraph('warning').text[0]

    @property
    def validation_error_text(self):
        """ Get the text of the validation error. """
        return self._validation_paragraph('error').text[0]

    @property
    def validation_error_messages(self):
        """List of individual error message texts within the validation block."""
        return self.q(css=self._bounded_selector('{} .xblock-message-item.error'.format(self.VALIDATION_SELECTOR))).text

    @property
    # pylint: disable=invalid-name
    def validation_not_configured_warning_text(self):
        """ Get the text of the validation "not configured" message. """
        return self._validation_paragraph('not-configured').text[0]

    @property
    def preview_selector(self):
        """CSS selector matching either the student or the author preview."""
        return self._bounded_selector('.xblock-student_view,.xblock-author_view')

    @property
    def has_group_visibility_set(self):
        """Whether this xblock displays the group-visibility indicator."""
        return self.q(css=self._bounded_selector('.wrapper-xblock.has-group-visibility-set')).is_present()

    @property
    def has_duplicate_button(self):
        """
        Returns true if this xblock has a 'duplicate' button

        NOTE(review): this returns the query object itself rather than a
        bool -- confirm callers rely only on its truthiness.
        """
        return self.q(css=self._bounded_selector('a.duplicate-button'))

    @property
    def has_delete_button(self):
        """
        Returns true if this xblock has a 'delete' button

        NOTE(review): like ``has_duplicate_button``, returns the query
        object, not a bool.
        """
        return self.q(css=self._bounded_selector('a.delete-button'))

    @property
    def has_edit_visibility_button(self):
        """
        Returns true if this xblock has an 'edit visibility' button
        :return:
        """
        return self.q(css=self._bounded_selector('.visibility-button')).is_present()

    def go_to_container(self):
        """
        Open the container page linked to by this xblock, and return
        an initialized :class:`.ContainerPage` for that xblock.
        """
        return ContainerPage(self.browser, self.locator).visit()

    def edit(self):
        """
        Clicks the "edit" button for this xblock.
        """
        return _click_edit(self, '.edit-button', '.xblock-studio_view', self._bounded_selector)

    def edit_visibility(self):
        """
        Clicks the edit visibility button for this xblock.
        """
        return _click_edit(self, '.visibility-button', '.xblock-visibility_view', self._bounded_selector)

    def open_advanced_tab(self):
        """
        Click on Advanced Tab.
        """
        self._click_button('advanced_tab')

    def open_basic_tab(self):
        """
        Click on Basic Tab.
        """
        self._click_button('basic_tab')

    def open_settings_tab(self):
        """
        If editing, click on the "Settings" tab
        """
        self._click_button('settings_tab')

    def set_field_val(self, field_display_name, field_value):
        """
        If editing, set the value of a field.
        """
        # jQuery is used so the change event fires, as a user edit would.
        selector = '{} li.field label:contains("{}") + input'.format(self.editor_selector, field_display_name)
        script = "$(arguments[0]).val(arguments[1]).change();"
        self.browser.execute_script(script, selector, field_value)

    def reset_field_val(self, field_display_name):
        """
        If editing, reset the value of a field to its default.
        """
        scope = '{} li.field label:contains("{}")'.format(self.editor_selector, field_display_name)
        script = "$(arguments[0]).siblings('.setting-clear').click();"
        self.browser.execute_script(script, scope)

    def set_codemirror_text(self, text, index=0):
        """
        Set the text of a CodeMirror editor that is part of this xblock's settings.
        """
        type_in_codemirror(self, index, text, find_prefix='$("{}").find'.format(self.editor_selector))

    def set_license(self, license_type):
        """
        Uses the UI to set the course's license to the given license_type (str)
        """
        css_selector = (
            "ul.license-types li[data-license={license_type}] button"
        ).format(license_type=license_type)
        self.wait_for_element_presence(
            css_selector,
            "{license_type} button is present".format(license_type=license_type)
        )
        self.q(css=css_selector).click()

    def save_settings(self):
        """
        Click on settings Save button.
        """
        self._click_button('save_settings')

    @property
    def editor_selector(self):
        """CSS selector for the Studio editor view of this xblock."""
        return '.xblock-studio_view'

    def _click_button(self, button_name):
        """
        Click on a button as specified by `button_name`

        Arguments:
            button_name (str): button name
        """
        self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()
        self.wait_for_ajax()

    def go_to_group_configuration_page(self):
        """
        Go to the Group Configuration used by the component.
        """
        self.q(css=self._bounded_selector('span.message-text a')).first.click()

    def is_placeholder(self):
        """
        Checks to see if the XBlock is rendered as a placeholder without a preview.
        """
        return not self.q(css=self._bounded_selector('.wrapper-xblock article')).present

    @property
    def group_configuration_link_name(self):
        """
        Get Group Configuration name from link.
        """
        return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
def _click_edit(page_object, button_css, view_css, bounded_selector=lambda x: x):
    """
    Click on the first editing button found and wait for the Studio editor to be present.

    Fixed: the default was ``lambda(x): x`` -- Python-2-only tuple-parameter
    syntax that is a SyntaxError on Python 3. ``lambda x: x`` behaves
    identically on both.

    :param page_object: the PageObject whose query interface is used
    :param button_css: CSS selector of the button to click
    :param view_css: CSS selector of the editor view to wait for
    :param bounded_selector: optional function that scopes a selector
        (identity by default)
    :return: ``page_object``, for chaining
    """
    page_object.q(css=bounded_selector(button_css)).first.click()
    EmptyPromise(
        lambda: page_object.q(css=view_css).present,
        'Wait for the Studio editor to be present'
    ).fulfill()
    return page_object
/* tslint:disable */
/* eslint-disable */
// Auto-generated wasm-bindgen type declarations for the compiled wasm module.
// NOTE(review): presumably regenerated by wasm-bindgen on each build -- manual
// edits here would be lost; confirm before hand-maintaining.
// Linear memory exported by the wasm instance.
export const memory: WebAssembly.Memory;
// Exported wasm entry point (raw ABI: pointer/length/retptr integers).
export function greeting(a: number, b: number, c: number): void;
// wasm-bindgen runtime helpers for managing the shadow stack and heap.
export function __wbindgen_add_to_stack_pointer(a: number): number;
export function __wbindgen_malloc(a: number): number;
export function __wbindgen_realloc(a: number, b: number, c: number): number;
export function __wbindgen_free(a: number, b: number): void;
# Copyright 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from __future__ import print_function
import six
from .utils import Message, Spec
class Validator(object):
    """
    Walks a data dict against a scheme and records all violations.

    A scheme is either:
      * a dict mapping key -> (valid_types, required, item_scheme), or
      * a (key_type, value_scheme) pair for dicts with arbitrary keys.

    After ``run()``, ``passed`` holds the overall result and ``messages``
    the accumulated ``Message`` records (one per error/warning).

    Changes from the original:
      * ``six.iteritems(d)`` replaced with ``d.items()`` -- identical
        behavior on Python 2 and 3, and drops the six dependency here.
      * ``run()`` with no scheme now also sets ``passed`` so the attribute
        agrees with the returned value.
      * The key-type error message now reports the key's type (it
        previously formatted the *value*'s type into a "Key type" message).
    """

    def __init__(self, scheme=None):
        self._path = []  # current location in the data tree, used in messages
        self.scheme = scheme
        self.messages = []
        self.passed = False

    def run(self, data):
        """Validate *data* against the scheme; return True on success."""
        if not self.scheme:
            # Nothing to validate against: vacuous pass.
            self.passed = True
            return True
        self._reset()
        self._path.append('block')
        self._check(data, self.scheme)
        self._path.pop()
        return self.passed

    def _reset(self):
        """Clear all state left over from a previous run."""
        del self.messages[:]
        del self._path[:]
        self.passed = True

    def _check(self, data, scheme):
        """Dispatch on the scheme's shape; *data* must be a non-empty dict."""
        if not data or not isinstance(data, dict):
            self._error('Empty data or not a dict')
            return
        if isinstance(scheme, dict):
            self._check_dict(data, scheme)
        else:
            # Scheme is a (key_type, value_scheme) pair.
            self._check_var_key_dict(data, *scheme)

    def _check_var_key_dict(self, data, key_type, value_scheme):
        """Validate a dict whose keys are arbitrary values of ``key_type``."""
        for key, value in data.items():
            if not isinstance(key, key_type):
                self._error('Key type {!r} for {!r} not in valid types'.format(
                    type(key).__name__, key))
            if isinstance(value_scheme, Spec):
                self._check_dict(value, value_scheme)
            elif not isinstance(value, value_scheme):
                self._error('Value type {!r} for {!r} not in valid types'.format(
                    type(value).__name__, key))

    def _check_dict(self, data, scheme):
        """Validate a dict with a fixed set of known keys."""
        for key, (types_, required, item_scheme) in scheme.items():
            try:
                value = data[key]
            except KeyError:
                if required:
                    self._error('Missing required entry {!r}'.format(key))
                continue
            self._check_value(value, types_, item_scheme, label=key)
        # Unknown keys are tolerated but reported.
        for key in set(data).difference(scheme):
            self._warn('Ignoring extra key {!r}'.format(key))

    def _check_list(self, data, scheme, label):
        """Validate each item of a list, tracking its index in the path."""
        for i, item in enumerate(data):
            self._path.append('{}[{}]'.format(label, i))
            self._check(item, scheme)
            self._path.pop()

    def _check_value(self, value, types_, item_scheme, label):
        """Check a single value's type, then recurse into list/dict contents."""
        if not isinstance(value, types_):
            self._error('Value type {!r} for {!r} not in valid types'.format(
                type(value).__name__, label))
        if item_scheme:
            if isinstance(value, list):
                self._check_list(value, item_scheme, label)
            elif isinstance(value, dict):
                self._check(value, item_scheme)

    def _error(self, msg):
        # Any error marks the whole run as failed.
        self.messages.append(Message('.'.join(self._path), 'error', msg))
        self.passed = False

    def _warn(self, msg):
        # Warnings are recorded but do not fail the run.
        self.messages.append(Message('.'.join(self._path), 'warn', msg))
"""Support for device tracking via Xfinity Gateways."""
import logging
from requests.exceptions import RequestException
import voluptuous as vol
from xfinity_gateway import XfinityGateway
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Default LAN address of an Xfinity gateway.
DEFAULT_HOST = "10.0.0.1"
# Extend the device_tracker platform schema with an optional host entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string}
)
def get_scanner(hass, config):
    """Validate the configuration and return an Xfinity Gateway scanner.

    Returns None when the gateway cannot be reached or answers garbage.
    """
    gateway = XfinityGateway(config[DOMAIN][CONF_HOST])
    try:
        # Probe once so a bad host is reported at setup time.
        gateway.scan_devices()
    except (RequestException, ValueError):
        _LOGGER.error(
            "Error communicating with Xfinity Gateway. " "Check host: %s", gateway.host
        )
        return None
    return XfinityDeviceScanner(gateway)
class XfinityDeviceScanner(DeviceScanner):
    """Device scanner that queries an Xfinity Gateway."""

    def __init__(self, gateway):
        """Initialize the scanner."""
        self.gateway = gateway

    def scan_devices(self):
        """Scan for new devices and return a list of found MACs."""
        try:
            return self.gateway.scan_devices()
        except (RequestException, ValueError):
            _LOGGER.error("Unable to scan devices. Check connection to gateway")
            return []

    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        return self.gateway.get_device_name(device)
//! Support inheriting generic parameters and predicates for function delegation.
//!
//! For more information about delegation design, see the tracking issue #118212.
use rustc_data_structures::debug_assert_matches;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::ty::{
self, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt,
};
use rustc_span::{ErrorGuaranteed, Span};
/// Maps a generic parameter index on the callee side to the corresponding
/// index on the caller side.
type RemapTable = FxHashMap<u32, u32>;
/// Type folder that rewrites `Param` types, early-bound regions and const
/// params according to `remap_table`; indices without an entry are kept.
struct ParamIndexRemapper<'tcx> {
    tcx: TyCtxt<'tcx>,
    remap_table: RemapTable,
}
impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ParamIndexRemapper<'tcx> {
    fn cx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
        // Fast path: no generic parameters anywhere in this type.
        if !ty.has_param() {
            return ty;
        }
        if let ty::Param(param) = ty.kind()
            && let Some(index) = self.remap_table.get(&param.index)
        {
            // Rebuild the param with the caller-side index, keeping its name.
            return Ty::new_param(self.tcx, *index, param.name);
        }
        ty.super_fold_with(self)
    }
    fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
        // Only early-bound regions carry a remappable parameter index.
        if let ty::ReEarlyParam(param) = r.kind()
            && let Some(index) = self.remap_table.get(&param.index).copied()
        {
            return ty::Region::new_early_param(
                self.tcx,
                ty::EarlyParamRegion { index, name: param.name },
            );
        }
        r
    }
    fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
        if let ty::ConstKind::Param(param) = ct.kind()
            && let Some(idx) = self.remap_table.get(&param.index)
        {
            let param = ty::ParamConst::new(*idx, param.name);
            return ty::Const::new_param(self.tcx, param);
        }
        ct.super_fold_with(self)
    }
}
/// The kind of a delegation participant: a free function, or an associated
/// function of an inherent impl, a trait, or a trait impl.
#[derive(Clone, Copy, Debug, PartialEq)]
enum FnKind {
    Free,
    AssocInherentImpl,
    AssocTrait,
    AssocTraitImpl,
}
/// Classify a function (or associated function) by the kind of its parent item.
fn fn_kind<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> FnKind {
    debug_assert_matches!(tcx.def_kind(def_id), DefKind::Fn | DefKind::AssocFn);
    let parent = tcx.parent(def_id);
    match tcx.def_kind(parent) {
        DefKind::Trait => FnKind::AssocTrait,
        DefKind::Impl { of_trait: true } => FnKind::AssocTraitImpl,
        DefKind::Impl { of_trait: false } => FnKind::AssocInherentImpl,
        // Any other parent (e.g. a module) means a free function.
        _ => FnKind::Free,
    }
}
/// Given the current context (caller and callee `FnKind`), it specifies
/// the policy of predicates and generic parameters inheritance.
#[derive(Clone, Copy, Debug, PartialEq)]
enum InheritanceKind {
    /// Copying all predicates and parameters, including those of the parent
    /// container.
    ///
    /// Boolean value defines whether the `Self` parameter or `Self: Trait`
    /// predicate are copied. It's always equal to `false` except when
    /// delegating from a free function to a trait method.
    ///
    /// FIXME(fn_delegation): This often leads to type inference
    /// errors. Support providing generic arguments or restrict use sites.
    WithParent(bool),
    /// The trait implementation should be compatible with the original trait.
    /// Therefore, for trait implementations only the method's own parameters
    /// and predicates are copied.
    Own,
}
/// Assemble the `ty::Generics` for a delegation item: optionally copy the
/// callee parent's params, append the callee's own params, reorder lifetimes
/// first, and re-index everything relative to the caller's `parent`.
fn build_generics<'tcx>(
    tcx: TyCtxt<'tcx>,
    sig_id: DefId,
    parent: Option<DefId>,
    inh_kind: InheritanceKind,
) -> ty::Generics {
    let mut own_params = vec![];
    let sig_generics = tcx.generics_of(sig_id);
    if let InheritanceKind::WithParent(has_self) = inh_kind
        && let Some(parent_def_id) = sig_generics.parent
    {
        let sig_parent_generics = tcx.generics_of(parent_def_id);
        own_params.append(&mut sig_parent_generics.own_params.clone());
        if !has_self {
            // `Self` is the parent's first parameter; drop it when not inherited.
            own_params.remove(0);
        }
    }
    own_params.append(&mut sig_generics.own_params.clone());
    // Lifetime parameters must be declared before type and const parameters.
    // Therefore, when delegating from a free function to an associated function,
    // generic parameters need to be reordered:
    //
    // trait Trait<'a, A> {
    //     fn foo<'b, B>(...) {...}
    // }
    //
    // reuse Trait::foo;
    // desugaring:
    // fn foo<'a, 'b, This: Trait<'a, A>, A, B>(...) {
    //     Trait::foo(...)
    // }
    //
    // Stable sort: lifetimes (false) sort before type/const params (true).
    own_params.sort_by_key(|key| key.kind.is_ty_or_const());
    let (parent_count, has_self) = if let Some(def_id) = parent {
        let parent_generics = tcx.generics_of(def_id);
        let parent_kind = tcx.def_kind(def_id);
        (parent_generics.count(), parent_kind == DefKind::Trait)
    } else {
        (0, false)
    };
    for (idx, param) in own_params.iter_mut().enumerate() {
        // Indices of own params start after all of the parent's params.
        param.index = (idx + parent_count) as u32;
        // FIXME(fn_delegation): Default parameters are not inherited, because they are
        // not permitted in functions. Therefore, there are 2 options here:
        //
        // - We can create non-default generic parameters.
        // - We can substitute default parameters into the signature.
        //
        // At the moment, first option has been selected as the most general.
        if let ty::GenericParamDefKind::Type { has_default, .. }
        | ty::GenericParamDefKind::Const { has_default, .. } = &mut param.kind
        {
            *has_default = false;
        }
    }
    let param_def_id_to_index =
        own_params.iter().map(|param| (param.def_id, param.index)).collect();
    ty::Generics {
        parent,
        parent_count,
        own_params,
        param_def_id_to_index,
        has_self,
        has_late_bound_regions: sig_generics.has_late_bound_regions,
    }
}
/// Collect the predicates a delegation item inherits from `sig_id`,
/// instantiated with `args` so parameter indices refer to the caller's
/// generics.
fn build_predicates<'tcx>(
    tcx: TyCtxt<'tcx>,
    sig_id: DefId,
    parent: Option<DefId>,
    inh_kind: InheritanceKind,
    args: ty::GenericArgsRef<'tcx>,
) -> ty::GenericPredicates<'tcx> {
    // Helper accumulating instantiated predicates from an item and,
    // optionally, its parent container.
    struct PredicatesCollector<'tcx> {
        tcx: TyCtxt<'tcx>,
        preds: Vec<(ty::Clause<'tcx>, Span)>,
        args: ty::GenericArgsRef<'tcx>,
    }
    impl<'tcx> PredicatesCollector<'tcx> {
        fn new(tcx: TyCtxt<'tcx>, args: ty::GenericArgsRef<'tcx>) -> PredicatesCollector<'tcx> {
            PredicatesCollector { tcx, preds: vec![], args }
        }
        // Add only the item's own predicates.
        fn with_own_preds(
            mut self,
            f: impl Fn(DefId) -> ty::GenericPredicates<'tcx>,
            def_id: DefId,
        ) -> Self {
            let preds = f(def_id).instantiate_own(self.tcx, self.args);
            self.preds.extend(preds);
            self
        }
        // Add the parent container's predicates (if any), then the item's own.
        fn with_preds(
            mut self,
            f: impl Fn(DefId) -> ty::GenericPredicates<'tcx> + Copy,
            def_id: DefId,
        ) -> Self {
            let preds = f(def_id);
            if let Some(parent_def_id) = preds.parent {
                self = self.with_own_preds(f, parent_def_id);
            }
            self.with_own_preds(f, def_id)
        }
    }
    let collector = PredicatesCollector::new(tcx, args);
    // `explicit_predicates_of` is used here to avoid copying `Self: Trait` predicate.
    // Note: `predicates_of` query can also add inferred outlives predicates, but that
    // is not the case here as `sig_id` is either a trait or a function.
    let preds = match inh_kind {
        InheritanceKind::WithParent(false) => {
            collector.with_preds(|def_id| tcx.explicit_predicates_of(def_id), sig_id)
        }
        InheritanceKind::WithParent(true) => {
            collector.with_preds(|def_id| tcx.predicates_of(def_id), sig_id)
        }
        InheritanceKind::Own => {
            collector.with_own_preds(|def_id| tcx.predicates_of(def_id), sig_id)
        }
    }
    .preds;
    ty::GenericPredicates { parent, predicates: tcx.arena.alloc_from_iter(preds) }
}
/// Remap `args` (expressed with the callee's parameter indices) so that each
/// parameter refers to its index in the caller's generics. Caller params were
/// cloned from the callee's (see `build_generics`), so the two sides share
/// parameter `DefId`s and can be matched through them.
fn build_generic_args<'tcx>(
    tcx: TyCtxt<'tcx>,
    sig_id: DefId,
    def_id: LocalDefId,
    args: ty::GenericArgsRef<'tcx>,
) -> ty::GenericArgsRef<'tcx> {
    let caller_generics = tcx.generics_of(def_id);
    let callee_generics = tcx.generics_of(sig_id);
    let mut remap_table = FxHashMap::default();
    for caller_param in &caller_generics.own_params {
        let callee_index = callee_generics.param_def_id_to_index(tcx, caller_param.def_id).unwrap();
        remap_table.insert(callee_index, caller_param.index);
    }
    let mut folder = ParamIndexRemapper { tcx, remap_table };
    args.fold_with(&mut folder)
}
/// Build the generic arguments used to instantiate the callee's signature and
/// predicates for the delegation item, depending on caller/callee item kinds.
fn create_generic_args<'tcx>(
    tcx: TyCtxt<'tcx>,
    def_id: LocalDefId,
    sig_id: DefId,
) -> ty::GenericArgsRef<'tcx> {
    let caller_kind = fn_kind(tcx, def_id.into());
    let callee_kind = fn_kind(tcx, sig_id);
    match (caller_kind, callee_kind) {
        // Simple cases: the identity args of the callee, remapped onto the
        // caller's parameter indices.
        (FnKind::Free, FnKind::Free)
        | (FnKind::Free, FnKind::AssocTrait)
        | (FnKind::AssocInherentImpl, FnKind::Free)
        | (FnKind::AssocTrait, FnKind::Free)
        | (FnKind::AssocTrait, FnKind::AssocTrait) => {
            let args = ty::GenericArgs::identity_for_item(tcx, sig_id);
            build_generic_args(tcx, sig_id, def_id, args)
        }
        (FnKind::AssocTraitImpl, FnKind::AssocTrait) => {
            // Trait-ref args come from the impl header; only the method's own
            // args are remapped, then the two are concatenated.
            let callee_generics = tcx.generics_of(sig_id);
            let parent = tcx.parent(def_id.into());
            let parent_args = tcx.impl_trait_header(parent).trait_ref.instantiate_identity().args;
            let trait_args = ty::GenericArgs::identity_for_item(tcx, sig_id);
            let method_args = tcx.mk_args(&trait_args[callee_generics.parent_count..]);
            let method_args = build_generic_args(tcx, sig_id, def_id, method_args);
            tcx.mk_args_from_iter(parent_args.iter().chain(method_args))
        }
        (FnKind::AssocInherentImpl, FnKind::AssocTrait) => {
            // Replace the trait's `Self` argument with the inherent impl's
            // self type; the remaining args are remapped as usual.
            let parent = tcx.parent(def_id.into());
            let self_ty = tcx.type_of(parent).instantiate_identity();
            let generic_self_ty = ty::GenericArg::from(self_ty);
            let trait_args = ty::GenericArgs::identity_for_item(tcx, sig_id);
            let trait_args = build_generic_args(tcx, sig_id, def_id, trait_args);
            let args = std::iter::once(generic_self_ty).chain(trait_args.iter().skip(1));
            tcx.mk_args_from_iter(args)
        }
        // For trait impl's `sig_id` is always equal to the corresponding trait method.
        // For inherent methods delegation is not yet supported.
        (FnKind::AssocTraitImpl, _)
        | (_, FnKind::AssocTraitImpl)
        | (_, FnKind::AssocInherentImpl) => unreachable!(),
    }
}
// FIXME(fn_delegation): Move generics inheritance to the AST->HIR lowering.
// For now, generic parameters are not propagated to the generated call,
// which leads to inference errors:
//
// fn foo<T>(x: i32) {}
//
// reuse foo as bar;
// desugaring:
// fn bar<T>() {
//     foo::<_>() // ERROR: type annotations needed
// }
/// Compute the `ty::Generics` of a delegation item by inheriting from the
/// callee `sig_id`, picking the inheritance policy from the item kinds.
pub(crate) fn inherit_generics_for_delegation_item<'tcx>(
    tcx: TyCtxt<'tcx>,
    def_id: LocalDefId,
    sig_id: DefId,
) -> ty::Generics {
    let caller_kind = fn_kind(tcx, def_id.into());
    let callee_kind = fn_kind(tcx, sig_id);
    match (caller_kind, callee_kind) {
        (FnKind::Free, FnKind::Free) | (FnKind::Free, FnKind::AssocTrait) => {
            build_generics(tcx, sig_id, None, InheritanceKind::WithParent(true))
        }
        (FnKind::AssocTraitImpl, FnKind::AssocTrait) => {
            build_generics(tcx, sig_id, Some(tcx.parent(def_id.into())), InheritanceKind::Own)
        }
        (FnKind::AssocInherentImpl, FnKind::AssocTrait)
        | (FnKind::AssocTrait, FnKind::AssocTrait)
        | (FnKind::AssocInherentImpl, FnKind::Free)
        | (FnKind::AssocTrait, FnKind::Free) => build_generics(
            tcx,
            sig_id,
            Some(tcx.parent(def_id.into())),
            InheritanceKind::WithParent(false),
        ),
        // For trait impl's `sig_id` is always equal to the corresponding trait method.
        // For inherent methods delegation is not yet supported.
        (FnKind::AssocTraitImpl, _)
        | (_, FnKind::AssocTraitImpl)
        | (_, FnKind::AssocInherentImpl) => unreachable!(),
    }
}
/// Compute the `GenericPredicates` of a delegation item by inheriting from
/// the callee `sig_id`; mirrors the kind dispatch in
/// `inherit_generics_for_delegation_item`.
pub(crate) fn inherit_predicates_for_delegation_item<'tcx>(
    tcx: TyCtxt<'tcx>,
    def_id: LocalDefId,
    sig_id: DefId,
) -> ty::GenericPredicates<'tcx> {
    let args = create_generic_args(tcx, def_id, sig_id);
    let caller_kind = fn_kind(tcx, def_id.into());
    let callee_kind = fn_kind(tcx, sig_id);
    match (caller_kind, callee_kind) {
        (FnKind::Free, FnKind::Free) | (FnKind::Free, FnKind::AssocTrait) => {
            build_predicates(tcx, sig_id, None, InheritanceKind::WithParent(true), args)
        }
        (FnKind::AssocTraitImpl, FnKind::AssocTrait) => build_predicates(
            tcx,
            sig_id,
            Some(tcx.parent(def_id.into())),
            InheritanceKind::Own,
            args,
        ),
        (FnKind::AssocInherentImpl, FnKind::AssocTrait)
        | (FnKind::AssocTrait, FnKind::AssocTrait)
        | (FnKind::AssocInherentImpl, FnKind::Free)
        | (FnKind::AssocTrait, FnKind::Free) => build_predicates(
            tcx,
            sig_id,
            Some(tcx.parent(def_id.into())),
            InheritanceKind::WithParent(false),
            args,
        ),
        // For trait impl's `sig_id` is always equal to the corresponding trait method.
        // For inherent methods delegation is not yet supported.
        (FnKind::AssocTraitImpl, _)
        | (_, FnKind::AssocTraitImpl)
        | (_, FnKind::AssocInherentImpl) => unreachable!(),
    }
}
/// Reject delegation targets that are not supported; currently only
/// C-variadic callees. Returns `Err` with the emitted diagnostic on failure.
fn check_constraints<'tcx>(
    tcx: TyCtxt<'tcx>,
    def_id: LocalDefId,
    sig_id: DefId,
) -> Result<(), ErrorGuaranteed> {
    let mut ret = Ok(());
    // Emit an `UnsupportedDelegation` error and remember the guarantee.
    let mut emit = |descr| {
        ret = Err(tcx.dcx().emit_err(crate::errors::UnsupportedDelegation {
            span: tcx.def_span(def_id),
            descr,
            callee_span: tcx.def_span(sig_id),
        }));
    };
    if tcx.fn_sig(sig_id).skip_binder().skip_binder().c_variadic {
        // See issue #127443 for explanation.
        emit("delegation to C-variadic functions is not allowed");
    }
    ret
}
/// Compute the delegation item's signature types (inputs followed by the
/// output) by instantiating the callee's signature with remapped args.
pub(crate) fn inherit_sig_for_delegation_item<'tcx>(
    tcx: TyCtxt<'tcx>,
    def_id: LocalDefId,
) -> &'tcx [Ty<'tcx>] {
    let sig_id = tcx.hir_opt_delegation_sig_id(def_id).unwrap();
    let caller_sig = tcx.fn_sig(sig_id);
    if let Err(err) = check_constraints(tcx, def_id, sig_id) {
        // On error, fill the whole signature (inputs + output) with error types.
        let sig_len = caller_sig.instantiate_identity().skip_binder().inputs().len() + 1;
        let err_type = Ty::new_error(tcx, err);
        return tcx.arena.alloc_from_iter((0..sig_len).map(|_| err_type));
    }
    let args = create_generic_args(tcx, def_id, sig_id);
    // Bound vars are also inherited from `sig_id`.
    // They will be rebound later in `lower_fn_ty`.
    let sig = caller_sig.instantiate(tcx, args).skip_binder();
    let sig_iter = sig.inputs().iter().cloned().chain(std::iter::once(sig.output()));
    tcx.arena.alloc_from_iter(sig_iter)
}
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright(c)2013 NTT corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Unit tests for websocketproxy """
import os
import logging
import select
import shutil
import stubout
import subprocess
import tempfile
import time
import unittest
from websockify import websocketproxy
class MockSocket(object):
    """No-op stand-in for a socket: accepts and ignores every call."""

    def __init__(self, *args, **kwargs):
        pass

    def shutdown(self, *args):
        pass

    def close(self, *args):
        pass
class WebSocketProxyTest(unittest.TestCase):
    """Unit tests for websockify's WebSocketProxy.

    External collaborators (subprocess, select, time, the proxy's own
    methods) are replaced via stubout so no real sockets are opened.
    """
    def _init_logger(self, tmpdir):
        # NOTE(review): tmpdir is accepted but unused; the log file is
        # created relative to the name only (callers pass './').
        name = 'websocket-unittest'
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        logger.propagate = True
        filename = "%s.log" % (name)
        handler = logging.FileHandler(filename)
        handler.setFormatter(logging.Formatter("%(message)s"))
        logger.addHandler(handler)
    def setUp(self):
        """Called automatically before each test."""
        super(WebSocketProxyTest, self).setUp()
        self.soc = ''
        self.stubs = stubout.StubOutForTesting()
        # Temporary dir for test data
        self.tmpdir = tempfile.mkdtemp()
        # Put log somewhere persistent
        self._init_logger('./')
        # Mock this out cause it screws tests up
        self.stubs.Set(os, 'chdir', lambda *args, **kwargs: None)
    def tearDown(self):
        """Called automatically after each test."""
        self.stubs.UnsetAll()
        shutil.rmtree(self.tmpdir)
        super(WebSocketProxyTest, self).tearDown()
    def _get_websockproxy(self, **kwargs):
        """Build a WebSocketProxy whose paths all point at the temp dir."""
        return websocketproxy.WebSocketProxy(key=self.tmpdir,
                                             web=self.tmpdir,
                                             record=self.tmpdir,
                                             **kwargs)
    def test_run_wrap_cmd(self):
        """run_wrap_cmd spawns the wrapped command and flags it as spawned."""
        web_socket_proxy = self._get_websockproxy()
        web_socket_proxy.__dict__["wrap_cmd"] = "wrap_cmd"
        def mock_Popen(*args, **kwargs):
            return '_mock_cmd'
        self.stubs.Set(subprocess, 'Popen', mock_Popen)
        web_socket_proxy.run_wrap_cmd()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(web_socket_proxy.spawn_message, True)
    def test_started(self):
        """started() runs the wrap command when one is configured."""
        web_socket_proxy = self._get_websockproxy()
        web_socket_proxy.__dict__["spawn_message"] = False
        web_socket_proxy.__dict__["wrap_cmd"] = "wrap_cmd"
        def mock_run_wrap_cmd(*args, **kwargs):
            web_socket_proxy.__dict__["spawn_message"] = True
        self.stubs.Set(web_socket_proxy, 'run_wrap_cmd', mock_run_wrap_cmd)
        web_socket_proxy.started()
        self.assertEqual(web_socket_proxy.__dict__["spawn_message"], True)
    def test_poll(self):
        """poll() in respawn mode clears the spawn flag once time advances."""
        web_socket_proxy = self._get_websockproxy()
        web_socket_proxy.__dict__["wrap_cmd"] = "wrap_cmd"
        web_socket_proxy.__dict__["wrap_mode"] = "respawn"
        web_socket_proxy.__dict__["wrap_times"] = [99999999]
        web_socket_proxy.__dict__["spawn_message"] = True
        web_socket_proxy.__dict__["cmd"] = None
        # Freeze the clock past the recorded wrap time.
        self.stubs.Set(time, 'time', lambda: 100000000.000)
        web_socket_proxy.poll()
        self.assertEqual(web_socket_proxy.spawn_message, False)
    def test_new_client(self):
        """new_websocket_client raises when select() reports an exception."""
        web_socket_proxy = self._get_websockproxy()
        web_socket_proxy.__dict__["verbose"] = "verbose"
        web_socket_proxy.__dict__["daemon"] = None
        web_socket_proxy.__dict__["client"] = "client"
        self.stubs.Set(web_socket_proxy, 'socket', MockSocket)
        def mock_select(*args, **kwargs):
            ins = None
            outs = None
            excepts = "excepts"
            return ins, outs, excepts
        self.stubs.Set(select, 'select', mock_select)
        self.assertRaises(Exception, web_socket_proxy.new_websocket_client)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# I, Adam Olsen, am the original author of this work. I hereby
# donate it into the public domain, and relinquish any rights I
# may have in it.
#
# I, Behdad Esfahbod, hereby disclaim any rights for my contributions
# to this code.
from __future__ import division
import sys
import cairo
import pygtk
pygtk.require('2.0')
import gtk
import gtk.gdk
import pango
import gobject
def generate_modes():
    """Yield every (extentindex, name, align, desc) demo-mode combination.

    Modes combine a layout alignment, an extent type (logical vs ink) and
    an iteration granularity; the description string labels the mode.
    """
    alignments = (
        ('left', pango.ALIGN_LEFT),
        ('center', pango.ALIGN_CENTER),
        ('right', pango.ALIGN_RIGHT),
    )
    extents = (('logical', 1), ('ink', 0))
    granularities = ('line', 'run', 'cluster', 'char')
    for align_desc, align in alignments:
        for extent_desc, extentindex in extents:
            for name in granularities:
                if extent_desc == 'ink' and name == 'char':
                    # char extents come as a single rect, not split into
                    # ink/logical (see ExtentDemo.draw), so skip this combo.
                    continue
                desc = '%s %s %s' % (align_desc, extent_desc, name)
                yield extentindex, name, align, desc
class ExtentDemo(gtk.Widget):
    """Widget that renders a Pango layout and outlines its extents.

    The red rectangle outlines the whole layout's extents; the green
    rectangles outline the extents of every line/run/cluster/char,
    depending on the current mode.  Space/arrow keys cycle through the
    modes produced by generate_modes().
    """
    def __init__(self, text="""Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor.\n\tسلامی چو بوی خوشِ آشنایی... بر آن ملّتِ دیرامدام دیرام..."""):
        gtk.Widget.__init__(self)
        # Fix: removed a dead `self.text = "foo"` assignment that was
        # immediately overwritten by the line below.
        self.text = text
        self.all_modes = list(generate_modes())
        self.mode_num = 0
        # Pixel margins around the drawing, plus room above the layout
        # for the mode label.
        self.x_margin = 5
        self.y_margin = 5
        self.x_offset = 0
        self.y_offset = 25
        self.font_size = 36
    def do_realize(self):
        """Create this widget's own GDK window (classic GTK2 realize)."""
        self.set_flags(self.flags() | gtk.REALIZED)
        self.window = gtk.gdk.Window(
            self.get_parent_window(),
            width=self.allocation.width,
            height=self.allocation.height,
            window_type=gtk.gdk.WINDOW_CHILD,
            wclass=gtk.gdk.INPUT_OUTPUT,
            event_mask=self.get_events() | gtk.gdk.EXPOSURE_MASK)
        self.window.set_user_data(self)
        self.style.attach(self.window)
        self.style.set_background(self.window, gtk.STATE_NORMAL)
        self.window.move_resize(*self.allocation)
    def do_unrealize(self):
        """Destroy the window created in do_realize."""
        self.window.destroy()
    def do_size_request(self, requisition):
        """Request a fixed width and the height the wrapped text needs."""
        width = 800
        layout = self.get_layout(self.get_pango_context())
        layout.set_width (pango.SCALE * (width - (self.x_offset + 2 * self.x_margin)))
        # Logical extents: [1] is the logical rect, [3] its height.
        height = layout.get_pixel_extents ()[1][3] + (self.y_offset + 2 * self.y_margin)
        requisition.width = width
        requisition.height = height
    def do_expose_event(self, event):
        """Redraw the exposed area via cairo."""
        context = self.window.cairo_create()
        context.rectangle(event.area.x, event.area.y,
                          event.area.width, event.area.height)
        context.clip()
        pangocontext = self.get_pango_context()
        self.draw(context, pangocontext)
        return False
    def get_layout (self, pangocontext):
        """Build the demo layout with the configured font and text."""
        font = pango.FontDescription()
        font.set_family("sans")
        font.set_size(self.font_size * pango.SCALE)
        layout = pango.Layout(pangocontext)
        layout.set_font_description(font)
        layout.set_text(self.text)
        return layout
    def draw(self, context, pangocontext):
        """Render the label, the text, and the extent rectangles."""
        context.set_source_rgb (1, 1, 1)
        context.paint()
        context.set_source_rgb (0, 0, 0)
        context.translate (self.x_margin, self.y_margin)
        extentindex, name, align, desc = self.all_modes[self.mode_num]
        # Mode label at the top, above the layout.
        labellayout = pango.Layout(pangocontext)
        labellayout.set_text('%i: %s' % (self.mode_num + 1, desc))
        context.move_to(0, 0)
        context.show_layout(labellayout)
        context.translate (self.x_offset, self.y_offset)
        layout = self.get_layout (pangocontext)
        width = self.allocation.width - (self.x_offset + 2 * self.x_margin)
        layout.set_width(width * pango.SCALE)
        layout.set_alignment(align)
        context.move_to(0, 0)
        context.show_layout(layout)
        # Whole-layout extents in red, slightly inflated so they stay visible.
        context.set_source_rgba(1, 0, 0, 0.5)
        context.set_line_width (2)
        x, y, width, height = layout.get_pixel_extents()[extentindex]
        context.rectangle(x-1, y-1, width+2, height+2)
        context.stroke()
        # Per-item extents in green, walked via the layout iterator.
        context.set_source_rgba(0, 1, 0, 0.7)
        context.set_line_width (1)
        li = layout.get_iter()
        while True:
            extents = getattr(li, 'get_%s_extents' % name)()
            if name != 'char':
                # line/run/cluster return (ink, logical); char is one rect.
                extents = extents[extentindex]
            x, y, width, height = self._descale(extents)
            context.rectangle(x+.5, y+.5, width-1, height-1)
            context.stroke()
            if not getattr(li, 'next_%s' % name)():
                break
    def cycle_mode_forward(self):
        """Advance to the next mode (wrapping) and redraw."""
        self.mode_num += 1
        if self.mode_num >= len(self.all_modes):
            self.mode_num = 0
        self.queue_draw()
    def cycle_mode_backward(self):
        """Go back to the previous mode (wrapping) and redraw."""
        self.mode_num -= 1
        if self.mode_num < 0:
            self.mode_num = len(self.all_modes) - 1
        self.queue_draw()
    def key_press_event(self, widget, event):
        """Space/right = next mode, backspace/left = previous, q = quit."""
        if event.string == ' ' or event.keyval == gtk.keysyms.Right:
            self.cycle_mode_forward()
        elif event.keyval == gtk.keysyms.BackSpace or event.keyval == gtk.keysyms.Left:
            self.cycle_mode_backward()
        elif event.string == 'q':
            gtk.main_quit()
    def _descale(self, rect):
        """Convert a rect from Pango units to pixels (lazy generator)."""
        return (i / pango.SCALE for i in rect)
    def run(self):
        """Show the demo in its own top-level window and run the main loop."""
        window = gtk.Window()
        window.add(self)
        window.connect("destroy", gtk.main_quit)
        window.connect("key-press-event", self.key_press_event)
        window.show_all()
        gtk.main()
# Register ExtentDemo with the GObject type system so its do_* virtual
# method overrides (do_realize, do_expose_event, ...) take effect.
gobject.type_register(ExtentDemo)
def main():
    """Entry point: ``extents.py [mode] [text]``.

    argv[1] (optional) selects the initial mode number, argv[2] (optional)
    overrides the demo text.
    """
    if len (sys.argv) > 2:
        ed = ExtentDemo(sys.argv[2])
    else:
        ed = ExtentDemo()
    if len (sys.argv) > 1:
        mode = int(sys.argv[1])
        while mode > 1:
            mode -= 1
            # Fix: was `ed.cycle_mode()`, which does not exist on ExtentDemo
            # (only cycle_mode_forward/cycle_mode_backward are defined) and
            # raised AttributeError whenever a mode argument was given.
            ed.cycle_mode_forward()
    ed.run()
if __name__ == "__main__":
    main()
"""
Generic NAT Port mapping interface.
TODO: Example
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
# Legacy VCS keyword placeholder for this module's revision.
__revision__ = "$id"
from twisted.internet.base import BasePort
# Public API
def get_port_mapper(proto="TCP"):
    """
    Returns a L{NATMapper} instance, suited to map a port for
    the given protocol. Defaults to TCP.

    Only the UPnP mapper is implemented for now; it handles both
    'UDP' and 'TCP', so I{proto} is currently not consulted.

    @param proto: The protocol: 'TCP' or 'UDP'
    @type proto: string
    @return: A deferred called with a L{NATMapper} instance
    @rtype: L{twisted.internet.defer.Deferred}
    """
    # Imported lazily so merely loading this module does not pull in UPnP.
    import nattraverso.pynupnp
    return nattraverso.pynupnp.get_port_mapper()
class NATMapper:
    """
    Abstract interface for mapping twisted port objects (as returned by the
    listenXX methods) through a NAT, so incoming packets reach this host.

    Concrete implementations (currently only the UPnP mapper, which handles
    both UDP and TCP when an UPnP device is present) override every public
    method below; this base class only raises NotImplementedError.
    """
    def __init__(self):
        raise NotImplementedError("Cannot instantiate the class")
    def map(self, port):
        """
        Create a mapping for the given twisted Port object.

        The returned deferred fires with a tuple (extaddr, extport):
          - extaddr: external ip address string of this host
          - extport: external port number mapped to the given Port
        Calling this again with the same Port fires with the existing
        mapping.

        @param port: The port object to map
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @return: A deferred called with the above defined tuple
        @rtype: L{twisted.internet.defer.Deferred}
        """
        raise NotImplementedError
    def info(self, port):
        """
        Return the existing (extaddress, extport) mapping for I{port};
        map() must have been called for it first.

        @param port: The port object to retreive info from
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @raise ValueError: When there is no such existing mapping
        @return: a tuple (extaddress, extport).
        @see: L{map() function<map>}
        """
        raise NotImplementedError
    def unmap(self, port):
        """
        Remove an existing mapping for the given twisted Port object.

        @param port: The port object to unmap
        @type port: a L{twisted.internet.interfaces.IListeningPort} object
        @return: A deferred called with None
        @rtype: L{twisted.internet.defer.Deferred}
        @raise ValueError: When there is no such existing mapping
        """
        raise NotImplementedError
    def get_port_mappings(self):
        """
        Return a deferred firing with a dictionnary of existing mappings:
          - keys: (protocol, external_port) where protocol is "TCP" or
            "UDP" and external_port is the port as seen on the WAN side
          - values: (internal_ip, internal_port) on the LAN side

        @return: A deferred called with the above defined dictionnary
        @rtype: L{twisted.internet.defer.Deferred}
        """
        raise NotImplementedError
    def _check_valid_port(self, port):
        """Raise ValueError unless I{port} is a listening Port with a real number."""
        if not isinstance(port, BasePort):
            raise ValueError("expected a Port, got %r" % (port))
        if not port.connected:
            raise ValueError("Port %r is not listening" % (port))
        if port.getHost().port == 0:
            raise ValueError("Port %r has port number of 0" % (port))
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { CloseButton, Popover as ChakraPopover } from "@chakra-ui/react";
import * as React from "react";
/**
 * Popover close button: a small CloseButton pinned to the top-end corner,
 * forwarding its ref and props to Chakra's Popover.CloseTrigger.
 */
export const CloseTrigger = React.forwardRef<HTMLButtonElement, ChakraPopover.CloseTriggerProps>(
  function CloseTriggerComponent(props, ref) {
    // Positioning defaults come first so callers can override them via
    // props; asChild and ref intentionally come after the spread.
    return (
      <ChakraPopover.CloseTrigger position="absolute" top="1" insetEnd="1" {...props} ref={ref} asChild>
        <CloseButton size="sm" />
      </ChakraPopover.CloseTrigger>
    );
  },
);
{
"definitions": {
"ExternalsType": {
"description": "Specifies the default type of externals ('amd*', 'umd*', 'system' and 'jsonp' depend on output.libraryTarget set to the same value).",
"enum": [
"var",
"module",
"assign",
"this",
"window",
"self",
"global",
"commonjs",
"commonjs2",
"commonjs-module",
"commonjs-static",
"amd",
"amd-require",
"umd",
"umd2",
"jsonp",
"system",
"promise",
"import",
"module-import",
"script",
"node-commonjs",
"asset",
"css-import",
"css-url"
]
},
"Remotes": {
"description": "Container locations and request scopes from which modules should be resolved and loaded at runtime. When provided, property name is used as request scope, otherwise request scope is automatically inferred from container location.",
"anyOf": [
{
"type": "array",
"items": {
"description": "Container locations and request scopes from which modules should be resolved and loaded at runtime.",
"anyOf": [
{
"$ref": "#/definitions/RemotesItem"
},
{
"$ref": "#/definitions/RemotesObject"
}
]
}
},
{
"$ref": "#/definitions/RemotesObject"
}
]
},
"RemotesConfig": {
"description": "Advanced configuration for container locations from which modules should be resolved and loaded at runtime.",
"type": "object",
"additionalProperties": false,
"properties": {
"external": {
"description": "Container locations from which modules should be resolved and loaded at runtime.",
"anyOf": [
{
"$ref": "#/definitions/RemotesItem"
},
{
"$ref": "#/definitions/RemotesItems"
}
]
},
"shareScope": {
"description": "The name of the share scope shared with this remote.",
"type": "string",
"minLength": 1
}
},
"required": ["external"]
},
"RemotesItem": {
"description": "Container location from which modules should be resolved and loaded at runtime.",
"type": "string",
"minLength": 1
},
"RemotesItems": {
"description": "Container locations from which modules should be resolved and loaded at runtime.",
"type": "array",
"items": {
"$ref": "#/definitions/RemotesItem"
}
},
"RemotesObject": {
"description": "Container locations from which modules should be resolved and loaded at runtime. Property names are used as request scopes.",
"type": "object",
"additionalProperties": {
"description": "Container locations from which modules should be resolved and loaded at runtime.",
"anyOf": [
{
"$ref": "#/definitions/RemotesConfig"
},
{
"$ref": "#/definitions/RemotesItem"
},
{
"$ref": "#/definitions/RemotesItems"
}
]
}
}
},
"title": "ContainerReferencePluginOptions",
"type": "object",
"additionalProperties": false,
"properties": {
"remoteType": {
"description": "The external type of the remote containers.",
"oneOf": [
{
"$ref": "#/definitions/ExternalsType"
}
]
},
"remotes": {
"$ref": "#/definitions/Remotes"
},
"shareScope": {
"description": "The name of the share scope shared with all remotes (defaults to 'default').",
"type": "string",
"minLength": 1
}
},
"required": ["remoteType", "remotes"]
} | json | github | https://github.com/webpack/webpack | schemas/plugins/container/ContainerReferencePlugin.json |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# SoundConverter - GNOME application for converting between audio formats.
# Copyright 2004 Lars Wirzenius
# Copyright 2005-2014 Gautier Portet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import time
import gobject
class BackgroundTask:
    """A long-running task with overridable lifecycle hooks.

    Subclass and override started()/finished()/aborted() as needed, then
    call start().  The subclass must call done() when its work completes
    normally; call abort() to stop the task without running finished().
    """

    def __init__(self):
        self.running = False   # True between start() and done()/abort()
        self.listeners = {}    # signal name -> list of callbacks
        self.progress = None

    def start(self):
        """Begin the task and emit the 'started' signal."""
        self.emit('started')
        self.running = True
        self.run_start_time = time.time()

    def add_listener(self, signal, listener):
        """Register *listener* for *signal* ('started' or 'finished')."""
        self.listeners.setdefault(signal, []).append(listener)

    def emit(self, signal):
        """Dispatch *signal* asynchronously.

        Handlers are scheduled as GLib idle callbacks so that they run on
        the main thread.
        """
        gobject.idle_add(getattr(self, signal))
        for listener in self.listeners.get(signal, []):
            gobject.idle_add(listener, self)

    def emit_sync(self, signal):
        """Dispatch *signal* immediately, on the calling thread."""
        getattr(self, signal)()
        for listener in self.listeners.get(signal, []):
            listener(self)

    def done(self):
        """Mark normal completion; emits 'finished' if still running."""
        self.run_finish_time = time.time()
        if self.running:
            self.emit_sync('finished')
        self.running = False

    def abort(self):
        """Stop the task early; 'finished' is not emitted."""
        self.emit('aborted')
        self.running = False

    def aborted(self):
        """Hook: the task was aborted."""
        pass

    def started(self):
        """Hook: the task has started."""
        pass

    def finished(self):
        """Hook: the task finished normally."""
        pass
import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
# Module-scoped scratch directories shared by the tests below; both are
# removed again in teardown_module().
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
# Per-test category directories, (re)created by setup_load_files().
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
    """Recursively delete *path* when it is an existing directory."""
    path_is_dir = os.path.isdir(path)
    if path_is_dir:
        shutil.rmtree(path)
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    for scratch_dir in (DATA_HOME, LOAD_FILES_ROOT):
        _remove_dir(scratch_dir)
def setup_load_files():
    # Create two category subdirectories under LOAD_FILES_ROOT and drop a
    # single sample document into the first one: load_files() treats each
    # subdirectory name as a target category.
    global TEST_CATEGORY_DIR1
    global TEST_CATEGORY_DIR2
    TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    # delete=False: the file must survive close() so load_files() can read
    # it; it is removed with its parent dir in teardown_load_files().
    sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
                                              delete=False)
    sample_file.write(b("Hello World!\n"))
    sample_file.close()
def teardown_load_files():
    """Remove the per-test category directories created by setup_load_files."""
    for category_dir in (TEST_CATEGORY_DIR1, TEST_CATEGORY_DIR2):
        _remove_dir(category_dir)
def test_data_home():
    """get_data_home/clear_data_home create and remove the cache folder."""
    # get_data_home will point to a pre-existing folder
    data_home = get_data_home(data_home=DATA_HOME)
    assert_equal(data_home, DATA_HOME)
    assert_true(os.path.exists(data_home))

    # clear_data_home will delete both the content and the folder itself
    clear_data_home(data_home=data_home)
    assert_false(os.path.exists(data_home))

    # if the folder is missing it will be created again
    data_home = get_data_home(data_home=DATA_HOME)
    assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
    """load_files on an empty root yields no files, no targets, no DESCR."""
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(res.DESCR, None)
    assert_equal(len(res.target_names), 0)
    assert_equal(len(res.filenames), 0)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
    # One file inside one of two category folders: both categories are
    # reported, but only the single document is loaded (as raw bytes).
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.DESCR, None)
    assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
    """Restricting load_files to one category with description/encoding."""
    # Bug fix: os.path.abspath(...).split('/').pop() assumed POSIX path
    # separators and broke on Windows; os.path.basename is portable.
    category = os.path.basename(os.path.abspath(TEST_CATEGORY_DIR1))
    res = load_files(LOAD_FILES_ROOT, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    # encoding="utf-8" decodes the file content to text.
    assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
    """With load_content=False the result carries no 'data' entry."""
    res = load_files(LOAD_FILES_ROOT, load_content=False)
    assert_equal(res.DESCR, None)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.get('data'), None)
def test_load_sample_images():
    """Both bundled sample images load when image support is available."""
    try:
        res = load_sample_images()
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_equal(len(res.images), 2)
        assert_equal(len(res.filenames), 2)
        assert_true(res.DESCR)
def test_load_digits():
    """Full digits set: 1797 samples of 64 features across 10 classes."""
    digits = load_digits()
    n_classes = numpy.unique(digits.target).size
    assert_equal(n_classes, 10)
    assert_equal(digits.data.shape, (1797, 64))
def test_load_digits_n_class_lt_10():
    """Restricting to 9 classes drops the samples of the excluded class."""
    digits = load_digits(9)
    n_classes = numpy.unique(digits.target).size
    assert_equal(n_classes, 9)
    assert_equal(digits.data.shape, (1617, 64))
def test_load_sample_image():
    """china.jpg decodes to a 427x640 RGB uint8 array."""
    try:
        china = load_sample_image('china.jpg')
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_equal(china.shape, (427, 640, 3))
        assert_equal(china.dtype, 'uint8')
def test_load_missing_sample_image_error():
    # Probe for image support the same way load_sample_image does: imread
    # moved between scipy.misc and scipy.misc.pilutil across scipy
    # versions, so both locations are tried.
    have_PIL = True
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        have_PIL = False
    if have_PIL:
        # Requesting an image that is not bundled must raise.
        assert_raises(AttributeError, load_sample_image,
                      'blop.jpg')
    else:
        warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
    """Diabetes dataset: 442 samples, 10 features, one target per sample."""
    res = load_diabetes()
    assert_equal(res.data.shape, (442, 10))
    # Bug fix: this used assert_true(res.target.size, 442), which only
    # checks truthiness -- 442 was silently taken as the failure message,
    # so any non-empty target would have passed.
    assert_equal(res.target.size, 442)
def test_load_linnerud():
    """Linnerud: 20 samples, 3 exercise features, 3 physiological targets."""
    res = load_linnerud()
    for array in (res.data, res.target):
        assert_equal(array.shape, (20, 3))
    assert_equal(len(res.target_names), 3)
    assert_true(res.DESCR)
def test_load_iris():
    """Iris: 150 samples, 4 features, 3 class names, plus a description."""
    res = load_iris()
    assert_equal(res.target.size, 150)
    assert_equal(res.data.shape, (150, 4))
    assert_equal(res.target_names.size, 3)
    assert_true(res.DESCR)
def test_load_boston():
    """Boston housing: 506 samples, 13 named features, plus a description."""
    res = load_boston()
    assert_equal(res.target.size, 506)
    assert_equal(res.data.shape, (506, 13))
    assert_equal(res.feature_names.size, 13)
    assert_true(res.DESCR)
def test_loads_dumps_bunch():
    """A Bunch survives pickling and keeps attribute/key access in sync."""
    original = Bunch(x="x")
    restored = loads(dumps(original))
    restored.x = "y"
    assert_equal(restored['x'], restored.x)
#!/usr/bin/env python
import datetime
import sys
from getpass import getpass
from optparse import OptionParser
from peewee import *
from peewee import print_
from peewee import __version__ as peewee_version
from playhouse.reflection import *
TEMPLATE = """from peewee import *%s
database = %s('%s', **%s)
class UnknownField(object):
def __init__(self, *_, **__): pass
class BaseModel(Model):
class Meta:
database = database
"""
DATABASE_ALIASES = {
MySQLDatabase: ['mysql', 'mysqldb'],
PostgresqlDatabase: ['postgres', 'postgresql'],
SqliteDatabase: ['sqlite', 'sqlite3'],
}
DATABASE_MAP = dict((value, key)
for key in DATABASE_ALIASES
for value in DATABASE_ALIASES[key])
def make_introspector(database_type, database_name, **kwargs):
    """Open *database_name* using the engine alias *database_type* and
    return a peewee Introspector for it.

    Exits the process with an error message for unknown engine aliases.
    Remaining keyword arguments (except 'schema') are forwarded to the
    database class constructor.
    """
    if database_type not in DATABASE_MAP:
        err('Unrecognized database, must be one of: %s' %
            ', '.join(DATABASE_MAP.keys()))
        sys.exit(1)

    # 'schema' belongs to the introspector, not to the database driver.
    schema = kwargs.pop('schema', None)

    DatabaseClass = DATABASE_MAP[database_type]
    db = DatabaseClass(database_name, **kwargs)
    return Introspector.from_database(db, schema=schema)
def print_models(introspector, tables=None, preserve_order=False,
                 include_views=False):
    """Introspect the database and print generated model classes to stdout.

    Tables are emitted depth-first along their foreign-key edges so that a
    model's dependencies are defined before the model itself.
    """
    database = introspector.introspect(table_names=tables,
                                       include_views=include_views)

    # Emit the module preamble (imports, database handle, BaseModel).
    print_(TEMPLATE % (
        introspector.get_additional_imports(),
        introspector.get_database_class().__name__,
        introspector.get_database_name(),
        repr(introspector.get_database_kwargs())))

    # NOTE(review): the whitespace inside the emitted-code string literals
    # below looks collapsed in this copy (single spaces) -- verify that the
    # generated class bodies are indented correctly.
    def _print_table(table, seen, accum=None):
        # *accum* is the chain of tables currently being printed, used to
        # detect foreign-key reference cycles.
        accum = accum or []

        foreign_keys = database.foreign_keys[table]
        for foreign_key in foreign_keys:
            dest = foreign_key.dest_table

            # In the event the destination table has already been pushed
            # for printing, then we have a reference cycle.
            if dest in accum and table not in accum:
                print_('# Possible reference cycle: %s' % dest)

            # If this is not a self-referential foreign key, and we have
            # not already processed the destination table, do so now.
            if dest not in seen and dest not in accum:
                seen.add(dest)
                if dest != table:
                    _print_table(dest, seen, accum + [table])

        print_('class %s(BaseModel):' % database.model_names[table])
        columns = database.columns[table].items()
        if not preserve_order:
            columns = sorted(columns)
        primary_keys = database.primary_keys[table]
        for name, column in columns:
            # An integer 'id' column that is the sole primary key is implied
            # by peewee, so it is not emitted explicitly.
            skip = all([
                name in primary_keys,
                name == 'id',
                len(primary_keys) == 1,
                column.field_class in introspector.pk_classes])
            if skip:
                continue
            if column.primary_key and len(primary_keys) > 1:
                # If we have a CompositeKey, then we do not want to explicitly
                # mark the columns as being primary keys.
                column.primary_key = False

            print_(' %s' % column.get_field())

        print_('')
        print_(' class Meta:')
        print_(' table_name = \'%s\'' % table)
        multi_column_indexes = database.multi_column_indexes(table)
        if multi_column_indexes:
            print_(' indexes = (')
            for fields, unique in sorted(multi_column_indexes):
                print_(' ((%s), %s),' % (
                    ', '.join("'%s'" % field for field in fields),
                    unique,
                ))
            print_(' )')

        if introspector.schema:
            print_(' schema = \'%s\'' % introspector.schema)
        if len(primary_keys) > 1:
            # Composite primary key: emitted as an explicit CompositeKey.
            pk_field_names = sorted([
                field.name for col, field in columns
                if col in primary_keys])
            pk_list = ', '.join("'%s'" % pk for pk in pk_field_names)
            print_(' primary_key = CompositeKey(%s)' % pk_list)
        elif not primary_keys:
            print_(' primary_key = False')
        print_('')
        seen.add(table)

    seen = set()
    for table in sorted(database.model_names.keys()):
        if table not in seen:
            # Only emit explicitly requested tables (when -t was given).
            if not tables or table in tables:
                _print_table(table, seen)
def print_header(cmd_line, introspector):
    """Write a provenance comment block above the generated models."""
    timestamp = datetime.datetime.now()
    header_lines = [
        '# Code generated by:',
        '# python -m pwiz %s' % cmd_line,
        '# Date: %s' % timestamp.strftime('%B %d, %Y %I:%M%p'),
        '# Database: %s' % introspector.get_database_name(),
        '# Peewee version: %s' % peewee_version,
        '',
    ]
    for line in header_lines:
        print_(line)
def err(msg):
    """Print *msg* to stderr wrapped in red ANSI escape codes, and flush."""
    stream = sys.stderr
    stream.write('\033[91m%s\033[0m\n' % msg)
    stream.flush()
def get_option_parser():
    """Build the command-line interface for the pwiz script."""
    parser = OptionParser(usage='usage: %prog [options] database_name')
    add = parser.add_option
    add('-H', '--host', dest='host')
    add('-p', '--port', dest='port', type='int')
    add('-u', '--user', dest='user')
    add('-P', '--password', dest='password', action='store_true')
    engines = sorted(DATABASE_MAP)
    add('-e', '--engine', dest='engine', default='postgresql', choices=engines,
        help=('Database type, e.g. sqlite, mysql or postgresql. Default '
              'is "postgresql".'))
    add('-s', '--schema', dest='schema')
    add('-t', '--tables', dest='tables',
        help=('Only generate the specified tables. Multiple table names should '
              'be separated by commas.'))
    add('-v', '--views', dest='views', action='store_true',
        help='Generate model classes for VIEWs in addition to tables.')
    add('-i', '--info', dest='info', action='store_true',
        help=('Add database information and other metadata to top of the '
              'generated file.'))
    add('-o', '--preserve-order', action='store_true', dest='preserve_order',
        help='Model definition column ordering matches source table.')
    return parser
def get_connect_kwargs(options):
    """Collect database connection kwargs from parsed *options*.

    Only truthy values are forwarded.  When --password was given, the
    password is read interactively so it never appears on the command line.
    """
    kwargs = {}
    for attr in ('host', 'port', 'user', 'schema'):
        value = getattr(options, attr)
        if value:
            kwargs[attr] = value
    if options.password:
        kwargs['password'] = getpass()
    return kwargs
if __name__ == '__main__':
    # Keep the raw argv around so it can be echoed into the header comment.
    raw_argv = sys.argv

    parser = get_option_parser()
    options, args = parser.parse_args()

    if len(args) < 1:
        err('Missing required parameter "database"')
        parser.print_help()
        sys.exit(1)
    connect = get_connect_kwargs(options)

    # The database name is the sole positional argument.
    database = args[-1]

    # -t/--tables takes a comma-separated list; blanks are discarded.
    tables = None
    if options.tables:
        tables = [table.strip() for table in options.tables.split(',')
                  if table.strip()]

    introspector = make_introspector(options.engine, database, **connect)
    if options.info:
        cmd_line = ' '.join(raw_argv[1:])
        print_header(cmd_line, introspector)

    print_models(introspector, tables, options.preserve_order, options.views)
# -*- coding: utf-8 -*-
#
# CodeIgniter documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 28 07:24:38 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.ifconfig', 'sphinxcontrib.phpdomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CodeIgniter'
copyright = u'2014 - 2016, British Columbia Institute of Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1.2'
# The full version, including alpha/beta/rc tags.
release = '3.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :php:func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. php:function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
highlight_language = 'ci'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Specifying a few options; just a starting point & we can play with it.
html_theme_options = {
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as the favicon
# of the docs. This file should be a Windows icon file (.ico), either 16x16
# or 32x32 pixels in size.
html_favicon = 'images/ci-icon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CodeIgniterdoc'
html_copy_source = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CodeIgniter.tex', u'CodeIgniter Documentation',
u'British Columbia Institute of Technology', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'codeigniter', u'CodeIgniter Documentation',
[u'British Columbia Institute of Technology'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'CodeIgniter'
epub_author = u'British Columbia Institute of Technology'
epub_publisher = u'British Columbia Institute of Technology'
epub_copyright = u'2014 - 2016, British Columbia Institute of Technology'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True | unknown | codeparrot/codeparrot-clean | ||
//===--- OutputLanguageMode.h - Output mode for clang printer ---*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_PRINTASCLANG_OUTPUTLANGUAGEMODE_H
#define SWIFT_PRINTASCLANG_OUTPUTLANGUAGEMODE_H
namespace swift {
enum class OutputLanguageMode { ObjC, Cxx, C };
} // end namespace swift
#endif | c | github | https://github.com/apple/swift | lib/PrintAsClang/OutputLanguageMode.h |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from mathutils import Matrix, Vector
#from math import copysign
import bpy
from bpy.props import IntProperty, FloatProperty
import bmesh.ops
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat, fullList, Matrix_generate
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata, pydata_from_bmesh
def is_matrix(lst):
    """Return True when *lst* has the shape of a 4x4 matrix.

    Checks that there are exactly four rows and that the first row also
    has four entries (remaining rows are not inspected).
    """
    has_four_rows = len(lst) == 4
    return has_four_rows and len(lst[0]) == 4
class SvExtrudeEdgesNode(bpy.types.Node, SverchCustomTreeNode):
    ''' Extrude edges '''
    bl_idname = 'SvExtrudeEdgesNode'
    bl_label = 'Extrude Edges'
    bl_icon = 'OUTLINER_OB_EMPTY'

    def sv_init(self, context):
        # Inputs: the base mesh, an optional subset of edges to extrude
        # (as vertex-index pairs), and matrices applied to new vertices.
        self.inputs.new('VerticesSocket', "Vertices", "Vertices")
        self.inputs.new('StringsSocket', 'Edges', 'Edges')
        self.inputs.new('StringsSocket', 'Polygons', 'Polygons')
        self.inputs.new('StringsSocket', 'ExtrudeEdges')
        self.inputs.new('MatrixSocket', "Matrices")

        # Outputs: the complete resulting mesh, plus just the geometry
        # created by the extrusion.
        self.outputs.new('VerticesSocket', 'Vertices')
        self.outputs.new('StringsSocket', 'Edges')
        self.outputs.new('StringsSocket', 'Polygons')
        self.outputs.new('VerticesSocket', 'NewVertices')
        self.outputs.new('StringsSocket', 'NewEdges')
        self.outputs.new('StringsSocket', 'NewFaces')

    def process(self):
        # inputs
        if not (self.inputs['Vertices'].is_linked and self.inputs['Polygons'].is_linked):
            return

        vertices_s = self.inputs['Vertices'].sv_get()
        edges_s = self.inputs['Edges'].sv_get(default=[[]])
        faces_s = self.inputs['Polygons'].sv_get(default=[[]])
        matrices_s = self.inputs['Matrices'].sv_get(default=[[]])
        # A single flat 4x4 matrix is wrapped so that the code below always
        # sees a list of matrix lists (one list per mesh).
        if is_matrix(matrices_s[0]):
            matrices_s = [Matrix_generate(matrices_s)]
        else:
            matrices_s = [Matrix_generate(matrices) for matrices in matrices_s]
        extrude_edges_s = self.inputs['ExtrudeEdges'].sv_get(default=[[]])

        result_vertices = []
        result_edges = []
        result_faces = []
        result_ext_vertices = []
        result_ext_edges = []
        result_ext_faces = []

        meshes = match_long_repeat([vertices_s, edges_s, faces_s, matrices_s, extrude_edges_s])

        for vertices, edges, faces, matrices, extrude_edges in zip(*meshes):
            if not matrices:
                matrices = [Matrix()]  # identity fallback

            bm = bmesh_from_pydata(vertices, edges, faces)

            if extrude_edges:
                # Map each requested (v1, v2) index pair onto the matching
                # BMEdge; comparison is order-insensitive via sets.
                b_edges = []
                for edge in extrude_edges:
                    b_edge = [e for e in bm.edges if set([v.index for v in e.verts]) == set(edge)]
                    b_edges.append(b_edge[0])
            else:
                b_edges = bm.edges

            new_geom = bmesh.ops.extrude_edge_only(bm, edges=b_edges, use_select_history=False)['geom']

            extruded_verts = [v for v in new_geom if isinstance(v, bmesh.types.BMVert)]

            # Transform each new vertex by its matrix, cycling the shorter
            # of the two lists.
            for vertex, matrix in zip(*match_long_repeat([extruded_verts, matrices])):
                bmesh.ops.transform(bm, verts=[vertex], matrix=matrix, space=Matrix())

            extruded_verts = [tuple(v.co) for v in extruded_verts]

            extruded_edges = [e for e in new_geom if isinstance(e, bmesh.types.BMEdge)]
            extruded_edges = [tuple(v.index for v in edge.verts) for edge in extruded_edges]

            extruded_faces = [f for f in new_geom if isinstance(f, bmesh.types.BMFace)]
            extruded_faces = [[v.index for v in edge.verts] for edge in extruded_faces]

            new_vertices, new_edges, new_faces = pydata_from_bmesh(bm)
            bm.free()

            result_vertices.append(new_vertices)
            result_edges.append(new_edges)
            result_faces.append(new_faces)
            result_ext_vertices.append(extruded_verts)
            result_ext_edges.append(extruded_edges)
            result_ext_faces.append(extruded_faces)

        # Push results only to connected outputs.
        if self.outputs['Vertices'].is_linked:
            self.outputs['Vertices'].sv_set(result_vertices)
        if self.outputs['Edges'].is_linked:
            self.outputs['Edges'].sv_set(result_edges)
        if self.outputs['Polygons'].is_linked:
            self.outputs['Polygons'].sv_set(result_faces)
        if self.outputs['NewVertices'].is_linked:
            self.outputs['NewVertices'].sv_set(result_ext_vertices)
        if self.outputs['NewEdges'].is_linked:
            self.outputs['NewEdges'].sv_set(result_ext_edges)
        if self.outputs['NewFaces'].is_linked:
            self.outputs['NewFaces'].sv_set(result_ext_faces)
def register():
    # Called by Blender/Sverchok when the add-on is enabled.
    bpy.utils.register_class(SvExtrudeEdgesNode)


def unregister():
    # Called by Blender/Sverchok when the add-on is disabled.
    bpy.utils.unregister_class(SvExtrudeEdgesNode)


if __name__ == '__main__':
    register()
# Instruction Set
This document is a reference guide of the SIL instruction set. For an overview
of SIL and OSSA see the [SIL](SIL.md) document.
## Allocation and Deallocation
These instructions allocate and deallocate memory.
### alloc_stack
```
sil-instruction ::= 'alloc_stack' alloc-stack-option* sil-type (',' debug-var-attr)*
alloc-stack-option ::= '[dynamic_lifetime]'
alloc-stack-option ::= '[lexical]'
alloc-stack-option ::= '[var_decl]'
alloc-stack-option ::= '[moveable_value_debuginfo]'
%1 = alloc_stack $T
// %1 has type $*T
```
Allocates uninitialized memory that is sufficiently aligned on the stack
to contain a value of type `T`. The result of the instruction is the
address of the allocated memory.
`alloc_stack` always allocates memory on the stack even for
runtime-sized type.
`alloc_stack` is a stack allocation instruction. See the section above
on stack discipline. The corresponding stack deallocation instruction is
`dealloc_stack`.
The `dynamic_lifetime` attribute specifies that the initialization and
destruction of the stored value cannot be verified at compile time. This
is the case, e.g. for conditionally initialized objects.
The optional `lexical` attribute specifies that the operand corresponds
to a local variable with a lexical lifetime in the Swift source, so
special care must be taken when hoisting `destroy_addr`s. Compare to the
`var_decl` attribute. See [Variable Lifetimes](Ownership.md#variable-lifetimes).
The optional `var_decl` attribute specifies that the storage corresponds
to a local variable in the Swift source.
The optional `moveable_value_debuginfo` attribute specifies that, when
emitting debug info, the code generator cannot assume that the value in
the alloc_stack stays semantically valid over the entire function frame.
NOTE: This is implicitly set to true if the alloc_stack's type is
non-copyable. This is just done to make SIL less verbose.
The memory is not retainable. To allocate a retainable box for a value
type, use `alloc_box`.
`T` must not be a pack type. To allocate a pack, use `alloc_pack`.
### alloc_pack
```
sil-instruction ::= 'alloc_pack' sil-type
%1 = alloc_pack $Pack{Int, Float, repeat each T}
// %1 has type $*Pack{Int, Float, repeat each T}
```
Allocates uninitialized memory on the stack for a value pack of the
given type, which must be a pack type. The result of the instruction is
the address of the allocated memory.
`alloc_pack` is a stack allocation instruction. See the section above on
stack discipline. The corresponding stack deallocation instruction is
`dealloc_pack`.
### alloc_pack_metadata
```
sil-instruction ::= 'alloc_pack_metadata' $()
```
Inserted as the last SIL lowering pass of IRGen, indicates that the next
instruction may have on-stack pack metadata allocated on its behalf.
Notionally, `alloc_pack_metadata` is a stack allocation instruction. See
the section above on stack discipline. The corresponding stack
deallocation instruction is `dealloc_pack_metadata`.
Only valid in Lowered SIL.
### alloc_ref
```
sil-instruction ::= 'alloc_ref'
('[' 'bare' ']')?
('[' 'objc' ']')?
('[' 'stack' ']')?
('[' 'tail_elems' sil-type '*' sil-operand ']')*
sil-type
%1 = alloc_ref [stack] $T
%1 = alloc_ref [tail_elems $E * %2 : Builtin.Word] $T
// $T must be a reference type
// %1 has type $T
// $E is the type of the tail-allocated elements
// %2 must be of a builtin integer type
```
Allocates an object of reference type `T`. The object will be
initialized with retain count 1; its state will be otherwise
uninitialized. The optional `objc` attribute indicates that the object
should be allocated using Objective-C's allocation methods
(`+allocWithZone:`).
The optional `stack` attribute indicates that the object can be
allocated on the stack instead on the heap. In this case the instruction
must be balanced with a `dealloc_stack_ref` instruction to mark the end
of the object's lifetime. Note that the `stack` attribute only
specifies that stack allocation is possible. The final decision on stack
allocation is done during llvm IR generation. This is because the
decision also depends on the object size, which is not necessarily known
at SIL level.
The `bare` attribute indicates that the object header is not used
throughout the lifetime of the object. This means, no reference counting
operations are performed on the object and its metadata is not used. The
header of bare objects doesn't need to be initialized.
The optional `tail_elems` attribute specifies the amount of space to be
reserved for tail-allocated arrays of given element types and element
counts. If there is more than one `tail_elems` attribute, then the tail
of a builtin integer type. The instructions `ref_tail_addr` and
`tail_addr` can be used to project the tail elements. The `objc`
attribute cannot be used together with `tail_elems`.
### alloc_ref_dynamic
```
sil-instruction ::= 'alloc_ref_dynamic'
('[' 'objc' ']')?
('[' 'tail_elems' sil-type '*' sil-operand ']')*
sil-operand ',' sil-type
%1 = alloc_ref_dynamic %0 : $@thick T.Type, $T
%1 = alloc_ref_dynamic [objc] %0 : $@objc_metatype T.Type, $T
%1 = alloc_ref_dynamic [tail_elems $E * %2 : Builtin.Word] %0 : $@thick T.Type, $T
// $T must be a class type
// %1 has type $T
// $E is the type of the tail-allocated elements
// %2 must be of a builtin integer type
```
Allocates an object of class type `T` or a subclass thereof. The dynamic
type of the resulting object is specified via the metatype value `%0`.
The object will be initialized with retain count 1; its state will be
otherwise uninitialized.
The optional `tail_elems` and `objc` attributes have the same effect as
for `alloc_ref`. See `alloc_ref` for details.
### alloc_box
```
sil-instruction ::= 'alloc_box' alloc-box-option* sil-type (',' debug-var-attr)*
alloc-box-option ::= moveable_value_debuginfo
%1 = alloc_box $T
// %1 has type $@box T
```
Allocates a reference-counted `@box` on the heap large enough to hold a
value of type `T`, along with a retain count and any other metadata
required by the runtime. The result of the instruction is the
reference-counted `@box` reference that owns the box. The `project_box`
instruction is used to retrieve the address of the value inside the box.
The box will be initialized with a retain count of 1; the storage will
be uninitialized. The box owns the contained value, and releasing it to
a retain count of zero destroys the contained value as if by
`destroy_addr`. Releasing a box is undefined behavior if the box's
value is uninitialized. To deallocate a box whose value has not been
initialized, `dealloc_box` should be used.
The optional `moveable_value_debuginfo` attribute specifies that, when
emitting debug info, the code generator cannot assume that the value in
the alloc_box is semantically valid over the entire function frame.
NOTE: This is implicitly set to true if the alloc_box's type is
noncopyable. This is just done to make SIL less verbose.
### alloc_global
```
sil-instruction ::= 'alloc_global' sil-global-name
alloc_global @foo
```
Initialize the storage for a global variable. This instruction has
undefined behavior if the global variable has already been initialized.
The type operand must be a lowered object type.
### get_async_continuation
```
sil-instruction ::= 'get_async_continuation' '[throws]'? sil-type
%0 = get_async_continuation $T
%0 = get_async_continuation [throws] $U
```
Begins a suspension of an `@async` function. This instruction can only
be used inside an `@async` function. The result of the instruction is an
`UnsafeContinuation<T>` value, where `T` is the formal type argument to
the instruction, or an `UnsafeThrowingContinuation<T>` if the
instruction carries the `[throws]` attribute. `T` must be a loadable
type. The continuation must be consumed by an `await_async_continuation`
terminator on all paths. Between `get_async_continuation` and
`await_async_continuation`, the following restrictions apply:
- The function cannot `return`, `throw`, `yield`, or `unwind`.
- There cannot be nested suspend points; namely, the function cannot
call another `@async` function, nor can it initiate another suspend
point with `get_async_continuation`.
The function suspends execution when the matching
`await_async_continuation` terminator is reached, and resumes execution
when the continuation is resumed. The continuation resumption operation
takes a value of type `T` which is passed back into the function when it
resumes execution in the `await_async_continuation` instruction's
`resume` successor block. If the instruction has the `[throws]`
attribute, it can also be resumed in an error state, in which case the
matching `await_async_continuation` instruction must also have an
`error` successor.
Within the enclosing SIL function, the result continuation is consumed
by the `await_async_continuation`, and cannot be referenced after the
`await_async_continuation` executes. Dynamically, the continuation value
must be resumed exactly once in the course of the program's execution;
it is undefined behavior to resume the continuation more than once.
Conversely, failing to resume the continuation will leave the suspended
async coroutine hung in its suspended state, leaking any resources it
may be holding.
### get_async_continuation_addr
```
sil-instruction ::= 'get_async_continuation_addr' '[throws]'? sil-type ',' sil-operand
%1 = get_async_continuation_addr $T, %0 : $*T
%1 = get_async_continuation_addr [throws] $U, %0 : $*U
```
Begins a suspension of an `@async` function, like
`get_async_continuation`, additionally binding a specific memory
location for receiving the value when the result continuation is
resumed. The operand must be an address whose type is the
maximally-abstracted lowered type of the formal resume type. The memory
must be uninitialized, and must remain allocated until the matching
`await_async_continuation` instruction(s) consuming the result
continuation have executed. The behavior is otherwise the same as
`get_async_continuation`, and the same restrictions apply on code
appearing between `get_async_continuation_addr` and
`await_async_continuation` as apply between `get_async_continuation` and
`await_async_continuation`. Additionally, the state of the memory
referenced by the operand is indefinite between the execution of
`get_async_continuation_addr` and `await_async_continuation`, and it is
undefined behavior to read or modify the memory during this time. After
the `await_async_continuation` resumes normally to its `resume`
successor, the memory referenced by the operand is initialized with the
resume value, and that value is then owned by the current function. If
`await_async_continuation` instead resumes to its `error` successor,
then the memory remains uninitialized.
### hop_to_executor
```
sil-instruction ::= 'hop_to_executor' sil-operand
hop_to_executor %0 : $T
// $T must be Builtin.Executor or conform to the Actor protocol
```
Ensures that all instructions which need to run on the actor's
executor actually run on that executor. This instruction can only be
used inside an `@async` function.
Checks if the current executor is the one which is bound to the operand
actor. If not, begins a suspension point and enqueues the continuation
to the executor which is bound to the operand actor.
SIL generation emits this instruction with operands of actor type as
well as of type `Builtin.Executor`. The former are expected to be
lowered by the SIL pipeline, so that for IR generation only operands of
type `Builtin.Executor` remain.
The operand is a guaranteed operand, i.e. not consumed.
### extract_executor
```
sil-instruction ::= 'extract_executor' sil-operand
%1 = extract_executor %0 : $T
// $T must be Builtin.Executor or conform to the Actor protocol
// %1 will be of type Builtin.Executor
```
Extracts the executor from the executor or actor operand. SIL generation
emits this instruction to produce executor values when needed (e.g., to
provide to a runtime function). It will be lowered away by the SIL
pipeline.
The operand is a guaranteed operand, i.e. not consumed.
### merge_isolation_region
```
sil-instruction ::= 'merge_isolation_region' (sil-operand ',')+ sil-operand
%2 = merge_isolation_region %first : $*T, %second : $U
%2 = merge_isolation_region %first : $*T, %second : $U, %third : $H
```
Instruction that is only valid in Ownership SSA.
This instruction informs region isolation that all of the operands
should be considered to be artificially a part of the same region. It is
intended to be used to express region dependency when due to unsafe
code generation we have to traffic a non-Sendable value through computations
with Sendable values (causing us to not track the non-Sendable value)
but have to later express that a non-Sendable result of using the
Sendable value needs to be in the same region as the original
non-Sendable value. As an example of where this comes up, consider the
following code:
```
// objc code
@interface CallbackData : NSObject
@end
@interface Klass : NSObject
- (void)loadDataWithCompletionHandler:(void (^)(CallbackData * _Nullable, NSError * _Nullable))completionHandler;
@end
// swift code
extension Klass {
func loadCallbackData() async throws -> sending CallbackData {
try await loadData()
}
}
```
This lowers to:
```
%5 = alloc_stack $CallbackData // users: %26, %25, %31, %16, %7
%6 = objc_method %0 : $Klass, #Klass.loadData!foreign : (Klass) -> () async throws -> CallbackData, $@convention(objc_method) (Optional<@convention(block) (Optional<CallbackData>, Optional<NSError>) -> ()>, Klass) -> () // user: %20
%7 = get_async_continuation_addr [throws] CallbackData, %5 : $*CallbackData // users: %23, %8
%8 = struct $UnsafeContinuation<CallbackData, any Error> (%7 : $Builtin.RawUnsafeContinuation) // user: %14
%9 = alloc_stack $@block_storage Any // users: %22, %16, %10
%10 = project_block_storage %9 : $*@block_storage Any // user: %11
%11 = init_existential_addr %10 : $*Any, $CheckedContinuation<CallbackData, any Error> // user: %15
// function_ref _createCheckedThrowingContinuation<A>(_:)
%12 = function_ref @$ss34_createCheckedThrowingContinuationyScCyxs5Error_pGSccyxsAB_pGnlF : $@convention(thin) <τ_0_0> (UnsafeContinuation<τ_0_0, any Error>) -> @out CheckedContinuation<τ_0_0, any Error> // user: %14
%13 = alloc_stack $CheckedContinuation<CallbackData, any Error> // users: %21, %15, %14
%14 = apply %12<CallbackData>(%13, %8) : $@convention(thin) <τ_0_0> (UnsafeContinuation<τ_0_0, any Error>) -> @out CheckedContinuation<τ_0_0, any Error>
copy_addr [take] %13 to [init] %11 : $*CheckedContinuation<CallbackData, any Error> // id: %15
merge_isolation_region %9 : $*@block_storage Any, %5 : $*CallbackData // id: %16
// function_ref @objc completion handler block implementation for @escaping @callee_unowned @convention(block) (@unowned CallbackData?, @unowned NSError?) -> () with result type CallbackData
%17 = function_ref @$sSo12CallbackDataCSgSo7NSErrorCSgIeyByy_ABTz_ : $@convention(c) (@inout_aliasable @block_storage Any, Optional<CallbackData>, Optional<NSError>) -> () // user: %18
%18 = init_block_storage_header %9 : $*@block_storage Any, invoke %17 : $@convention(c) (@inout_aliasable @block_storage Any, Optional<CallbackData>, Optional<NSError>) -> (), type $@convention(block) (Optional<CallbackData>, Optional<NSError>) -> () // user: %19
%19 = enum $Optional<@convention(block) (Optional<CallbackData>, Optional<NSError>) -> ()>, #Optional.some!enumelt, %18 : $@convention(block) (Optional<CallbackData>, Optional<NSError>) -> () // user: %20
%20 = apply %6(%19, %0) : $@convention(objc_method) (Optional<@convention(block) (Optional<CallbackData>, Optional<NSError>) -> ()>, Klass) -> ()
```
Notice how without the [merge_isolation_region](#merge_isolation_region)
instruction (`%16`) there is no non-Sendable def-use chain from `%5`, the
indirect return value of the block, to the actual non-Sendable block
storage `%9`. This can result in region isolation not propagating
restrictions on usage from `%9` onto `%5` risking the creation of races.
Applying the previous discussion to this specific example, self (`%0`) is
non-Sendable and is bound to the current task. If we did not have the
[merge_isolation_region](#merge_isolation_region) instruction here, we
would not tie the return value `%5` to `%0` via `%9`. This would cause `%5` to
be treated as a disconnected value and thus be a valid sending return
value potentially allowing for `%5` in the caller of the function to be
sent to another isolation domain and introduce a race.
> **_Note:_** This is effectively the same purpose that
[mark_dependence](#mark_dependence) plays for memory dependence
(expressing memory dependence that the compiler cannot infer) except in
the world of region isolation. We purposely use a different instruction
since [mark_dependence](#mark_dependence) is often times used to create
a temporary dependence in between two values via the return value of
[mark_dependence](#mark_dependence). If
[mark_dependence](#mark_dependence) had the semantics of acting like a
region merge we would in contrast have from that point on a region
dependence in between the base and value of the
[mark_dependence](#mark_dependence) causing the
[mark_dependence](#mark_dependence) to have a less "local" effect
since all paths through that program point would have to maintain that
region dependence until the end of the function.
### dealloc_stack
```
sil-instruction ::= 'dealloc_stack' sil-operand
dealloc_stack %0 : $*T
// %0 must be of $*T type
```
Deallocates memory previously allocated by `alloc_stack`. The allocated
value in memory must be uninitialized or destroyed prior to being
deallocated.
`dealloc_stack` is a stack deallocation instruction. See the section on
Stack Discipline above. The operand must be an `alloc_stack`
instruction.
### dealloc_pack
```
sil-instruction ::= 'dealloc_pack' sil-operand
dealloc_pack %0 : $*Pack{Int, Float, repeat each T}
// %0 must be the result of `alloc_pack $Pack{Int, Float, repeat each T}`
```
Deallocates memory for a pack value previously allocated by
`alloc_pack`. If the pack elements are direct, they must be
uninitialized or destroyed prior to being deallocated.
`dealloc_pack` is a stack deallocation instruction. See the section on
Stack Discipline above. The operand must be an `alloc_pack` instruction.
### dealloc_pack_metadata
```
sil-instruction ::= 'dealloc_pack_metadata' sil-operand
dealloc_pack_metadata $0 : $*()
```
Inserted as the last SIL lowering pass of IRGen, indicates that the
on-stack pack metadata emitted on behalf of its operand (actually on
behalf of the instruction after its operand) must be cleaned up here.
`dealloc_pack_metadata` is a stack deallocation instruction. See the
section on Stack Discipline above. The operand must be an
`alloc_pack_metadata` instruction.
Only valid in Lowered SIL.
### dealloc_box
```
sil-instruction ::= 'dealloc_box' '[dead_end]'? sil-operand
dealloc_box %0 : $@box T
```
Deallocates a box, bypassing the reference counting mechanism. The box
variable must have a retain count of one. The boxed type must match the
type passed to the corresponding `alloc_box` exactly, or else undefined
behavior results.
This does not destroy the boxed value. The contents of the value must
have been fully uninitialized or destroyed before `dealloc_box` is
applied.
The optional `dead_end` attribute specifies that this instruction was
created during lifetime completion and is eligible for deletion during
OSSA lowering.
### project_box
```
sil-instruction ::= 'project_box' sil-operand
%1 = project_box %0 : $@box T
// %1 has type $*T
```
Given a `@box T` reference, produces the address of the value inside the
box.
### dealloc_stack_ref
```
sil-instruction ::= 'dealloc_stack_ref' sil-operand
dealloc_stack_ref %0 : $T
// $T must be a class type
// %0 must be an 'alloc_ref [stack]' instruction
```
Marks the deallocation of the stack space for an `alloc_ref [stack]`.
### dealloc_ref
```
sil-instruction ::= 'dealloc_ref' sil-operand
dealloc_ref %0 : $T
// $T must be a class type
```
Deallocates an uninitialized class type instance, bypassing the
reference counting mechanism.
The type of the operand must match the allocated type exactly, or else
undefined behavior results.
The instance must have a retain count of one.
This does not destroy stored properties of the instance. The contents of
stored properties must be fully uninitialized at the time `dealloc_ref`
is applied.
The `stack` attribute indicates that the instruction is the balanced
deallocation of its operand which must be a `alloc_ref [stack]`. In this
case the instruction marks the end of the object's lifetime but has no
other effect.
### dealloc_partial_ref
```
sil-instruction ::= 'dealloc_partial_ref' sil-operand sil-metatype
dealloc_partial_ref %0 : $T, %1 : $U.Type
// $T must be a class type
// $T must be a subclass of U
```
Deallocates a partially-initialized class type instance, bypassing the
reference counting mechanism.
The type of the operand must be a supertype of the allocated type, or
else undefined behavior results.
The instance must have a retain count of one.
All stored properties in classes more derived than the given metatype
value must be initialized, and all other stored properties must be
uninitialized. The initialized stored properties are destroyed before
deallocating the memory for the instance.
This does not destroy the reference type instance. The contents of the
heap object must have been fully uninitialized or destroyed before
`dealloc_partial_ref` is applied.
## Debug Information
Debug information is generally associated with allocations (alloc_stack
or alloc_box) by having a Decl node attached to the allocation with a
SILLocation. For declarations that have no allocation we have explicit
instructions for doing this. This is used by 'let' declarations, which
bind a value to a name and for var decls who are promoted into
registers. The decl they refer to is attached to the instruction with a
SILLocation.
### debug_value
```
sil-instruction ::= debug_value sil-debug-value-option* sil-operand (',' debug-var-attr)* advanced-debug-var-attr* (',' 'expr' debug-info-expr)?
sil-debug-value-option ::= [poison]
sil-debug-value-option ::= [moveable_value_debuginfo]
sil-debug-value-option ::= [trace]
debug_value %1 : $Int
```
This indicates that the value of a declaration has changed value to the
specified operand. The declaration in question is identified by either
the SILLocation attached to the debug_value instruction or the
SILLocation specified in the advanced debug variable attributes.
If the `moveable_value_debuginfo` flag is set, then one knows that the
debug_value's operand is moved at some point of the program, so one can
not model the debug_value using constructs that assume that the value is
live for the entire function (e.g. llvm.dbg.declare). NOTE: This is
implicitly set to true if the alloc_stack's type is noncopyable. This
is just done to make SIL less verbose.
```
debug-var-attr ::= 'var'
debug-var-attr ::= 'let'
debug-var-attr ::= 'name' string-literal
debug-var-attr ::= 'argno' integer-literal
```
There are a number of attributes that provide details about the source
variable that is being described, including the name of the variable.
For function and closure arguments `argno` is the number of the function
argument starting with 1. A compiler-generated source variable will be
marked `implicit` and optimizers are free to remove it even in -Onone.
If the '[poison]' flag is set, then all references within this debug
value will be overwritten with a sentinel at this point in the program.
This is used in debug builds when shortening non-trivial value lifetimes
to ensure the debugger cannot inspect invalid memory. `debug_value`
instructions with the poison flag are not generated until OSSA is
lowered. They are not expected to be serialized within the module, and
the pipeline is not expected to do any significant code motion after
lowering.
```
advanced-debug-var-attr ::= '(' 'name' string-literal (',' sil-instruction-source-info)? ')'
advanced-debug-var-attr ::= 'type' sil-type
```
Advanced debug variable attributes represent source locations and the
type of the source variable when it was originally declared. It is
useful when we're indirectly associating the SSA value with the source
variable (via SIL DIExpression, for example) in which case SSA value's
type is different from that of source variable.
```
debug-info-expr ::= di-expr-operand (':' di-expr-operand)*
di-expr-operand ::= di-expr-operator (':' sil-operand)*
di-expr-operator ::= 'op_fragment'
di-expr-operator ::= 'op_tuple_fragment'
di-expr-operator ::= 'op_deref'
```
SIL debug info expression (SIL DIExpression) is a powerful method to
connect SSA value with the source variable in an indirect fashion.
Di-expression in SIL uses a stack based execution model to evaluate the
expression and apply on the associated (SIL) SSA value before connecting
it with the debug variable. For instance, given the following SIL code:
```
debug_value %a : $*Int, name "x", expr op_deref
```
It means: "You can get the value of source variable 'x' by
*dereferencing* SSA value `%a`". The `op_deref` is a SIL DIExpression
operator that represents "dereference". If there are multiple SIL
DIExpression operators (or arguments), they are evaluated from left to
right:
```
debug_value %b : $**Int, name "y", expr op_deref:op_deref
```
In the snippet above, two `op_deref` operators will be applied on SSA
value `%b` sequentially.
Note that normally when the SSA value has an address type, there will be
an `op_deref` in the SIL DIExpression. Because there are no pointers in
Swift, you always need to dereference an address-type SSA value to get
the value of a source variable. However, if the SSA value is an
`alloc_stack`, the `debug_value` is used to indicate the *declaration*
of a source variable. Or, you can say, used to specify the location
(memory address) of the source variable. Therefore, we don't need to
add an `op_deref` in this case:
```
%a = alloc_stack $Int, ...
debug_value %a : $*Int, name "my_var"
```
The `op_fragment` operator is used to specify the SSA value of a
specific field in an aggregate-type source variable. This SIL
DIExpression operator takes a field declaration - which references the
desired sub-field in source variable - as its argument. Here is an
example:
```
struct MyStruct {
var x: Int
var y: Int
}
...
debug_value %1 : $Int, var, (name "the_struct", loc "file.swift":8:7), type $MyStruct, expr op_fragment:#MyStruct.y, loc "file.swift":9:4
```
In the snippet above, source variable "the_struct" has an aggregate
type `$MyStruct` and we use a SIL DIExpression with `op_fragment`
operator to associate `%1` to the `y` member variable (via the
`#MyStruct.y` directive) inside "the_struct". Note that the extra
source location directive that follows right after `name "the_struct"`
indicates that "the_struct" was originally declared on line 8, but not
until line 9 - the current `debug_value` instruction's source
location - does member `y` get updated with SSA value `%1`.
For tuples, it works similarly, except we use `op_tuple_fragment`, which
takes two arguments: the tuple type and the index. If our struct was
instead a tuple, we would have:
```
debug_value %1 : $Int, var, (name "the_tuple", loc "file.swift":8:7), type $(x: Int, y: Int), expr op_tuple_fragment:$(x: Int, y: Int):1, loc "file.swift":9:4
```
It is worth noting that a SIL DIExpression is similar to
[!DIExpression](https://www.llvm.org/docs/LangRef.html#diexpression) in
LLVM debug info metadata. While LLVM represents `!DIExpression` as a
list of 64-bit integers, SIL DIExpression can have elements with various
types, like AST nodes or strings.
The `[trace]` flag is available for compiler unit testing. It is not
produced during normal compilation. It is used in combination with internal
logging and optimization controls to select specific values to trace or
to transform. For example, liveness analysis combines all "traced"
values into a single live range with multiple definitions. This exposes
corner cases that cannot be represented by passing valid SIL through the
pipeline.
### debug_step
```
sil-instruction ::= debug_step
debug_step
```
This instruction is inserted by Onone optimizations as a replacement for
deleted instructions to ensure that it's possible to set a breakpoint
on its location.
It is code-generated to a NOP instruction.
## Testing
### specify_test
```
sil-instruction ::= 'specify_test' string-literal
specify_test "parsing @trace[3] @function[other].block[2].instruction[1]"
```
Exists only for writing FileCheck tests. Specifies a list of test
arguments which should be used in order to run a particular test "in
the context" of the function containing the instruction.
Parsing of these test arguments is done via
`parseTestArgumentsFromSpecification`.
The following types of test arguments are supported:
- boolean: `true` `false`
- unsigned integer: 0...ULONG_MAX
- string
- value: `%name`
- function:
- `@function` <-- the current function
- `@function[uint]` <-- function at index `uint`
- `@function[name]` <-- function named `name`
- block:
- `@block` <-- the block containing the specify_test instruction
- `@block[+uint]` <-- the block `uint` blocks after the containing block
- `@block[-uint]` <-- the block `uint` blocks before the containing block \
- `@block[uint]` <-- the block at index `uint`
- `@{function}.{block}` <-- the indicated block in the indicated function Example: `@function[foo].block[2]`
- trace:
- `@trace` <-- the first `debug_value [trace]` in the current function
- `@trace[uint]` <-- the `debug_value [trace]` at index `uint`
- value:
- `@{instruction}.result` <-- the first result of the instruction
- `@{instruction}.result[uint]` <-- the result at index `uint` produced by the instruction
- `@{function}.{trace}` <-- the indicated trace in the indicated function Example: `@function[bar].trace`
- argument:
- `@argument` <-- the first argument of the current block
- `@argument[uint]` <-- the argument at index `uint` of the current block
- `@{block}.{argument}` <-- the indicated argument in the indicated block
- `@{function}.{argument}` <-- the indicated argument in the entry block of the indicated function
- instruction:
- `@instruction` <-- the instruction after* the specify_test instruction
- `@instruction[+uint]` <-- the instruction `uint` instructions after the `specify_test` instruction
- `@instruction[-uint]` <-- the instruction `uint` instructions before the `specify_test` instruction
- `@instruction[uint]` <-- the instruction at index `uint`
- `@{function}.{instruction}` <-- the indicated instruction in the indicated function Example: `@function[baz].instruction[19]`
- `@{block}.{instruction}` <-- the indicated instruction in the indicated block Example: `@function[bam].block.instruction`
- operand:
- `@operand` <-- the first operand
- `@operand[uint]` <-- the operand at index `uint`
- `@{instruction}.{operand}` <-- the indicated operand of the indicated instruction Examples:
`@block[19].instruction[2].operand[3]`,
`@function[2].instruction.operand`
Not counting instructions that are deleted when processing functions for tests. The following instructions currently are deleted:
- `specify_test`
- `debug_value [trace]`
## Profiling
### increment_profiler_counter
```
sil-instruction ::= 'increment_profiler_counter' int-literal ',' string-literal ',' 'num_counters' int-literal ',' 'hash' int-literal
increment_profiler_counter 1, "$foo", num_counters 3, hash 0
```
Increments a given profiler counter for a given PGO function name. This
is lowered to the `llvm.instrprof.increment` LLVM intrinsic. This
instruction is emitted when profiling is enabled, and enables features
such as code coverage and profile-guided optimization.
## Accessing Memory
### load
```
sil-instruction ::= 'load' load-ownership-kind? sil-operand
load-ownership-kind ::= 'trivial'
load-ownership-kind ::= 'copy'
load-ownership-kind ::= 'take'
%1 = load %0 : $*T
// %0 must be of a $*T address type for loadable type $T
// %1 will be of type $T
```
Loads the value at address `%0` from memory. `T` must be a loadable
type. An unqualified load does not affect the reference count, if any, of the loaded
value; the value must be retained explicitly if necessary. It is
undefined behavior to load from uninitialized memory or to load from an
address that points to deallocated storage.
In OSSA the ownership kind specifies how to handle ownership:
- **trivial**: the loaded value is trivial and no further action must be taken
than to load the raw bits of the value
- **copy**: the loaded value is copied (e.g., retained) and the original value
stays in the memory location.
- **take**: the value is _moved_ from the memory location without copying.
After the `load`, the memory location remains uninitialized.
### store
```
sil-instruction ::= 'store' sil-value 'to' store-ownership-kind? sil-operand
store-ownership-kind ::= '[trivial]'
store-ownership-kind ::= '[init]'
store-ownership-kind ::= '[assign]'
store %0 to [init] %1 : $*T
// $T must be a loadable type
```
Stores the value `%0` to memory at address `%1`. The type of %1 is `*T`
and the type of `%0` is `T`, which must be a loadable type. This will
overwrite the memory at `%1`.
In OSSA the ownership kind specifies how to handle ownership:
- **trivial**: the stored value is trivial and no further action must be taken
than to store the raw bits of the value
- **init**: the memory is assumed to be _not_ initialized. The (non-trivial)
value is consumed by the instruction and stored to memory.
- **assign**: the memory is assumed to be initialized. Before storing the new
value, the existing memory value is destroyed. The new (non-trivial)
value is consumed by the instruction and stored to memory.
### load_borrow
```
sil-instruction ::= 'load_borrow' sil-value
%1 = load_borrow %0 : $*T
// $T must be a loadable type
```
Loads the value `%1` from the memory location `%0`. The
[load_borrow](#load_borrow) instruction creates a borrowed scope in
which a read-only borrow value `%1` can be used to read the value stored
in `%0`. The end of scope is delimited by an [end_borrow](#end_borrow)
instruction. All [load_borrow](#load_borrow) instructions must be paired
with exactly one [end_borrow](#end_borrow) instruction along any path
through the program. Until [end_borrow](#end_borrow), it is illegal to
invalidate or store to `%0`.
### store_borrow
```
sil-instruction ::= 'store_borrow' sil-value 'to' sil-operand
%2 = store_borrow %0 to %1 : $*T
// $T must be a loadable type
// %1 must be an alloc_stack $T
// %2 is the return address
```
Stores the value `%0` to a stack location `%1`, which must be an
`alloc_stack $T`. The stack location must not be modified by other
instructions than `store_borrow`. All uses of the store_borrow
destination `` `%1 `` should be via the store_borrow return address `%2`
except dealloc_stack. The stored value is alive until the `end_borrow`.
During its lifetime, the stored value must not be modified or destroyed.
The source value `%0` is borrowed (i.e. not copied) and its borrow scope
must outlive the lifetime of the stored value.
Notionally, the outer borrow scope ensures that there's something to be
addressed. The inner borrow scope provides the address to work with.
### begin_borrow
```
sil-instruction ::= 'begin_borrow' '[lexical]'? sil-operand
%1 = begin_borrow %0 : $T
```
Given a value `%0` with [Owned](#owned) or [Guaranteed](#guaranteed)
ownership, produces a new same typed value with
[Guaranteed](#guaranteed) ownership: `%1`. `%1` is guaranteed to have a
lifetime ending use (e.g. [end_borrow](#end_borrow)) along all paths
that do not end in [Dead End Blocks](#dead-end-blocks). This
[begin_borrow](#begin_borrow) and the lifetime ending uses of `%1` are
considered to be liveness requiring uses of `%0` and as such in the
region in between this borrow and its lifetime ending use, `%0` must be
live. This makes sense semantically since `%1` is modeling a new value
with a dependent lifetime on `%0`.
The optional `lexical` attribute specifies that the operand corresponds
to a local variable with a lexical lifetime in the Swift source, so
special care must be taken when moving the end_borrow. Compare to the
`var_decl` attribute. See [Variable Lifetimes](Ownership.md#variable-lifetimes).
The optional `pointer_escape` attribute specifies that a pointer to the
operand escapes within the borrow scope introduced by this begin_borrow.
The optional `var_decl` attribute specifies that the operand corresponds
to a local variable in the Swift source.
This instruction is only valid in functions in Ownership SSA form.
### end_borrow
```
sil-instruction ::= 'end_borrow' sil-operand
// somewhere earlier
// %1 = begin_borrow %0
end_borrow %1 : $T
```
Ends the scope for which the [Guaranteed](#guaranteed) ownership
possessing SILValue `%1` is borrowed from the SILValue `%0`. Must be
paired with at most 1 borrowing instruction (like
[load_borrow](#load_borrow), [begin_borrow](#begin_borrow)) along any
path through the program. In the region in between the borrow
instruction and the [end_borrow](#end_borrow), the original SILValue can
not be modified. This means that:
1. If `%0` is an address, `%0` cannot be written to.
2. If `%0` is a non-trivial value, `%0` cannot be destroyed.
We require that `%1` and `%0` have the same type ignoring
SILValueCategory.
This instruction is only valid in functions in Ownership SSA form.
### borrowed from
```
sil-instruction ::= 'borrowed' sil-operand 'from' '(' (sil-operand (',' sil-operand)*)? ')'
bb1(%1 : @owned $T, %2 : @reborrow $T):
%3 = borrowed %2 : $T from (%1, %0)
// %0 is an enclosing value, defined in a block, which dominates bb1
// %3 has type $T and guaranteed ownership
```
Declares the set of enclosing values for a reborrow or forwarded guaranteed phi argument.
An enclosing value is either a dominating enclosing value (`%0`) or an adjacent
phi-argument in the same block (`%1`). In case of an adjacent phi, all
incoming values of the adjacent phi must be enclosing values for the
corresponding incoming value of the argument in all predecessor
blocks.
The borrowed operand (`%2`) must be a reborrow or forwarded guaranteed phi
argument and is forwarded to the instruction result.
The list of enclosing values (operands after `from`) can be empty if the
borrowed operand stems from a borrow introducer with no enclosing value,
e.g. a `load_borrow`.
Reborrow and forwarded guaranteed phi arguments must not have other users than
borrowed-from instructions.
This instruction is only valid in functions in Ownership SSA form.
### end_lifetime
```
sil-instruction ::= 'end_lifetime' sil-operand
// Consumes %0 without destroying it
end_lifetime %0 : $T
// Consumes the memory location %1 without destroying it
end_lifetime %1 : $*T
```
This instruction signifies the end of its operand's lifetime to the
ownership verifier. It is inserted by the compiler in instances where it
could be illegal to insert a destroy operation. Example: if the operand
had an `undef` value.
The instruction accepts an object or address type.
If its argument is an address type, it's an
identity projection. This instruction is valid only in OSSA and is
lowered to a no-op when lowering to non-OSSA.
### extend_lifetime
```
sil-instruction ::= 'extend_lifetime' sil-operand
// Indicate that %0's linear lifetime extends to this point
extend_lifetime %0 : $X
```
Indicates that a value's linear lifetime extends to this point.
Inserted by OSSACompleteLifetime(AvailabilityBoundary) in order to
provide the invariant that a value is either consumed OR has an
`extend_lifetime` user on all paths and furthermore that all
uses are within the boundary defined by that set of instructions (the
consumes and the `extend_lifetime`s).
### assign
```
sil-instruction ::= 'assign' sil-value 'to' sil-operand
assign %0 to %1 : $*T
// $T must be a loadable type
```
Represents an abstract assignment of the value `%0` to memory at address
`%1` without specifying whether it is an initialization or a normal
store. The type of %1 is `*T` and the type of `%0` is `T`, which must be
a loadable type. This will overwrite the memory at `%1` and destroy the
value currently held there.
The purpose of the [assign](#assign) instruction is to simplify the
definitive initialization analysis on loadable variables by removing
what would otherwise appear to be a load and use of the current value.
It is produced by SILGen, which cannot know which assignments are meant
to be initializations. If it is deemed to be an initialization, it can
be replaced with a [store](#store); otherwise, it must be replaced with
a sequence that also correctly destroys the current value.
This instruction is only valid in Raw SIL and is rewritten as
appropriate by the definitive initialization pass.
### assign_or_init
```
sil-instruction ::= 'assign_or_init' mode? attached-property ',' self-or-local ',' sil-operand ',' 'value' ',' sil-operand ',' 'init' sil-operand ',' 'set' sil-operand
mode ::= '[init]' | '[assign]'
attached-property ::= '#' sil-decl-ref
self-or-local ::= 'self' | 'local'
// Nominal Context:
assign_or_init #MyStruct.x, self %A, value %V, init %I, set %S
// Local Context (only emitted with compiler synthesized thunks currently):
assign_or_init #x, local %L, value %V, init %I, set %S
```
Assigns or initializes a computed property with an attached init accessor.
This instruction is emitted during SILGen without an explicit mode.
The definitive initialization (DI) pass resolves the mode and rewrites
the instruction accordingly:
- `[init]`: In this mode, the init accessor `%I` is called with `%V`
as an argument.
- `[assign]`: In this mode, the setter function `%S` is called with `%V`
as an argument.
This instruction is only valid in Raw SIL and is rewritten as appropriate by
the DI pass.
Operand Roles:
- `attached-property`: The property being written to. For nominal contexts, this
refers to a property with an attached init accessor (e.g. `#MyStruct.x`). For local
contexts, it refers to a local variable name (e.g. `#x`).
- `self-or-local`:
- `self %A`: Refers to the instance of the type that owns the property with the
attached init accessor.
- `local %L`: Indicates the assignment is to a local variable (`%L`) rather than
a property of a nominal type. While init accessors are not currently available to be
used in local contexts in user-authored code, the compiler can synthesize an `assign_or_init`
in local contexts using an init accessor thunk in special cases.
- `value %V`: The input value passed to either the `init` or `set` function, depending on
the selected DI mode.
- `init %I`: A partially applied function implementing the property's init accessor.
- `set %S`: A partially applied function implementing the property's setter.
### mark_uninitialized
```
sil-instruction ::= 'mark_uninitialized' '[' mu_kind ']' sil-operand
mu_kind ::= 'var'
mu_kind ::= 'rootself'
mu_kind ::= 'crossmodulerootself'
mu_kind ::= 'derivedself'
mu_kind ::= 'derivedselfonly'
mu_kind ::= 'delegatingself'
mu_kind ::= 'delegatingselfallocated'
mu_kind ::= 'out'
%2 = mark_uninitialized [var] %1 : $*T
// $T must be an address
```
Indicates that a symbolic memory location is uninitialized, and must be
explicitly initialized before it escapes or before the current function
returns. This instruction returns its operand, and all accesses within
the function must be performed against the return value of the
mark_uninitialized instruction.
The kind of mark_uninitialized instruction specifies the type of data
the mark_uninitialized instruction refers to:
- `var`: designates the start of a normal variable live range
- `rootself`: designates `self` in a struct, enum, or root class
- `crossmodulerootself`: same as `rootself`, but in a case where it's not really safe to treat `self` as a root because the original module might add more stored properties. This is only used for Swift 4 compatibility.
- `derivedself`: designates `self` in a derived (non-root) class
- `derivedselfonly`: designates `self` in a derived (non-root) class
whose stored properties have already been initialized
- `delegatingself`: designates `self` on a struct, enum, or class in a
delegating constructor (one that calls self.init)
- `delegatingselfallocated`: designates `self` on a class convenience
initializer's initializing entry point
- `out`: designates an indirectly returned result.
The purpose of the `mark_uninitialized` instruction is to enable
definitive initialization analysis.
It is produced by SILGen, and is only valid in Raw SIL. It is rewritten
as appropriate by the definitive initialization pass.
### mark_function_escape
```
sil-instruction ::= 'mark_function_escape' sil-operand (',' sil-operand)*
mark_function_escape %1 : $*T
```
Indicates that a function definition closes over a symbolic memory
location. This instruction is variadic, and all of its operands must be
addresses.
The purpose of the `mark_function_escape` instruction is to enable
definitive initialization analysis for global variables and instance
variables, which are not represented as box allocations.
It is produced by SILGen, and is only valid in Raw SIL. It is rewritten
as appropriate by the definitive initialization pass.
### mark_uninitialized_behavior
```
init-case ::= sil-value sil-apply-substitution-list? '(' sil-value ')' ':' sil-type
set-case ::= sil-value sil-apply-substitution-list? '(' sil-value ')' ':' sil-type
sil-instruction ::= 'mark_uninitialized_behavior' init-case set-case
mark_uninitialized_behavior %init<Subs>(%storage) : $T -> U,
%set<Subs>(%self) : $V -> W
```
Indicates that a logical property is uninitialized at this point and
needs to be initialized by the end of the function and before any escape
point for this instruction. Assignments to the property trigger the
behavior's `init` or `set` logic based on the logical initialization
state of the property.
It is expected that the `init-case` is passed some sort of storage and
the `set` case is passed `self`.
This is only valid in Raw SIL.
### unchecked_ownership
```
sil-instruction ::= 'unchecked_ownership' sil-operand
unchecked_ownership %1 : $T
```
unchecked_ownership disables the ownership verification of its operand. This is used in cases
where we cannot resolve ownership until a mandatory pass runs. This is only valid in Raw SIL.
### copy_addr
```
sil-instruction ::= 'copy_addr' '[take]'? sil-value
'to' '[init]'? sil-operand
copy_addr [take] %0 to [init] %1 : $*T
// %0 and %1 must be of the same $*T address type
```
Loads the value at address `%0` from memory and assigns a copy of it
back into memory at address `%1`. A bare `copy_addr` instruction when
`T` is a non-trivial type:
```
copy_addr %0 to %1 : $*T
```
is equivalent to:
```
%new = load %0 : $*T // Load the new value from the source
%old = load %1 : $*T // Load the old value from the destination
strong_retain %new : $T // Retain the new value
strong_release %old : $T // Release the old
store %new to %1 : $*T // Store the new value to the destination
```
except that `copy_addr` may be used even if `%0` is of an address-only
type. The `copy_addr` may be given one or both of the `[take]` or
`[init]` attributes:
- `[take]` destroys the value at the source address in the course of
the copy.
- `[init]` indicates that the destination address is uninitialized.
Without the attribute, the destination address is treated as already
initialized, and the existing value will be destroyed before the new
value is stored.
The three attributed forms thus behave like the following loadable type
operations:
```
// take-assignment
copy_addr [take] %0 to %1 : $*T
// is equivalent to:
%new = load %0 : $*T
%old = load %1 : $*T
// no retain of %new!
strong_release %old : $T
store %new to %1 : $*T
// copy-initialization
copy_addr %0 to [init] %1 : $*T
// is equivalent to:
%new = load %0 : $*T
strong_retain %new : $T
// no load/release of %old!
store %new to %1 : $*T
// take-initialization
copy_addr [take] %0 to [init] %1 : $*T
// is equivalent to:
%new = load %0 : $*T
// no retain of %new!
// no load/release of %old!
store %new to %1 : $*T
```
If `T` is a trivial type, then `copy_addr` is always equivalent to its
take-initialization form.
It is illegal in non-Raw SIL to apply `copy_addr [init]` to a value that
is move only.
### explicit_copy_addr
```
sil-instruction ::= 'explicit_copy_addr' '[take]'? sil-value
'to' '[init]'? sil-operand
explicit_copy_addr [take] %0 to [init] %1 : $*T
// %0 and %1 must be of the same $*T address type
```
This instruction is exactly the same as [copy_addr](#copy_addr) except
that it has special behavior for move only types. Specifically, an
`explicit_copy_addr` is viewed as a `copy_addr` that
is allowed on values that are move only. This is only used by a move
checker after it has emitted an error diagnostic to preserve the general
`copy_addr [init]` ban in Canonical SIL on move only types.
### destroy_addr
```
sil-instruction ::= 'destroy_addr' sil-operand
destroy_addr %0 : $*T
// %0 must be of an address $*T type
```
Destroys the value in memory at address `%0`. If `T` is a non-trivial
type, this is equivalent to:
```
%1 = load %0
strong_release %1
```
except that `destroy_addr` may be used even if `%0` is of an
address-only type. This does not deallocate memory; it only destroys the
pointed-to value, leaving the memory uninitialized.
If `T` is a trivial type, then `destroy_addr` can be safely eliminated.
However, a memory location `%a` must not be accessed after
`destroy_addr %a` (which has not yet been eliminated) regardless of its
type.
### tuple_addr_constructor
```
sil-instruction ::= 'tuple_addr_constructor' sil-tuple-addr-constructor-init sil-operand 'with' sil-tuple-addr-constructor-elements
sil-tuple-addr-constructor-init ::= init|assign
sil-tuple-addr-constructor-elements ::= '(' (sil-operand (',' sil-operand)*)? ')'
// %destAddr has the type $*(Type1, Type2, Type3). Note how we convert all of the types
// to their address form.
%1 = tuple_addr_constructor [init] %destAddr : $*(Type1, Type2, Type3) with (%a : $Type1, %b : $*Type2, %c : $Type3)
```
Creates a new tuple in memory from an exploded list of object and
address values. The SSA values form the leaf elements of the exploded
tuple. So for a simple tuple that only has top level tuple elements,
then the instruction lowers as follows:
```
%1 = tuple_addr_constructor [init] %destAddr : $*(Type1, Type2, Type3) with (%a : $Type1, %b : $*Type2, %c : $Type3)
```
-->
```
%0 = tuple_element_addr %destAddr : $*(Type1, Type2, Type3), 0
store %a to [init] %0 : $*Type1
%1 = tuple_element_addr %destAddr : $*(Type1, Type2, Type3), 1
copy_addr %b to [init] %1 : $*Type2
%2 = tuple_element_addr %destAddr : $*(Type1, Type2, Type3), 2
store %c to [init] %2 : $*Type3
```
A `tuple_addr_constructor` is lowered similarly with each
`store`/`copy_addr` being changed to their dest assign form.
In contrast, if we have a more complicated form of tuple with
sub-tuples, then we read one element from the list as we process the
tuple recursively from left to right. So for instance we would lower as
follows a more complicated tuple:
```
%1 = tuple_addr_constructor [init] %destAddr : $*((), (Type1, ((), Type2)), Type3) with (%a : $Type1, %b : $*Type2, %c : $Type3)
```
->
```
%0 = tuple_element_addr %destAddr : $*((), (Type1, ((), Type2)), Type3), 1
%1 = tuple_element_addr %0 : $*(Type1, ((), Type2)), 0
store %a to [init] %1 : $*Type1
%2 = tuple_element_addr %0 : $*(Type1, ((), Type2)), 1
%3 = tuple_element_addr %2 : $*((), Type2), 1
copy_addr %b to [init] %3 : $*Type2
%4 = tuple_element_addr %destAddr : $*((), (Type1, ((), Type2)), Type3), 2
store %c to [init] %4 : $*Type3
```
This instruction exists to enable SILGen to init and assign RValues
into tuples with a single instruction. Since an RValue is a potentially
exploded tuple, we are forced to use our representation here. If SILGen
instead just uses separate address projections and stores when it sees
such an aggregate, diagnostic SIL passes can not tell the difference
semantically in between initializing a tuple in parts or at once:
```
var arg = (Type1(), Type2())
// This looks the same at the SIL level...
arg = (a, b)
// to assigning in pieces even though we have formed a new tuple.
arg.0 = a
arg.1 = a
```
### index_addr
```
sil-instruction ::= 'index_addr' ('[' 'stack_protection' ']')? sil-operand ',' sil-operand
%2 = index_addr %0 : $*T, %1 : $Builtin.Int<n>
// %0 must be of an address type $*T
// %1 must be of a builtin integer type
// %2 will be of type $*T
```
Given an address that references into an array of values, returns the
address of the `%1`-th element relative to `%0`. The address must
reference into a contiguous array. It is undefined to try to reference
offsets within a non-array value, such as fields within a homogeneous
struct or tuple type, or bytes within a value, using `index_addr`.
(`Int8` address types have no special behavior in this regard, unlike
`char*` or `void*` in C.) It is also undefined behavior to index out of
bounds of an array, except to index the "past-the-end" address of the
array.
The `stack_protection` flag indicates that stack protection is done for
the pointer origin.
### tail_addr
```
sil-instruction ::= 'tail_addr' sil-operand ',' sil-operand ',' sil-type
%2 = tail_addr %0 : $*T, %1 : $Builtin.Int<n>, $E
// %0 must be of an address type $*T
// %1 must be of a builtin integer type
// %2 will be of type $*E
```
Given an address of an array of `%1` values, returns the address of an
element which is tail-allocated after the array. This instruction is
equivalent to `index_addr` except that the resulting address is
aligned-up to the tail-element type `$E`.
This instruction is used to project the N-th tail-allocated array from
an object which is created by an `alloc_ref` with multiple `tail_elems`.
The first operand is the address of an element of the (N-1)-th array,
usually the first element. The second operand is the number of elements
until the end of that array. The result is the address of the first
element of the N-th array.
It is undefined behavior if the provided address, count and type do not
match the actual layout of tail-allocated arrays of the underlying
object.
### index_raw_pointer
```
sil-instruction ::= 'index_raw_pointer' sil-operand ',' sil-operand
%2 = index_raw_pointer %0 : $Builtin.RawPointer, %1 : $Builtin.Int<n>
// %0 must be of $Builtin.RawPointer type
// %1 must be of a builtin integer type
// %2 will be of type $Builtin.RawPointer
```
Given a `Builtin.RawPointer` value `%0`, returns a pointer value at the
byte offset `%1` relative to `%0`.
### bind_memory
```
sil-instruction ::= 'bind_memory' sil-operand ',' sil-operand 'to' sil-type
%token = bind_memory %0 : $Builtin.RawPointer, %1 : $Builtin.Word to $T
// %0 must be of $Builtin.RawPointer type
// %1 must be of $Builtin.Word type
// %token is an opaque $Builtin.Word representing the previously bound types
// for this memory region.
```
Binds memory at `Builtin.RawPointer` value `%0` to type `$T` with enough
capacity to hold `%1` values. See SE-0107: UnsafeRawPointer.
Produces an opaque token representing the previous memory state for
memory binding semantics. This abstract state includes the type that the
memory was previously bound to along with the size of the affected
memory region, which can be derived from `%1`. The token cannot, for
example, be used to retrieve a metatype. It only serves a purpose when
used by `rebind_memory`, which has no static type information. The token
dynamically passes type information from the first bind_memory into a
chain of rebind_memory operations.
Example:
```
%_ = bind_memory %0 : $Builtin.RawPointer, %numT : $Builtin.Word to $T // holds type 'T'
%token0 = bind_memory %0 : $Builtin.RawPointer, %numU : $Builtin.Word to $U // holds type 'U'
%token1 = rebind_memory %0 : $Builtin.RawPointer, %token0 : $Builtin.Word // holds type 'T'
%token2 = rebind_memory %0 : $Builtin.RawPointer, %token1 : $Builtin.Word // holds type 'U'
```
### rebind_memory
```
sil-instruction ::= 'rebind_memory' sil-operand 'to' sil-value
%out_token = rebind_memory %0 : $Builtin.RawPointer to %in_token
// %0 must be of $Builtin.RawPointer type
// %in_token represents a cached set of bound types from a prior memory state.
// %out_token is an opaque $Builtin.Word representing the previously bound
// types for this memory region.
```
This instruction's semantics are identical to `bind_memory`, except
that the types to which memory will be bound, and the extent of the
memory region is unknown at compile time. Instead, the bound-types are
represented by a token that was produced by a prior memory binding
operation. `%in_token` must be the result of `bind_memory` or
`rebind_memory`.
### begin_access
```
sil-instruction ::= 'begin_access' '[' sil-access ']' '[' sil-enforcement ']' '[no_nested_conflict]'? '[builtin]'? sil-operand ':' sil-type
sil-access ::= init
sil-access ::= read
sil-access ::= modify
sil-access ::= deinit
sil-enforcement ::= unknown
sil-enforcement ::= static
sil-enforcement ::= dynamic
sil-enforcement ::= unsafe
sil-enforcement ::= signed
%1 = begin_access [read] [unknown] %0 : $*T
// %0 must be of $*T type.
```
Begins an access to the target memory.
The operand must be a *root address derivation*:
- a function argument,
- an `alloc_stack` instruction,
- a `project_box` instruction,
- a `global_addr` instruction,
- a `ref_element_addr` instruction, or
- another `begin_access` instruction.
It will eventually become a basic structural rule of SIL that no memory
access instructions can be directly applied to the result of one of
these instructions; they can only be applied to the result of a
`begin_access` on them. For now, this rule will be conditional based on
compiler settings and the SIL stage.
An access is ended with a corresponding `end_access`. Accesses must be
uniquely ended on every control flow path which leads to either a
function exit or back to the `begin_access` instruction. The set of
active accesses must be the same on every edge into a basic block.
An `init` access takes uninitialized memory and initializes it. It must
always use `static` enforcement.
A `deinit` access takes initialized memory and leaves it uninitialized.
It must always use `static` enforcement.
`read` and `modify` accesses take initialized memory and leave it
initialized. They may use `unknown` enforcement only in the `raw` SIL
stage.
A `no_nested_conflict` access has no potentially conflicting access
within its scope (on any control flow path between it and its
corresponding `end_access`). Consequently, the access will not need to
be tracked by the runtime for the duration of its scope. This access may
still conflict with an outer access scope; therefore may still require
dynamic enforcement at a single point.
A `signed` access is for pointers that are signed in architectures that
support pointer signing.
A `builtin` access was emitted for a user-controlled Builtin (e.g. the
standard library's KeyPath access). Non-builtin accesses are
auto-generated by the compiler to enforce formal access that derives
from the language. A `builtin` access is always fully enforced
regardless of the compilation mode because it may be used to enforce
access outside of the current module.
### end_access
```
sil-instruction ::= 'end_access' ( '[' 'abort' ']' )? sil-operand
```
Ends an access. The operand must be a `begin_access` instruction.
If the `begin_access` is `init` or `deinit`, the `end_access` may be an
`abort`, indicating that the described transition did not in fact take
place.
### begin_unpaired_access
```
sil-instruction ::= 'begin_unpaired_access' '[' sil-access ']' '[' sil-enforcement ']' '[no_nested_conflict]'? '[builtin]'? sil-operand : sil-type, sil-operand : $*Builtin.UnsafeValueBuffer
sil-access ::= init
sil-access ::= read
sil-access ::= modify
sil-access ::= deinit
sil-enforcement ::= unknown
sil-enforcement ::= static
sil-enforcement ::= dynamic
sil-enforcement ::= unsafe
%2 = begin_unpaired_access [read] [dynamic] %0 : $*T, %1 : $*Builtin.UnsafeValueBuffer
// %0 must be of $*T type.
```
Begins an access to the target memory. This has the same semantics and
obeys all the same constraints as `begin_access`. With the following
exceptions:
- `begin_unpaired_access` has an additional operand for the scratch
buffer used to uniquely identify this access within its scope.
- An access initiated by `begin_unpaired_access` must end with
`end_unpaired_access` unless it has the `no_nested_conflict` flag. A
`begin_unpaired_access` with `no_nested_conflict` is effectively an
instantaneous access with no associated scope.
- The associated `end_unpaired_access` must use the same scratch
buffer.
### end_unpaired_access
```
sil-instruction ::= 'end_unpaired_access' ( '[' 'abort' ']' )? '[' sil-enforcement ']' sil-operand : $*Builtin.UnsafeValueBuffer
sil-enforcement ::= unknown
sil-enforcement ::= static
sil-enforcement ::= dynamic
sil-enforcement ::= unsafe
end_unpaired_access [dynamic] %0 : $*Builtin.UnsafeValueBuffer
```
Ends an access. This has the same semantics and constraints as
`end_access` with the following exceptions:
- The single operand refers to the scratch buffer that uniquely
identified the access within this scope.
- The enforcement level is reiterated, since the corresponding
`begin_unpaired_access` may not be statically discoverable. It must
be identical to the `begin_unpaired_access` enforcement.
## Reference Counting
These instructions handle reference counting of heap objects. The _retain_ and
_release_ family of instructions are only available in non-OSSA. They are
lowered from OSSA's _copy_ and _destroy_ operations.
After lowering OSSA, retain and release operations are never implicit in
SIL and always must be explicitly performed where needed. Retains and
releases on the value may be freely moved, and balancing retains and
releases may be deleted, so long as an owning retain count is maintained
for the uses of the value.
All reference-counting operations are defined to work correctly on null
references (whether strong, unowned, or weak). A non-null reference must
actually refer to a valid object of the indicated type (or a subtype).
Address operands are required to be valid and non-null.
### strong_retain
```
sil-instruction ::= 'strong_retain' sil-operand
strong_retain %0 : $T
// $T must be a reference type
```
Increases the strong retain count of the heap object referenced by `%0`.
This instruction is _not_ available in OSSA.
### strong_release
```
strong_release %0 : $T
// $T must be a reference type.
```
Decrements the strong reference count of the heap object referenced by
`%0`. If the release operation brings the strong reference count of the
object to zero, the object is destroyed and `@weak` references are
cleared. When both its strong and unowned reference counts reach zero,
the object's memory is deallocated.
This instruction is _not_ available in OSSA.
### begin_dealloc_ref
```
%2 = begin_dealloc_ref %0 : $T of %1 : $V
// $T and $V must be reference types where $T is or is derived from $V
// %1 must be an alloc_ref or alloc_ref_dynamic instruction
```
Explicitly sets the state of the object referenced by `%0` to
deallocated. This is the same operation that's done by a strong_release
immediately before it calls the deallocator of the object.
It is expected that the strong reference count of the object is one.
Furthermore, no other thread may increment the strong reference count
during execution of this instruction.
Marks the beginning of a de-virtualized destructor of a class. Returns
the reference operand. Technically, the returned reference is the same
as the operand. But it's important that optimizations see the result as
a different SSA value than the operand. This is important to ensure the
correctness of `ref_element_addr [immutable]` for let-fields, because in
the destructor of a class its let-fields are not immutable anymore.
The first operand `%0` must be physically the same reference as the
second operand `%1`. The second operand has no ownership or code
generation implications and its purpose is purely to enforce that the
object allocation is present in the same function and trivially visible
from the `begin_dealloc_ref` instruction.
### end_init_let_ref
```
%1 = end_init_let_ref %0 : $T
// $T must be a reference type.
```
Marks the point where all let-fields of a class are initialized.
Returns the reference operand. Technically, the returned reference is
the same as the operand. But it's important that optimizations see the
result as a different SSA value than the operand. This is important to
ensure the correctness of `ref_element_addr [immutable]` for let-fields,
because in the initializer of a class, its let-fields are not immutable,
yet.
### strong_copy_unowned_value
```
sil-instruction ::= 'strong_copy_unowned_value' sil-operand
%1 = strong_copy_unowned_value %0 : $@unowned T
// %1 will be a strong @owned value of type $T.
// $T must be a reference type
```
Asserts that the strong reference count of the heap object referenced by
`%0` is still positive, then increments the reference count and returns
a new strong reference to `%0`. The intention is that this instruction
is used as a "safe ownership conversion" from `unowned` to `strong`.
### strong_retain_unowned
```
sil-instruction ::= 'strong_retain_unowned' sil-operand
strong_retain_unowned %0 : $@unowned T
// $T must be a reference type
```
Asserts that the strong reference count of the heap object referenced by
`%0` is still positive, then increases it by one.
This instruction is _not_ available in OSSA.
### unowned_retain
```
sil-instruction ::= 'unowned_retain' sil-operand
unowned_retain %0 : $@unowned T
// $T must be a reference type
```
Increments the unowned reference count of the heap object underlying
`%0`.
This instruction is _not_ available in OSSA.
### unowned_release
```
sil-instruction ::= 'unowned_release' sil-operand
unowned_release %0 : $@unowned T
// $T must be a reference type
```
Decrements the unowned reference count of the heap object referenced by
`%0`. When both its strong and unowned reference counts reach zero, the
object's memory is deallocated.
This instruction is _not_ available in OSSA.
### load_weak
```
sil-instruction ::= 'load_weak' '[take]'? sil-operand
load_weak [take] %0 : $*@sil_weak Optional<T>
// $T must be an optional wrapping a reference type
```
Increments the strong reference count of the heap object held in the
operand, which must be an initialized weak reference. The result is
value of type `$Optional<T>`, except that it is `null` if the heap
object has begun deallocation.
If `[take]` is specified then the underlying weak reference is
invalidated implying that the weak reference count of the loaded value
is decremented. If `[take]` is not specified then the underlying weak
reference count is not affected by this operation (i.e. it is a +0 weak
ref count operation). In either case, the strong reference count will be
incremented before any changes to the weak reference count.
This operation must be atomic with respect to the final `strong_release`
on the operand heap object. It need not be atomic with respect to
`store_weak`/`weak_copy_value` or `load_weak`/`strong_copy_weak_value`
operations on the same address.
### strong_copy_weak_value
```
sil-instruction ::= 'strong_copy_weak_value' sil-operand
%1 = strong_copy_weak_value %0 : $@sil_weak Optional<T>
// %1 will be a strong @owned value of type $Optional<T>.
// $T must be a reference type
// $@sil_weak Optional<T> must be address-only
```
Only valid in opaque values mode. Lowered by AddressLowering to
load_weak.
If the heap object referenced by `%0` has not begun deallocation,
increments its strong reference count and produces the value
`Optional.some` holding the object. Otherwise, produces the value
`Optional.none`.
This operation must be atomic with respect to the final `strong_release`
on the operand heap object. It need not be atomic with respect to
`store_weak`/`weak_copy_value` or `load_weak`/`strong_copy_weak_value`
operations on the same address.
### store_weak
```
sil-instruction ::= 'store_weak' sil-value 'to' '[init]'? sil-operand
store_weak %0 to [init] %1 : $*@sil_weak Optional<T>
// $T must be an optional wrapping a reference type
```
Initializes or reassigns a weak reference. The operand may be `nil`.
If `[init]` is given, the weak reference must currently either be
uninitialized or destroyed. If it is not given, the weak reference must
currently be initialized. After the evaluation:
- The value that was originally referenced by the weak reference will
have its weak reference count decremented by 1.
- If the optionally typed operand is non-nil, the strong reference
wrapped in the optional has its weak reference count incremented
by 1. In contrast, the reference's strong reference count is not
touched.
This operation must be atomic with respect to the final `strong_release`
on the operand (source) heap object. It need not be atomic with respect
to `store_weak`/`weak_copy_value` or
`load_weak`/`strong_copy_weak_value` operations on the same address.
### weak_copy_value
```
sil-instruction ::= 'weak_copy_value' sil-operand
%1 = weak_copy_value %0 : $Optional<T>
// %1 will be an @owned value of type $@sil_weak Optional<T>.
// $T must be a reference type
// $@sil_weak Optional<T> must be address-only
```
Only valid in opaque values mode. Lowered by AddressLowering to
store_weak.
If `%0` is non-nil, produces the value `@sil_weak Optional.some` holding
the object and increments the weak reference count by 1. Otherwise,
produces the value `Optional.none` wrapped in a `@sil_weak` box.
This operation must be atomic with respect to the final `strong_release`
on the operand (source) heap object. It need not be atomic with respect
to `store_weak`/`weak_copy_value` or
`load_weak`/`strong_copy_weak_value` operations on the same address.
### load_unowned
```
sil-instruction ::= 'load_unowned' '[take]'? sil-operand
%1 = load_unowned [take] %0 : $*@sil_unowned T
// T must be a reference type
```
Increments the strong reference count of the object stored at `%0`.
Decrements the unowned reference count of the object stored at `%0` if
`[take]` is specified. Additionally, the storage is invalidated.
Requires that the strong reference count of the heap object stored at
`%0` is positive. Otherwise, traps.
This operation must be atomic with respect to the final `strong_release`
on the operand (source) heap object. It need not be atomic with respect
to `store_unowned`/`unowned_copy_value` or
`load_unowned`/`strong_copy_unowned_value` operations on the same
address.
### store_unowned
```
sil-instruction ::= 'store_unowned' sil-value 'to' '[init]'? sil-operand
store_unowned %0 to [init] %1 : $*@sil_unowned T
// T must be a reference type
```
Increments the unowned reference count of the object at `%0`.
Decrements the unowned reference count of the object previously stored
at `%1` if `[init]` is not specified.
The storage must be initialized iff `[init]` is not specified.
This operation must be atomic with respect to the final `strong_release`
on the operand (source) heap object. It need not be atomic with respect
to `store_unowned`/`unowned_copy_value` or
`load_unowned`/`strong_copy_unowned_value` operations on the same
address.
### unowned_copy_value
```
sil-instruction ::= 'unowned_copy_value' sil-operand
%1 = unowned_copy_value %0 : $T
// %1 will be an @owned value of type $@sil_unowned T.
// $T must be a reference type
// $@sil_unowned T must be address-only
```
Only valid in opaque values mode. Lowered by AddressLowering to
store_unowned.
Increments the unowned reference count of the object at `%0`.
Wraps the operand in an instance of `@sil_unowned`.
This operation must be atomic with respect to the final `strong_release`
on the operand (source) heap object. It need not be atomic with respect
to `store_unowned`/`unowned_copy_value` or
`load_unowned`/`strong_copy_unowned_value` operations on the same
address.
### fix_lifetime
```
sil-instruction ::= 'fix_lifetime' sil-operand
fix_lifetime %0 : $T
// Fix the lifetime of a value %0
fix_lifetime %1 : $*T
// Fix the lifetime of the memory object referenced by %1
```
Acts as a use of a value operand, or of the value in memory referenced
by an address operand. Optimizations may not move operations that would
destroy the value, such as `release_value`, `strong_release`,
`copy_addr [take]`, or `destroy_addr`, past this instruction.
### mark_dependence
```
sil-instruction ::= 'mark_dependence' mark-dep-option? sil-operand 'on' sil-operand
mark-dep-option ::= '[nonescaping]'
mark-dep-option ::= '[unresolved]'
%2 = mark_dependence %value : $*T on %base : $Builtin.NativeObject
```
`%base` must not be identical to `%value`.
The value of the result depends on the value of `%base`.
Operations that would destroy `%base` must not be moved before any
instructions that depend on the result of this instruction, exactly as
if the address had been directly derived from that operand (e.g. using
`ref_element_addr`).
The result is the forwarded value of `%value`. If `%value` is an
address, then result is also an address, and the semantics are the
same as the non-address form: the dependency is on any value derived
from the resulting address. The value could also be a
Builtin.RawPointer or a struct containing the same, in which case,
pointed-to values have a dependency if they are derived from this
instruction's result. Note that in-memory values are only dependent on
base if they are derived from this instruction's result. In this
example, the load of `%dependent_value` depends on `%base`, but the
load of `%independent_value` does not:
```
%dependent_address = mark_dependence %original_address on %base
%dependent_value = load [copy] %dependent_address
%independent_value = load %original_address
destroy_value %base
```
`%base` may have either object or address type. If it is an address,
then the dependency is on the current value stored at the address.
The optional `nonescaping` attribute indicates that the lifetime
guarantee is statically verifiable via a def-use walk starting at this
instruction's result. No value derived from a nonescaping
`mark_dependence` may have a bitwise escape (conversion to
UnsafePointer) or pointer escape (unknown use). The `unresolved`
attribute indicates that this verification is required but has not yet
been diagnosed.
`mark_dependence` may only have a non-`Escapable` result if it also
has a `nonescaping` or `unresolved` attribute. A non-`Escapable`
`mark_dependence` extends the lifetime of `%base` through copies of
`%value` and values transitively forwarded from those copies. If
`%value` is an address, then that includes loads from the
address. None of those values may be used by a bitwise escape
(conversion to UnsafePointer) or pointer escape (unknown use). In this
example, the apply depends on `%base` because `%value` has a
non-`Escapable` type:
```
%dependent_address = mark_dependence [nonescaping] %value : $*NonescapableType on %base
%dependent_value = load %dependent_address
%copied_value = copy_value %dependent_value
apply %f(%dependent_value)
destroy_value %base
```
### mark_dependence_addr
```
sil-instruction ::= 'mark_dependence_addr' mark-dep-option? sil-operand 'on' sil-operand
mark-dep-option ::= '[nonescaping]'
mark-dep-option ::= '[unresolved]'
mark_dependence_addr [nonescaping] %address : $*T on %base : $Builtin.NativeObject
```
The in-memory value at `%address` depends on the value of `%base`.
Operations that would destroy `%base` must not be moved before any
instructions that depend on that value, exactly as if the location at
`%address` aliases `%base` on all paths reachable from this instruction.
In this example, the load of `%dependent_value` depends on `%base`:
```
mark_dependence_addr %address on %base
%dependent_value = load [copy] %address
destroy_value %base
```
`%base` may have either object or address type. If it is an address,
then the dependency is on the current value stored at the address.
The optional `nonescaping` attribute indicates that the lifetime
guarantee is statically verifiable via a data flow over all paths
reachable from this instruction considering all addresses that may
alias with `%address`. No aliasing address may be used by a bitwise
escape (conversion to UnsafePointer) or pointer escape (unknown
use). The `unresolved` attribute indicates that this verification is
required but has not yet been diagnosed.
`mark_dependence_addr` may only have a non-`Escapable` `%address` if
it also has a `nonescaping` or `unresolved` attribute. A
non-`Escapable` `mark_dependence_addr` extends the lifetime of `%base`
through values loaded from the memory location at `%address` and
through any transitively forwarded or copied values. None of those
values may be used by a bitwise escape (conversion to UnsafePointer)
or pointer escape (unknown use). In this example, the apply depends on
`%base` because `%address` has a non-`Escapable` type:
```
mark_dependence_addr [nonescaping] %address : $*NonescapableType on %base
%dependent_value = load %address
%copied_value = copy_value %dependent_value
apply %f(%dependent_value)
destroy_value %base
```
### is_unique
```
sil-instruction ::= 'is_unique' sil-operand
%1 = is_unique %0 : $*T
// $T must be a reference-counted type
// %1 will be of type Builtin.Int1
```
Checks whether %0 is the address of a unique reference to a memory object.
Returns 1 if the strong reference count is 1, and 0 if the strong reference
count is greater than 1.
A discussion of the semantics can be found in the [ARC Optimization](ARC-Optimization.md) document.
### begin_cow_mutation
```
sil-instruction ::= 'begin_cow_mutation' '[native]'? sil-operand
(%1, %2) = begin_cow_mutation %0 : $C
// $C must be a reference-counted type
// %1 will be of type Builtin.Int1
// %2 will be of type C
```
Checks whether `%0` is a unique reference to a memory object. Returns 1 in
the first result if the strong reference count is 1, and 0 if the strong
reference count is greater than 1.
Returns the reference operand in the second result. The returned
reference can be used to mutate the object. Technically, the returned
reference is the same as the operand. But it's important that
optimizations see the result as a different SSA value than the operand.
This is important to ensure the correctness of
`ref_element_addr [immutable]`.
The operand is consumed and the second result is returned as owned.
The optional `native` attribute specifies that the operand has native
Swift reference counting.
For details see [Copy-on-Write Representation](SIL.md#Copy-on-Write-Representation).
### end_cow_mutation
```
sil-instruction ::= 'end_cow_mutation' '[keep_unique]'? sil-operand
%1 = end_cow_mutation %0 : $C
// $C must be a reference-counted type
// %1 will be of type C
```
Marks the end of the mutation of a reference counted object. Returns the
reference operand. Technically, the returned reference is the same as
the operand. But it's important that optimizations see the result as a
different SSA value than the operand. This is important to ensure the
correctness of `ref_element_addr [immutable]`.
The operand is consumed and the result is returned as owned. The result
is guaranteed to be uniquely referenced.
The optional `keep_unique` attribute indicates that the optimizer must
not replace this reference with a not uniquely referenced object.
For details see [Copy-on-Write Representation](SIL.md#Copy-on-Write-Representation).
### end_cow_mutation_addr
```
sil-instruction ::= 'end_cow_mutation_addr' sil-operand
end_cow_mutation_addr %0 : $*T
// %0 must be of an address $*T type
```
This instruction marks the end of mutation of an address. The address could be
an opaque archetype, a struct, tuple or enum type and the end_cow_mutation_addr
will apply to all members contained within it.
It is currently only generated in cases where we may be deriving a MutableSpan from
`%0`, since it is not possible to schedule an `end_cow_mutation` in the standard
library automatically for Array.mutableSpan etc.
### destroy_not_escaped_closure
```
sil-instruction ::= 'destroy_not_escaped_closure' sil-operand
%1 = destroy_not_escaped_closure %0 : $@callee_guaranteed () -> ()
// %0 must be an escaping swift closure.
// %1 will be of type Builtin.Int1
```
Checks if the closure context escaped and then destroys the context.
The escape-check is done by checking if its reference count is exactly 1.
Returns true if it is.
### copy_block
```
sil-instruction ::= 'copy_block' sil-operand
%1 = copy_block %0 : $@convention(block) T -> U
```
Performs a copy of an Objective-C block. Unlike retains of other
reference-counted types, this can produce a different value from the
operand if the block is copied from the stack to the heap.
### copy_block_without_escaping
```
sil-instruction ::= 'copy_block_without_escaping' sil-operand 'withoutEscaping' sil-operand
%2 = copy_block_without_escaping %0 : $@convention(block) T -> U withoutEscaping %1 : $T -> U
```
Performs a copy of an Objective-C block. Unlike retains of other
reference-counted types, this can produce a different value from the
operand if the block is copied from the stack to the heap.
Additionally, consumes the `withoutEscaping` operand `%1` which is the
closure sentinel. SILGen emits these instructions when it passes
@noescape Swift closures to Objective-C. A mandatory SIL pass will
lower this instruction into a `copy_block` and a
`is_escaping`/`cond_fail`/`destroy_value` at the end of the lifetime of
the Objective-C closure parameter to check whether the sentinel closure
was escaped.
## Literals
These instructions bind SIL values to literal constants or to global
entities.
### function_ref
```
sil-instruction ::= 'function_ref' sil-function-name ':' sil-type
%1 = function_ref @function : $@convention(thin) T -> U
// $@convention(thin) T -> U must be a thin function type
// %1 has type $T -> U
```
Creates a reference to a SIL function.
### dynamic_function_ref
```
sil-instruction ::= 'dynamic_function_ref' sil-function-name ':' sil-type
%1 = dynamic_function_ref @function : $@convention(thin) T -> U
// $@convention(thin) T -> U must be a thin function type
// %1 has type $T -> U
```
Creates a reference to a `dynamically_replacable` SIL
function. A `dynamically_replacable` SIL function can be
replaced at runtime.
For the following Swift code:
```
dynamic func test_dynamically_replaceable() {}
func test_dynamic_call() {
test_dynamically_replaceable()
}
```
We will generate:
```
sil [dynamically_replacable] @test_dynamically_replaceable : $@convention(thin) () -> () {
bb0:
%0 = tuple ()
return %0 : $()
}
sil @test_dynamic_call : $@convention(thin) () -> () {
bb0:
%0 = dynamic_function_ref @test_dynamically_replaceable : $@convention(thin) () -> ()
%1 = apply %0() : $@convention(thin) () -> ()
%2 = tuple ()
return %2 : $()
}
```
### prev_dynamic_function_ref
```
sil-instruction ::= 'prev_dynamic_function_ref' sil-function-name ':' sil-type
%1 = prev_dynamic_function_ref @function : $@convention(thin) T -> U
// $@convention(thin) T -> U must be a thin function type
// %1 has type $T -> U
```
Creates a reference to a previous implementation of a
`dynamic_replacement` SIL function.
For the following Swift code:
```
@_dynamicReplacement(for: test_dynamically_replaceable())
func test_replacement() {
test_dynamically_replaceable() // calls previous implementation
}
```
We will generate:
```
sil [dynamic_replacement_for "test_dynamically_replaceable"] @test_replacement : $@convention(thin) () -> () {
bb0:
%0 = prev_dynamic_function_ref @test_replacement : $@convention(thin) () -> ()
%1 = apply %0() : $@convention(thin) () -> ()
%2 = tuple ()
return %2 : $()
}
```
### global_addr
```
sil-instruction ::= 'global_addr' sil-global-name ':' sil-type ('depends_on' sil-operand)?
%1 = global_addr @foo : $*Builtin.Word
%3 = global_addr @globalvar : $*Builtin.Word depends_on %2
// %2 has type $Builtin.SILToken
```
Creates a reference to the address of a global variable which has been
previously initialized by `alloc_global`. It is undefined behavior to
perform this operation on a global variable which has not been
initialized, unless the global variable has a static initializer.
Optionally, the dependency to the initialization of the global can be
specified with a dependency token `depends_on <token>`. This is usually
a `builtin "once"` which calls the initializer for the global variable.
### global_value
```
sil-instruction ::= 'global_value' ('[' 'bare' ']')? sil-global-name ':' sil-type
%1 = global_value @v : $T
```
Returns the value of a global variable which has been previously
initialized by `alloc_global`. It is undefined behavior to perform this
operation on a global variable which has not been initialized, unless
the global variable has a static initializer.
The `bare` attribute indicates that the object header is not used
throughout the lifetime of the value. This means, no reference counting
operations are performed on the object and its metadata is not used. The
header of bare objects doesn't need to be initialized.
### integer_literal
```
sil-instruction ::= 'integer_literal' sil-type ',' int-literal
%1 = integer_literal $Builtin.Int<n>, 123
// $Builtin.Int<n> must be a builtin integer type
// %1 has type $Builtin.Int<n>
```
Creates an integer literal value. The result will be of type
`Builtin.Int<n>`, which must be a builtin integer type. The literal
value is specified using Swift's integer literal syntax.
### float_literal
```
sil-instruction ::= 'float_literal' sil-type ',' int-literal
%1 = float_literal $Builtin.FP<n>, 0x3F800000
// $Builtin.FP<n> must be a builtin floating-point type
// %1 has type $Builtin.FP<n>
```
Creates a floating-point literal value. The result will be of type
`Builtin.FP<n>`, which must be a builtin floating-point type. The
literal value is specified as the bitwise representation of the floating
point value, using Swift's hexadecimal integer literal syntax.
### string_literal
```
sil-instruction ::= 'string_literal' encoding string-literal
encoding ::= 'utf8'
encoding ::= 'utf16'
encoding ::= 'objc_selector'
%1 = string_literal "asdf"
// %1 has type $Builtin.RawPointer
```
Creates a reference to a string in the global string table. The result
is a pointer to the data. The referenced string is always
null-terminated. The string literal value is specified using Swift's
string literal syntax (though `\()` interpolations are not allowed).
When the encoding is `objc_selector`, the string literal produces a
reference to a UTF-8-encoded Objective-C selector in the Objective-C
method name segment.
### base_addr_for_offset
```
sil-instruction ::= 'base_addr_for_offset' sil-type
%1 = base_addr_for_offset $*S
// %1 has type $*S
```
Creates a base address for offset calculations. The result can be used
by address projections, like `struct_element_addr`, which themselves
return the offset of the projected fields. IR generation simply creates
a null pointer for `base_addr_for_offset`.
## Dynamic Dispatch
These instructions perform dynamic lookup of class and generic methods.
The `class_method` and `super_method` instructions must reference Swift
native methods and always use vtable dispatch.
The `objc_method` and `objc_super_method` instructions must reference
Objective-C methods (indicated by the `foreign` marker on a method
reference, as in `#NSObject.description!foreign`).
Note that `objc_msgSend` invocations can only be used as the callee of
an `apply` instruction or `partial_apply` instruction. They cannot be
stored or used as `apply` or `partial_apply` arguments.
### class_method
```
sil-instruction ::= 'class_method' sil-method-attributes?
sil-operand ',' sil-decl-ref ':' sil-type
%1 = class_method %0 : $T, #T.method : $@convention(class_method) U -> V
// %0 must be of a class type or class metatype $T
// #T.method must be a reference to a Swift native method of T or
// of one of its superclasses
// %1 will be of type $U -> V
```
Looks up a method based on the dynamic type of a class or class metatype
instance. It is undefined behavior if the class value is null.
If the static type of the class instance is known, or the method is
known to be final, then the instruction is a candidate for
devirtualization optimization. A devirtualization pass can consult the
module's [VTables](SIL.md#vtables) to find the SIL function that implements
the method and promote the instruction to a static
[function_ref](#function_ref).
### objc_method
```
sil-instruction ::= 'objc_method' sil-method-attributes?
sil-operand ',' sil-decl-ref ':' sil-type
%1 = objc_method %0 : $T, #T.method!foreign : $@convention(objc_method) U -> V
// %0 must be of a class type or class metatype $T
// #T.method must be a reference to an Objective-C method of T or
// of one of its superclasses
// %1 will be of type $U -> V
```
Performs Objective-C method dispatch using `objc_msgSend()`.
Objective-C method calls are never candidates for de-virtualization.
### super_method
```
sil-instruction ::= 'super_method' sil-method-attributes?
sil-operand ',' sil-decl-ref ':' sil-type
%1 = super_method %0 : $T, #Super.method : $@convention(thin) U -> V
// %0 must be of a non-root class type or class metatype $T
// #Super.method must be a reference to a native Swift method of T's
// superclass or of one of its ancestor classes
// %1 will be of type $@convention(thin) U -> V
```
Looks up a method in the superclass of a class or class metatype
instance.
### objc_super_method
```
sil-instruction ::= 'objc_super_method' sil-method-attributes?
sil-operand ',' sil-decl-ref ':' sil-type
%1 = objc_super_method %0 : $T, #Super.method!foreign : $@convention(thin) U -> V
// %0 must be of a non-root class type or class metatype $T
// #Super.method!foreign must be a reference to an ObjC method of T's
// superclass or of one of its ancestor classes
// %1 will be of type $@convention(thin) U -> V
```
This instruction performs an Objective-C message send using
`objc_msgSendSuper()`.
### witness_method
```
sil-instruction ::= 'witness_method' sil-method-attributes?
sil-type ',' sil-decl-ref ':' sil-type
%1 = witness_method $T, #Proto.method
: $@convention(witness_method) <Self: Proto> U -> V
// $T must be an archetype
// #Proto.method must be a reference to a method of one of the protocol
// constraints on T
// <Self: Proto> U -> V must be the type of the referenced method,
// generic on Self
// %1 will be of type $@convention(thin) <Self: Proto> U -> V
```
Looks up the implementation of a protocol method for a generic type
variable constrained by that protocol. The result will be generic on the
`Self` archetype of the original protocol and have the `witness_method`
calling convention. If the referenced protocol is an `@objc` protocol,
the resulting type has the `objc` calling convention.
## Function Application
These instructions call functions or wrap them in partial application or
specialization thunks.
In the following we allow for [apply](#apply),
[begin_apply](#begin_apply), and [try_apply](#try_apply) to have a
callee or caller actor isolation attached to them:
```
sil-actor-isolation ::= unspecified
::= actor_instance
::= nonisolated
::= nonisolated_unsafe
::= global_actor
::= global_actor_unsafe
sil-actor-isolation-callee ::= [callee_isolation=sil-actor-isolation]
sil-actor-isolation-caller ::= [caller_isolation=sil-actor-isolation]
```
These can be used to write test cases with actor isolation using these
instructions and is not intended to be used in SILGen today.
### apply
```
sil-instruction ::= 'apply' '[nothrow]'? sil-actor-isolation-callee?
sil-actor-isolation-caller? sil-value
sil-apply-substitution-list?
'(' (sil-value (',' sil-value)*)? ')'
':' sil-type
sil-apply-substitution-list ::= '<' sil-substitution
(',' sil-substitution)* '>'
sil-substitution ::= type '=' type
%r = apply %0(%1, %2, ...) : $(A, B, ...) -> R
// Note that the type of the callee '%0' is specified *after* the arguments
// %0 must be of a concrete function type $(A, B, ...) -> R
// %1, %2, etc. must be of the argument types $A, $B, etc.
// %r will be of the return type $R
%r = apply %0<A, B>(%1, %2, ...) : $<T, U>(T, U, ...) -> R
// %0 must be of a polymorphic function type $<T, U>(T, U, ...) -> R
// %1, %2, etc. must be of the argument types after substitution $A, $B, etc.
// %r will be of the substituted return type $R'
```
Transfers control to function `%0`, passing it the given arguments. In
the instruction syntax, the type of the callee is specified after the
argument list; the types of the argument and of the defined value are
derived from the function type of the callee. The input argument tuple
type is destructured, and each element is passed as an individual
argument. The `apply` instruction does no retaining or releasing of its
arguments by itself; the [calling convention](#calling-convention)'s
retain/release policy must be handled by separate explicit `retain` and
`release` instructions. The return value will likewise not be implicitly
retained or released.
The callee value must have function type. That function type may not
have an error result, unless the instruction has the `nothrow` attribute
set. The `nothrow` attribute specifies that the callee has an error
result but does not actually throw. For the regular case of calling a
function with error result, use `try_apply`.
NB: If the callee value is of a thick function type, `apply` currently
consumes the callee value at +1 strong retain count.
If the callee is generic, all of its generic parameters must be bound by
the given substitution list. The arguments and return value are given
with these generic substitutions applied.
### begin_apply
```
sil-instruction ::= 'begin_apply' '[nothrow]'? sil-value
sil-apply-substitution-list?
'(' (sil-value (',' sil-value)*)? ')'
':' sil-type
(%anyAddr, %float, %token) = begin_apply %0() : $@yield_once () -> (@yields @inout %Any, @yields Float)
// %anyAddr : $*Any
// %float : $Float
// %token is a token
(%anyAddr, %float, %token, %allocation) = begin_apply %0() : $@yield_once_2 () -> (@yields @inout %Any, @yields Float)
// %anyAddr : $*Any
// %float : $Float
// %token is a token
// %allocation is a pointer to a token
```
Transfers control to coroutine `%0`, passing it the given arguments. The
rules for the application generally follow the rules for `apply`,
except:
- the callee value must be of a single-yield coroutine type
(`yield_once` or `yield_once_2`)
- control returns to this function not when the coroutine performs a
`return`, but when it performs a `yield`, and
- the instruction results are derived from the yields of the coroutine
instead of its normal results.
The final (in the case of `@yield_once`) or penultimate (in the case of
`@yield_once_2`) result of a `begin_apply` is a "token", a special value which
can only be used as the operand of an `end_apply`, `abort_apply`, or
`end_borrow` instruction. Before this second instruction is executed, the
coroutine is said to be "suspended", and the token represents a reference to its
suspended activation record.
If the coroutine's kind is `yield_once_2`, its final result is an address
of a "token", representing the allocation done by the callee
coroutine. It can only be used as the operand of a `dealloc_stack` which
must appear after the coroutine is resumed.
The other results of the instruction correspond to the yields in the
coroutine type. In general, the rules of a yield are similar to the
rules for a parameter, interpreted as if the coroutine caller (the one
executing the `begin_apply`) were being "called" by the `yield`:
- If a yield has an indirect convention, the corresponding result will
have an address type; otherwise it has an object type. For example,
a result corresponding to an `@in Any` yield will have type `$Any`.
- The convention attributes are the same as the parameter convention
attributes, interpreted as if the `yield` were the "call" and the
`begin_apply` marked the entry to the "callee". For example, an
`@in Any` yield transfers ownership of the `Any` value reference
from the coroutine to the caller, which must destroy or move the
value from that position before ending or aborting the coroutine.
A coroutine optionally may produce normal results. These do not have
`@yields` annotation in the result type tuple:
```
(%float, %token) = begin_apply %0() : $@yield_once () -> (@yields Float, Int)
```
Normal results of a coroutine are produced by the corresponding
`end_apply` instruction.
A `begin_apply` must be uniquely either ended or aborted before exiting
the function or looping to an earlier portion of the function.
When throwing coroutines are supported, there will need to be a
`try_begin_apply` instruction.
### abort_apply
```
sil-instruction ::= 'abort_apply' sil-value
abort_apply %token
```
Aborts the given coroutine activation, which is currently suspended at a
`yield` instruction. Transfers control to the coroutine and takes the
`unwind` path from the `yield`. Control is transferred back when the
coroutine reaches an `unwind` instruction.
The operand must always be the token result of a `begin_apply`
instruction, which is why it need not specify a type.
Throwing coroutines will not require a new instruction for aborting a
coroutine; a coroutine is not allowed to throw when it is being aborted.
### end_apply
```
sil-instruction ::= 'end_apply' sil-value 'as' sil-type
end_apply %token as $()
```
Ends the given coroutine activation, which is currently suspended at a
`yield` instruction. Transfers control to the coroutine and takes the
`resume` path from the `yield`. Control is transferred back when the
coroutine reaches a `return` instruction.
The operand must always be the token result of a `begin_apply`
instruction, which is why it need not specify a type.
The result of `end_apply` is the normal result of the coroutine function
(the operand of the `return` instruction).
When throwing coroutines are supported, there will need to be a
`try_end_apply` instruction.
### partial_apply
```
sil-instruction ::= 'partial_apply' partial-apply-attr* sil-value
sil-apply-substitution-list?
'(' (sil-value (',' sil-value)*)? ')'
':' sil-type
partial-apply-attr ::= '[callee_guaranteed]'
partial-apply-attr ::= '[isolated_any]'
partial-apply-attr ::= '[on_stack]'
%c = partial_apply %0(%1, %2, ...) : $(Z..., A, B, ...) -> R
// Note that the type of the callee '%0' is specified *after* the arguments
// %0 must be of a concrete function type $(Z..., A, B, ...) -> R
// %1, %2, etc. must be of the argument types $A, $B, etc.,
// of the tail part of the argument tuple of %0
// %c will be of the partially-applied thick function type (Z...) -> R
%c = partial_apply %0<A, B>(%1, %2, ...) : $(Z..., T, U, ...) -> R
// %0 must be of a polymorphic function type $<T, U>(T, U, ...) -> R
// %1, %2, etc. must be of the argument types after substitution $A, $B, etc.
// of the tail part of the argument tuple of %0
// %c will be of the substituted thick function type $(Z'...) -> R'
```
Creates a closure by partially applying the function `%0` to a partial
sequence of its arguments. This instruction is used to implement
closures.
A local function in Swift that captures context, such as `bar` in the
following example:
```
func foo(_ x:Int) -> Int {
func bar(_ y:Int) -> Int {
return x + y
}
return bar(1)
}
```
lowers to an uncurried entry point and is curried in the enclosing
function:
```
func @bar : $@convention(thin) (Int, @box Int, *Int) -> Int {
entry(%y : $Int, %x_box : $@box Int, %x_address : $*Int):
// ... body of bar ...
}
func @foo : $@convention(thin) Int -> Int {
entry(%x : $Int):
// Create a box for the 'x' variable
%x_box = alloc_box $Int
%x_addr = project_box %x_box : $@box Int
store %x to %x_addr : $*Int
// Create the bar closure
%bar_uncurried = function_ref @bar : $(Int, Int) -> Int
%bar = partial_apply %bar_uncurried(%x_box, %x_addr)
: $(Int, Builtin.NativeObject, *Int) -> Int
// Apply it
%1 = integer_literal $Int, 1
%ret = apply %bar(%1) : $(Int) -> Int
// Clean up
release %bar : $(Int) -> Int
return %ret : $Int
}
```
**Erased Isolation**: If the `partial_apply` is marked with the flag
`[isolated_any]`, the first applied argument must have type
`Optional<any Actor>`. In addition to being provided as an argument to
the partially-applied function, this value will be stored in a special
place in the context and can be recovered with
`function_extract_isolation`. The result type of the `partial_apply`
will be an `@isolated(any)` function type.
**Ownership Semantics of Closure Context during Invocation**: By
default, an escaping `partial_apply` (`partial_apply` without
`[on_stack]`) creates a closure whose invocation takes ownership of the
context, meaning that a call implicitly releases the closure.
If the `partial_apply` is marked with the flag `[callee_guaranteed]`,
the invocation instead uses a caller-guaranteed model, where the caller
promises not to release the closure while the function is being called.
The result type of the `partial_apply` will be a `@callee_guaranteed`
function type.
**Captured Value Ownership Semantics**: In the instruction syntax, the
type of the callee is specified after the argument list; the types of
the argument and of the defined value are derived from the function type
of the callee. Even so, the ownership requirements of the partial apply
are not the same as that of the callee function (and thus said
signature). Instead:
1. If the `partial_apply` has a `@noescape` function type
(`partial_apply [on_stack]`) the closure context is allocated on the
stack and is initialized to contain the closed-over values without
taking ownership of those values. The closed-over values are not
retained and the lifetime of the closed-over values must be managed
by other instructions independently of the `partial_apply`. The
lifetime of the stack context of a `partial_apply [on_stack]` must
be terminated with a `dealloc_stack`.
2. If the `partial_apply` has an escaping function type (not
`[on_stack]`) then the closure context will be heap allocated with a
retain count of 1. Any closed over parameters (except for `@inout`
parameters) will be consumed by the partial_apply. This ensures that
no matter when the `partial_apply` is called, the captured arguments
are alive. When the closure context's reference count reaches zero,
the contained values are destroyed. If the callee requires an owned
parameter, then the implicit partial_apply forwarder created by
IRGen will copy the underlying argument and pass it to the callee.
3. If an address argument has `@inout_aliasable` convention, the
closure obtained from `partial_apply` will not own its underlying
value. The `@inout_aliasable` parameter convention is used when a
`@noescape` closure captures an `inout` argument.
**Coroutines** `partial_apply` could be used to create closures over
coroutines. Overall, the `partial_apply` of a coroutine is
straightforward: it is another coroutine that captures arguments passed
to the `partial_apply` instruction. This closure applies the original
coroutine (similar to the `begin_apply` instruction) for yields
(suspend) and yields the resulting values. Then it calls the original
coroutine continuation for return or unwind, and forwards the results
(if any) to the caller as well. Currently only the autodiff
transformation produces `partial_apply` for coroutines while
differentiating modify accessors.
**NOTE:** If the callee is generic, all of its generic parameters must
be bound by the given substitution list. The arguments are given with
these generic substitutions applied, and the resulting closure is of
concrete function type with the given substitutions applied. The generic
parameters themselves cannot be partially applied; all of them must be
bound. The result is always a concrete function.
**TODO:** The instruction, when applied to a generic function, currently
implicitly performs abstraction difference transformations enabled by
the given substitutions, such as promoting address-only arguments and
returns to register arguments. This should be fixed.
### builtin
```
sil-instruction ::= 'builtin' string-literal
sil-apply-substitution-list?
'(' (sil-operand (',' sil-operand)*)? ')'
':' sil-type
%1 = builtin "foo"(%1 : $T, %2 : $U) : $V
// "foo" must name a function in the Builtin module
```
Invokes functionality built into the backend code generator, such as
LLVM-level instructions and intrinsics.
#### Assertion configuration
To be able to support disabling assertions at compile time there is a
builtin `assert_configuration`. It can
be replaced at compile time by a constant or can stay opaque.
All `assert_configuration` builtins are replaced by the
constant propagation pass to the appropriate constant depending on
compile time settings. Subsequent passes remove dependent unwanted
control flow. Using this mechanism we support conditionally
enabling/disabling of code in SIL libraries depending on the assertion
configuration selected when the library is linked into user code.
There are three assertion configurations: Debug (0), Release (1) and
DisableReplacement (-1).
The optimization flag or a special assert configuration flag determines
the value. Depending on the configuration value, assertions in the
standard library will be executed or not.
The standard library uses this builtin to define an assert that can be
disabled at compile time.
``` none
func assert(...) {
if Int32(Builtin.assert_configuration() == 0) {
_assertionFailure(message, ...)
}
}
```
The `assert_configuration` builtin is serialized when we
build the standard library (we recognize the `-parse-stdlib` option and
don't do the constant replacement but leave the function application to
be serialized to SIL).
The compiler flag that influences the value of the
`assert_configuration` builtin is the optimization flag: at
`-Onone` the builtin will be replaced by `Debug` at higher
optimization levels the builtin will be replaced by `Release`.
Optionally, the value to use for replacement can be specified with the
`-assert-config` flag which overwrites the value selected by the
optimization flag (possible values are `Debug`, `Release`,
`DisableReplacement`).
If `assert_configuration` builtin stays opaque until
IRGen, IRGen will replace the application by the constant representing
Debug mode (0). This happens when building the standard library binary.
The generated SIL will retain the builtin but the generated binary
will contain code with assertions enabled.
## Metatypes
These instructions access metatypes, either statically by type name or
dynamically by introspecting class or generic values.
### metatype
```
sil-instruction ::= 'metatype' sil-type
%1 = metatype $T.Type
// %1 has type $T.Type
```
Creates a reference to the metatype object for type `T`.
### value_metatype
```
sil-instruction ::= 'value_metatype' sil-type ',' sil-operand
%1 = value_metatype $T.Type, %0 : $T
// %0 must be a value or address of type $T
// %1 will be of type $T.Type
```
Obtains a reference to the dynamic metatype of the value `%0`.
### existential_metatype
```
sil-instruction ::= 'existential_metatype' sil-type ',' sil-operand
%1 = existential_metatype $P.Type, %0 : $P
// %0 must be a value of class protocol or protocol composition
// type $P, or an address of address-only protocol type $*P
// %1 will be a $P.Type value referencing the metatype of the
// concrete value inside %0
```
Obtains the metatype of the concrete value referenced by the existential
container referenced by `%0`.
### objc_protocol
```
sil-instruction ::= 'objc_protocol' protocol-decl : sil-type
%0 = objc_protocol #ObjCProto : $Protocol
```
**TODO:** Fill this in.
## Aggregate Types
These instructions construct and project elements from structs, tuples,
and class instances.
### retain_value
```
sil-instruction ::= 'retain_value' sil-operand
retain_value %0 : $A
```
Retains a loadable value, which simply retains any references it holds.
For trivial types, this is a no-op. For reference types, this is
equivalent to a `strong_retain`. For `@unowned` types, this is
equivalent to an `unowned_retain`. In each of these cases, those are the
preferred forms.
For aggregate types, especially enums, it is typically both easier and
more efficient to reason about aggregate copies than it is to reason
about copies of the subobjects.
This instruction is _not_ available in OSSA.
### retain_value_addr
```
sil-instruction ::= 'retain_value_addr' sil-operand
retain_value_addr %0 : $*A
```
Retains a loadable value inside given address, which simply retains any
references it holds.
This instruction is _not_ available in OSSA.
### unmanaged_retain_value
```
sil-instruction ::= 'unmanaged_retain_value' sil-value
unmanaged_retain_value %0 : $A
```
This instruction has the same local semantics as `retain_value` but:
- Is valid in ownership qualified SIL.
- Is not intended to be statically paired at compile time by the
compiler.
The intention is that this instruction is used to implement unmanaged
constructs.
This instruction is _not_ available in OSSA.
### strong_copy_unmanaged_value
```
sil-instruction ::= 'strong_copy_unmanaged_value' sil-value
%1 = strong_copy_unmanaged_value %0 : $@sil_unmanaged A
// %1 will be a strong @owned $A.
```
This instruction has the same semantics as `copy_value` except that its
input is a trivial `@sil_unmanaged` type that doesn't require ref
counting. This is intended to be used semantically as a "conversion"
like instruction from `unmanaged` to `strong` and thus should never be
removed by the optimizer. Since the returned value is a strong owned
value, this instruction semantically should be treated as performing a
strong copy of the underlying value as if by the value's type lowering.
### copy_value
```
sil-instruction ::= 'copy_value' sil-operand
%1 = copy_value %0 : $A
```
Performs a copy of a loadable value as if by the value's type lowering
and returns the copy. The returned copy semantically is a value that is
completely independent of the operand. In terms of specific types:
1. For trivial types, this is equivalent to just propagating through
the trivial value.
2. For reference types, this is equivalent to performing a
`strong_retain` operation and returning the reference.
3. For `@unowned` types, this is equivalent to performing an
`unowned_retain` and returning the operand.
4. For aggregate types, this is equivalent to recursively performing a
`copy_value` on its components, forming a new aggregate from the
copied components, and then returning the new aggregate.
In ownership qualified functions, a `copy_value` produces a +1 value
that must be consumed at most once along any path through the program.
It is illegal in non-Raw SIL to `copy_value` a value that
is non-copyable.
### explicit_copy_value
```
sil-instruction ::= 'explicit_copy_value' sil-operand
%1 = explicit_copy_value %0 : $A
```
This is exactly the same instruction semantically as
[copy_value](#copy_value) with the exception that when move only
checking is performed, `explicit_copy_value` is
treated as an explicit copy asked for by the user that should not be
rewritten and should be treated as a non-consuming use.
This is used for two things:
1. Implementing a copy builtin for no implicit copy types.
2. To enable the move checker, once it has emitted an error diagnostic,
to still produce valid Ownership SSA SIL at the end of the
guaranteed optimization pipeline when we enter the Canonical SIL
stage.
### move_value
```
sil-instruction ::= 'move_value' '[lexical]'? sil-operand
%1 = move_value %0 : $@_moveOnly A
```
Performs a move of the operand, ending its lifetime. When ownership is
enabled, it always takes in an `@owned T` and produces a
new `@owned T`.
1. For trivial types, this is equivalent to just propagating through
the trivial value.
2. For reference types, this is equivalent to ending the lifetime of
the operand, beginning a new lifetime for the result and setting the
result to the value of the operand.
3. For aggregates, the operation is equivalent to performing a
move_value on each of its fields recursively.
After ownership is lowered, we leave in the move_value to provide a
place for IRGenSIL to know to store a potentially new variable (in case
the move was associated with a let binding).
NOTE: This instruction is used in an experimental feature called 'move
only values'. A move_value instruction is an instruction that
introduces (or injects) a type `T` into the move only value
space.
The `lexical` attribute specifies that the value corresponds to a local
variable with a lexical lifetime in the Swift source. Compare to the
`var_decl` attribute. See [Variable Lifetimes](Ownership.md#variable-lifetimes).
The optional `pointer_escape` attribute specifies that a pointer to the
operand escapes within the scope introduced by this move_value.
The optional `var_decl` attribute specifies that the operand corresponds
to a local variable in the Swift source.
Note: Although ``move_value`` conceptually forwards an owned value, it also
summarizes lifetime attributes for a whole [forward-extended
lifetime](SIL.md#lifetimes); therefore, it is not formally a forwarding
instruction.
### drop_deinit
```
sil-instruction ::= 'drop_deinit' sil-operand
%1 = drop_deinit %0 : $T
// T must be a move-only type
// %1 is an @owned T
%3 = drop_deinit %2 : $*T
// T must be a move-only type
// %2 has type *T
```
This instruction is a marker for a following destroy instruction to
suppress the call of the move-only type's deinitializer. The
instruction accepts an object or address type. If its argument is an
object type it takes in an `@owned T` and produces a new
`@owned T`. If its argument is an address type, it's an
identity projection.
If the operand is an object type, then this is a pseudo type-cast. It
consumes its operand and produces a new value with the same nominal
struct or enum type, but as if the type had no user-defined
deinitializer. Its only use must be an instruction that ends the
aggregate lifetime, such as `destroy_value`,
`destructure_struct`, or `switch_enum`. If the
use is a `destroy_value`, then it prevents the destroy from
invoking the deinitializer. For example:
```
%1 = drop_deinit %0 : $T
destroy_value %1 : $T // does not invoke deinit()
```
If the operand and result are addresses, drop_deinit ends the lifetime
of the referenced memory value while keeping the value's fields or enum
cases alive. The deinit of the value is not called. The returned address
can be used to access the value's field, e.g. with struct_element_addr,
or enum cases with switch_enum_addr. After the drop_deinit, it is
illegal to destroy its operand or result address with destroy_addr. For
example:
```
%1 = drop_deinit %0 : $S
%2 = struct_element_addr %1 : $*T, #S.field
destroy_addr %2 : $T
```
The instruction is only valid in ownership SIL.
### release_value
```
sil-instruction ::= 'release_value' sil-operand
release_value %0 : $A
```
Destroys a loadable value, by releasing any retainable pointers within
it.
This is defined to be equivalent to storing the operand into a stack
allocation and using 'destroy_addr' to destroy the object there.
For trivial types, this is a no-op. For reference types, this is
equivalent to a `strong_release`. For `@unowned` types, this is
equivalent to an `unowned_release`. In each of these cases, those are
the preferred forms.
For aggregate types, especially enums, it is typically both easier and
more efficient to reason about aggregate destroys than it is to reason
about destroys of the subobjects.
This instruction is _not_ available in OSSA.
### release_value_addr
```
sil-instruction ::= 'release_value_addr' sil-operand
release_value_addr %0 : $*A
```
Destroys a loadable value inside given address, by releasing any
retainable pointers within it.
This instruction is _not_ available in OSSA.
### unmanaged_release_value
```
sil-instruction ::= 'unmanaged_release_value' sil-value
unmanaged_release_value %0 : $A
```
This instruction has the same local semantics as `release_value` but:
- Is valid in ownership qualified SIL.
- Is not intended to be statically paired at compile time by the
compiler.
The intention is that this instruction is used to implement unmanaged
constructs.
This instruction is _not_ available in OSSA.
### destroy_value
```
sil-instruction ::= 'destroy_value' '[dead_end]'? '[poison]'? sil-operand
destroy_value %0 : $A
```
Destroys a loadable value, by releasing any retainable pointers within
it.
This is defined to be equivalent to storing the operand into a stack
allocation and using 'destroy_addr' to destroy the object there.
For trivial types, this is a no-op. For reference types, this is
equivalent to a `strong_release`. For `@unowned` types, this is
equivalent to an `unowned_release`. In each of these cases, those are
the preferred forms.
For aggregate types, especially enums, it is typically both easier and
more efficient to reason about aggregate destroys than it is to reason
about destroys of the subobjects.
The optional `dead_end` attribute specifies that this instruction was
created during lifetime completion and is eligible for deletion during
OSSA lowering.
### autorelease_value
```
sil-instruction ::= 'autorelease_value' sil-operand
autorelease_value %0 : $A
```
**TODO:** Complete this section.
### function_extract_isolation
```
sil-instruction ::= 'function_extract_isolation' sil-operand
```
Reads the isolation of a `@isolated(any)` function value. The result is always
a borrowed value of type `$Optional<any Actor>`. It is exactly the value that
was originally used to construct the function with `partial_apply
[isolated_any]`.
### tuple
```
sil-instruction ::= 'tuple' sil-tuple-elements
sil-tuple-elements ::= '(' (sil-operand (',' sil-operand)*)? ')'
sil-tuple-elements ::= sil-type '(' (sil-value (',' sil-value)*)? ')'
%1 = tuple (%a : $A, %b : $B, ...)
// $A, $B, etc. must be loadable non-address types
// %1 will be of the "simple" tuple type $(A, B, ...)
%1 = tuple $(a:A, b:B, ...) (%a, %b, ...)
// (a:A, b:B, ...) must be a loadable tuple type
// %1 will be of the type $(a:A, b:B, ...)
```
Creates a loadable tuple value by aggregating multiple loadable values.
If the destination type is a "simple" tuple type, that is, it has no
keyword argument labels or variadic arguments, then the first notation
can be used, which interleaves the element values and types. If keyword
names or variadic fields are specified, then the second notation must be
used, which spells out the tuple type before the fields.
### tuple_extract
```
sil-instruction ::= 'tuple_extract' sil-operand ',' int-literal
%1 = tuple_extract %0 : $(T...), 123
// %0 must be of a loadable tuple type $(T...)
// %1 will be of the type of the selected element of %0
```
Extracts an element from a loadable tuple value.
### tuple_pack_extract
```
sil-instruction ::= 'tuple_pack_extract' sil-value 'of' sil-operand 'as' sil-type
%value = tuple_pack_extract %index of %tuple : $(repeat each T) as $@pack_element("01234567-89AB-CDEF-0123-000000000000") U
// %index must be of $Builtin.PackIndex type
// %tuple must be of tuple type
// %value will be of the result type specified by the 'as' clause
```
Extracts a value at a dynamic index from a tuple value.
Only valid in opaque values mode. Lowered by AddressLowering to
`tuple_pack_element_addr`. For more details, see that instruction.
### tuple_element_addr
```
sil-instruction ::= 'tuple_element_addr' sil-operand ',' int-literal
%1 = tuple_element_addr %0 : $*(T...), 123
// %0 must be of a $*(T...) address-of-tuple type
// %1 will be of address type $*U where U is the type of the 123rd
// element of T
```
Given the address of a tuple in memory, derives the address of an
element within that value.
### tuple_pack_element_addr
```
sil-instruction ::= 'tuple_pack_element_addr' sil-value 'of' sil-operand 'as' sil-type
%addr = tuple_pack_element_addr %index of %tuple : $*(repeat each T) as $*@pack_element("01234567-89AB-CDEF-0123-000000000000") U
// %index must be of $Builtin.PackIndex type
// %tuple must be of address-of-tuple type
// %addr will be of the result type specified by the 'as' clause
```
Given the address of a tuple in memory, derives the address of a dynamic
element within that value.
The *induced pack type* for the tuple operand is the indirect pack type
corresponding to the types of the tuple elements and tuple element
expansions, exactly as if the labels were removed and the parentheses
were replaced with `Pack{`...`}`. For example,
for the tuple type `(repeat Optional<each T>, Float)`, the
induced pack type is `Pack{repeat Optional<each T>, Float}`.
The pack index operand must be a pack indexing instruction. The result
type (given by the `as` clause) must be structurally
well-typed for the pack index and the induced pack type; see the
structural type matching rules for pack indices.
### destructure_tuple
```
sil-instruction ::= 'destructure_tuple' sil-operand
(%elt1, ..., %eltn) = destructure_tuple %0 : $(Elt1Ty, ..., EltNTy)
// %0 must be a tuple of type $(Elt1Ty, ..., EltNTy)
// %eltN must have the type $EltNTy
```
Given a tuple value, split the value into its constituent elements.
### struct
```
sil-instruction ::= 'struct' sil-type '(' (sil-operand (',' sil-operand)*)? ')'
%1 = struct $S (%a : $A, %b : $B, ...)
// $S must be a loadable struct type
// $A, $B, ... must be the types of the physical 'var' fields of $S in order
// %1 will be of type $S
```
Creates a value of a loadable struct type by aggregating multiple
loadable values.
### struct_extract
```
sil-instruction ::= 'struct_extract' sil-operand ',' sil-decl-ref
%1 = struct_extract %0 : $S, #S.field
// %0 must be of a loadable struct type $S
// #S.field must be a physical 'var' field of $S
// %1 will be of the type of the selected field of %0
```
Extracts a physical field from a loadable struct value.
### struct_element_addr
```
sil-instruction ::= 'struct_element_addr' sil-operand ',' sil-decl-ref
%1 = struct_element_addr %0 : $*S, #S.field
// %0 must be of a struct type $S
// #S.field must be a physical 'var' field of $S
// %1 will be the address of the selected field of %0
```
Given the address of a struct value in memory, derives the address of a
physical field within the value.
### destructure_struct
```
sil-instruction ::= 'destructure_struct' sil-operand
(%elt1, ..., %eltn) = destructure_struct %0 : $S
// %0 must be a struct of type $S
// %eltN must have the same type as the Nth field of $S
```
Given a struct, split the struct into its constituent fields.
### object
```
sil-instruction ::= 'object' sil-type '(' (sil-operand (',' sil-operand)*)? ')'
object $T (%a : $A, %b : $B, ...)
// $T must be a non-generic or bound generic reference type
// The first operands must match the stored properties of T
// Optionally there may be more elements, which are tail-allocated to T
```
Constructs a statically initialized object. This instruction can only
appear as final instruction in a global variable static initializer
list.
### vector
```
sil-instruction ::= 'vector' '(' (sil-operand (',' sil-operand)*)? ')'
vector (%a : $T, %b : $T, ...)
// $T must be a non-generic or bound generic reference type
// All operands must have the same type
```
Constructs a statically initialized vector of elements. This instruction
can only appear as final instruction in a global variable static
initializer list.
### vector_base_addr
```
sil-instruction ::= 'vector_base_addr' sil-operand
%1 = vector_base_addr %0 : $*Builtin.FixedArray<N, Element>
// %0 must have type $*Builtin.FixedArray
// %1 will be of the element type of the Builtin.FixedArray
```
Derives the address of the first element of a vector, i.e. a `Builtin.FixedArray`,
from the address of the vector itself.
Addresses of other vector elements can then be derived with `index_addr`.
### ref_element_addr
```
sil-instruction ::= 'ref_element_addr' '[immutable]'? sil-operand ',' sil-decl-ref
%1 = ref_element_addr %0 : $C, #C.field
// %0 must be a value of class type $C
// #C.field must be a non-static physical field of $C
// %1 will be of type $*U where U is the type of the selected field
// of C
```
Given an instance of a class, derives the address of a physical instance
variable inside the instance. It is undefined behavior if the class
value is null.
The `immutable` attribute specifies that all loads of the same instance
variable from the same class reference operand are guaranteed to yield
the same value. The `immutable` attribute is used to reference COW
buffer elements after an `end_cow_mutation` and before a
`begin_cow_mutation`. The attribute is also used for let-fields of a
class after an `end_init_let_ref` and before a `begin_dealloc_ref`.
### ref_tail_addr
```
sil-instruction ::= 'ref_tail_addr' '[immutable]'? sil-operand ',' sil-type
%1 = ref_tail_addr %0 : $C, $E
// %0 must be a value of class type $C with tail-allocated elements $E
// %1 will be of type $*E
```
Given an instance of a class, which is created with tail-allocated
array(s), derives the address of the first element of the first
tail-allocated array. This instruction is used to project the first
tail-allocated element from an object which is created by an `alloc_ref`
with `tail_elems`. It is undefined behavior if the class instance does
not have tail-allocated arrays or if the element-types do not match.
The `immutable` attribute specifies that all loads of the same instance
variable from the same class reference operand are guaranteed to yield
the same value.
## Enums
These instructions construct and manipulate values of enum type.
Loadable enum values are created with the [enum](#enum) instruction.
Address-only enums require two-step initialization. First, if the case
requires data, that data is stored into the enum at the address
projected by [init_enum_data_addr](#init_enum_data_addr). This step is
skipped for cases without data. Finally, the tag for the enum is
injected with an [inject_enum_addr](#inject_enum_addr) instruction:
```
enum AddressOnlyEnum {
case HasData(AddressOnlyType)
case NoData
}
sil @init_with_data : $(AddressOnlyType) -> AddressOnlyEnum {
entry(%0 : $*AddressOnlyEnum, %1 : $*AddressOnlyType):
// Store the data argument for the case.
%2 = init_enum_data_addr %0 : $*AddressOnlyEnum, #AddressOnlyEnum.HasData!enumelt
copy_addr [take] %1 to [init] %2 : $*AddressOnlyType
// Inject the tag.
inject_enum_addr %0 : $*AddressOnlyEnum, #AddressOnlyEnum.HasData!enumelt
return
}
sil @init_without_data : $() -> AddressOnlyEnum {
// No data. We only need to inject the tag.
inject_enum_addr %0 : $*AddressOnlyEnum, #AddressOnlyEnum.NoData!enumelt
return
}
```
Accessing the value of a loadable enum is inseparable from dispatching
on its discriminator and is done with the [switch_enum](#switch_enum)
terminator:
```
enum Foo { case A(Int), B(String) }
sil @switch_foo : $(Foo) -> () {
entry(%foo : $Foo):
switch_enum %foo : $Foo, case #Foo.A!enumelt: a_dest, case #Foo.B!enumelt: b_dest
a_dest(%a : $Int):
/* use %a */
b_dest(%b : $String):
/* use %b */
}
```
An address-only enum can be tested by branching on it using the
[switch_enum_addr](#switch_enum_addr) terminator. Its value can then be
taken by destructively projecting the enum value with
[unchecked_take_enum_data_addr](#unchecked_take_enum_data_addr):
```
enum Foo<T> { case A(T), B(String) }
sil @switch_foo : $<T> (Foo<T>) -> () {
entry(%foo : $*Foo<T>):
switch_enum_addr %foo : $*Foo<T>, case #Foo.A!enumelt: a_dest,
case #Foo.B!enumelt: b_dest
a_dest:
%a = unchecked_take_enum_data_addr %foo : $*Foo<T>, #Foo.A!enumelt
/* use %a */
b_dest:
%b = unchecked_take_enum_data_addr %foo : $*Foo<T>, #Foo.B!enumelt
/* use %b */
}
```
Both [switch_enum](#switch_enum) and
[switch_enum_addr](#switch_enum_addr) must include a `default` case
unless the enum can be exhaustively switched in the current function,
i.e. when the compiler can be sure that it knows all possible present
and future values of the enum in question. This is generally true for
enums defined in Swift, but there are two exceptions: *non-frozen enums*
declared in libraries compiled with the `-enable-library-evolution`
flag, which may grow new cases in the future in an ABI-compatible way;
and enums marked with the `objc` attribute, for which other bit patterns
are permitted for compatibility with C. All enums imported from C are
treated as "non-exhaustive" for the same reason, regardless of the
presence or value of the `enum_extensibility` Clang attribute.
(See
[SE-0192](https://github.com/swiftlang/swift-evolution/blob/main/proposals/0192-non-exhaustive-enums.md)
for more information about non-frozen enums.)
### enum
```
sil-instruction ::= 'enum' sil-type ',' sil-decl-ref (',' sil-operand)?
%1 = enum $U, #U.EmptyCase!enumelt
%1 = enum $U, #U.DataCase!enumelt, %0 : $T
// $U must be an enum type
// #U.DataCase or #U.EmptyCase must be a case of enum $U
// If #U.Case has a data type $T, %0 must be a value of type $T
// If #U.Case has no data type, the operand must be omitted
// %1 will be of type $U
```
Creates a loadable enum value in the given `case`. If the `case` has a
data type, the enum value will contain the operand value.
### unchecked_enum_data
```
sil-instruction ::= 'unchecked_enum_data' sil-operand ',' sil-decl-ref
%1 = unchecked_enum_data %0 : $U, #U.DataCase!enumelt
// $U must be an enum type
// #U.DataCase must be a case of enum $U with data
// %1 will be of object type $T for the data type of case U.DataCase
```
Unsafely extracts the payload data for an enum `case` from an enum
value. It is undefined behavior if the enum does not contain a value of
the given case.
### init_enum_data_addr
```
sil-instruction ::= 'init_enum_data_addr' sil-operand ',' sil-decl-ref
%1 = init_enum_data_addr %0 : $*U, #U.DataCase!enumelt
// $U must be an enum type
// #U.DataCase must be a case of enum $U with data
// %1 will be of address type $*T for the data type of case U.DataCase
```
Projects the address of the data for an enum `case` inside an enum. This
does not modify the enum or check its value. It is intended to be used
as part of the initialization sequence for an address-only enum. Storing
to the `init_enum_data_addr` for a case followed by `inject_enum_addr`
with that same case is guaranteed to result in a fully-initialized enum
value of that case being stored. Loading from the `init_enum_data_addr`
of an initialized enum value or injecting a mismatched case tag is
undefined behavior.
The address is invalidated as soon as the operand enum is fully
initialized by an `inject_enum_addr`.
### inject_enum_addr
```
sil-instruction ::= 'inject_enum_addr' sil-operand ',' sil-decl-ref
inject_enum_addr %0 : $*U, #U.Case!enumelt
// $U must be an enum type
// #U.Case must be a case of enum $U
// %0 will be overlaid with the tag for #U.Case
```
Initializes the enum value referenced by the given address by overlaying
the tag for the given case. If the case has no data, this instruction is
sufficient to initialize the enum value. If the case has data, the data
must be stored into the enum at the `init_enum_data_addr` address for
the case *before* `inject_enum_addr` is applied. It is undefined
behavior if `inject_enum_addr` is applied for a case with data to an
uninitialized enum, or if `inject_enum_addr` is applied for a case with
data when data for a mismatched case has been stored to the enum.
### unchecked_take_enum_data_addr
```
sil-instruction ::= 'unchecked_take_enum_data_addr' sil-operand ',' sil-decl-ref
%1 = unchecked_take_enum_data_addr %0 : $*U, #U.DataCase!enumelt
// $U must be an enum type
// #U.DataCase must be a case of enum $U with data
// %1 will be of address type $*T for the data type of case U.DataCase
```
Takes the address of the payload for the given enum `case` in-place in
memory. It is undefined behavior if the referenced enum does not contain
a value of the given `case`.
The result shares memory with the original enum value. If an enum
declaration is unconditionally loadable (meaning it's loadable
regardless of any generic parameters), and it has more than one case
with an associated value, then it may embed the enum tag within the
payload area. If this is the case, then
`unchecked_take_enum_data_addr` will clear the tag from the
payload, invalidating the referenced enum value, but leaving the payload
value referenced by the result address valid. In these cases, the enum
memory cannot be reinitialized as an enum until the payload has also
been invalidated.
If an enum has no more than one payload case, or if the declaration is
ever address-only, then `unchecked_take_enum_data_addr` is
guaranteed to be nondestructive, and the payload address can be accessed
without invalidating the enum in these cases. The payload can be
invalidated to invalidate the enum (assuming the enum does not have a
`deinit` at the type level).
### select_enum
```
sil-instruction ::= 'select_enum' sil-operand sil-select-case*
(',' 'default' sil-value)?
':' sil-type
%n = select_enum %0 : $U,
case #U.Case1!enumelt: %1,
case #U.Case2!enumelt: %2, /* ... */
default %3 : $T
// $U must be an enum type
// #U.Case1, Case2, etc. must be cases of enum $U
// %1, %2, %3, etc. must have type $T
// %n has type $T
```
Selects one of the "case" or "default" operands based on the case of
an enum value. This is equivalent to a trivial
[switch_enum](#switch_enum) branch sequence:
```
entry:
switch_enum %0 : $U,
case #U.Case1!enumelt: bb1,
case #U.Case2!enumelt: bb2, /* ... */
default bb_default
bb1:
br cont(%1 : $T) // value for #U.Case1
bb2:
br cont(%2 : $T) // value for #U.Case2
bb_default:
br cont(%3 : $T) // value for default
cont(%n : $T):
// use argument %n
```
but turns the control flow dependency into a data flow dependency. For
address-only enums, [select_enum_addr](#select_enum_addr) offers the
same functionality for an indirectly referenced enum value in memory.
Like [switch_enum](#switch_enum), `select_enum` must have
a `default` case unless the enum can be exhaustively switched in the
current function.
### select_enum_addr
```
sil-instruction ::= 'select_enum_addr' sil-operand sil-select-case*
(',' 'default' sil-value)?
':' sil-type
%n = select_enum_addr %0 : $*U,
case #U.Case1!enumelt: %1,
case #U.Case2!enumelt: %2, /* ... */
default %3 : $T
// %0 must be the address of an enum type $*U
// #U.Case1, Case2, etc. must be cases of enum $U
// %1, %2, %3, etc. must have type $T
// %n has type $T
```
Selects one of the "case" or "default" operands based on the case of
the referenced enum value. This is the address-only counterpart to
[select_enum](#select_enum).
Like [switch_enum_addr](#switch_enum_addr),
`select_enum_addr` must have a `default` case unless
the enum can be exhaustively switched in the current function.
## Protocol and Protocol Composition Types
These instructions create and manipulate values of protocol and protocol
composition type. From SIL's perspective, protocol and protocol
composition types consist of an *existential container*, which is a
generic container for a value of unknown runtime type, referred to as an
"existential type" in type theory. The existential container consists
of a reference to the *witness table(s)* for the protocol(s) referred to
by the protocol type and a reference to the underlying *concrete value*,
which may be either stored in-line inside the existential container for
small values or allocated separately into a buffer owned and managed by
the existential container for larger values.
Depending on the constraints applied to an existential type, an
existential container may use one of several representations:
- **Opaque existential containers**: If none of the protocols in a
protocol type are class protocols, then the existential container
for that type is address-only and referred to in the implementation
as an *opaque existential container*. The value semantics of the
existential container propagate to the contained concrete value.
Applying [copy_addr](#copy_addr) to an opaque existential container
copies the contained concrete value, deallocating or reallocating
the destination container's owned buffer if necessary. Applying
[destroy_addr](#destroy_addr) to an opaque existential container
destroys the concrete value and deallocates any buffers owned by the
existential container. The following instructions manipulate opaque
existential containers:
- [init_existential_addr](#init_existential_addr)
- [open_existential_addr](#open_existential_addr)
- [deinit_existential_addr](#deinit_existential_addr)
- **Opaque existential containers loadable types**: In the SIL Opaque
Values mode of operation, we take an opaque value as-is. Said value
might be replaced with one of the _addr instructions above before
IR generation. The following instructions manipulate "loadable"
opaque existential containers:
- [init_existential_value](#init_existential_value)
- [open_existential_value](#open_existential_value)
- [deinit_existential_value](#deinit_existential_value)
- **Class existential containers**: If a protocol type is constrained
by one or more class protocols, then the existential container for
that type is loadable and referred to in the implementation as a
*class existential container*. Class existential containers have
reference semantics and can be `retain`-ed and `release`-d. The
following instructions manipulate class existential containers:
- [init_existential_ref](#init_existential_ref)
- [open_existential_ref](#open_existential_ref)
- **Metatype existential containers**: Existential metatypes use a
container consisting of the type metadata for the conforming type
along with the protocol conformances. Metatype existential
containers are trivial types. The following instructions manipulate
metatype existential containers:
- [init_existential_metatype](#init_existential_metatype)
- [open_existential_metatype](#open_existential_metatype)
- **Boxed existential containers**: The standard library `Error`
protocol uses a size-optimized reference-counted container, which
indirectly stores the conforming value. Boxed existential containers
can be `retain`-ed and `release`-d. The following instructions
manipulate boxed existential containers:
- [alloc_existential_box](#alloc_existential_box)
- [project_existential_box](#project_existential_box)
- [open_existential_box](#open_existential_box)
- [open_existential_box_value](#open_existential_box_value)
- [dealloc_existential_box](#dealloc_existential_box)
Some existential types may additionally support specialized
representations when they contain certain known concrete types. For
example, when Objective-C interop is available, the `Error` protocol
existential supports a class existential container representation for
`NSError` objects, so it can be initialized from one using
[init_existential_ref](#init_existential_ref) instead of the more
expensive [alloc_existential_box](#alloc_existential_box):
```
bb(%nserror: $NSError):
// The slow general way to form an Error, allocating a box and
// storing to its value buffer:
%error1 = alloc_existential_box $Error, $NSError
%addr = project_existential_box $NSError in %error1 : $Error
strong_retain %nserror: $NSError
store %nserror to %addr : $NSError
// The fast path supported for NSError:
strong_retain %nserror: $NSError
%error2 = init_existential_ref %nserror: $NSError, $Error
```
### init_existential_addr
```
sil-instruction ::= 'init_existential_addr' sil-operand ',' sil-type
%1 = init_existential_addr %0 : $*P, $T
// %0 must be of a $*P address type for non-class protocol or protocol
// composition type P
// $T must be an AST type that fulfills protocol(s) P
// %1 will be of type $*T', where T' is the maximally abstract lowering
// of type T
```
Partially initializes the memory referenced by `%0` with an existential
container prepared to contain a value of type `$T`. The result of the
instruction is an address referencing the storage for the contained
value, which remains uninitialized. The contained value must be
`store`-d or `copy_addr`-ed to in order for the existential value to be
fully initialized. If the existential container needs to be destroyed
while the contained value is uninitialized,
[deinit_existential_addr](#deinit_existential_addr) must be used to do
so. A fully initialized existential container can be destroyed with
[destroy_addr](#destroy_addr) as usual. It is undefined behavior to
[destroy_addr](#destroy_addr) a partially-initialized existential
container.
### init_existential_value
```
sil-instruction ::= 'init_existential_value' sil-operand ',' sil-type ','
sil-type
%1 = init_existential_value %0 : $L, $C, $P
// %0 must be of loadable type $L, lowered from AST type $C, conforming to
// protocol(s) $P
// %1 will be of type $P
```
Loadable version of the above: Inits-up the existential container
prepared to contain a value of type `$P`.
### deinit_existential_addr
```
sil-instruction ::= 'deinit_existential_addr' sil-operand
deinit_existential_addr %0 : $*P
// %0 must be of a $*P address type for non-class protocol or protocol
// composition type P
```
Undoes the partial initialization performed by
[init_existential_addr](#init_existential_addr).
[deinit_existential_addr](#deinit_existential_addr) is only valid for
existential containers that have been partially initialized by
[init_existential_addr](#init_existential_addr) but haven't had their
contained value initialized. A fully initialized existential must be
destroyed with [destroy_addr](#destroy_addr).
### deinit_existential_value
```
sil-instruction ::= 'deinit_existential_value' sil-operand
deinit_existential_value %0 : $P
// %0 must be of a $P opaque type for non-class protocol or protocol
// composition type P
```
Undoes the partial initialization performed by
[init_existential_value](#init_existential_value).
[deinit_existential_value](#deinit_existential_value) is only valid for
existential containers that have been partially initialized by
[init_existential_value](#init_existential_value) but haven't had their
contained value initialized. A fully initialized existential must be
destroyed with [destroy_value](#destroy_value).
### open_existential_addr
```
sil-instruction ::= 'open_existential_addr' sil-allowed-access sil-operand 'to' sil-type
sil-allowed-access ::= 'immutable_access'
sil-allowed-access ::= 'mutable_access'
%1 = open_existential_addr immutable_access %0 : $*P to $*@opened P
// %0 must be of a $*P type for non-class protocol or protocol composition
// type P
// $*@opened P must be a unique archetype that refers to an opened
// existential type P.
// %1 will be of type $*@opened P
```
Obtains the address of the concrete value inside the existential
container referenced by `%0`. The protocol conformances associated with
this existential container are associated directly with the archetype
`$*@opened P`. This pointer can be used with any operation on
archetypes, such as `witness_method` assuming this operation obeys the
access constraint: The returned address can either allow
`mutable_access` or `immutable_access`. Users of the returned address
may only consume (e.g `destroy_addr` or `copy_addr [take]`) or mutate
the value at the address if they have `mutable_access`.
### open_existential_value
```
sil-instruction ::= 'open_existential_value' sil-operand 'to' sil-type
%1 = open_existential_value %0 : $P to $@opened P
// %0 must be of a $P type for non-class protocol or protocol composition
// type P
// $@opened P must be a unique archetype that refers to an opened
// existential type P.
// %1 will be of type $@opened P
```
Loadable version of the above: Opens-up the existential container
associated with `%0`. The protocol conformances associated with this
existential container are associated directly with the archetype
`$@opened P`.
### init_existential_ref
```
sil-instruction ::= 'init_existential_ref' sil-operand ':' sil-type ','
sil-type
%1 = init_existential_ref %0 : $C' : $C, $P
// %0 must be of class type $C', lowered from AST type $C, conforming to
// protocol(s) $P
// $P must be a class protocol or protocol composition type
// %1 will be of type $P
```
Creates a class existential container of type `$P` containing a
reference to the class instance `%0`.
### open_existential_ref
```
sil-instruction ::= 'open_existential_ref' sil-operand 'to' sil-type
%1 = open_existential_ref %0 : $P to $@opened P
// %0 must be of a $P type for a class protocol or protocol composition
// $@opened P must be a unique archetype that refers to an opened
// existential type P
// %1 will be of type $@opened P
```
Extracts the class instance reference from a class existential
container. The protocol conformances associated with this existential
container are associated directly with the archetype `@opened P`. This
pointer can be used with any operation on archetypes, such as
[witness_method](#witness_method). When the operand is of metatype type,
the result will be the metatype of the opened archetype.
### init_existential_metatype
```
sil-instruction ::= 'init_existential_metatype' sil-operand ',' sil-type
%1 = init_existential_metatype %0 : $@<rep> T.Type, $@<rep> P.Type
// %0 must be of a metatype type $@<rep> T.Type where T: P
// $@<rep> P.Type must be the existential metatype of a protocol or protocol
// composition, with the same metatype representation <rep>
// %1 will be of type $@<rep> P.Type
```
Creates a metatype existential container of type `$P.Type` containing
the conforming metatype of `$T`.
### open_existential_metatype
```
sil-instruction ::= 'open_existential_metatype' sil-operand 'to' sil-type
%1 = open_existential_metatype %0 : $@<rep> P.Type to $@<rep> (@opened P).Type
// %0 must be of a $P.Type existential metatype for a protocol or protocol
// composition
// $@<rep> (@opened P).Type must be the metatype of a unique archetype that
// refers to an opened existential type P, with the same metatype
// representation <rep>
// %1 will be of type $@<rep> (@opened P).Type
```
Extracts the metatype from an existential metatype. The protocol
conformances associated with this existential container are associated
directly with the archetype `@opened P`.
### alloc_existential_box
```
sil-instruction ::= 'alloc_existential_box' sil-type ',' sil-type
%1 = alloc_existential_box $P, $T
// $P must be a protocol or protocol composition type with boxed
// representation
// $T must be an AST type that conforms to P
// %1 will be of type $P
```
Allocates a boxed existential container of type `$P` with space to hold
a value of type `$T'`. The box is not fully initialized until a valid
value has been stored into the box. If the box must be deallocated
before it is fully initialized,
[dealloc_existential_box](#dealloc_existential_box) must be used. A
fully initialized box can be `retain`-ed and `release`-d like any
reference-counted type. The
[project_existential_box](#project_existential_box) instruction is used
to retrieve the address of the value inside the container.
### project_existential_box
```
sil-instruction ::= 'project_existential_box' sil-type 'in' sil-operand
%1 = project_existential_box $T in %0 : $P
// %0 must be a value of boxed protocol or protocol composition type $P
// $T must be the most abstracted lowering of the AST type for which the box
// was allocated
// %1 will be of type $*T
```
Projects the address of the value inside a boxed existential container.
The address is dependent on the lifetime of the owner reference `%0`. It
is undefined behavior if the concrete type `$T` is not the same type for
which the box was allocated with
[alloc_existential_box](#alloc_existential_box).
### open_existential_box
```
sil-instruction ::= 'open_existential_box' sil-operand 'to' sil-type
%1 = open_existential_box %0 : $P to $*@opened P
// %0 must be a value of boxed protocol or protocol composition type $P
// $*@opened P must be the address type of a unique archetype that refers to
// an opened existential type P
// %1 will be of type $*@opened P
```
Projects the address of the value inside a boxed existential container,
and uses the enclosed type and protocol conformance metadata to bind the
opened archetype `$@opened P`. The result address is dependent on both
the owning box and the enclosing function; in order to "open" a boxed
existential that has directly adopted a class reference, temporary
scratch space may need to have been allocated.
### open_existential_box_value
```
sil-instruction ::= 'open_existential_box_value' sil-operand 'to' sil-type
%1 = open_existential_box_value %0 : $P to $@opened P
// %0 must be a value of boxed protocol or protocol composition type $P
// $@opened P must be a unique archetype that refers to an opened
// existential type P
// %1 will be of type $@opened P
```
Projects the value inside a boxed existential container, and uses the
enclosed type and protocol conformance metadata to bind the opened
archetype `$@opened P`.
### dealloc_existential_box
```
sil-instruction ::= 'dealloc_existential_box' sil-operand ',' sil-type
dealloc_existential_box %0 : $P, $T
// %0 must be an uninitialized box of boxed existential container type $P
// $T must be the AST type for which the box was allocated
```
Deallocates a boxed existential container. The value inside the
existential buffer is not destroyed; either the box must be
uninitialized, or the value must have been projected out and destroyed
beforehand. It is undefined behavior if the concrete type `$T` is not
the same type for which the box was allocated with
[alloc_existential_box](#alloc_existential_box).
## Blocks
Blocks are used in Objective-C and are similar to closures.
### project_block_storage
```
sil-instruction ::= 'project_block_storage' sil-operand ':' sil-type
```
### init_block_storage_header
**TODO:** Fill this in. The printing of this instruction looks incomplete
on trunk currently.
## Pack Indexing
These instructions are collectively called the *pack indexing
instructions*. Each of them produces a single value of type
`Builtin.PackIndex`. Instructions that consume pack indices generally
provide a projected element type which is required to be structurally
well-typed for the given pack index and the actual pack type they index
into. This rule depends on the exact pack indexing instruction used and
is described in a section above.
All pack indexing instructions carry an **indexed pack type**, which is
a formal type that must be a pack type. Pack indexing instructions can
be used to index into any pack with the same shape as the indexed pack
type. The components of the actual indexed pack do not need to be
exactly the same as the components of the indexing instruction's
indexed pack type as long as they contain expansions in the same places
and those expansions expand pack parameters with the same shape.
### scalar_pack_index
```
sil-instruction ::= 'scalar_pack_index' int-literal 'of' sil-type
%index = scalar_pack_index 0 of $Pack{Int, repeat each T, Int}
```
Produce the dynamic pack index of a scalar (non-pack-expansion)
component of a pack. The type operand is the indexed pack type. The
integer operand is an index into the components of this pack type; it
must be in range and resolve to a component that is not a pack
expansion.
Substitution must adjust the component index appropriately so that it
still refers to the same component. For example, if the pack type is
`Pack{repeat each T, Int}`, and substitution replaces `T` with
`Pack{Float, repeat each U}`, a component index of 1 must be adjusted to
2 so that it still refers to the `Int` element.
### pack_pack_index
```
sil-instruction ::= 'pack_pack_index' int-literal ',' sil-value 'of' sil-type
```
Produce the dynamic pack index of an element of a slice of a pack. The
type operand is the indexed pack type. The integer operand is an index
into the components of this pack type and must be in range. The value
operand is the index in the pack slice and must be another pack indexing
instruction. The pack slice starts at the given index and extends for a
number of components equal to the number of components in the indexed
pack type of the operand. The pack type induced from the indexed pack
type by this slice must have the same shape as the indexed pack type of
the operand.
Substitution must adjust the component index appropriately so that it
still refers to the same component. For example, if the pack type is
`Pack{repeat each T, Int}`, and substitution replaces `T` with
`Pack{Float, repeat each U}`, a component index of 1 must be adjusted to
2 so that the slice will continue to begin at the `Int` element.
Note how, in the example above, the slice does not contain any pack
expansions. (It is either empty or the singleton pack `Pack{Int}`.) This
is not typically how this instruction is used but can easily occur after
inlining or other type substitution.
### dynamic_pack_index
```
sil-instruction ::= 'dynamic_pack_index' sil-value 'of' sil-type
```
Produce the dynamic pack index of an unknown element of a pack. The type
operand is the indexed pack type. The value operand is a dynamic index
into the dynamic elements of the pack and must have type `Builtin.Word`.
The instruction has undefined behavior if the index is not in range for
the pack.
## Variadic Generics
### pack_length
```
sil-instruction ::= 'pack_length' sil-type
```
Produce the dynamic length of the given pack, which must be a formal
pack type. The value of the instruction has type `Builtin.Word`.
### open_pack_element
```
sil-instruction ::= 'open_pack_element' sil-value 'of' generic-parameter-list+ 'at' sil-apply-substitution-list ',' 'shape' sil-type ',' 'uuid' string-literal
```
Binds one or more opened pack element archetypes in the local type
environment.
The generic signature is the *generalization signature* of the pack
elements. This signature need not be related in any way to the generic
signature (if any) of the enclosing SIL function.
The `shape` type operand is resolved in the context of the
generalization signature. It must name a pack parameter. Archetypes will
be bound for all pack parameters with the same shape as this parameter.
The `uuid` operand must be an RFC 4122 UUID string, which is composed of
32 hex digits separated by hyphens in the pattern `8-4-4-4-12`. There
must not be any other `open_pack_element` instruction with this UUID in
the SIL function. Opened pack element archetypes are identified by this
UUID and are different from any other opened pack element archetypes in
the function, even if the operands otherwise match exactly.
The value operand is the pack index and must be the result of a pack
indexing instruction.
The substitution list matches the generalization signature and provides
contextual bindings for all of the type information there. As usual, the
substitutions for any pack parameters must be pack types. For pack
parameters with the same shape as the shape operand, these pack
substitutions must have the same shape as the indexed pack type of the
pack index operand (and therefore the same shape as each other).
The cost of this instruction is proportionate to the sum of the number
of pack parameters in the generalization signature with the same shape
as the shape type and the number of protocol conformance requirements
the generalization signature imposes on those parameters and their
associated types. If any of this information is not required for the
correct execution of the SIL function, simplifying the generalization
signature used by the `open_pack_element` can be a significant
optimization.
### pack_element_get
```
sil-instruction ::= 'pack_element_get' sil-value 'of' sil-operand 'as' sil-type
%addr = pack_element_get %index of %pack : $*Pack{Int, repeat each T} as $*Int
```
Extracts the value previously stored in a pack at a particular index. If
the pack element is uninitialized, this has undefined behavior.
Ownership is unclear for direct packs.
The first operand is the pack index and must be a pack indexing
instruction. The second operand is the pack and must be the address of a
pack value. The type operand is the projected element type of the pack
element and must be structurally well-typed for the given index and pack
type; see the structural type matching rules for pack indices.
### pack_element_set
```
sil-instruction ::= 'pack_element_set' sil-operand 'into' sil-value 'of' sil-operand
pack_element_set %addr : $*@pack_element("...") each U into %index of %pack : $*Pack{Int, repeat each T}
```
Places a value in a pack at a particular index.
Ownership is unclear for direct packs.
The first operand is the new element value. The second operand is the
pack index and must be a pack indexing instruction. The third operand is
the pack and must be the address of a pack value. The type of the
element value operand is the projected element type of the pack element
and must be structurally well-typed for the given index and pack type;
see the structural type matching rules for pack indices.
## Value Generics
### type_value
```
sil-instruction ::= 'type_value' sil-type 'for' sil-identifier
```
Produce the dynamic value of the given value generic, which must be a
formal value generic type. The value of the instruction has the type of
whatever the underlying value generic's type is. For right now that is
limited to `Int`.
## Unchecked Conversions
These instructions implement type conversions which are not checked.
These are either user-level conversions that are always safe and do not
need to be checked, or implementation detail conversions that are
unchecked for performance or flexibility.
### upcast
```
sil-instruction ::= 'upcast' sil-operand 'to' sil-type
%1 = upcast %0 : $D to $B
// $D and $B must be class types or metatypes, with B a superclass of D
// %1 will have type $B
```
Represents a conversion from a derived class instance or metatype to a
superclass, or from a base-class-constrained archetype to its base
class.
### address_to_pointer
```
sil-instruction ::= 'address_to_pointer' ('[' 'stack_protection' ']')? sil-operand 'to' sil-type
%1 = address_to_pointer %0 : $*T to $Builtin.RawPointer
// %0 must be of an address type $*T
// %1 will be of type Builtin.RawPointer
```
Creates a `Builtin.RawPointer` value corresponding to the address `%0`.
Converting the result pointer back to an address of the same type will
give an address equivalent to `%0`. It is undefined behavior to cast the
`RawPointer` to any address type other than its original address type or
any [layout compatible types](Types.md#layout-compatible-types).
The `stack_protection` flag indicates that stack protection is done for
the pointer origin.
### pointer_to_address
```
sil-instruction ::= 'pointer_to_address' sil-operand 'to' ('[' 'strict' ']')? ('[' 'invariant' ']')? ('[' 'alignment' '=' alignment ']')? sil-type
alignment ::= [0-9]+
%1 = pointer_to_address %0 : $Builtin.RawPointer to [strict] $*T
// %1 will be of type $*T
```
Creates an address value corresponding to the `Builtin.RawPointer` value
`%0`. Converting a `RawPointer` back to an address of the same type as
its originating [address_to_pointer](#address_to_pointer) instruction
gives back an equivalent address. It is undefined behavior to cast the
`RawPointer` back to any type other than its original address type or
[layout compatible types](Types.md#layout-compatible-types). It is also
undefined behavior to cast a `RawPointer` from a heap object to any
address type.
The `strict` flag indicates whether the returned address adheres to
strict aliasing. If true, then the type of each memory access dependent
on this address must be consistent with the memory's bound type. A
memory access from an address that is not strict cannot have its address
substituted with a strict address, even if other nearby memory accesses
at the same location are strict.
The `invariant` flag is set if loading from the returned address always
produces the same value.
The `alignment` integer value specifies the byte alignment of the
address. `alignment=0` is the default, indicating the natural alignment
of `T`.
### unchecked_ref_cast
```
sil-instruction ::= 'unchecked_ref_cast' sil-operand 'to' sil-type
%1 = unchecked_ref_cast %0 : $A to $B
// %0 must be an object of type $A
// $A must be a type with retainable pointer representation
// %1 will be of type $B
// $B must be a type with retainable pointer representation
```
Converts a heap object reference to another heap object reference type.
This conversion is unchecked, and it is undefined behavior if the
destination type is not a valid type for the heap object. The heap
object reference on either side of the cast may be a class existential,
and may be wrapped in one level of Optional.
### unchecked_ref_cast_addr
```
sil-instruction ::= 'unchecked_ref_cast_addr'
sil-type 'in' sil-operand 'to'
sil-type 'in' sil-operand
unchecked_ref_cast_addr $A in %0 : $*A to $B in %1 : $*B
// %0 must be the address of an object of type $A
// $A must be a type with retainable pointer representation
// %1 must be the address of storage for an object of type $B
// $B must be a type with retainable pointer representation
```
Loads a heap object reference from an address and stores it at the
address of another uninitialized heap object reference. The loaded
reference is always taken, and the stored reference is initialized. This
conversion is unchecked, and it is undefined behavior if the destination
type is not a valid type for the heap object. The heap object reference
on either side of the cast may be a class existential, and may be
wrapped in one level of Optional.
### unchecked_addr_cast
```
sil-instruction ::= 'unchecked_addr_cast' sil-operand 'to' sil-type
%1 = unchecked_addr_cast %0 : $*A to $*B
// %0 must be an address
// %1 will be of type $*B
```
Converts an address to a different address type. Using the resulting
address is undefined unless `B` is layout compatible with `A`. The
layout of `B` may be smaller than that of `A` as long as the lower order
bytes have identical layout.
### unchecked_trivial_bit_cast
```
sil-instruction ::= 'unchecked_trivial_bit_cast' sil-operand 'to' sil-type
%1 = unchecked_trivial_bit_cast %0 : $Builtin.NativeObject to $Builtin.Word
// %0 must be an object.
// %1 must be an object with trivial type.
```
Bitcasts an object of type `A` to be of same sized or smaller type `B`
with the constraint that `B` must be trivial. This can be used for
bitcasting among trivial types, but more importantly is a one way
bitcast from non-trivial types to trivial types.
### unchecked_bitwise_cast
```
sil-instruction ::= 'unchecked_bitwise_cast' sil-operand 'to' sil-type
%1 = unchecked_bitwise_cast %0 : $A to $B
```
Bitwise copies an object of type `A` into a new object of type `B` of
the same size or smaller.
### unchecked_value_cast
```
sil-instruction ::= 'unchecked_value_cast' sil-operand 'to' sil-type
%1 = unchecked_value_cast %0 : $A to $B
```
Bitwise copies an object of type `A` into a new layout-compatible object
of type `B` of the same size.
This instruction is assumed to forward a fixed ownership (set upon its
construction) and lowers to 'unchecked_bitwise_cast' in non-OSSA code.
This causes the cast to lose its guarantee of layout-compatibility.
### unchecked_ownership_conversion
```
sil-instruction ::= 'unchecked_ownership_conversion' sil-operand ',' sil-value-ownership-kind 'to' sil-value-ownership-kind
%1 = unchecked_ownership_conversion %0 : $A, @guaranteed to @owned
```
Converts its operand to an identical value of the same type but with
different ownership without performing any semantic operations normally
required for ownership conversion.
This is used in Objective-C compatible destructors to convert a
guaranteed parameter to an owned parameter without performing a semantic
copy.
The resulting value must meet the usual ownership requirements; for
example, a trivial type must have '.none' ownership.
NOTE: A guaranteed result value is assumed to be a non-dependent guaranteed
value like a function argument.
### ref_to_raw_pointer
```
sil-instruction ::= 'ref_to_raw_pointer' sil-operand 'to' sil-type
%1 = ref_to_raw_pointer %0 : $C to $Builtin.RawPointer
// $C must be a class type, or Builtin.NativeObject, or AnyObject
// %1 will be of type $Builtin.RawPointer
```
Converts a heap object reference to a `Builtin.RawPointer`. The
`RawPointer` result can be cast back to the originating class type but
does not have ownership semantics. It is undefined behavior to cast a
`RawPointer` from a heap object reference to an address using
[pointer_to_address](#pointer_to_address).
### raw_pointer_to_ref
```
sil-instruction ::= 'raw_pointer_to_ref' sil-operand 'to' sil-type
%1 = raw_pointer_to_ref %0 : $Builtin.RawPointer to $C
// $C must be a class type, or Builtin.NativeObject, or AnyObject
// %1 will be of type $C
```
Converts a `Builtin.RawPointer` back to a heap object reference. Casting
a heap object reference to `Builtin.RawPointer` back to the same type
gives an equivalent heap object reference (though the raw pointer has no
ownership semantics for the object on its own). It is undefined behavior
to cast a `RawPointer` to a type unrelated to the dynamic type of the
heap object. It is also undefined behavior to cast a `RawPointer` from
an address to any heap object type.
### ref_to_unowned
```
sil-instruction ::= 'ref_to_unowned' sil-operand
%1 = ref_to_unowned %0 : $T
// $T must be a reference type
// %1 will have type $@unowned T
```
Adds the `@unowned` qualifier to the type of a reference to a heap
object. No runtime effect.
### unowned_to_ref
```
sil-instruction ::= 'unowned_to_ref' sil-operand
%1 = unowned_to_ref %0 : $@unowned T
// $T must be a reference type
// %1 will have type $T
```
Strips the `@unowned` qualifier off the type of a reference to a heap
object. No runtime effect.
### ref_to_unmanaged
TODO
### unmanaged_to_ref
TODO
### convert_function
```
sil-instruction ::= 'convert_function' sil-operand 'to'
('[' 'without_actually_escaping' ']')?
sil-type
%1 = convert_function %0 : $T -> U to $T' -> U'
// %0 must be of a function type $T -> U ABI-compatible with $T' -> U'
// (see below)
// %1 will be of type $T' -> U'
```
Performs a conversion of the function `%0` to type `T`, which must be
ABI-compatible with the type of `%0`. Function types are ABI-compatible
if their input and result types are tuple types that, after
destructuring, differ only in the following ways:
- Corresponding tuple elements may add, remove, or change keyword
names. `(a:Int, b:Float, UnicodeScalar) -> ()` and
`(x:Int, Float, z:UnicodeScalar) -> ()` are ABI compatible.
- A class tuple element of the destination type may be a superclass or
subclass of the source type's corresponding tuple element.
The function types may also differ in attributes, except that the
`convention` attribute cannot be changed and the `@noescape` attribute
must not change for functions with context.
A `convert_function` cannot be used to change a thick type's
`@noescape` attribute (`@noescape` function types with context are not
ABI compatible with escaping function types with context) -- however,
thin function types with and without `@noescape` are ABI compatible
because they have no context. To convert from an escaping to a
`@noescape` thick function type use `convert_escape_to_noescape`.
With the `without_actually_escaping` attribute, the `convert_function`
may be used to convert a non-escaping closure into an escaping function
type. This attribute must be present whenever the closure operand has an
unboxed capture (via `@inout_aliasable`) *and* the resulting function
type is escaping. (This only happens as a result of
`withoutActuallyEscaping()`). If the attribute is present then the
resulting function type must be escaping, but the operand's function
type may or may not be @noescape. Note that a non-escaping closure may
have unboxed captures even though its SIL function type is "escaping".
### convert_escape_to_noescape
```
sil-instruction ::= 'convert_escape_to_noescape' sil-operand 'to' sil-type
%1 = convert_escape_to_noescape %0 : $T -> U to $@noescape T' -> U'
// %0 must be of a function type $T -> U ABI-compatible with $T' -> U'
// (see convert_function)
// %1 will be of the trivial type $@noescape T -> U
```
Converts an escaping (non-trivial) function type to a `@noescape`
trivial function type. Something must guarantee the lifetime of the
input `%0` for the duration of the use `%1`.
A `convert_escape_to_noescape [not_guaranteed] %opd` indicates that the
lifetime of its operand was not guaranteed by SILGen and a mandatory
pass must be run to ensure the lifetime of `%opd` for the
conversion's uses.
A `convert_escape_to_noescape [escaped]` indicates that the result was
passed to a function (materializeForSet) which escapes the closure in a
way not expressed by the convert's users. The mandatory pass must
ensure the lifetime in a conservative way.
### thunk
```
sil-instruction ::= 'thunk' sil-thunk-attr* sil-value sil-apply-substitution-list? () sil-type
sil-thunk-attr ::= '[' thunk-kind ']'
sil-thunk-kind ::= identity
%1 = thunk [identity] %0() : $@convention(thin) (T) -> U
// %0 must be of a function type $T -> U
// %1 will be of type @callee_guaranteed (T) -> U since we are creating an
// "identity" thunk.
%1 = thunk [identity] %0<T>() : $@convention(thin) (τ_0_0) -> ()
// %0 must be of a function type $T -> ()
// %1 will be of type @callee_guaranteed <τ_0_0> (τ_0_0) -> () since we are creating a
// "identity" thunk.
```
Takes in a function and depending on the kind produces a new function
result that is `@callee_guaranteed`. The specific way that the function
type of the input is modified by this instruction depends on the
specific `sil-thunk-kind` of the instruction. So for instance, the
`hop_to_mainactor_if_needed` thunk just returns a callee_guaranteed
version of the input function... but one could imagine a
"reabstracted" thunk kind that would produce the appropriate
reabstracted thunk kind.
This instruction is lowered to a true thunk in Lowered SIL by the
ThunkLowering pass.
It is assumed that like [partial_apply](#partial_apply), if we need a
substitution map, it will be attached to `thunk`. This ensures
that we have the substitution map already created if we need to create a
[partial_apply](#partial_apply).
### classify_bridge_object
```
sil-instruction ::= 'classify_bridge_object' sil-operand
%1 = classify_bridge_object %0 : $Builtin.BridgeObject
// %1 will be of type (Builtin.Int1, Builtin.Int1)
```
Decodes the bit representation of the specified `Builtin.BridgeObject`
value, returning two bits: the first indicates whether the object is an
Objective-C object, the second indicates whether it is an Objective-C
tagged pointer value.
### value_to_bridge_object
```
sil-instruction ::= 'value_to_bridge_object' sil-operand
%1 = value_to_bridge_object %0 : $T
// %1 will be of type Builtin.BridgeObject
```
Sets the BridgeObject to a tagged pointer representation holding its
operands by tagging and shifting the operand if needed:
```
value_to_bridge_object %x ===
(x << _swift_abi_ObjCReservedLowBits) | _swift_BridgeObject_TaggedPointerBits
```
`%x` thus must not be using any high bits shifted away or the tag bits
post-shift. ARC operations on such tagged values are NOPs.
### ref_to_bridge_object
```
sil-instruction ::= 'ref_to_bridge_object' sil-operand, sil-operand
%2 = ref_to_bridge_object %0 : $C, %1 : $Builtin.Word
// %0 must be of reference type $C
// %2 will be of type Builtin.BridgeObject
```
Creates a `Builtin.BridgeObject` that references `%0`, with spare bits
in the pointer representation populated by bitwise-OR-ing in the value
of `%1`. It is undefined behavior if this bitwise OR operation affects
the reference identity of `%0`; in other words, after the following
instruction sequence:
```
%b = ref_to_bridge_object %r : $C, %w : $Builtin.Word
%r2 = bridge_object_to_ref %b : $Builtin.BridgeObject to $C
```
`%r` and `%r2` must be equivalent. In particular, it is assumed that
retaining or releasing the `BridgeObject` is equivalent to retaining or
releasing the original reference, and that the above
`ref_to_bridge_object` / `bridge_object_to_ref` round-trip can be folded
away to a no-op.
On platforms with ObjC interop, there is additionally a
platform-specific bit in the pointer representation of a `BridgeObject`
that is reserved to indicate whether the referenced object has native
Swift refcounting. It is undefined behavior to set this bit when the
first operand references an Objective-C object.
### bridge_object_to_ref
```
sil-instruction ::= 'bridge_object_to_ref' sil-operand 'to' sil-type
%1 = bridge_object_to_ref %0 : $Builtin.BridgeObject to $C
// $C must be a reference type
// %1 will be of type $C
```
Extracts the object reference from a `Builtin.BridgeObject`, masking out
any spare bits.
### bridge_object_to_word
```
sil-instruction ::= 'bridge_object_to_word' sil-operand 'to' sil-type
%1 = bridge_object_to_word %0 : $Builtin.BridgeObject to $Builtin.Word
// %1 will be of type $Builtin.Word
```
Provides the bit pattern of a `Builtin.BridgeObject` as an integer.
### thin_to_thick_function
```
sil-instruction ::= 'thin_to_thick_function' sil-operand 'to' sil-type
%1 = thin_to_thick_function %0 : $@convention(thin) T -> U to $T -> U
// %0 must be of a thin function type $@convention(thin) T -> U
// The destination type must be the corresponding thick function type
// %1 will be of type $T -> U
```
Converts a thin function value, that is, a bare function pointer with no
context information, into a thick function value with ignored context.
Applying the resulting thick function value is equivalent to applying
the original thin value. The `thin_to_thick_function` conversion may be
eliminated if the context is proven not to be needed.
### thick_to_objc_metatype
```
sil-instruction ::= 'thick_to_objc_metatype' sil-operand 'to' sil-type
%1 = thick_to_objc_metatype %0 : $@thick T.Type to $@objc_metatype T.Type
// %0 must be of a thick metatype type $@thick T.Type
// The destination type must be the corresponding Objective-C metatype type
// %1 will be of type $@objc_metatype T.Type
```
Converts a thick metatype to an Objective-C class metatype. `T` must be
of class, class protocol, or class protocol composition type.
### objc_to_thick_metatype
```
sil-instruction ::= 'objc_to_thick_metatype' sil-operand 'to' sil-type
%1 = objc_to_thick_metatype %0 : $@objc_metatype T.Type to $@thick T.Type
// %0 must be of an Objective-C metatype type $@objc_metatype T.Type
// The destination type must be the corresponding thick metatype type
// %1 will be of type $@thick T.Type
```
Converts an Objective-C class metatype to a thick metatype. `T` must be
of class, class protocol, or class protocol composition type.
### objc_metatype_to_object
TODO
### objc_existential_metatype_to_object
TODO
### cast_implicitactor_to_opaqueisolation
```
sil-instruction ::= 'cast_implicitactor_to_opaqueisolation' sil-operand
%1 = cast_implicitactor_to_opaqueisolation %0 : $Builtin.ImplicitActor
// %0 must have guaranteed ownership
// %1 must have guaranteed ownership
// %1 will have type $Optional<any Actor>
```
Convert a `$Builtin.ImplicitActor` to a `$Optional<any Actor>` masking out any
bits that we have stolen from the witness table pointer.
At IRGen time, we lower this to the relevant masking operations, allowing us to
avoid exposing these low level details to the SIL optimizer. On platforms where
we support TBI, IRGen uses a mask that is the bottom 2 bits of the top nibble of
the pointer. On 64 bit platforms this is bit 60,61. If the platform does not
support TBI, then IRGen uses the bottom two tagged pointer bits of the pointer
(bits 0,1).
## Checked Conversions
Some user-level cast operations can fail and thus require runtime
checking.
The `unconditional_checked_cast_addr` and
[unconditional_checked_cast](#unconditional_checked_cast) instructions
perform an unconditional checked cast; it is a runtime failure if the
cast fails. The [checked_cast_addr_br](#checked_cast_addr_br) and
[checked_cast_br](#checked_cast_br) terminator instructions perform a
conditional checked cast; they branch to one of two destinations based
on whether the cast succeeds or not.
### unconditional_checked_cast
```
sil-instruction ::= 'unconditional_checked_cast'
sil-prohibit-isolated-conformances?
sil-operand 'to' sil-type
%1 = unconditional_checked_cast %0 : $A to $B
%1 = unconditional_checked_cast %0 : $*A to $*B
// $A and $B must be both objects or both addresses
// %1 will be of type $B or $*B
```
Performs a checked scalar conversion, causing a runtime failure if the
conversion fails. Casts that require changing representation or
ownership are unsupported.
### unconditional_checked_cast_addr
```
sil-instruction ::= 'unconditional_checked_cast_addr'
sil-prohibit-isolated-conformances?
sil-type 'in' sil-operand 'to'
sil-type 'in' sil-operand
unconditional_checked_cast_addr $A in %0 : $*@thick A to $B in %1 : $*@thick B
// $A and $B must be both addresses
// %1 will be of type $*B
// $A is destroyed during the conversion. There is no implicit copy.
```
Performs a checked indirect conversion, causing a runtime failure if the
conversion fails.
## Runtime Failures
### cond_fail
```
sil-instruction ::= 'cond_fail' sil-operand, string-literal
cond_fail %0 : $Builtin.Int1, "failure reason"
// %0 must be of type $Builtin.Int1
```
This instruction produces a runtime failure if the
operand is 1. Execution proceeds normally if the operand is zero. The
second operand is a static failure message, which is displayed by the
debugger in case the failure is triggered.
## Terminators
These instructions terminate a basic block. Every basic block must end
with a terminator. Terminators may only appear as the final instruction
of a basic block.
### unreachable
```
sil-terminator ::= 'unreachable'
unreachable
```
Indicates that control flow must not reach the end of the current basic
block. It is a dataflow error if an unreachable terminator is reachable
from the entry point of a function and is not immediately preceded by an
`apply` of a no-return function.
### return
```
sil-terminator ::= 'return' sil-operand
return %0 : $T
// $T must be the return type of the current function
```
Exits the current function and returns control to the calling function.
If the current function was invoked with an `apply` instruction, the
result of that function will be the operand of this `return`
instruction. If the current function was invoked with a `try_apply`
instruction, control resumes at the normal destination, and the value of
the basic block argument will be the operand of this `return`
instruction.
If the current function is a single-yield coroutine (`yield_once` or
`yield_once_2`), there must not be a path from the entry block to a
`return` which does not pass through a `yield` instruction. This rule
does not apply in the `raw` SIL stage.
`return` does not retain or release its operand or any other values.
A function must not contain more than one `return` instruction.
### return_borrow
```
sil-terminator ::= 'return_borrow' sil-operand 'from_scopes' '(' (sil-operand (',' sil-operand)*)? ')'
return_borrow %0 : $T from_scopes (%1, %2 ...)
// %0 must be a @guaranteed value
// %1, %2, ... must be borrow introducers for %0, like `load_borrow`
// $T must be the return type of the current function
```
The return_borrow instruction is valid only for functions with @guaranteed results.
It is used to return a @guaranteed value that may be produced within borrow scopes local to the function.
### throw
```
sil-terminator ::= 'throw' sil-operand
throw %0 : $T
// $T must be the error result type of the current function
```
Exits the current function and returns control to the calling function.
The current function must have an error result, and so the function must
have been invoked with a `try_apply` instruction. Control will resume in
the error destination of that instruction, and the basic block argument
will be the operand of the `throw`.
`throw` does not retain or release its operand or any other values.
A function must not contain more than one `throw` instruction.
### throw_addr
```
sil-terminator ::= 'throw_addr'
throw_addr
// indirect error result must be initialized at this point
```
Exits the current function and returns control to the calling function.
The current function must have an indirect error result, and so the
function must have been invoked with a `try_apply` instruction. Control
will resume in the error destination of that instruction.
The function is responsible for initializing its error result before the
`throw_addr`.
`throw_addr` does not retain or release any values.
A function must not contain more than one `throw_addr` instruction.
### yield
```
sil-terminator ::= 'yield' sil-yield-values
',' 'resume' sil-identifier
',' 'unwind' sil-identifier
sil-yield-values ::= sil-operand
sil-yield-values ::= '(' (sil-operand (',' sil-operand)*)? ')'
```
Temporarily suspends the current function and provides the given values
to the calling function. The current function must be a coroutine, and
the yield values must match the yield types of the coroutine. If the
calling function resumes the coroutine normally, control passes to the
`resume` destination. If the calling function aborts the coroutine,
control passes to the `unwind` destination.
The `resume` and `unwind` destination blocks must be uniquely referenced
by the `yield` instruction. This prevents them from becoming critical
edges.
In a single-yield coroutine (`yield_once` or `yield_once_2`), there must
not be a control flow path leading from the `resume` edge to another
`yield` instruction in this function. This rule does not apply in the
`raw` SIL stage.
There must not be a control flow path leading from the `unwind` edge to
a `return` instruction, to a `throw` instruction, or to any block
reachable from the entry block via a path that does not pass through an
`unwind` edge. That is, the blocks reachable from `unwind` edges must
jointly form a disjoint subfunction of the coroutine.
### unwind
```
sil-terminator ::= 'unwind'
```
Exits the current function and returns control to the calling function,
completing an unwind from a `yield`. The current function must be a
coroutine.
`unwind` is only permitted in blocks reachable from the `unwind` edges
of `yield` instructions.
### br
```
sil-terminator ::= 'br' sil-identifier
'(' (sil-operand (',' sil-operand)*)? ')'
br label (%0 : $A, %1 : $B, ...)
// `label` must refer to a basic block label within the current function
// %0, %1, etc. must be of the types of `label`'s arguments
```
Unconditionally transfers control from the current basic block to the
block labeled `label`, binding the given values to the arguments of the
destination basic block.
### cond_br
```
sil-terminator ::= 'cond_br' sil-operand ','
sil-identifier '(' (sil-operand (',' sil-operand)*)? ')' ','
sil-identifier '(' (sil-operand (',' sil-operand)*)? ')'
cond_br %0 : $Builtin.Int1, true_label (%a : $A, %b : $B, ...),
false_label (%x : $X, %y : $Y, ...)
// %0 must be of $Builtin.Int1 type
// `true_label` and `false_label` must refer to block labels within the
// current function and must not be identical
// %a, %b, etc. must be of the types of `true_label`'s arguments
// %x, %y, etc. must be of the types of `false_label`'s arguments
```
Conditionally branches to `true_label` if `%0` is equal to `1` or to
`false_label` if `%0` is equal to `0`, binding the corresponding set of
values to the arguments of the chosen destination block.
In OSSA, `cond_br` must not have any arguments because in OSSA critical control
flow edges are not allowed.
### switch_value
```
sil-terminator ::= 'switch_value' sil-operand
(',' sil-switch-value-case)*
(',' sil-switch-default)?
sil-switch-value-case ::= 'case' sil-value ':' sil-identifier
sil-switch-default ::= 'default' sil-identifier
switch_value %0 : $Builtin.Int<n>, case %1: label1,
case %2: label2,
...,
default labelN
// %0 must be a value of builtin integer type $Builtin.Int<n>
// `label1` through `labelN` must refer to block labels within the current
// function
// FIXME: All destination labels currently must take no arguments
```
Conditionally branches to one of several destination basic blocks based
on a value of builtin integer. If the operand value matches one of the
`case` values of the instruction, control is transferred to the
corresponding basic block. If there is a `default` basic block, control
is transferred to it if the value does not match any of the `case`
values. It is undefined behavior if the value does not match any cases
and no `default` branch is provided.
### switch_enum
```
sil-terminator ::= 'switch_enum' sil-operand
(',' sil-switch-enum-case)*
(',' sil-switch-default)?
sil-switch-enum-case ::= 'case' sil-decl-ref ':' sil-identifier
switch_enum %0 : $U, case #U.Foo!enumelt: label1,
case #U.Bar!enumelt: label2,
...,
default labelN
// %0 must be a value of enum type $U
// #U.Foo, #U.Bar, etc. must be 'case' declarations inside $U
// `label1` through `labelN` must refer to block labels within the current
// function
// label1 must take either no basic block arguments, or a single argument
// of the type of #U.Foo's data
// label2 must take either no basic block arguments, or a single argument
// of the type of #U.Bar's data, etc.
// labelN must take no basic block arguments
```
Conditionally branches to one of several destination basic blocks based
on the discriminator in a loadable `enum` value. Unlike `switch_value`,
`switch_enum` requires coverage of the operand type: If the `enum` type
cannot be switched exhaustively in the current function, the `default`
branch is required; otherwise, the `default` branch is required unless a
destination is assigned to every `case` of the `enum`. The destination
basic block for a `case` may take an argument of the corresponding
`enum` `case`'s data type (or of the address type, if the operand is an
address). If the branch is taken, the destination's argument will be
bound to the associated data inside the original enum value. For
example:
```
enum Foo {
case Nothing
case OneInt(Int)
case TwoInts(Int, Int)
}
sil @sum_of_foo : $Foo -> Int {
entry(%x : $Foo):
switch_enum %x : $Foo,
case #Foo.Nothing!enumelt: nothing,
case #Foo.OneInt!enumelt: one_int,
case #Foo.TwoInts!enumelt: two_ints
nothing:
%zero = integer_literal $Int, 0
return %zero : $Int
one_int(%y : $Int):
return %y : $Int
two_ints(%ab : $(Int, Int)):
%a = tuple_extract %ab : $(Int, Int), 0
%b = tuple_extract %ab : $(Int, Int), 1
%add = function_ref @add : $(Int, Int) -> Int
%result = apply %add(%a, %b) : $(Int, Int) -> Int
return %result : $Int
}
```
On a path dominated by a destination block of `switch_enum`, copying or
destroying the basic block argument has equivalent reference counting
semantics to copying or destroying the `switch_enum` operand:
```
// This retain_value...
retain_value %e1 : $Enum
switch_enum %e1, case #Enum.A: a, case #Enum.B: b
a(%a : $A):
// ...is balanced by this release_value
release_value %a
b(%b : $B):
// ...and this one
release_value %b
```
### switch_enum_addr
```
sil-terminator ::= 'switch_enum_addr' sil-operand
(',' sil-switch-enum-case)*
(',' sil-switch-default)?
switch_enum_addr %0 : $*U, case #U.Foo!enumelt: label1,
case #U.Bar!enumelt: label2,
...,
default labelN
// %0 must be the address of an enum type $*U
// #U.Foo, #U.Bar, etc. must be cases of $U
// `label1` through `labelN` must refer to block labels within the current
// function
// The destinations must take no basic block arguments
```
Conditionally branches to one of several destination basic blocks based
on the discriminator in the enum value referenced by the address
operand.
Unlike `switch_value`, `switch_enum_addr` requires coverage of the operand
type: If the `enum` type cannot be switched exhaustively in the current
function, the `default` branch is required; otherwise, the `default`
branch is required unless a destination is assigned to every `case` of
the `enum`. Unlike `switch_enum`, the payload value is not passed to the
destination basic blocks; it must be projected out separately with
[unchecked_take_enum_data_addr](#unchecked_take_enum_data_addr).
### dynamic_method_br
```
sil-terminator ::= 'dynamic_method_br' sil-operand ',' sil-decl-ref
',' sil-identifier ',' sil-identifier
dynamic_method_br %0 : $P, #X.method, bb1, bb2
// %0 must be of protocol type
// #X.method must be a reference to an @objc method of any class
// or protocol type
```
Looks up the implementation of an Objective-C method with the same
selector as the named method for the dynamic type of the value inside an
existential container. The "self" operand of the result function value
is represented using an opaque type, the value for which must be
projected out as a value of type `Builtin.ObjCPointer`.
If the operand is determined to have the named method, this instruction
branches to `bb1`, passing it the uncurried function corresponding to
the method found. If the operand does not have the named method, this
instruction branches to `bb2`.
### checked_cast_br
```
sil-terminator ::= 'checked_cast_br' sil-checked-cast-exact?
sil-prohibit-isolated-conformances?
sil-type 'in'
sil-operand 'to' sil-type ','
sil-identifier ',' sil-identifier
sil-checked-cast-exact ::= '[' 'exact' ']'
sil-prohibit-isolated-conformances ::= '[' 'prohibit_isolated_conformances' ']'
checked_cast_br A in %0 : $A to $B, bb1, bb2
checked_cast_br *A in %0 : $*A to $*B, bb1, bb2
checked_cast_br [exact] A in %0 : $A to $A, bb1, bb2
// $A and $B must be both object types or both address types
// bb1 must take a single argument of type $B or $*B
// bb2 must take no arguments
```
Performs a checked scalar conversion from `$A` to `$B`. If the
conversion succeeds, control is transferred to `bb1`, and the result of
the cast is passed into `bb1` as an argument. If the conversion fails,
control is transferred to `bb2`.
An exact cast checks whether the dynamic type is exactly the target
type, not any possible subtype of it. The source and target types must
be class types.
A cast can specify that the runtime should prohibit all uses of isolated
conformances when attempting to satisfy protocol requirements of existentials.
### checked_cast_addr_br
```
sil-terminator ::= 'checked_cast_addr_br'
sil-prohibit-isolated-conformances?
sil-cast-consumption-kind
sil-type 'in' sil-operand 'to'
sil-stype 'in' sil-operand ','
sil-identifier ',' sil-identifier
sil-cast-consumption-kind ::= 'take_always'
sil-cast-consumption-kind ::= 'take_on_success'
sil-cast-consumption-kind ::= 'copy_on_success'
checked_cast_addr_br take_always $A in %0 : $*@thick A to $B in %2 : $*@thick B, bb1, bb2
// $A and $B must be both address types
// bb1 must take a single argument of type $*B
// bb2 must take no arguments
```
Performs a checked indirect conversion from `$A` to `$B`. If the
conversion succeeds, control is transferred to `bb1`, and the result of
the cast is left in the destination. If the conversion fails, control is
transferred to `bb2`.
### try_apply
```
sil-terminator ::= 'try_apply' sil-value
sil-apply-substitution-list?
'(' (sil-value (',' sil-value)*)? ')'
':' sil-type
'normal' sil-identifier, 'error' sil-identifier
try_apply %0(%1, %2, ...) : $(A, B, ...) -> (R, @error E),
normal bb1, error bb2
bb1(%3 : R):
bb2(%4 : E):
// Note that the type of the callee '%0' is specified *after* the arguments
// %0 must be of a concrete function type $(A, B, ...) -> (R, @error E)
// %1, %2, etc. must be of the argument types $A, $B, etc.
```
Transfers control to the function specified by `%0`, passing it the
given arguments. When `%0` returns, control resumes in either the normal
destination (if it returns with `return`) or the error destination (if
it returns with `throw`).
`%0` must have a function type with an error result.
The rules on generic substitutions are identical to those of `apply`.
### await_async_continuation
```
sil-terminator ::= 'await_async_continuation' sil-value
',' 'resume' sil-identifier
(',' 'error' sil-identifier)?
await_async_continuation %0 : $UnsafeContinuation<T>, resume bb1
await_async_continuation %0 : $UnsafeThrowingContinuation<T>, resume bb1, error bb2
bb1(%1 : @owned $T):
bb2(%2 : @owned $Error):
```
Suspends execution of an `@async` function until the continuation is
resumed. The continuation must be the result of a
`get_async_continuation` or `get_async_continuation_addr` instruction
within the same function; see the documentation for
`get_async_continuation` for discussion of further constraints on the IR
between `get_async_continuation[_addr]` and `await_async_continuation`.
This terminator can only appear inside an `@async` function. The
instruction must always have a `resume` successor, but must have an
`error` successor if and only if the operand is an
`UnsafeThrowingContinuation<T>`.
If the operand is the result of a `get_async_continuation` instruction,
then the `resume` successor block must take an argument whose type is
the maximally-abstracted lowered type of `T`, matching the type argument
of the `Unsafe[Throwing]Continuation<T>` operand. The value of the
`resume` argument is owned by the current function. If the operand is
the result of a `get_async_continuation_addr` instruction, then the
`resume` successor block must *not* take an argument; the resume value
will be written to the memory referenced by the operand to the
`get_async_continuation_addr` instruction, after which point the value
in that memory becomes owned by the current function. With either
variant, if the `await_async_continuation` instruction has an `error`
successor block, the `error` block must take a single `Error` argument,
and that argument is owned by the enclosing function. The memory
referenced by a `get_async_continuation_addr` instruction remains
uninitialized when `await_async_continuation` resumes on the `error`
successor.
It is possible for a continuation to be resumed before
`await_async_continuation`. In this case, the resume operation returns
immediately to its caller. When the `await_async_continuation`
instruction later executes, it then immediately transfers control to its
`resume` or `error` successor block, using the resume or error value
that the continuation was already resumed with.
## Differentiable Programming
### differentiable_function
```
sil-instruction ::= 'differentiable_function'
sil-differentiable-function-parameter-indices
sil-value ':' sil-type
sil-differentiable-function-derivative-functions-clause?
sil-differentiable-function-parameter-indices ::=
'[' 'parameters' [0-9]+ (' ' [0-9]+)* ']'
sil-differentiable-derivative-functions-clause ::=
'with_derivative'
'{' sil-value ':' sil-type ',' sil-value ':' sil-type '}'
differentiable_function [parameters 0] %0 : $(T) -> T
with_derivative {%1 : $(T) -> (T, (T) -> T), %2 : $(T) -> (T, (T) -> T)}
```
Creates a `@differentiable` function from an original function operand
and derivative function operands (optional). There are two derivative
function kinds: a Jacobian-vector products (JVP) function and a
vector-Jacobian products (VJP) function.
`[parameters ...]` specifies parameter indices that the original
function is differentiable with respect to.
The `with_derivative` clause specifies the derivative function operands
associated with the original function.
The differentiation transformation canonicalizes all
`differentiable_function` instructions, generating
derivative functions if necessary to fill in derivative function
operands.
In raw SIL, the `with_derivative` clause is optional. In canonical SIL,
the `with_derivative` clause is mandatory.
### linear_function
```
sil-instruction ::= 'linear_function'
sil-linear-function-parameter-indices
sil-value ':' sil-type
sil-linear-function-transpose-function-clause?
sil-linear-function-parameter-indices ::=
'[' 'parameters' [0-9]+ (' ' [0-9]+)* ']'
sil-linear-transpose-function-clause ::=
with_transpose sil-value ':' sil-type
linear_function [parameters 0] %0 : $(T) -> T with_transpose %1 : $(T) -> T
```
Bundles a function with its transpose function into a
`@differentiable(_linear)` function.
`[parameters ...]` specifies parameter indices that the original
function is linear with respect to.
A `with_transpose` clause specifies the transpose function associated
with the original function. When a `with_transpose` clause is not
specified, the mandatory differentiation transform will add a
`with_transpose` clause to the instruction.
In raw SIL, the `with_transpose` clause is optional. In canonical SIL,
the `with_transpose` clause is mandatory.
### differentiable_function_extract
```
sil-instruction ::= 'differentiable_function_extract'
'[' sil-differentiable-function-extractee ']'
sil-value ':' sil-type
('as' sil-type)?
sil-differentiable-function-extractee ::= 'original' | 'jvp' | 'vjp'
differentiable_function_extract [original] %0 : $@differentiable (T) -> T
differentiable_function_extract [jvp] %0 : $@differentiable (T) -> T
differentiable_function_extract [vjp] %0 : $@differentiable (T) -> T
differentiable_function_extract [jvp] %0 : $@differentiable (T) -> T
as $(@in_constant T) -> (T, (T.TangentVector) -> T.TangentVector)
```
Extracts the original function or a derivative function from the given
`@differentiable` function. The extractee is one of the following:
`[original]`, `[jvp]`, or `[vjp]`.
In lowered SIL, an explicit extractee type may be provided. This is
currently used by the LoadableByAddress transformation, which rewrites
function types.
### linear_function_extract
```
sil-instruction ::= 'linear_function_extract'
'[' sil-linear-function-extractee ']'
sil-value ':' sil-type
sil-linear-function-extractee ::= 'original' | 'transpose'
linear_function_extract [original] %0 : $@differentiable(_linear) (T) -> T
linear_function_extract [transpose] %0 : $@differentiable(_linear) (T) -> T
```
Extracts the original function or a transpose function from the given
`@differentiable(_linear)` function. The extractee is one of the
following: `[original]` or `[transpose]`.
### differentiability_witness_function
```
sil-instruction ::=
'differentiability_witness_function'
'[' sil-differentiability-witness-function-kind ']'
'[' differentiability-kind ']'
'[' 'parameters' sil-differentiability-witness-function-index-list ']'
'[' 'results' sil-differentiability-witness-function-index-list ']'
generic-parameter-clause?
sil-function-name ':' sil-type
sil-differentiability-witness-function-kind ::= 'jvp' | 'vjp' | 'transpose'
sil-differentiability-witness-function-index-list ::= [0-9]+ (' ' [0-9]+)*
differentiability_witness_function [vjp] [reverse] [parameters 0] [results 0]
<T where T: Differentiable> @foo : $(T) -> T
```
Looks up a differentiability witness function (JVP, VJP, or transpose)
for a referenced function via SIL differentiability witnesses.
The differentiability witness function kind identifies the witness
function to look up: `[jvp]`, `[vjp]`, or `[transpose]`.
The remaining components identify the SIL differentiability witness:
- Original function name.
- Differentiability kind.
- Parameter indices.
- Result indices.
- Witness generic parameter clause (optional). When parsing SIL, the
parsed witness generic parameter clause is combined with the
original function's generic signature to form the full witness
generic signature.
## Optimizer Dataflow Marker Instructions
### mark_unresolved_non_copyable_value
```
sil-instruction ::= 'mark_unresolved_non_copyable_value'
'[' sil-optimizer-analysis-marker ']'
sil-optimizer-analysis-marker ::= 'consumable_and_assignable'
::= 'no_consume_or_assign'
```
A canary value inserted by a SIL generating frontend to signal to the
move checker to check a specific value. Valid only in Raw SIL. The
relevant checkers should remove the
`mark_unresolved_non_copyable_value`
instruction after successfully running the relevant diagnostic. The idea
here is that instead of needing to introduce multiple "flagging"
instructions for the optimizer, we can just reuse this one instruction
by varying the kind.
If the sil optimizer analysis marker is `consumable_and_assignable` then
the move checker is told to check that the result of this instruction is
consumed at most once. If the marker is `no_consume_or_assign`, then the
move checker will validate that the result of this instruction is never
consumed or assigned over.
## No Implicit Copy and No Escape Value Instructions
### copyable_to_moveonlywrapper
```
sil-instruction ::= 'copyable_to_moveonlywrapper'
```
`copyable_to_moveonlywrapper` takes in a
`T` and maps it to a move only wrapped `@moveOnly T`. This is
semantically used by a code generator initializing a new moveOnly
binding from a copyable value. It semantically destroys its input
@owned value and returns a brand new independent @owned @moveOnly
value. It also is used to convert a trivial copyable value with type
'Trivial' into an owned non-trivial value of type '@moveOnly
Trivial'. If one thinks of '@moveOnly' as a monad, this is how one
injects a copyable value into the move only space.
### moveonlywrapper_to_copyable
```
sil-instruction ::= 'moveonlywrapper_to_copyable [owned]'
sil-instruction ::= 'moveonlywrapper_to_copyable [guaranteed]'
```
`moveonlywrapper_to_copyable` takes in a
`@moveOnly T` and produces a new `T` value. This is a 'forwarding'
instruction where at parse time, we only allow for one to choose it to
be [owned] or [guaranteed]. With time, we may eliminate the need for
the guaranteed form in the future.
- `moveonlywrapper_to_copyable [owned]` is used to
signal the end of lifetime of the '@moveOnly' wrapper. SILGen
inserts these when ever a move only value has its ownership passed
to a situation where a copyable value is needed. Since it is
consuming, we know that the no implicit copy or no-escape checker
will ensure that if we need a copy for it, the program will emit a
diagnostic.
- `moveonlywrapper_to_copyable [guaranteed]` is used to
pass a @moveOnly T value as a copyable guaranteed parameter with
type 'T' to a function. In the case of using no-implicit-copy
checking this is always fine since no-implicit-copy is a local
pattern. This would be an error when performing no escape checking.
Importantly, this instruction also is where in the case of an
@moveOnly trivial type, we convert from the non-trivial
representation to the trivial representation.
### copyable_to_moveonlywrapper_addr
```
sil-instruction ::= 'copyable_to_moveonlywrapper_addr'
```
`copyable_to_moveonlywrapper_addr`
takes in a `*T` and maps it to a move only wrapped `*@moveOnly T`.
This is semantically used by a code generator initializing a new
moveOnly binding from a copyable value. It semantically acts as an
address cast. If one thinks of '@moveOnly' as a monad, this is how one
injects a copyable value into the move only space.
### moveonlywrapper_to_copyable_addr
```
sil-instruction ::= 'moveonlywrapper_to_copyable_addr'
```
`moveonlywrapper_to_copyable_addr`
takes in a `*@moveOnly T` and produces a new `*T` value. This
instruction acts like an address cast that projects out the underlying T
from an @moveOnly T.
NOTE: From the perspective of the address checker, a trivial
[load](#load) with a
`moveonlywrapper_to_copyable_addr`
operand is considered to be a use of a non-copyable type.
## Weak linking support
### has_symbol
``` none
sil-instruction ::= 'has_symbol' sil-decl-ref
```
Returns true if each of the underlying symbol addresses associated with
the given declaration are non-null. This can be used to determine
whether a weakly-imported declaration is available at runtime.
## `Builtin.Borrow` support
The `Builtin.Borrow` type is the primitive building block for forming first-
class borrowed accesses to other values. Some Swift values never have
a fixed memory address, but address-only values are always fixed in memory, and
some loadable types have dependencies that take the address of an in-memory
representation. As such, `Builtin.Borrow<T>` may have one of two
representations, depending on the properties of `T`:
- `Builtin.Borrow<T>` may contain a bitwise copy of the representation of
the referenced `T`.
- `Builtin.Borrow<T>` may contain the address of the referenced `T` in memory.
The layout of `Builtin.Borrow<T>` for any specific concrete type `T` is always
the same, so when the layout of `T` is fully known, `Builtin.Borrow<T>` is
always loadable even if that `T` is not. On the other hand, for a `T` with
unknown layout, `Builtin.Borrow<T>` will also have unknown layout and be
treated as address-only. This leaves three cases for SIL to represent:
- Both `T` and `Builtin.Borrow<T>` are loadable. Values of `Builtin.Borrow<T>`
are created by `make_borrow`, and the referenced value is dereferenced with
`dereference_borrow`.
- `T` is address-only, or has address-dependent references, but
`Builtin.Borrow<T>` is loadable. Values of `Builtin.Borrow<T>` are created
by `make_addr_borrow`, and the address of the referenced value is retrieved
with `dereference_addr_borrow`.
- `T` and `Builtin.Borrow<T>` are both address-only. Memory locations of type
`Builtin.Borrow<T>` are initialized using `init_borrow_addr`, and the address
of the referenced value is retrieved with `dereference_borrow_addr`.
### make_borrow
```none
sil-instruction ::= 'make_borrow' sil-value
%borrow: $Builtin.Borrow<T> = make_borrow %target: $T
```
Returns a `Builtin.Borrow` value referencing the given value.
The result `%borrow` has a lifetime dependency on borrowing `%target`.
### dereference_borrow
```none
sil-instruction ::= 'dereference_borrow' sil-value
%target: $T = dereference_borrow %borrow: $Builtin.Borrow<T>
```
Returns the value referenced by a `Builtin.Borrow`. The result `%target` is
a borrow of the original value, scoped by the lifetime of `%borrow`.
### make_addr_borrow
```none
sil-instruction ::= 'make_addr_borrow' sil-value
%borrow: $Builtin.Borrow<T> = make_addr_borrow %target: $*T
```
Returns a `Builtin.Borrow` value referencing the value at the given memory
location.
The result `%borrow` has a lifetime dependency on the borrow stored
at the memory location `%target`.
### dereference_addr_borrow
```none
sil-instruction ::= 'dereference_addr_borrow' sil-value
%target: $*T = dereference_addr_borrow %borrow: $Builtin.Borrow<T>
```
Returns the address of the value referenced by a `Builtin.Borrow`.
### init_borrow_addr
```none
sil-instruction ::= 'init_borrow_addr' sil-value 'with' sil-value
init_borrow_addr %borrow : $*Builtin.Borrow<T> with %target : $*T
```
Initializes a `Builtin.Borrow` in memory to reference a target value in
memory.
The value stored to `%borrow` has a lifetime dependency on the borrow stored
at the memory location `%target`.
### dereference_borrow_addr
```none
sil-instruction ::= 'dereference_borrow_addr' sil-value
%target: $*T = dereference_borrow_addr %borrow: $*Builtin.Borrow<T>
```
Returns the address of the value referenced by a `Builtin.Borrow` in memory.
## Miscellaneous instructions
### ignored_use
```none
sil-instruction ::= 'ignored_use'
```
This instruction acts as a synthetic use instruction that suppresses unused
variable warnings. In Swift the equivalent operation is '_ = x'. This
importantly also provides a way to find the source location for '_ = x' when
emitting SIL diagnostics. It is only legal in Raw SIL and is removed as dead
code when we convert to Canonical SIL.
DISCUSSION: Before the introduction of this instruction, in certain cases,
SILGen would just not emit anything for '_ = x'... so one could not emit
diagnostics upon this case. | unknown | github | https://github.com/apple/swift | docs/SIL/Instructions.md |
# testutils.py - utility module for psycopg2 testing.
#
# Copyright (C) 2010-2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# Use unittest2 if available. Otherwise mock a skip facility with warnings.
import os
import sys
from functools import wraps
from testconfig import dsn
# Prefer the unittest2 backport when installed: on old Python versions it
# provides the skip/skipIf machinery missing from the bundled unittest.
try:
    import unittest2
    unittest = unittest2
except ImportError:
    # Fall back to the standard library; mark the backport as absent so
    # callers can detect which flavor is in use.
    import unittest
    unittest2 = None
# Use the real skip machinery when the unittest module provides it;
# otherwise install a degraded shim that only warns instead of skipping.
if hasattr(unittest, 'skipIf'):
    skip = unittest.skip
    skipIf = unittest.skipIf
else:
    import warnings

    def skipIf(cond, msg):
        # Decorator factory: if *cond* is true the wrapped test becomes a
        # no-op that emits *msg* as a warning (old unittest cannot report
        # a skip), otherwise the test runs unchanged.
        def skipIf_(f):
            @wraps(f)
            def skipIf__(self):
                if cond:
                    warnings.warn(msg)
                    return
                else:
                    return f(self)
            return skipIf__
        return skipIf_

    def skip(msg):
        # Unconditional variant of skipIf.
        return skipIf(True, msg)

    def skipTest(self, msg):
        # Instance-level replacement for TestCase.skipTest: warn and bail out.
        warnings.warn(msg)
        return

    unittest.TestCase.skipTest = skipTest
# Silence warnings caused by the stubbornness of the Python unittest maintainers
# http://bugs.python.org/issue9424
if not hasattr(unittest.TestCase, 'assert_') \
        or unittest.TestCase.assert_ is not unittest.TestCase.assertTrue:
    # Re-install the deprecated camelCase aliases pointing straight at the
    # non-deprecated implementations, so the test suite can keep using the
    # old names without emitting DeprecationWarning noise.
    unittest.TestCase.assert_ = unittest.TestCase.assertTrue
    unittest.TestCase.failUnless = unittest.TestCase.assertTrue
    unittest.TestCase.assertEquals = unittest.TestCase.assertEqual
    unittest.TestCase.failUnlessEqual = unittest.TestCase.assertEqual
class ConnectingTestCase(unittest.TestCase):
    """A test case providing connections for tests.

    A connection for the test is always available as `self.conn`. Others can be
    created with `self.connect()`. All are closed on tearDown.

    Subclasses needing to customize setUp and tearDown should remember to call
    the base class implementations.
    """
    def setUp(self):
        # Connections opened during the test; closed by tearDown.
        self._conns = []

    def tearDown(self):
        # close the connections used in the test
        for conn in self._conns:
            if not conn.closed:
                conn.close()

    def connect(self, **kwargs):
        """Open and return a new connection, registered for cleanup.

        Raises a hinting AttributeError if setUp() was never invoked.
        """
        try:
            self._conns
        # Fix: was `except AttributeError, e` — Python 2-only syntax that is
        # a SyntaxError on Python 3; the `as` form works on Python 2.6+ too.
        except AttributeError as e:
            raise AttributeError(
                "%s (did you remember calling ConnectingTestCase.setUp()?)"
                % e)

        import psycopg2
        conn = psycopg2.connect(dsn, **kwargs)
        self._conns.append(conn)
        return conn

    def _get_conn(self):
        # Lazily open the default connection on first access.
        if not hasattr(self, '_the_conn'):
            self._the_conn = self.connect()

        return self._the_conn

    def _set_conn(self, conn):
        self._the_conn = conn

    conn = property(_get_conn, _set_conn)
def decorate_all_tests(cls, *decorators):
"""
Apply all the *decorators* to all the tests defined in the TestCase *cls*.
"""
for n in dir(cls):
if n.startswith('test'):
for d in decorators:
setattr(cls, n, d(getattr(cls, n)))
def skip_if_no_uuid(f):
    """Decorator to skip a test if uuid is not supported by Py/PG."""
    @wraps(f)
    def skip_if_no_uuid_(self):
        # The uuid module may be missing on very old Python versions.
        try:
            import uuid
        except ImportError:
            return self.skipTest("uuid not available in this Python version")

        # Ask the server whether it knows the uuid type; roll back so the
        # probe query leaves no transaction state behind.
        try:
            cursor = self.conn.cursor()
            cursor.execute("select typname from pg_type where typname = 'uuid'")
            found = cursor.fetchone()
        finally:
            self.conn.rollback()

        if not found:
            return self.skipTest("uuid type not available on the server")
        return f(self)
    return skip_if_no_uuid_
def skip_if_tpc_disabled(f):
    """Skip a test if the server has tpc support disabled."""
    @wraps(f)
    def skip_if_tpc_disabled_(self):
        from psycopg2 import ProgrammingError
        connection = self.connect()
        cursor = connection.cursor()
        # Probe the setting; very old servers don't know the GUC at all.
        try:
            cursor.execute("SHOW max_prepared_transactions;")
        except ProgrammingError:
            return self.skipTest(
                "server too old: two phase transactions not supported.")
        max_prepared = int(cursor.fetchone()[0])
        connection.close()
        if not max_prepared:
            return self.skipTest(
                "server not configured for two phase transactions. "
                "set max_prepared_transactions to > 0 to run the test")
        return f(self)
    return skip_if_tpc_disabled_
def skip_if_no_namedtuple(f):
    """Skip a test when collections.namedtuple is missing (Python < 2.6)."""
    @wraps(f)
    def skip_if_no_namedtuple_(self):
        try:
            from collections import namedtuple
        except ImportError:
            return self.skipTest("collections.namedtuple not available")
        return f(self)
    return skip_if_no_namedtuple_
def skip_if_no_iobase(f):
    """Skip a test if io.TextIOBase is not available."""
    @wraps(f)
    def skip_if_no_iobase_(self):
        try:
            from io import TextIOBase
        except ImportError:
            return self.skipTest("io.TextIOBase not found.")
        return f(self)
    return skip_if_no_iobase_
def skip_before_postgres(*ver):
    """Skip a test on PostgreSQL before a certain version."""
    # Normalize to a (major, minor, patch) triple.
    ver = ver + (0,) * (3 - len(ver))

    def skip_before_postgres_(f):
        @wraps(f)
        def skip_before_postgres__(self):
            threshold = int("%d%02d%02d" % ver)
            if self.conn.server_version >= threshold:
                return f(self)
            return self.skipTest("skipped because PostgreSQL %s"
                % self.conn.server_version)
        return skip_before_postgres__
    return skip_before_postgres_
def skip_after_postgres(*ver):
    """Skip a test on PostgreSQL after (including) a certain version."""
    # Normalize to a (major, minor, patch) triple.
    ver = ver + (0,) * (3 - len(ver))

    def skip_after_postgres_(f):
        @wraps(f)
        def skip_after_postgres__(self):
            threshold = int("%d%02d%02d" % ver)
            if self.conn.server_version < threshold:
                return f(self)
            return self.skipTest("skipped because PostgreSQL %s"
                % self.conn.server_version)
        return skip_after_postgres__
    return skip_after_postgres_
def skip_before_python(*ver):
    """Skip a test on Python before a certain version."""
    def skip_before_python_(f):
        @wraps(f)
        def skip_before_python__(self):
            if sys.version_info[:len(ver)] >= ver:
                return f(self)
            return self.skipTest("skipped because Python %s"
                % ".".join(map(str, sys.version_info[:len(ver)])))
        return skip_before_python__
    return skip_before_python_
def skip_from_python(*ver):
    """Skip a test on Python after (including) a certain version."""
    def skip_from_python_(f):
        @wraps(f)
        def skip_from_python__(self):
            if sys.version_info[:len(ver)] < ver:
                return f(self)
            return self.skipTest("skipped because Python %s"
                % ".".join(map(str, sys.version_info[:len(ver)])))
        return skip_from_python__
    return skip_from_python_
def skip_if_no_superuser(f):
    """Skip a test if the database user running the test is not a superuser"""
    @wraps(f)
    def skip_if_no_superuser_(self):
        from psycopg2 import ProgrammingError
        try:
            return f(self)
        # Fix: was `except ProgrammingError, e` — Python 2-only syntax that
        # is a SyntaxError on Python 3; `as` works on Python 2.6+ as well.
        except ProgrammingError as e:
            import psycopg2.errorcodes
            if e.pgcode == psycopg2.errorcodes.INSUFFICIENT_PRIVILEGE:
                self.skipTest("skipped because not superuser")
            else:
                # Not a privilege problem: let the real error surface.
                raise
    return skip_if_no_superuser_
def skip_if_green(reason):
    """Return a decorator skipping a test when running in green mode."""
    def skip_if_green_(f):
        @wraps(f)
        def skip_if_green__(self):
            from testconfig import green
            if not green:
                return f(self)
            return self.skipTest(reason)
        return skip_if_green__
    return skip_if_green_


skip_copy_if_green = skip_if_green("copy in async mode currently not supported")
def skip_if_no_getrefcount(f):
    """Skip a test when sys.getrefcount() is missing (e.g. on PyPy)."""
    @wraps(f)
    def skip_if_no_getrefcount_(self):
        if hasattr(sys, 'getrefcount'):
            return f(self)
        return self.skipTest('skipped, no sys.getrefcount()')
    return skip_if_no_getrefcount_
def script_to_py3(script):
    """Convert a script to Python3 syntax if required.

    The script is written to a temporary file, converted in place with
    2to3, and the converted source is returned.

    Fix: the temporary file is now removed in all cases — the previous
    version leaked it when 2to3 raised or reported a conversion failure,
    because the cleanup only ran on the successful read path.
    """
    if sys.version_info[0] < 3:
        return script

    import tempfile
    f = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
    f.write(script.encode())
    f.flush()
    filename = f.name
    f.close()

    # 2to3 is way too chatty
    import logging
    logging.basicConfig(filename=os.devnull)

    try:
        from lib2to3.main import main
        if main("lib2to3.fixes", ['--no-diffs', '-w', '-n', filename]):
            raise Exception('py3 conversion failed')

        f2 = open(filename)
        try:
            return f2.read()
        finally:
            f2.close()
    finally:
        # Always clean up the temporary file, even on conversion failure.
        os.remove(filename)
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslicemirroring
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
endpointsliceutil "k8s.io/endpointslice/util"
"k8s.io/klog/v2"
endpointsv1 "k8s.io/kubernetes/pkg/api/v1/endpoints"
"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
)
// reconciler is responsible for transforming current EndpointSlice state into
// desired state
type reconciler struct {
	// client is used to create, update, and delete the mirrored
	// EndpointSlices in the API server.
	client clientset.Interface

	// endpointSliceTracker tracks the list of EndpointSlices and associated
	// resource versions expected for each Endpoints resource. It can help
	// determine if a cached EndpointSlice is out of date.
	endpointSliceTracker *endpointsliceutil.EndpointSliceTracker

	// eventRecorder allows reconciler to record an event if it finds an invalid
	// IP address in an Endpoints resource.
	eventRecorder record.EventRecorder

	// maxEndpointsPerSubset references the maximum number of endpoints that
	// should be added to an EndpointSlice for an EndpointSubset. This allows
	// for a simple 1:1 mapping between EndpointSubset and EndpointSlice.
	maxEndpointsPerSubset int32

	// metricsCache tracks values for total numbers of desired endpoints as well
	// as the efficiency of EndpointSlice endpoints distribution
	metricsCache *metrics.Cache
}
// reconcile takes an Endpoints resource and ensures that corresponding
// EndpointSlices exist. It creates, updates, or deletes EndpointSlices to
// ensure the desired set of addresses are represented by EndpointSlices.
// Invalid or over-limit addresses are counted, logged, and surfaced as
// events on the Endpoints resource; per-sync metrics are updated as a side
// effect. Returns an error only from the final API writes in finalize.
func (r *reconciler) reconcile(ctx context.Context, endpoints *corev1.Endpoints, existingSlices []*discovery.EndpointSlice) error {
	logger := klog.FromContext(ctx)

	// Calculate desired state.
	d := newDesiredCalc()

	numInvalidAddresses := 0
	addressesSkipped := 0

	// canonicalize the Endpoints subsets before processing them
	subsets := endpointsv1.RepackSubsets(endpoints.Subsets)
	for _, subset := range subsets {
		multiKey := d.initPorts(subset.Ports)

		totalAddresses := len(subset.Addresses) + len(subset.NotReadyAddresses)
		totalAddressesAdded := 0

		for _, address := range subset.Addresses {
			// Break if we've reached the max number of addresses to mirror
			// per EndpointSubset. This allows for a simple 1:1 mapping between
			// EndpointSubset and EndpointSlice.
			if totalAddressesAdded >= int(r.maxEndpointsPerSubset) {
				break
			}
			if ok := d.addAddress(address, multiKey, true); ok {
				totalAddressesAdded++
			} else {
				numInvalidAddresses++
				logger.Info("Address in Endpoints is not a valid IP, it will not be mirrored to an EndpointSlice", "endpoints", klog.KObj(endpoints), "IP", address.IP)
			}
		}

		// Same as above but ready=false for addresses from NotReadyAddresses.
		for _, address := range subset.NotReadyAddresses {
			// Break if we've reached the max number of addresses to mirror
			// per EndpointSubset. This allows for a simple 1:1 mapping between
			// EndpointSubset and EndpointSlice.
			if totalAddressesAdded >= int(r.maxEndpointsPerSubset) {
				break
			}
			if ok := d.addAddress(address, multiKey, false); ok {
				totalAddressesAdded++
			} else {
				numInvalidAddresses++
				logger.Info("Address in Endpoints is not a valid IP, it will not be mirrored to an EndpointSlice", "endpoints", klog.KObj(endpoints), "IP", address.IP)
			}
		}

		addressesSkipped += totalAddresses - totalAddressesAdded
	}

	// This metric includes addresses skipped for being invalid or exceeding
	// MaxEndpointsPerSubset.
	metrics.AddressesSkippedPerSync.WithLabelValues().Observe(float64(addressesSkipped))

	// Record an event on the Endpoints resource if we skipped mirroring for any
	// invalid IP addresses.
	if numInvalidAddresses > 0 {
		r.eventRecorder.Eventf(endpoints, corev1.EventTypeWarning, InvalidIPAddress,
			"Skipped %d invalid IP addresses when mirroring to EndpointSlices", numInvalidAddresses)
	}

	// Record a separate event if we skipped mirroring due to the number of
	// addresses exceeding MaxEndpointsPerSubset.
	if addressesSkipped > numInvalidAddresses {
		logger.Info("Addresses in Endpoints were skipped due to exceeding MaxEndpointsPerSubset", "skippedAddresses", addressesSkipped, "endpoints", klog.KObj(endpoints))
		r.eventRecorder.Eventf(endpoints, corev1.EventTypeWarning, TooManyAddressesToMirror,
			"A max of %d addresses can be mirrored to EndpointSlices per Endpoints subset. %d addresses were skipped", r.maxEndpointsPerSubset, addressesSkipped)
	}

	// Build data structures for existing state.
	existingSlicesByKey := endpointSlicesByKey(existingSlices)

	// Determine changes necessary for each group of slices by port map.
	epMetrics := metrics.NewEndpointPortCache()
	totals := totalsByAction{}
	slices := slicesByAction{}
	for portKey, desiredEndpoints := range d.endpointsByKey {
		numEndpoints := len(desiredEndpoints)
		pmSlices, pmTotals := r.reconcileByPortMapping(
			endpoints, existingSlicesByKey[portKey], desiredEndpoints, d.portsByKey[portKey], portKey.addressType())

		slices.append(pmSlices)
		totals.add(pmTotals)

		epMetrics.Set(endpointsliceutil.PortMapKey(portKey), metrics.EfficiencyInfo{
			Endpoints: numEndpoints,
			Slices:    len(existingSlicesByKey[portKey]) + len(pmSlices.toCreate) - len(pmSlices.toDelete),
		})
	}

	// If there are unique sets of ports that are no longer desired, mark
	// the corresponding endpoint slices for deletion.
	for portKey, existingSlices := range existingSlicesByKey {
		if _, ok := d.endpointsByKey[portKey]; !ok {
			for _, existingSlice := range existingSlices {
				slices.toDelete = append(slices.toDelete, existingSlice)
			}
		}
	}

	metrics.EndpointsAddedPerSync.WithLabelValues().Observe(float64(totals.added))
	metrics.EndpointsUpdatedPerSync.WithLabelValues().Observe(float64(totals.updated))
	metrics.EndpointsRemovedPerSync.WithLabelValues().Observe(float64(totals.removed))

	endpointsNN := types.NamespacedName{Name: endpoints.Name, Namespace: endpoints.Namespace}
	r.metricsCache.UpdateEndpointPortCache(endpointsNN, epMetrics)

	// Apply the computed creates/updates/deletes against the API server.
	return r.finalize(ctx, endpoints, slices)
}
// reconcileByPortMapping compares the endpoints found in existing slices with
// the list of desired endpoints and returns lists of slices to create, update,
// and delete. Note that desiredSet is consumed (drained via PopAny) when a
// new slice is generated.
func (r *reconciler) reconcileByPortMapping(
	endpoints *corev1.Endpoints,
	existingSlices []*discovery.EndpointSlice,
	desiredSet endpointsliceutil.EndpointSet,
	endpointPorts []discovery.EndpointPort,
	addressType discovery.AddressType,
) (slicesByAction, totalsByAction) {
	slices := slicesByAction{}
	totals := totalsByAction{}

	// If no endpoints are desired, mark existing slices for deletion and
	// return.
	if desiredSet.Len() == 0 {
		slices.toDelete = existingSlices
		for _, epSlice := range existingSlices {
			totals.removed += len(epSlice.Endpoints)
		}
		return slices, totals
	}

	if len(existingSlices) == 0 {
		// if no existing slices, all desired endpoints will be added.
		totals.added = desiredSet.Len()
	} else {
		// if >0 existing slices, mark all but 1 for deletion.
		slices.toDelete = existingSlices[1:]

		// generated slices must mirror all endpoints annotations but EndpointsLastChangeTriggerTime and LastAppliedConfigAnnotation
		compareAnnotations := cloneAndRemoveKeys(endpoints.Annotations, corev1.EndpointsLastChangeTriggerTime, corev1.LastAppliedConfigAnnotation)
		compareLabels := cloneAndRemoveKeys(existingSlices[0].Labels, discovery.LabelManagedBy, discovery.LabelServiceName)
		// Return early if first slice matches desired endpoints, labels and annotations
		totals = totalChanges(existingSlices[0], desiredSet)
		if totals.added == 0 && totals.updated == 0 && totals.removed == 0 &&
			apiequality.Semantic.DeepEqual(endpoints.Labels, compareLabels) &&
			apiequality.Semantic.DeepEqual(compareAnnotations, existingSlices[0].Annotations) &&
			!needRebuildExistingSlices(endpoints, existingSlices[0]) {
			if !r.endpointSliceTracker.Has(existingSlices[0]) {
				r.endpointSliceTracker.Update(existingSlices[0]) // Always ensure each EndpointSlice is being tracked.
			}
			return slices, totals
		}
	}

	// generate a new slice with the desired endpoints.
	var sliceName string
	if len(existingSlices) > 0 {
		sliceName = existingSlices[0].Name
	}
	newSlice := newEndpointSlice(endpoints, endpointPorts, addressType, sliceName)
	// Drain desired endpoints into the new slice, up to the configured
	// per-subset maximum.
	for desiredSet.Len() > 0 && len(newSlice.Endpoints) < int(r.maxEndpointsPerSubset) {
		endpoint, _ := desiredSet.PopAny()
		newSlice.Endpoints = append(newSlice.Endpoints, *endpoint)
	}

	if newSlice.Name != "" {
		slices.toUpdate = []*discovery.EndpointSlice{newSlice}
	} else { // Slices to be created set GenerateName instead of Name.
		slices.toCreate = []*discovery.EndpointSlice{newSlice}
	}

	return slices, totals
}
// finalize creates, updates, and deletes slices as specified
// (in that order), updating the tracker and change metrics for each
// successful API write. It stops and returns on the first error.
func (r *reconciler) finalize(ctx context.Context, endpoints *corev1.Endpoints, slices slicesByAction) error {
	// If there are slices to create and delete, recycle the slices marked for
	// deletion by replacing creates with updates of slices that would otherwise
	// be deleted.
	recycleSlices(&slices)

	epsClient := r.client.DiscoveryV1().EndpointSlices(endpoints.Namespace)

	// Don't create more EndpointSlices if corresponding Endpoints resource is
	// being deleted.
	if endpoints.DeletionTimestamp == nil {
		for _, endpointSlice := range slices.toCreate {
			createdSlice, err := epsClient.Create(ctx, endpointSlice, metav1.CreateOptions{})
			if err != nil {
				// If the namespace is terminating, creates will continue to fail. Simply drop the item.
				if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
					return nil
				}
				return fmt.Errorf("failed to create EndpointSlice for Endpoints %s/%s: %v", endpoints.Namespace, endpoints.Name, err)
			}
			r.endpointSliceTracker.Update(createdSlice)
			metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
		}
	}

	for _, endpointSlice := range slices.toUpdate {
		updatedSlice, err := epsClient.Update(ctx, endpointSlice, metav1.UpdateOptions{})
		if err != nil {
			return fmt.Errorf("failed to update %s EndpointSlice for Endpoints %s/%s: %v", endpointSlice.Name, endpoints.Namespace, endpoints.Name, err)
		}
		r.endpointSliceTracker.Update(updatedSlice)
		metrics.EndpointSliceChanges.WithLabelValues("update").Inc()
	}

	for _, endpointSlice := range slices.toDelete {
		err := epsClient.Delete(ctx, endpointSlice.Name, metav1.DeleteOptions{})
		if err != nil {
			return fmt.Errorf("failed to delete %s EndpointSlice for Endpoints %s/%s: %v", endpointSlice.Name, endpoints.Namespace, endpoints.Name, err)
		}
		// Record the expected deletion so a stale cached copy of the slice is
		// not mistaken for current state.
		r.endpointSliceTracker.ExpectDeletion(endpointSlice)
		metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
	}

	return nil
}
// deleteEndpoints deletes any associated EndpointSlices and cleans up any
// Endpoints references from the metricsCache. Deletion is attempted for
// every slice even if some deletions fail; a summary error is returned.
func (r *reconciler) deleteEndpoints(ctx context.Context, namespace, name string, endpointSlices []*discovery.EndpointSlice) error {
	r.metricsCache.DeleteEndpoints(types.NamespacedName{Namespace: namespace, Name: name})
	var errs []error
	for _, slice := range endpointSlices {
		if err := r.client.DiscoveryV1().EndpointSlices(namespace).Delete(ctx, slice.Name, metav1.DeleteOptions{}); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("error(s) deleting %d/%d EndpointSlices for %s/%s Endpoints, including: %s", len(errs), len(endpointSlices), namespace, name, errs[0])
}
// endpointSlicesByKey returns a map that groups EndpointSlices by unique
// addrTypePortMapKey values.
func endpointSlicesByKey(existingSlices []*discovery.EndpointSlice) map[addrTypePortMapKey][]*discovery.EndpointSlice {
	grouped := make(map[addrTypePortMapKey][]*discovery.EndpointSlice)
	for _, slice := range existingSlices {
		// Slices sharing an address type and port set share one bucket.
		key := newAddrTypePortMapKey(slice.Ports, slice.AddressType)
		grouped[key] = append(grouped[key], slice)
	}
	return grouped
}
// totalChanges returns the total changes that will be required for an
// EndpointSlice to match a desired set of endpoints.
func totalChanges(existingSlice *discovery.EndpointSlice, desiredSet endpointsliceutil.EndpointSet) totalsByAction {
	totals := totalsByAction{}
	matched := 0
	for _, endpoint := range existingSlice.Endpoints {
		desired := desiredSet.Get(&endpoint)
		switch {
		case desired == nil:
			// Present in the slice but no longer desired: delete.
			totals.removed++
		case !endpointsliceutil.EndpointsEqualBeyondHash(desired, &endpoint):
			// Desired but stale: update in place.
			matched++
			totals.updated++
		default:
			// Desired and already up to date.
			matched++
		}
	}
	// Any desired endpoints that have not been found in the existing slice will
	// be added.
	totals.added = desiredSet.Len() - matched
	return totals
}
func needRebuildExistingSlices(endpoints *corev1.Endpoints, existingSlice *discovery.EndpointSlice) bool {
for index := range existingSlice.OwnerReferences {
owner := existingSlice.OwnerReferences[index]
if owner.Kind == "Endpoints" && owner.Name == endpoints.Name && owner.UID != endpoints.UID {
return true
}
}
return false
} | go | github | https://github.com/kubernetes/kubernetes | pkg/controller/endpointslicemirroring/reconciler.go |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_MATH_GRAD_H_
#define TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_MATH_GRAD_H_
#include "tensorflow/c/eager/gradients.h"
namespace tensorflow {
namespace gradients {
// Gradient registerers for elementary math ops. Each one builds the
// GradientFunction corresponding to a single forward op, given that op's
// recorded ForwardOperation (inputs/outputs/attributes).
GradientFunction* AddRegisterer(const ForwardOperation& op);
GradientFunction* ExpRegisterer(const ForwardOperation& op);
GradientFunction* MatMulRegisterer(const ForwardOperation& op);
GradientFunction* SqrtRegisterer(const ForwardOperation& op);
GradientFunction* NegRegisterer(const ForwardOperation& op);
GradientFunction* SubRegisterer(const ForwardOperation& op);
GradientFunction* MulRegisterer(const ForwardOperation& op);
GradientFunction* Log1pRegisterer(const ForwardOperation& op);
GradientFunction* DivNoNanRegisterer(const ForwardOperation& op);
}  // namespace gradients
}  // namespace tensorflow
#endif  // TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_MATH_GRAD_H_
#!/usr/bin/env python
"""
SocksiPy + urllib2 handler
version: 0.3
author: e<e@tr0ll.in>
This module provides a Handler which you can use with urllib2 to allow it to tunnel your connection through a socks.sockssocket socket, with out monkey patching the original socket...
"""
import ssl
try:
import urllib2
import httplib
except ImportError: # Python 3
import urllib.request as urllib2
import http.client as httplib
import socks # $ pip install PySocks
def merge_dict(a, b):
    """Return a new dict containing *a*'s entries overridden by *b*'s."""
    merged = dict(a)
    merged.update(b)
    return merged
class SocksiPyConnection(httplib.HTTPConnection):
    """HTTPConnection that tunnels through a SOCKS proxy instead of
    connecting directly."""

    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
        # Stash the proxy settings; they are applied lazily in connect().
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
        httplib.HTTPConnection.__init__(self, *args, **kwargs)

    def connect(self):
        self.sock = socks.socksocket()
        self.sock.setproxy(*self.proxyargs)
        # isinstance instead of type() membership: accepts int/float
        # subclasses too, and still skips the non-numeric default-timeout
        # sentinel (socket._GLOBAL_DEFAULT_TIMEOUT).
        if isinstance(self.timeout, (int, float)):
            self.sock.settimeout(self.timeout)
        self.sock.connect((self.host, self.port))
class SocksiPyConnectionS(httplib.HTTPSConnection):
    """HTTPSConnection that tunnels through a SOCKS proxy, then wraps the
    tunnel socket in TLS."""

    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
        # Stash the proxy settings; they are applied lazily in connect().
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def connect(self):
        sock = socks.socksocket()
        sock.setproxy(*self.proxyargs)
        # isinstance instead of type() membership: accepts int/float
        # subclasses too, and still skips the non-numeric default-timeout
        # sentinel (socket._GLOBAL_DEFAULT_TIMEOUT).
        if isinstance(self.timeout, (int, float)):
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
        # NOTE(review): ssl.wrap_socket was deprecated in 3.7 and removed in
        # Python 3.12; running there requires migrating to
        # ssl.SSLContext.wrap_socket.
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
class SocksiPyHandler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
    """urllib2/urllib.request handler that routes HTTP and HTTPS requests
    through the SOCKS-aware connection classes."""

    def __init__(self, *args, **kwargs):
        # Positional/keyword arguments are forwarded verbatim to the
        # SocksiPyConnection(S) constructors for every request.
        self.args = args
        self.kw = kwargs
        urllib2.HTTPHandler.__init__(self)

    def _build_plain(self, host, port=None, timeout=0, **kwargs):
        # Builder passed to do_open(); merges per-request kwargs over ours.
        merged = merge_dict(self.kw, kwargs)
        return SocksiPyConnection(*self.args, host=host, port=port, timeout=timeout, **merged)

    def _build_tls(self, host, port=None, timeout=0, **kwargs):
        merged = merge_dict(self.kw, kwargs)
        return SocksiPyConnectionS(*self.args, host=host, port=port, timeout=timeout, **merged)

    def http_open(self, req):
        return self.do_open(self._build_plain, req)

    def https_open(self, req):
        return self.do_open(self._build_tls, req)
if __name__ == "__main__":
    import sys
    # Demo: tunnel two requests through a local SOCKS5 proxy. The proxy
    # port may be given as the first CLI argument; default is 9050
    # (Tor's standard SOCKS port).
    try:
        port = int(sys.argv[1])
    except (ValueError, IndexError):
        port = 9050
    opener = urllib2.build_opener(SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "localhost", port))
    # httpbin.org/ip echoes the IP the request arrived from, which should
    # be the proxy's exit address rather than ours.
    print("HTTP: " + opener.open("http://httpbin.org/ip").read().decode())
    print("HTTPS: " + opener.open("https://httpbin.org/ip").read().decode())
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from page_sets import key_mobile_sites_pages
class KeyMobileSitesPage(page_module.Page):
  """A single key-mobile-sites story sharing the set's WPR archive.

  Args:
    url: URL the story loads.
    page_set: The owning page set.
    name: Optional human-readable story name.
    labels: Optional list of labels (e.g. ['fastpath']).
    action_on_load_complete: Flag stored for benchmarks to read; its
        semantics live in the consumers, not in this file — see callers.
  """

  def __init__(self, url, page_set, name='', labels=None,
               action_on_load_complete=False):
    super(KeyMobileSitesPage, self).__init__(
        url=url, page_set=page_set, name=name,
        credentials_path='data/credentials.json', labels=labels)
    # Every story in this set replays with a mobile UA from one archive.
    self.user_agent_type = 'mobile'
    self.archive_data_file = 'data/key_mobile_sites.json'
    self.action_on_load_complete = action_on_load_complete
class KeyMobileSitesPageSet(page_set_module.PageSet):
  """Key mobile sites with smooth interactions.

  Stories come from three sources, added in order: predefined page classes
  with custom navigation logic, labelled KeyMobileSitesPage instances, and
  a plain list of URLs with no custom logic or labels.
  """

  def __init__(self):
    super(KeyMobileSitesPageSet, self).__init__(
      user_agent_type='mobile',
      archive_data_file='data/key_mobile_sites.json',
      bucket=page_set_module.PARTNER_BUCKET)

    # Add pages with predefined classes that contain custom navigation logic.
    predefined_page_classes = [
      key_mobile_sites_pages.CapitolVolkswagenPage,
      key_mobile_sites_pages.TheVergeArticlePage,
      key_mobile_sites_pages.CnnArticlePage,
      key_mobile_sites_pages.FacebookPage,
      key_mobile_sites_pages.YoutubeMobilePage,
      key_mobile_sites_pages.LinkedInPage,
      key_mobile_sites_pages.YahooAnswersPage,
      key_mobile_sites_pages.GoogleNewsMobilePage,
      key_mobile_sites_pages.GoogleNewsMobile2Page,
      key_mobile_sites_pages.AmazonNicolasCagePage,
    ]
    for page_class in predefined_page_classes:
      self.AddUserStory(page_class(self))

    # Add pages with custom page interaction logic.

    # Page behaves non-deterministically, replaced with test version for now.
    # self.AddUserStory(GroupClonedPage(self))
    # mean_input_event_latency cannot be tracked correctly for
    # GroupClonedListImagesPage.
    # See crbug.com/409086.
    # self.AddUserStory(GroupClonedListImagesPage(self))

    # Add pages with custom labels.

    # Why: Top news site.
    self.AddUserStory(KeyMobileSitesPage(
      url='http://nytimes.com/', page_set=self, labels=['fastpath']))

    # Why: Image-heavy site.
    self.AddUserStory(KeyMobileSitesPage(
      url='http://cuteoverload.com', page_set=self, labels=['fastpath']))

    # Why: #11 (Alexa global), google property; some blogger layouts
    # have infinite scroll but more interesting.
    self.AddUserStory(KeyMobileSitesPage(
      url='http://googlewebmastercentral.blogspot.com/',
      page_set=self, name='Blogger'))

    # Why: #18 (Alexa global), Picked an interesting post """
    self.AddUserStory(KeyMobileSitesPage(
      # pylint: disable=line-too-long
      url='http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
      page_set=self,
      name='Wordpress'))

    # Why: #6 (Alexa) most visited worldwide, picked an interesting page
    self.AddUserStory(KeyMobileSitesPage(
      url='http://en.wikipedia.org/wiki/Wikipedia',
      page_set=self,
      name='Wikipedia (1 tab)'))

    # Why: Wikipedia page with a delayed scroll start
    self.AddUserStory(KeyMobileSitesPage(
      url='http://en.wikipedia.org/wiki/Wikipedia',
      page_set=self,
      name='Wikipedia (1 tab) - delayed scroll start',
      action_on_load_complete=True))

    # Why: #8 (Alexa global), picked an interesting page
    # Forbidden (Rate Limit Exceeded)
    # self.AddUserStory(KeyMobileSitesPage(
    #  url='http://twitter.com/katyperry', page_set=self, name='Twitter'))

    # Why: #37 (Alexa global) """
    self.AddUserStory(KeyMobileSitesPage(
        url='http://pinterest.com',
        page_set=self,
        name='Pinterest'))

    # Why: #1 sports.
    # Fails often; crbug.com/249722'
    # self.AddUserStory(KeyMobileSitesPage(
    #   url='http://espn.go.com', page_set=self, name='ESPN'))

    # Why: crbug.com/231413
    # Doesn't scroll; crbug.com/249736
    # self.AddUserStory(KeyMobileSitesPage(
    #     url='http://forecast.io', page_set=self))

    # Why: crbug.com/169827
    self.AddUserStory(KeyMobileSitesPage(
      url='http://slashdot.org/', page_set=self, labels=['fastpath']))

    # Why: #5 Alexa news """
    self.AddUserStory(KeyMobileSitesPage(
      url='http://www.reddit.com/r/programming/comments/1g96ve',
      page_set=self, labels=['fastpath']))

    # Why: Problematic use of fixed position elements """
    self.AddUserStory(KeyMobileSitesPage(
      url='http://www.boingboing.net', page_set=self, labels=['fastpath']))

    # Add simple pages with no custom navigation logic or labels.
    urls_list = [
      # Why: Social; top Google property; Public profile; infinite scrolls.
      # pylint: disable=line-too-long
      'https://plus.google.com/app/basic/110031535020051778989/posts?source=apppromo',
      # Why: crbug.com/242544
      ('http://www.androidpolice.com/2012/10/03/rumor-evidence-mounts-that-an-'
       'lg-optimus-g-nexus-is-coming-along-with-a-nexus-phone-certification-'
       'program/'),
      # Why: crbug.com/149958
      'http://gsp.ro',
      # Why: Top tech blog
      'http://theverge.com',
      # Why: Top tech site
      'http://digg.com',
      # Why: Top Google property; a Google tab is often open
      'https://www.google.com/#hl=en&q=barack+obama',
      # Why: #1 news worldwide (Alexa global)
      'http://news.yahoo.com',
      # Why: #2 news worldwide
      'http://www.cnn.com',
      # Why: #1 commerce website by time spent by users in US
      'http://shop.mobileweb.ebay.com/searchresults?kw=viking+helmet',
      # Why: #1 Alexa recreation
      # pylint: disable=line-too-long
      'http://www.booking.com/searchresults.html?src=searchresults&latitude=65.0500&longitude=25.4667',
      # Why: #1 Alexa sports
      'http://sports.yahoo.com/',
      # Why: Top tech blog
      'http://techcrunch.com',
      # Why: #6 Alexa sports
      'http://mlb.com/',
      # Why: #14 Alexa California
      'http://www.sfgate.com/',
      # Why: Non-latin character set
      'http://worldjournal.com/',
      # Why: Mobile wiki
      'http://www.wowwiki.com/World_of_Warcraft:_Mists_of_Pandaria',
      # Why: #15 Alexa news
      'http://online.wsj.com/home-page',
      # Why: Image-heavy mobile site
      'http://www.deviantart.com/',
      # Why: Top search engine
      ('http://www.baidu.com/s?wd=barack+obama&rsv_bp=0&rsv_spt=3&rsv_sug3=9&'
       'rsv_sug=0&rsv_sug4=3824&rsv_sug1=3&inputT=4920'),
      # Why: Top search engine
      'http://www.bing.com/search?q=sloths',
      # Why: Good example of poor initial scrolling
      'http://ftw.usatoday.com/2014/05/spelling-bee-rules-shenanigans'
    ]
    for url in urls_list:
      self.AddUserStory(KeyMobileSitesPage(url, self))
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.statistics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Common statistic names for Filesystem-level statistics,
 * including internals.
 *
 * <p>These constants are keys under which durations are recorded in
 * IOStatistics stores; they are public API and must stay stable.</p>
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class FileSystemStatisticNames {

  /** Constants-only holder class; never instantiated. */
  private FileSystemStatisticNames() {
  }

  /**
   * How long did filesystem initialization take?
   */
  public static final String FILESYSTEM_INITIALIZATION = "filesystem_initialization";

  /**
   * How long did filesystem close take?
   */
  public static final String FILESYSTEM_CLOSE = "filesystem_close";
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import json
import os
import os.path
import re
import sys
from distutils.version import LooseVersion
import packaging.specifiers
from ansible.module_utils.urls import open_url
# Byte-level marker: a file mentioning _BUNDLED_METADATA declares itself bundled.
BUNDLED_RE = re.compile(b'\\b_BUNDLED_METADATA\\b')
def get_bundled_libs(paths):
    """Return the set of paths known to contain code bundled from elsewhere.

    Combines every compat-package __init__ found in *paths* with a fixed
    list of known bundled module_utils files.
    """
    bundled = set(fnmatch.filter(paths, 'lib/ansible/compat/*/__init__.py'))
    bundled.update((
        'lib/ansible/module_utils/distro/__init__.py',
        'lib/ansible/module_utils/six/__init__.py',
        'lib/ansible/module_utils/compat/ipaddress.py',
        # backports.ssl_match_hostname should be moved to its own file in the future
        'lib/ansible/module_utils/urls.py',
    ))
    return bundled
def get_files_with_bundled_metadata(paths):
    """Return the subset of *paths* whose contents declare _BUNDLED_METADATA."""
    flagged = set()
    for candidate in paths:
        # The checker itself necessarily mentions the marker; never flag it.
        if candidate == 'test/sanity/code-smell/update-bundled.py':
            continue
        with open(candidate, 'rb') as handle:
            if BUNDLED_RE.search(handle.read()):
                flagged.add(candidate)
    return flagged
def get_bundled_metadata(filename):
    """Parse and return the _BUNDLED_METADATA dict embedded in *filename*.

    The metadata line must contain a JSON object literal; everything from
    the first '{' onward is fed to json.loads.

    Raises ValueError when the file carries no metadata marker.
    """
    with open(filename, 'r') as module:
        for line in module:
            if line.strip().startswith('_BUNDLED_METADATA'):
                return json.loads(line[line.index('{'):].strip())
    raise ValueError('Unable to check bundled library for update. Please add'
                     ' _BUNDLED_METADATA dictionary to the library file with'
                     ' information on pypi name and bundled version.')
def get_latest_applicable_version(pypi_data, constraints=None):
    """Return the newest release in *pypi_data* satisfying *constraints*.

    :param pypi_data: decoded PyPI JSON document for the package.
    :param constraints: optional PEP 440 specifier string (e.g. "<2.0,>=1.4");
        when None, PyPI's advertised latest version is returned as-is.
    """
    # Fix: the original consulted the module-level `metadata` dict instead of
    # its own `constraints` parameter, so the argument was silently ignored.
    latest_version = "0"
    if constraints is not None:
        version_specification = packaging.specifiers.SpecifierSet(constraints)
        for version in pypi_data['releases']:
            if version in version_specification:
                if LooseVersion(version) > LooseVersion(latest_version):
                    latest_version = version
    else:
        latest_version = pypi_data['info']['version']
    return latest_version
if __name__ == '__main__':
    # Paths come from argv, or (CI mode) one per line on stdin.
    paths = sys.argv[1:] or sys.stdin.read().splitlines()

    bundled_libs = get_bundled_libs(paths)
    files_with_bundled_metadata = get_files_with_bundled_metadata(paths)

    # Files that embed metadata but are not registered in get_bundled_libs
    # would otherwise never be checked for updates — report them.
    for filename in files_with_bundled_metadata.difference(bundled_libs):
        print('{0}: ERROR: File contains _BUNDLED_METADATA but needs to be added to'
              ' test/sanity/code-smell/update-bundled.py'.format(filename))

    for filename in bundled_libs:
        try:
            metadata = get_bundled_metadata(filename)
        except ValueError as e:
            print('{0}: ERROR: {1}'.format(filename, e))
            continue
        except (IOError, OSError) as e:
            if e.errno == 2:
                print('{0}: ERROR: {1}. Perhaps the bundled library has been removed'
                      ' or moved and the bundled library test needs to be modified as'
                      ' well?'.format(filename, e))
                # Fix: without this continue, execution fell through to the
                # PyPI query below with `metadata` unbound (first iteration)
                # or stale from the previous file.
                continue
            raise

        pypi_fh = open_url('https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name']))
        pypi_data = json.loads(pypi_fh.read().decode('utf-8'))

        constraints = metadata.get('version_constraints', None)
        latest_version = get_latest_applicable_version(pypi_data, constraints)

        if LooseVersion(metadata['version']) < LooseVersion(latest_version):
            print('{0}: UPDATE {1} from {2} to {3} {4}'.format(
                filename,
                metadata['pypi_name'],
                metadata['version'],
                latest_version,
                'https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name'])))
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
// FakeConfigMap represents a fake config map
type FakeConfigMap struct {
	// Name is used as the ConfigMap's metadata.name (namespace is always
	// kube-system; see Create).
	Name string
	// Data is the key/value payload stored in the ConfigMap.
	Data map[string]string
}
// Create creates a fake configmap using the provided client
func (c *FakeConfigMap) Create(client clientset.Interface) error {
return apiclient.CreateOrUpdate(client.CoreV1().ConfigMaps(metav1.NamespaceSystem), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: metav1.NamespaceSystem,
},
Data: c.Data,
})
} | go | github | https://github.com/kubernetes/kubernetes | cmd/kubeadm/test/resources/configmap.go |
# (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from paste.auth.digest import *
from paste.wsgilib import raw_interactive
from paste.httpexceptions import *
from paste.httpheaders import AUTHORIZATION, WWW_AUTHENTICATE, REMOTE_USER
import os
import six
def application(environ, start_response):
    """Minimal WSGI app that echoes the authenticated user name as the body."""
    content = REMOTE_USER(environ)
    start_response("200 OK",(('Content-Type', 'text/plain'),
                             ('Content-Length', len(content))))
    if six.PY3:
        # WSGI response bodies must be bytes on Python 3.
        content = content.encode('utf8')
    return [content]
# Realm string identifying this protection space in digest challenges.
realm = "tag:clarkevans.com,2005:testing"

def backwords(environ, realm, username):
    """Dummy password-hash lookup: each user's password is their name reversed."""
    return digest_password(realm, username, username[::-1])

# Guard the echo app with digest authentication, then render auth
# exceptions (401 etc.) as proper HTTP responses.
application = AuthDigestHandler(application, realm, backwords)
application = HTTPExceptionHandler(application)
def check(username, password, path="/"):
    """Perform two-stage digest authentication; return the body on success,
    None on a 401 rejection."""
    # Stage 1: unauthenticated request must be challenged with a 401.
    status, headers, content, errors = raw_interactive(
        application, path, accept='text/html')
    assert status.startswith("401")
    challenge = WWW_AUTHENTICATE(headers)
    response = AUTHORIZATION(username=username, password=password,
                             challenge=challenge, path=path)
    assert "Digest" in response and username in response
    # Stage 2: replay the request carrying the computed digest response.
    status, headers, content, errors = raw_interactive(
        application, path, HTTP_AUTHORIZATION=response)
    if status.startswith("200"):
        return content
    if status.startswith("401"):
        return None
    assert False, "Unexpected Status: %s" % status
def test_digest():
    """The reversed password authenticates; anything else is rejected."""
    assert check("bing", "gnib") == b'bing'
    assert check("bing", "bad") is None
#
# The following code uses sockets to test the functionality,
# to enable use:
#
# $ TEST_SOCKET py.test
#
if os.environ.get("TEST_SOCKET",""):
    from six.moves.urllib.error import HTTPError
    from six.moves.urllib.request import build_opener, HTTPDigestAuthHandler
    from paste.debug.testserver import serve
    # Start a real HTTP server wrapping the digest-protected app.
    server = serve(application)

    def authfetch(username,password,path="/",realm=realm):
        """Fetch *path* via urllib's digest-auth handler; return the body."""
        # Let the server accept up to 2 connections for this exchange.
        server.accept(2)
        import socket
        socket.setdefaulttimeout(5)
        uri = ("http://%s:%s" % server.server_address) + path
        auth = HTTPDigestAuthHandler()
        auth.add_password(realm,uri,username,password)
        opener = build_opener(auth)
        result = opener.open(uri)
        return result.read()

    def test_success():
        """Correct reversed password round-trips through a live socket."""
        assert "bing" == authfetch('bing','gnib')

    def test_failure():
        """A wrong password ends in an HTTP 401 after retries."""
        # urllib tries 5 more times before it gives up
        server.accept(5)
        try:
            authfetch('bing','wrong')
            assert False, "this should raise an exception"
        except HTTPError as e:
            assert e.code == 401

    def test_shutdown():
        """Stop the background test server (runs last)."""
        server.stop()
import csv
import shutil
import datetime
# Input CSV of hourly consumption data, and where its pristine copy is kept.
ORIGINAL = "rec-center-hourly.csv"
BACKUP = "rec-center-hourly-backup.csv"
# Timestamp layout of the CSV's first column, e.g. "10/05/10 14:00".
DATE_FORMAT = "%m/%d/%y %H:%M"
def isTuesday(date):
    """Return True when *date* falls on a Tuesday (weekday(): Monday == 0).

    Fix: the original compared with `is`, which tests object identity and
    only worked via CPython's small-int caching; `==` is the correct test.
    """
    return date.weekday() == 1
def withinOctober(date):
    """Return True when *date* lies within October 2010 (inclusive start)."""
    start = datetime.datetime(2010, 10, 1)
    end = datetime.datetime(2010, 11, 1)
    return start <= date < end
def run():
    """Rewrite ORIGINAL so every Tuesday in October 2010 reads 5.0.

    A pristine copy is saved to BACKUP first. Python 2 style throughout:
    csv files opened in binary mode and reader.next() for the header rows.
    """
    # Backup original
    shutil.copyfile(ORIGINAL, BACKUP)
    with open(ORIGINAL, 'rb') as inputFile:
        reader = csv.reader(inputFile)
        outputCache = ""
        # The CSV carries three header rows (names, types, flags); copy
        # them through unchanged.
        headers = reader.next()
        types = reader.next()
        flags = reader.next()
        for row in [headers, types, flags]:
            outputCache += ",".join(row) + "\n"
        for row in reader:
            dateString = row[0]
            date = datetime.datetime.strptime(dateString, DATE_FORMAT)
            consumption = float(row[1])
            # Inject the artificial anomaly value on matching rows.
            if isTuesday(date) and withinOctober(date):
                consumption = 5.0
            outputCache += "%s,%f\n" % (dateString, consumption)
    # Overwrite the input in place with the modified rows.
    with open(ORIGINAL, 'wb') as outputFile:
        outputFile.write(outputCache)

if __name__ == "__main__":
    run()
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Controls verbosity of the script output and logging.
KUBE_VERBOSE="${KUBE_VERBOSE:-2}"

# Handler for when we exit automatically on an error.
# Borrowed from https://gist.github.com/ahendrix/7030300
#
# Installed as an ERR trap by kube::log::install_errexit. Prints the bash
# call tree (when deep enough) and delegates to kube::log::error_exit with
# the failing command and its pipeline status.
kube::log::errexit() {
  local err="${PIPESTATUS[*]}"

  # If the shell we are in doesn't have errexit set (common in subshells) then
  # don't dump stacks.
  set +o | grep -qe "-o errexit" || return

  set +o xtrace
  # NOTE(review): 'code' is captured but never used below; error_exit
  # re-reads "${1:-1}" directly.
  local code="${1:-1}"
  # Print out the stack trace described by $function_stack
  if [ ${#FUNCNAME[@]} -gt 2 ]
  then
    kube::log::error "Call tree:"
    for ((i=1;i<${#FUNCNAME[@]}-1;i++))
    do
      kube::log::error " ${i}: ${BASH_SOURCE[${i}+1]}:${BASH_LINENO[${i}]} ${FUNCNAME[${i}]}(...)"
    done
  fi
  kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status ${err}" "${1:-1}" 1
}
# Install kube::log::errexit as the shell's ERR trap.
kube::log::install_errexit() {
  # trap ERR to provide an error handler whenever a command exits nonzero this
  # is a more verbose version of set -o errexit
  trap 'kube::log::errexit' ERR

  # setting errtrace allows our ERR trap handler to be propagated to functions,
  # expansions and subshells
  set -o errtrace
}

# Print out the stack trace
#
# Args:
#   $1 The number of stack frames to skip when printing.
kube::log::stack() {
  local stack_skip=${1:-0}
  # Always skip this function's own frame as well.
  stack_skip=$((stack_skip + 1))
  if [[ ${#FUNCNAME[@]} -gt ${stack_skip} ]]; then
    echo "Call stack:" >&2
    local i
    for ((i=1 ; i <= ${#FUNCNAME[@]} - stack_skip ; i++))
    do
      local frame_no=$((i - 1 + stack_skip))
      local source_file=${BASH_SOURCE[${frame_no}]}
      local source_lineno=${BASH_LINENO[$((frame_no - 1))]}
      local funcname=${FUNCNAME[${frame_no}]}
      echo " ${i}: ${source_file}:${source_lineno} ${funcname}(...)" >&2
    done
  fi
}
# Log an error and exit.
# Args:
#   $1 Message to log with the error
#   $2 The error code to return
#   $3 The number of stack frames to skip when printing.
kube::log::error_exit() {
  # NOTE(review): 'message' is assigned but "${1}" is echoed directly below.
  local message="${1:-}"
  local code="${2:-1}"
  local stack_skip="${3:-0}"
  stack_skip=$((stack_skip + 1))

  # The diagnostic dump only appears at high verbosity; the exit happens
  # regardless.
  if [[ ${KUBE_VERBOSE} -ge 4 ]]; then
    local source_file=${BASH_SOURCE[${stack_skip}]}
    local source_line=${BASH_LINENO[$((stack_skip - 1))]}
    echo "!!! Error in ${source_file}:${source_line}" >&2
    [[ -z ${1-} ]] || {
      echo " ${1}" >&2
    }

    kube::log::stack ${stack_skip}

    echo "Exiting with status ${code}" >&2
  fi

  exit "${code}"
}

# Log an error but keep going. Don't dump the stack or exit.
kube::log::error() {
  # NOTE(review): 'timestamp' is intentionally not local; the file's other
  # loggers follow the same pattern.
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "!!! ${timestamp} ${1-}" >&2
  shift
  # Remaining arguments become indented continuation lines.
  for message; do
    echo " ${message}" >&2
  done
}
# Print an usage message to stderr. The arguments are printed directly.
kube::log::usage() {
  echo >&2
  local message
  for message; do
    echo "${message}" >&2
  done
  echo >&2
}

# Read one message per line from stdin and hand them to kube::log::usage.
kube::log::usage_from_stdin() {
  local messages=()
  while read -r line; do
    messages+=("${line}")
  done

  kube::log::usage "${messages[@]}"
}

# Print out some info that isn't a top level status line
#
# Suppressed when the caller's requested level V exceeds KUBE_VERBOSE.
kube::log::info() {
  local V="${V:-0}"
  if (( KUBE_VERBOSE < V )); then
    return
  fi

  for message; do
    echo "${message}"
  done
}

# Just like kube::log::info, but no \n, so you can make a progress bar
kube::log::progress() {
  for message; do
    echo -e -n "${message}"
  done
}

# Read one message per line from stdin and hand them to kube::log::info.
kube::log::info_from_stdin() {
  local messages=()
  while read -r line; do
    messages+=("${line}")
  done

  kube::log::info "${messages[@]}"
}
# Print a status line. Formatted to show up in a stream of output.
#
# Suppressed when the caller's requested level V exceeds KUBE_VERBOSE;
# the first argument is the headline, the rest are indented details.
kube::log::status() {
  local V="${V:-0}"
  if (( KUBE_VERBOSE < V )); then
    return
  fi

  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "+++ ${timestamp} ${1}"
  shift
  for message; do
    echo " ${message}"
  done
}

# Log a command and run it. Uses a subshell which gets replaced by the command after logging.
# Note the parentheses: the function body runs in a subshell, so exec
# replaces only that subshell with the command, not the calling script.
kube::log::run() (
  V="${V:-0}"
  if (( KUBE_VERBOSE >= V )); then
    timestamp=$(date +"[%m%d %H:%M:%S]")
    echo "+++ ${timestamp} ${*}"
  fi
  exec "${@}"
)
#!/usr/bin/env python
"""
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro gt.fierro@berkeley.edu github.com/gtfierro
"""
"""
Uses the extended ContentHandler from xml_driver to extract the needed fields
from patent grant documents
"""
from cStringIO import StringIO
from datetime import datetime
from unidecode import unidecode
from handler import Patobj, PatentHandler
import re
import uuid
import xml.sax
import xml_util
import xml_driver
# Matches a leading claim number such as "12. " so it can be stripped.
claim_num_regex = re.compile(r'^\d+\. *') # removes claim number from claim text
class Patent(PatentHandler):
    def __init__(self, xml_string, is_string=False):
        """Parse a patent-application XML document.

        Args:
          xml_string: path to an XML file, or the XML text itself when
              is_string is True.
          is_string: treat xml_string as literal XML rather than a path.
        """
        # Wire a SAX parser to the project's tree-building handler.
        xh = xml_driver.XMLHandler()
        parser = xml_driver.make_parser()
        parser.setContentHandler(xh)
        # Disable resolution of external general entities while parsing.
        parser.setFeature(xml_driver.handler.feature_external_ges, False)
        l = xml.sax.xmlreader.Locator()
        xh.setDocumentLocator(l)
        if is_string:
            parser.parse(StringIO(xml_string))
        else:
            parser.parse(xml_string)
        # Names of the attributes/properties consumers are expected to read.
        self.attributes = ['app','application','assignee_list','inventor_list',
                     'us_classifications',
                     'claims']
        self.xml = xh.root.us_patent_application
        self.country = self.xml.publication_reference.contents_of('country', upper=False)[0]
        # Publication document number, normalized for cross-file consistency.
        self.application = xml_util.normalize_document_identifier(self.xml.publication_reference.contents_of('doc_number')[0])
        self.kind = self.xml.publication_reference.contents_of('kind')[0]
        if self.xml.application_reference:
            self.pat_type = self.xml.application_reference[0].get_attribute('appl-type', upper=False)
        else:
            self.pat_type = None
        self.date_app = self.xml.publication_reference.contents_of('date')[0]
        self.clm_num = len(self.xml.claims.claim)
        self.abstract = self.xml.abstract.contents_of('p', '', as_string=True, upper=False)
        self.invention_title = self._invention_title()
        # Flat record describing the application; "id" is rewritten just
        # below into the "YYYY/number" form.
        self.app = {
            "id": self.application,
            "type": self.pat_type,
            "number": self.application,
            "country": self.country,
            "date": self._fix_date(self.date_app),
            "abstract": self.abstract,
            "title": self.invention_title,
            "kind": self.kind,
            "num_claims": self.clm_num
        }
        self.app["id"] = str(self.app["date"])[:4] + "/" + self.app["number"]
def _invention_title(self):
original = self.xml.contents_of('invention_title', upper=False)[0]
if isinstance(original, list):
original = ''.join(original)
return original
def _name_helper(self, tag_root):
"""
Returns dictionary of firstname, lastname with prefix associated
with lastname
"""
firstname = tag_root.contents_of('first_name', as_string=True, upper=False)
lastname = tag_root.contents_of('last_name', as_string=True, upper=False)
return xml_util.associate_prefix(firstname, lastname)
def _name_helper_dict(self, tag_root):
"""
Returns dictionary of firstname, lastname with prefix associated
with lastname
"""
firstname = tag_root.contents_of('first_name', as_string=True, upper=False)
lastname = tag_root.contents_of('last_name', as_string=True, upper=False)
return {'name_first': firstname, 'name_last': lastname}
    def _fix_date(self, datestring):
        """Parse a YYYYMMDD string into a datetime, or return None.

        Returns None for empty input, years before 1900, or unparseable
        strings. A zero day or month is coerced to the first of the
        month/January before parsing.
        """
        if not datestring:
            return None
        # Lexicographic comparison is safe here: both are 4-digit strings.
        elif datestring[:4] < "1900":
            return None
        # default to first of month in absence of day
        if datestring[-4:-2] == '00':
            datestring = datestring[:-4] + '01' + datestring[-2:]
        if datestring[-2:] == '00':
            datestring = datestring[:6] + '01'
        try:
            datestring = datetime.strptime(datestring, '%Y%m%d')
            return datestring
        except Exception as inst:
            # Python 2 print statement: report the bad value and fall through.
            print inst, datestring
            return None
    @property
    def assignee_list(self):
        """
        Returns list of [assignee, location] dictionary pairs:
        assignee:
          name_last
          name_first
          residence
          nationality
          organization
          role
          sequence
          uuid
        location:
          id
          city
          state
          country
        """
        assignees = self.xml.assignees.assignee
        if not assignees:
            return []
        res = []
        for i, assignee in enumerate(assignees):
            # add assignee data
            asg = {}
            asg.update(self._name_helper_dict(assignee)) # add firstname, lastname
            asg['organization'] = assignee.contents_of('orgname', as_string=True, upper=False)
            asg['role'] = assignee.contents_of('role', as_string=True)
            # NOTE(review): nationality and residence both read the same
            # <country> element under <nationality> — confirm intentional.
            asg['nationality'] = assignee.nationality.contents_of('country')[0]
            asg['residence'] = assignee.nationality.contents_of('country')[0]
            # add location data for assignee
            loc = {}
            for tag in ['city', 'state', 'country']:
                loc[tag] = assignee.contents_of(tag, as_string=True, upper=False)
            #this is created because of MySQL foreign key case sensitivities
            loc['id'] = unidecode(u"|".join([loc['city'], loc['state'], loc['country']]).lower())
            # Entirely-empty records are dropped; kept ones get a sequence
            # number and a fresh uuid.
            if any(asg.values()) or any(loc.values()):
                asg['sequence'] = i
                asg['uuid'] = str(uuid.uuid1())
                res.append([asg, loc])
        return res
    @property
    def inventor_list(self):
        """
        Returns list of lists of applicant dictionary and location dictionary
        applicant:
          name_last
          name_first
          sequence
          uuid
        location:
          id
          city
          state
          country
        """
        applicants = self.xml.applicants.applicant
        if not applicants:
            return []
        res = []
        for i, applicant in enumerate(applicants):
            # add applicant data
            app = {}
            app.update(self._name_helper_dict(applicant.addressbook))
            app['nationality'] = applicant.nationality.contents_of('country', as_string=True)
            # add location data for applicant
            loc = {}
            for tag in ['city', 'state', 'country']:
                loc[tag] = applicant.addressbook.contents_of(tag, as_string=True, upper=False)
            #this is created because of MySQL foreign key case sensitivities
            loc['id'] = unidecode("|".join([loc['city'], loc['state'], loc['country']]).lower())
            # NOTE(review): nationality is fetched above and deleted here before
            # the any() check, so it never reaches the output -- confirm intent.
            del app['nationality']
            if any(app.values()) or any(loc.values()):
                app['sequence'] = i
                app['uuid'] = str(uuid.uuid1())
                res.append([app, loc])
        return res
def _get_doc_info(self, root):
"""
Accepts an XMLElement root as an argument. Returns list of
[country, doc-number, kind, date] for the given root
"""
res = {}
for tag in ['country', 'kind', 'date']:
data = root.contents_of(tag)
res[tag] = data[0] if data else ''
res['number'] = xml_util.normalize_document_identifier(
root.contents_of('doc_number')[0])
return res
@property
def us_classifications(self):
"""
Returns list of dictionaries representing us classification
main:
class
subclass
"""
classes = []
i = 0
main = self.xml.classification_national.contents_of('main_classification')
data = {'class': main[0][:3].replace(' ', ''),
'subclass': main[0][3:].replace(' ', '')}
if any(data.values()):
classes.append([
{'uuid': str(uuid.uuid1()), 'sequence': i},
{'id': data['class'].upper()},
{'id': "{class}/{subclass}".format(**data).upper()}])
i = i + 1
if self.xml.classification_national.further_classification:
further = self.xml.classification_national.contents_of('further_classification')
for classification in further:
data = {'class': classification[:3].replace(' ', ''),
'subclass': classification[3:].replace(' ', '')}
if any(data.values()):
classes.append([
{'uuid': str(uuid.uuid1()), 'sequence': i},
{'id': data['class'].upper()},
{'id': "{class}/{subclass}".format(**data).upper()}])
i = i + 1
return classes
    @property
    def claims(self):
        """
        Returns list of dictionaries representing claims
        claim:
          text -- claim text with the leading claim number stripped
          dependent -- number of the claim this one depends on; the key is
              only present for dependent claims (those with a claim_ref)
          sequence -- 1-indexed position of the claim
          uuid
        """
        claims = self.xml.claim
        res = []
        for i, claim in enumerate(claims):
            data = {}
            data['text'] = claim.contents_of('claim_text', as_string=True, upper=False)
            # remove leading claim num from text
            data['text'] = claim_num_regex.sub('', data['text'])
            data['sequence'] = i+1 # claims are 1-indexed
            if claim.claim_ref:
                # claim_refs are 'claim N', so we extract the N
                claim_str = claim.contents_of('claim_ref',\
                            as_string=True).split(' ')[-1]
                data['dependent'] = int(''.join(c for c in claim_str if c.isdigit()))
            data['uuid'] = str(uuid.uuid1())
            res.append(data)
        return res
"""
Filename globbing like the python glob module with minor differences:
* glob relative to an arbitrary directory
* include . and ..
* check that link targets exist, not just links
"""
import os, re, fnmatch
import util
# Matches any character that starts a glob construct: '[', '*' or '?'.
_globcheck = re.compile('[[*?]')

def hasglob(p):
    """Return True if `p` contains any glob metacharacter."""
    return bool(_globcheck.search(p))
def glob(fsdir, path):
    """
    Return paths matching the path glob, relative to `fsdir`.
    Matches within each directory come back sorted; '.' and '..' are never
    produced by patterns.
    """
    head, leaf = os.path.split(path)
    if head == '':
        return globpattern(fsdir, leaf)
    # Expand the directory part first, recursing when it is itself a glob.
    candidates = glob(fsdir, head) if hasglob(head) else [head]
    matches = []
    for subdir in candidates:
        full = util.normaljoin(fsdir, subdir)
        if os.path.isdir(full):
            for found in globpattern(full, leaf):
                matches.append(util.normaljoin(subdir, found))
    return matches
def globpattern(dir, pattern):
    """
    Return the sorted leaf names in `dir` which match `pattern`.

    Non-glob patterns are treated as literal names ('' matches `dir` itself
    when it is a directory). '.' and '..' are candidate matches, but leaves
    starting with '.' only match patterns that themselves start with '.'.
    Matches whose target does not exist (e.g. dangling symlinks) are dropped.
    """
    if not hasglob(pattern):
        if pattern == '':
            if os.path.isdir(dir):
                return ['']
            return []
        if os.path.exists(util.normaljoin(dir, pattern)):
            return [pattern]
        return []
    leaves = os.listdir(dir) + ['.', '..']
    # "hidden" filenames are a bit special
    if not pattern.startswith('.'):
        leaves = [leaf for leaf in leaves
                  if not leaf.startswith('.')]
    leaves = fnmatch.filter(leaves, pattern)
    # Use a list comprehension rather than filter(): on Python 3 filter()
    # returns an iterator, which has no .sort() method (same result on py2).
    leaves = [leaf for leaf in leaves
              if os.path.exists(util.normaljoin(dir, leaf))]
    leaves.sort()
    return leaves
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/testutil"
ndf "k8s.io/component-helpers/nodedeclaredfeatures"
ndftesting "k8s.io/component-helpers/nodedeclaredfeatures/testing"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/test/utils/ktesting"
)
// fakeContainerCommandRunner records the last exec request it receives and
// returns the configured Msg/Err, standing in for a real command runner.
type fakeContainerCommandRunner struct {
	Cmd []string                  // last command passed to RunInContainer
	ID kubecontainer.ContainerID // last container ID passed to RunInContainer
	Err error                     // error to return from RunInContainer
	Msg string                    // payload to return from RunInContainer
}

// RunInContainer captures the container ID and command for later inspection
// and returns the canned Msg/Err; the timeout is ignored.
func (f *fakeContainerCommandRunner) RunInContainer(_ context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
	f.Cmd = cmd
	f.ID = id
	return []byte(f.Msg), f.Err
}
// stubPodStatusProvider returns a podStatusProvider whose GetPodStatus echoes
// the requested uid/name/namespace and reports the single given pod IP.
func stubPodStatusProvider(podIP string) podStatusProvider {
	return podStatusProviderFunc(func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
		return &kubecontainer.PodStatus{
			ID: uid,
			Name: name,
			Namespace: namespace,
			IPs: []string{podIP},
		}, nil
	})
}

// podStatusProviderFunc adapts a plain function to the podStatusProvider interface.
type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)

// GetPodStatus implements podStatusProvider by delegating to the wrapped function.
func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
	return f(uid, name, namespace)
}
// TestRunHandlerExec verifies that an Exec PostStart hook is dispatched to the
// command runner with the hook's command and the target container ID.
func TestRunHandlerExec(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	fakeCommandRunner := fakeContainerCommandRunner{}
	handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				Exec: &v1.ExecAction{
					Command: []string{"ls", "-a"},
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// The fake runner must have been invoked with exactly the hook's command
	// against the expected container.
	if fakeCommandRunner.ID != containerID ||
		!reflect.DeepEqual(container.Lifecycle.PostStart.Exec.Command, fakeCommandRunner.Cmd) {
		t.Errorf("unexpected commands: %v", fakeCommandRunner)
	}
}
// fakeHTTP is the HTTP doer used by the handler runner in tests; it records
// the request URL and headers and replies with the canned resp/err.
type fakeHTTP struct {
	url string          // URL of the last request seen by Do
	headers http.Header // clone of the last request's headers
	err error           // error returned by Do
	resp *http.Response // response returned by Do
}

// Do captures the request's URL and a copy of its headers, then returns the
// preconfigured response and error.
func (f *fakeHTTP) Do(req *http.Request) (*http.Response, error) {
	f.url = req.URL.String()
	f.headers = req.Header.Clone()
	return f.resp, f.err
}
// TestRunHandlerHttp verifies that an HTTPGet PostStart hook builds the
// expected URL from the action's host, port and path.
func TestRunHandlerHttp(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	fakeHTTPGetter := fakeHTTP{}
	fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
	handlerRunner := NewHandlerRunner(&fakeHTTPGetter, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				HTTPGet: &v1.HTTPGetAction{
					Host: "foo",
					Port: intstr.FromInt32(8080),
					Path: "bar",
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.ObjectMeta.UID = "foo-bar-quux"
	pod.Spec.Containers = []v1.Container{container}
	_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Explicit Host takes precedence over the pod IP.
	if fakeHTTPGetter.url != "http://foo:8080/bar" {
		t.Errorf("unexpected url: %s", fakeHTTPGetter.url)
	}
}
// TestRunHandlerHttpWithHeaders verifies that custom HTTPHeaders declared on
// the HTTPGet action are forwarded on the outgoing request.
func TestRunHandlerHttpWithHeaders(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	fakeHTTPDoer := fakeHTTP{}
	fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
	handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				HTTPGet: &v1.HTTPGetAction{
					Host: "foo",
					Port: intstr.FromInt32(8080),
					Path: "/bar",
					HTTPHeaders: []v1.HTTPHeader{
						{Name: "Foo", Value: "bar"},
					},
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if fakeHTTPDoer.url != "http://foo:8080/bar" {
		t.Errorf("unexpected url: %s", fakeHTTPDoer.url)
	}
	// The declared header must appear verbatim on the request.
	if fakeHTTPDoer.headers["Foo"][0] != "bar" {
		t.Errorf("missing http header: %s", fakeHTTPDoer.headers)
	}
}
// TestRunHandlerHttps verifies that an HTTPGet action with the HTTPS scheme
// produces an https:// URL with the configured host, port and path.
func TestRunHandlerHttps(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	fakeHTTPDoer := fakeHTTP{}
	fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
	handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				HTTPGet: &v1.HTTPGetAction{
					Scheme: v1.URISchemeHTTPS,
					Host: "foo",
					Path: "bar",
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	t.Run("consistent", func(t *testing.T) {
		// Port is supplied as a numeric string and must survive into the URL.
		container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70")
		pod.Spec.Containers = []v1.Container{container}
		_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if fakeHTTPDoer.url != "https://foo:70/bar" {
			t.Errorf("unexpected url: %s", fakeHTTPDoer.url)
		}
	})
}
// TestRunHandlerHTTPPort covers port handling in HTTPGet actions: a numeric
// string port succeeds, while an empty port string is an error.
func TestRunHandlerHTTPPort(t *testing.T) {
	tests := []struct {
		Name string
		Port intstr.IntOrString
		ExpectError bool
		Expected string // URL the fake doer should have seen ("" when erroring)
	}{
		{
			Name: "consistent/with port",
			Port: intstr.FromString("70"),
			Expected: "https://foo:70/bar",
		}, {
			Name: "consistent/without port",
			Port: intstr.FromString(""),
			ExpectError: true,
		},
	}
	fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				HTTPGet: &v1.HTTPGetAction{
					Scheme: v1.URISchemeHTTPS,
					Host: "foo",
					// Placeholder; each case overwrites Port below.
					Port: intstr.FromString("unexpected"),
					Path: "bar",
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	for _, tt := range tests {
		t.Run(tt.Name, func(t *testing.T) {
			_, tCtx := ktesting.NewTestContext(t)
			fakeHTTPDoer := fakeHTTP{}
			handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
			container.Lifecycle.PostStart.HTTPGet.Port = tt.Port
			pod.Spec.Containers = []v1.Container{container}
			_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
			if hasError := (err != nil); hasError != tt.ExpectError {
				t.Errorf("unexpected error: %v", err)
			}
			if fakeHTTPDoer.url != tt.Expected {
				t.Errorf("unexpected url: %s", fakeHTTPDoer.url)
			}
		})
	}
}
// TestRunHTTPHandler is a table test over HTTPGet URL construction and header
// handling: host/pod-IP fallback, leading-slash normalization, named-port
// resolution against the container's ports, scheme handling, query/fragment
// passthrough, custom headers and Host-header override. Each case records
// both the legacy ("Old*") and current ("New*") expectations; only the
// current behavior is asserted below.
func TestRunHTTPHandler(t *testing.T) {
	type expected struct {
		OldURL string
		OldHeader http.Header
		NewURL string
		NewHeader http.Header
	}
	tests := []struct {
		Name string
		PodIP string
		HTTPGet *v1.HTTPGetAction
		Expected expected
	}{
		{
			Name: "missing pod IP",
			PodIP: "",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo",
				Port: intstr.FromString("42"),
				Host: "example.test",
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://example.test:42/foo",
				OldHeader: http.Header{},
				NewURL: "http://example.test:42/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "missing host",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo",
				Port: intstr.FromString("42"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:42/foo",
				OldHeader: http.Header{},
				NewURL: "http://233.252.0.1:42/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "path with leading slash",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "/foo",
				Port: intstr.FromString("42"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:42//foo",
				OldHeader: http.Header{},
				NewURL: "http://233.252.0.1:42/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "path without leading slash",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo",
				Port: intstr.FromString("42"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:42/foo",
				OldHeader: http.Header{},
				NewURL: "http://233.252.0.1:42/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "port resolution",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo",
				// "quux" is a named port; it resolves to container port 8080 below.
				Port: intstr.FromString("quux"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:8080/foo",
				OldHeader: http.Header{},
				NewURL: "http://233.252.0.1:8080/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "https",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo",
				Port: intstr.FromString("4430"),
				Scheme: "https",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:4430/foo",
				OldHeader: http.Header{},
				NewURL: "https://233.252.0.1:4430/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "unknown scheme",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo",
				Port: intstr.FromString("80"),
				Scheme: "baz",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:80/foo",
				OldHeader: http.Header{},
				NewURL: "baz://233.252.0.1:80/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "query param",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo?k=v",
				Port: intstr.FromString("80"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:80/foo?k=v",
				OldHeader: http.Header{},
				NewURL: "http://233.252.0.1:80/foo?k=v",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "fragment",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo#frag",
				Port: intstr.FromString("80"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:80/foo#frag",
				OldHeader: http.Header{},
				NewURL: "http://233.252.0.1:80/foo#frag",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "headers",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Path: "foo",
				Port: intstr.FromString("80"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{
					{
						Name: "Foo",
						Value: "bar",
					},
				},
			},
			Expected: expected{
				OldURL: "http://233.252.0.1:80/foo",
				OldHeader: http.Header{},
				NewURL: "http://233.252.0.1:80/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"Foo": {"bar"},
					"User-Agent": {"kube-lifecycle/."},
				},
			},
		}, {
			Name: "host header",
			PodIP: "233.252.0.1",
			HTTPGet: &v1.HTTPGetAction{
				Host: "example.test",
				Path: "foo",
				Port: intstr.FromString("80"),
				Scheme: "http",
				HTTPHeaders: []v1.HTTPHeader{
					{
						Name: "Host",
						Value: "from.header",
					},
				},
			},
			Expected: expected{
				OldURL: "http://example.test:80/foo",
				OldHeader: http.Header{},
				NewURL: "http://example.test:80/foo",
				NewHeader: http.Header{
					"Accept": {"*/*"},
					"User-Agent": {"kube-lifecycle/."},
					"Host": {"from.header"},
				},
			},
		},
	}
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{},
		},
		Ports: []v1.ContainerPort{
			{
				Name: "quux",
				ContainerPort: 8080,
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	for _, tt := range tests {
		t.Run(tt.Name, func(t *testing.T) {
			_, tCtx := ktesting.NewTestContext(t)
			fakePodStatusProvider := stubPodStatusProvider(tt.PodIP)
			container.Lifecycle.PostStart.HTTPGet = tt.HTTPGet
			pod.Spec.Containers = []v1.Container{container}
			verify := func(t *testing.T, expectedHeader http.Header, expectedURL string) {
				fakeHTTPDoer := fakeHTTP{}
				handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
				_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
				if err != nil {
					t.Fatal(err)
				}
				if diff := cmp.Diff(expectedHeader, fakeHTTPDoer.headers); diff != "" {
					t.Errorf("unexpected header (-want, +got)\n:%s", diff)
				}
				if fakeHTTPDoer.url != expectedURL {
					t.Errorf("url = %v; want %v", fakeHTTPDoer.url, tt.Expected.NewURL)
				}
			}
			t.Run("consistent", func(t *testing.T) {
				verify(t, tt.Expected.NewHeader, tt.Expected.NewURL)
			})
		})
	}
}
// TestRunHandlerNil verifies that running a LifecycleHandler with no action
// set (no Exec, HTTPGet or Sleep) returns an error.
func TestRunHandlerNil(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil)
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	podName := "podFoo"
	podNamespace := "nsFoo"
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = podName
	pod.ObjectMeta.Namespace = podNamespace
	pod.Spec.Containers = []v1.Container{container}
	_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
	if err == nil {
		t.Errorf("expect error, but got nil")
	}
}
// TestRunHandlerExecFailure verifies that a failing Exec hook surfaces both an
// error and the formatted failure message containing the command output.
func TestRunHandlerExecFailure(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	expectedErr := fmt.Errorf("invalid command")
	fakeCommandRunner := fakeContainerCommandRunner{Err: expectedErr, Msg: expectedErr.Error()}
	handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	command := []string{"ls", "--a"}
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				Exec: &v1.ExecAction{
					Command: command,
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	expectedErrMsg := fmt.Sprintf("Exec lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", command, containerName, format.Pod(&pod), expectedErr, expectedErr.Error())
	msg, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
	if err == nil {
		t.Errorf("expected error: %v", expectedErr)
	}
	if msg != expectedErrMsg {
		t.Errorf("unexpected error message: %q; expected %q", msg, expectedErrMsg)
	}
}
// TestRunHandlerHttpFailure verifies that a failing HTTPGet hook surfaces both
// an error and the formatted failure message, and that the request was still
// issued against the expected URL.
func TestRunHandlerHttpFailure(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	expectedErr := fmt.Errorf("fake http error")
	expectedResp := http.Response{
		Body: io.NopCloser(strings.NewReader(expectedErr.Error())),
	}
	fakeHTTPGetter := fakeHTTP{err: expectedErr, resp: &expectedResp}
	fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
	handlerRunner := NewHandlerRunner(&fakeHTTPGetter, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
	containerName := "containerFoo"
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				HTTPGet: &v1.HTTPGetAction{
					Host: "foo",
					Port: intstr.FromInt32(8080),
					Path: "bar",
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	expectedErrMsg := fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", "bar", containerName, format.Pod(&pod), expectedErr)
	msg, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
	if err == nil {
		t.Errorf("expected error: %v", expectedErr)
	}
	if msg != expectedErrMsg {
		t.Errorf("unexpected error message: %q; expected %q", msg, expectedErrMsg)
	}
	if fakeHTTPGetter.url != "http://foo:8080/bar" {
		t.Errorf("unexpected url: %s", fakeHTTPGetter.url)
	}
}
// TestRunHandlerHttpsFailureFallback verifies that when an https lifecycle
// hook fails against a plain-HTTP server, the runner falls back to http,
// strips the Authorization header on the fallback request, increments the
// fallback counter metric, and records a LifecycleHTTPFallback event.
func TestRunHandlerHttpsFailureFallback(t *testing.T) {
	_, tCtx := ktesting.NewTestContext(t)
	// Since prometheus' gatherer is global, other tests may have updated metrics already, so
	// we need to reset them prior running this test.
	// This also implies that we can't run this test in parallel with other tests.
	metrics.Register()
	legacyregistry.Reset()
	var actualHeaders http.Header
	srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
		actualHeaders = r.Header.Clone()
	}))
	defer srv.Close()
	_, port, err := net.SplitHostPort(srv.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	recorder := &record.FakeRecorder{Events: make(chan string, 10)}
	fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
	handlerRunner := NewHandlerRunner(srv.Client(), &fakeContainerCommandRunner{}, fakePodStatusProvider, recorder).(*handlerRunner)
	containerName := "containerFoo"
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PostStart: &v1.LifecycleHandler{
				HTTPGet: &v1.HTTPGetAction{
					// set the scheme to https to ensure it falls back to HTTP.
					Scheme: "https",
					Host: "127.0.0.1",
					Port: intstr.FromString(port),
					Path: "bar",
					HTTPHeaders: []v1.HTTPHeader{
						{
							Name: "Authorization",
							Value: "secret",
						},
					},
				},
			},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	msg, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if msg != "" {
		t.Errorf("unexpected error message: %q", msg)
	}
	// Credentials must not be leaked over the insecure fallback connection.
	if actualHeaders.Get("Authorization") != "" {
		t.Error("unexpected Authorization header")
	}
	expectedMetrics := `
# HELP kubelet_lifecycle_handler_http_fallbacks_total [ALPHA] The number of times lifecycle handlers successfully fell back to http from https.
# TYPE kubelet_lifecycle_handler_http_fallbacks_total counter
kubelet_lifecycle_handler_http_fallbacks_total 1
`
	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedMetrics), "kubelet_lifecycle_handler_http_fallbacks_total"); err != nil {
		t.Fatal(err)
	}
	select {
	case event := <-recorder.Events:
		if !strings.Contains(event, "LifecycleHTTPFallback") {
			t.Fatalf("expected LifecycleHTTPFallback event, got %q", event)
		}
	default:
		t.Fatal("no event recorded")
	}
}
// TestIsHTTPResponseError verifies that the error produced by sending an
// https request to a plain-HTTP server is classified as an HTTP response
// error (the condition used to trigger the https->http fallback).
func TestIsHTTPResponseError(t *testing.T) {
	s := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
	defer s.Close()
	req, err := http.NewRequest("GET", s.URL, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Force a TLS handshake against a non-TLS listener.
	req.URL.Scheme = "https"
	_, err = http.DefaultClient.Do(req)
	if !isHTTPResponseError(err) {
		t.Errorf("unexpected http response error: %v", err)
	}
}
// TestRunSleepHandler verifies the Sleep PreStop action: it completes when the
// sleep fits inside the grace period, and fails with a termination error when
// the context expires first.
func TestRunSleepHandler(t *testing.T) {
	handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil)
	containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
	containerName := "containerFoo"
	container := v1.Container{
		Name: containerName,
		Lifecycle: &v1.Lifecycle{
			PreStop: &v1.LifecycleHandler{},
		},
	}
	pod := v1.Pod{}
	pod.ObjectMeta.Name = "podFoo"
	pod.ObjectMeta.Namespace = "nsFoo"
	pod.Spec.Containers = []v1.Container{container}
	tests := []struct {
		name string
		sleepSeconds int64
		terminationGracePeriodSeconds int64 // modeled as a context timeout below
		expectErr bool
		expectedErr string
	}{
		{
			name: "valid seconds",
			sleepSeconds: 5,
			terminationGracePeriodSeconds: 30,
		},
		{
			name: "longer than TerminationGracePeriodSeconds",
			sleepSeconds: 3,
			terminationGracePeriodSeconds: 2,
			expectErr: true,
			expectedErr: "container terminated before sleep hook finished",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, tCtx := ktesting.NewTestContext(t)
			pod.Spec.Containers[0].Lifecycle.PreStop.Sleep = &v1.SleepAction{Seconds: tt.sleepSeconds}
			// The grace period is enforced through context cancellation.
			ctx, cancel := context.WithTimeout(tCtx, time.Duration(tt.terminationGracePeriodSeconds)*time.Second)
			defer cancel()
			_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PreStop)
			if !tt.expectErr && err != nil {
				t.Errorf("unexpected success")
			}
			if tt.expectErr && err.Error() != tt.expectedErr {
				t.Errorf("%s: expected error want %s, got %s", tt.name, tt.expectedErr, err.Error())
			}
		})
	}
}
// TestDeclaredFeaturesAdmitHandler exercises the node-declared-features admit
// handler: pods are admitted when the node declares the features the pod
// requires (or when the feature is past its MaxVersion and thus generally
// available), and rejected with PodFeatureUnsupported otherwise.
func TestDeclaredFeaturesAdmitHandler(t *testing.T) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-pod",
		},
	}
	// createMockFeature builds a mock ndf.Feature with the given name,
	// whether it is inferred as required for scheduling, and an optional
	// MaxVersion (empty string means no max version).
	createMockFeature := func(t *testing.T, name string, inferForSched bool, maxVersionStr string) *ndftesting.MockFeature {
		m := ndftesting.NewMockFeature(t)
		m.EXPECT().Name().Return(name).Maybe()
		m.EXPECT().InferForScheduling(mock.Anything).Return(inferForSched).Maybe()
		if maxVersionStr != "" {
			maxVersionStr := version.MustParseSemantic(maxVersionStr)
			m.EXPECT().MaxVersion().Return(maxVersionStr).Maybe()
		} else {
			m.EXPECT().MaxVersion().Return(nil).Maybe()
		}
		return m
	}
	testCases := []struct {
		name string
		nodeDeclaredFeatures []string
		version *version.Version
		registeredFeatures []ndf.Feature
		expectedAdmit bool
		expectedReason string
		expectedMessage string
	}{
		{
			name: "Admit: no requirements",
			nodeDeclaredFeatures: []string{"FeatureA"},
			version: version.MustParseSemantic("1.30.0"),
			registeredFeatures: []ndf.Feature{
				createMockFeature(t, "FeatureA", true, ""),
			},
			expectedAdmit: true,
		},
		{
			name: "Admit: requirements met",
			nodeDeclaredFeatures: []string{"FeatureA"},
			version: version.MustParseSemantic("1.30.0"),
			registeredFeatures: []ndf.Feature{
				createMockFeature(t, "FeatureA", true, ""),
			},
			expectedAdmit: true,
		},
		{
			name: "Reject: requirements not met",
			nodeDeclaredFeatures: []string{},
			version: version.MustParseSemantic("1.30.0"),
			registeredFeatures: []ndf.Feature{
				createMockFeature(t, "FeatureA", true, ""),
			},
			expectedAdmit: false,
			expectedReason: PodFeatureUnsupported,
			expectedMessage: "Pod requires node features that are not available: FeatureA",
		},
		{
			name: "Admit without feature declared - feature generally available",
			nodeDeclaredFeatures: []string{},
			version: version.MustParseSemantic("1.35.0"),
			registeredFeatures: []ndf.Feature{
				createMockFeature(t, "FeatureA", true, "1.34.0"),
			},
			expectedAdmit: true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			framework, err := ndf.New(tc.registeredFeatures)
			require.NoError(t, err)
			handler := NewDeclaredFeaturesAdmitHandler(framework, ndf.NewFeatureSet(tc.nodeDeclaredFeatures...), tc.version)
			attrs := &PodAdmitAttributes{Pod: pod}
			result := handler.Admit(attrs)
			require.Equal(t, tc.expectedAdmit, result.Admit)
			if !result.Admit {
				require.Equal(t, tc.expectedReason, result.Reason)
				require.Contains(t, result.Message, tc.expectedMessage, "Expected message '%s' to contain '%s'", result.Message, tc.expectedMessage)
			}
		})
	}
}
import _plotly_utils.basevalidators
# NOTE: this validator appears to be auto-generated (plotly codegen style);
# prefer regenerating over hand-editing.
class RangebreaksValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the `rangebreaks` compound-array property of layout.yaxis."""

    def __init__(self, plotly_name="rangebreaks", parent_name="layout.yaxis", **kwargs):
        # data_class_str names the compound element class ("Rangebreak") and
        # data_docs is the user-facing property documentation; both may be
        # overridden by callers via kwargs.
        super(RangebreaksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Rangebreak"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            bounds
                Sets the lower and upper bounds of this axis
                rangebreak. Can be used with `pattern`.
            dvalue
                Sets the size of each `values` item. The
                default is one day in milliseconds.
            enabled
                Determines whether this axis rangebreak is
                enabled or disabled. Please note that
                `rangebreaks` only work for "date" axis type.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            pattern
                Determines a pattern on the time line that
                generates breaks. If *day of week* - days of
                the week in English e.g. 'Sunday' or `sun`
                (matching is case-insensitive and considers
                only the first three characters), as well as
                Sunday-based integers between 0 and 6. If
                "hour" - hour (24-hour clock) as decimal
                numbers between 0 and 24. for more info.
                Examples: - { pattern: 'day of week', bounds:
                [6, 1] } or simply { bounds: ['sat', 'mon'] }
                breaks from Saturday to Monday (i.e. skips the
                weekends). - { pattern: 'hour', bounds: [17, 8]
                } breaks from 5pm to 8am (i.e. skips non-work
                hours).
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            values
                Sets the coordinate values corresponding to the
                rangebreaks. An alternative to `bounds`. Use
                `dvalue` to set the size of the values along
                the axis.
""",
            ),
            **kwargs
        )
use rustc_abi::Endian;
use crate::spec::{
Abi, Arch, Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetMetadata, TargetOptions, base,
};
/// Target spec for `powerpc64-unknown-freebsd`: big-endian 64-bit PowerPC on
/// FreeBSD, built on the shared FreeBSD base options and using the ELFv2 ABI.
pub(crate) fn target() -> Target {
    let mut base = base::freebsd::opts();
    base.cpu = "ppc64".into();
    // Force 64-bit code generation when linking through the C compiler.
    base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
    base.max_atomic_width = Some(64);
    base.stack_probes = StackProbeType::Inline;
    // Select the ELFv2 ABI, both for rustc and for LLVM's ABI name.
    base.abi = Abi::ElfV2;
    base.llvm_abiname = "elfv2".into();
    Target {
        llvm_target: "powerpc64-unknown-freebsd".into(),
        metadata: TargetMetadata {
            description: Some("PPC64 FreeBSD (ELFv2)".into()),
            tier: Some(3),
            host_tools: Some(true),
            std: Some(true),
        },
        pointer_width: 64,
        data_layout: "E-m:e-Fn32-i64:64-i128:128-n32:64".into(),
        arch: Arch::PowerPC64,
        options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
    }
}
#
#Kylciane Cristiny Lopes Freitas - 1615310052
#Thiago Santos Borges - 1615310023
#
import random
cond = True

# Codigos das cinco faces do jogo.
pedra = 1
papel = 2
tesoura = 3
spock = 4
lagarto = 5

# Tabela de regras: (vencedor, perdedor) -> frase que explica a vitoria.
# Substitui a longa cadeia de if/elif original, preservando exatamente as
# mesmas mensagens.
vitorias = {
    (pedra, tesoura): "Pedra quebra tesoura",
    (pedra, lagarto): "Pedra esmaga lagarto",
    (papel, pedra): "Papel cobre pedra",
    (papel, spock): "Papel refuta spock",
    (tesoura, papel): "Tesoura corta papel",
    (tesoura, lagarto): "Tesoura decapita lagarto",
    (spock, tesoura): "Spock quebra tesoura",
    (spock, pedra): "Spock vaporiza a pedra",
    (lagarto, papel): "Lagarto come papel",
    (lagarto, spock): "Lagarto envenena spock",
}

print("Vamos brincar de Jokenpo?")
print("Digite seu nome")
nome = input()
print("1 - Pedra")
print("2 - Papel")
print("3 - Tesoura")
print("4 - Spock")
print("5 - Lagarto")
tentativa = int(input("Digite uma face\n"))
while cond:
    # BUG corrigido: o codigo original usava randrange(1, 4), que sorteia
    # apenas 1..3 -- o robo nunca jogava Spock (4) nem Lagarto (5).
    # randrange exclui o limite superior, logo o correto e 6.
    bot = random.randrange(1, 6)
    print(nome+": ", tentativa)
    print("Robo : ", bot)
    if tentativa == bot:
        print("Empate!!!")
    elif (tentativa, bot) in vitorias:
        print("Vitoria!!!\n" + vitorias[(tentativa, bot)])
    elif (bot, tentativa) in vitorias:
        print("Derrota!!!\n" + vitorias[(bot, tentativa)])
    else:
        # Entrada fora de 1..5: o original silenciosamente nao imprimia nada.
        print("Jogada invalida! Escolha um numero de 1 a 5.")
    resp = str(input("Deseja jogar novamente??\n"))
    if resp == "n":
        cond = False
    else:
        cond = True
        tentativa = int(input("Digite uma face\n"))
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-output
# Author : Stuart Paterson
########################################################################
"""
Retrieve output of a Grid pilot
Usage:
dirac-admin-get-pilot-output [options] ... PilotID ...
Arguments:
PilotID: Grid ID of the pilot
Example:
$ dirac-admin-get-pilot-output https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
$ ls -la
drwxr-xr-x 2 hamar marseill 2048 Feb 21 14:13 pilot_26KCLKBFtxXKHF4_ZrQjkw
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
    """Parse the command line and retrieve the output of each requested pilot.

    Exits with code 2 if any pilot's output could not be retrieved, 0 otherwise.
    """
    Script.parseCommandLine(ignoreErrors=True)
    pilotIDs = Script.getPositionalArgs()
    if len(pilotIDs) < 1:
        Script.showHelp()

    # Imported lazily so that argument parsing happens before DIRAC init.
    from DIRAC import exit as DIRACExit
    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin

    admin = DiracAdmin()
    failures = []
    for pilotID in pilotIDs:
        outcome = admin.getPilotOutput(pilotID)
        if not outcome['OK']:
            failures.append((pilotID, outcome['Message']))

    for failedID, message in failures:
        print("ERROR %s: %s" % (failedID, message))
    DIRACExit(2 if failures else 0)
if __name__ == "__main__":
    # Allow this module to be executed directly as a command-line script.
    main()
"""
contains a function for marshalling literal (and only literal) Python
data into Javascript. Supports Python None, strings, ints and floats,
dates, lists, tuples and dictionaries.
"""
import re
# Matches a syntactically valid Javascript identifier: ASCII letters, digits,
# "_" and "$", not starting with a digit.  re.match() anchors at the start,
# and the trailing "$" anchors at the end, so the whole string must match.
_jsidentifierRE=re.compile(r'[a-zA-Z_\$][a-zA-Z_\$0-9]*$')

def is_valid_js_identifier(s):
    """Return True if s is a valid Javascript identifier, else False.

    Non-string arguments (e.g. None) are reported as invalid rather than
    raising TypeError.
    """
    try:
        return bool(_jsidentifierRE.match(s))
    except TypeError:
        # s was not a string at all; previously returned 0 -- use False for
        # a consistent boolean return type (0 == False, so callers are safe).
        return False
class MarshalException(ValueError):
    """Raised when a Python value cannot be marshalled into Javascript."""
    pass
class InvalidIdentifierException(MarshalException):
    """Raised when a dict key is not a valid Javascript identifier."""
    pass
def get_identifier(s):
    """Return s unchanged if it is a valid Javascript identifier.

    Raises:
        InvalidIdentifierException: if s cannot be used as a JS identifier.
    """
    if is_valid_js_identifier(s):
        return s
    # Parenthesized raise works on both Python 2 and Python 3; the original
    # "raise Exc, arg" statement form is a syntax error on Python 3.
    raise InvalidIdentifierException(
        "not a valid Javascript identifier: %s" % s)
# Dispatch table mapping Python types to callables that render a value of
# that type as Javascript source.  NOTE(review): to_js() walks this mapping
# with isinstance(), so entry order can matter for subclasses (e.g. bool is
# a subclass of int and is rendered by repr) -- order preserved as-is.
_marshalRegistry={str: repr,
                  int: repr,
                  float: repr,
                  type(None): lambda x: 'null'}

def _seq_to_js(s):
    # Lists and tuples both become Javascript array literals, with each
    # element marshalled recursively.
    return "[%s]" % ', '.join([to_js(y) for y in s])

_marshalRegistry[list]=_seq_to_js
_marshalRegistry[tuple]=_seq_to_js
def _dict_to_js(d):
    """Render a Python dict as a Javascript object literal.

    Keys must be valid Javascript identifiers (checked by get_identifier);
    values are marshalled recursively via to_js().
    """
    pairs = ["%s: %s" % (get_identifier(key), to_js(value))
             for key, value in d.items()]
    return "{%s}" % ', '.join(pairs)

_marshalRegistry[dict]=_dict_to_js
try:
    import mx.DateTime as M
except ImportError:
    # mx.DateTime is an optional dependency; without it, date values simply
    # cannot be marshalled (no dict entry is registered).
    pass
else:
    def _date_to_js(dt):
        # NOTE(review): mx.DateTime's ticks() presumably yields seconds since
        # the epoch, while Javascript's Date(n) constructor expects
        # milliseconds -- possible 1000x unit mismatch; confirm intent.
        return "new Date(%s)" % int(dt.ticks())
    _marshalRegistry[type(M.now())]=_date_to_js
def to_js(obj):
    """Marshal a literal Python value into a Javascript source string.

    Supported types are those registered in _marshalRegistry, plus their
    subclasses (the lookup uses isinstance).

    Raises:
        MarshalException: if no registered type matches obj.
    """
    # the isinstance test permits type subclasses
    for k in _marshalRegistry:
        if isinstance(obj, k):
            return _marshalRegistry[k](obj)
    # Parenthesized raise works on both Python 2 and Python 3; the original
    # "raise Exc, arg" statement form is a syntax error on Python 3.
    raise MarshalException(obj)
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Script that will run a search on the database or Solr and return only the article_ids
Useful for performance reasons, when returning Article objects takes too much time
"""
from amcat.scripts import script, types
from amcat.scripts.tools import database
import amcat.scripts.forms
from django import forms
import logging
log = logging.getLogger(__name__)
class ArticleidsForm(amcat.scripts.forms.SelectionForm):
    """Selection form extended with pagination fields (start offset, length)."""
    start = forms.IntegerField(initial=0, min_value=0, widget=forms.HiddenInput, required=False)
    length = forms.IntegerField(initial=50, min_value=1, max_value=99999999, widget=forms.HiddenInput, required=False)

    def clean_start(self):
        """Default a missing start offset to 0."""
        data = self.cleaned_data['start']
        # The field is optional, so an omitted value arrives as None;
        # "is None" is the correct identity test (was "== None").
        if data is None:
            data = 0
        return data

    def clean_length(self):
        """Default a missing length to 50; -1 means 'unlimited'."""
        data = self.cleaned_data['length']
        if data is None:
            data = 50
        # NOTE(review): min_value=1 on the field likely rejects -1 before this
        # clean method runs -- confirm whether the branch is reachable.
        if data == -1:
            data = 99999999  # unlimited (well, sort of ;)
        return data
class ArticleidsScript(script.Script):
    """Run a selection search and return only the matching article ids."""
    input_type = None
    options_form = ArticleidsForm
    output_type = types.ArticleidList

    def run(self, input=None):
        """Return a flat list of article ids for the selected slice."""
        start = self.options['start']
        length = self.options['length']
        if self.bound_form.use_solr == False: # make database query
            return database.get_queryset(**self.options)[start:start+length].values_list('id', flat=True)
        else:
            # NOTE(review): `solrlib` is not imported anywhere in this module,
            # so this branch raises NameError as written -- confirm and restore
            # the intended import (also referenced by ArticleidsDictScript).
            return solrlib.article_ids(self.options)
class ArticleidsDictScript(script.Script):
    """Return article ids grouped per query; only supported for Solr searches."""
    input_type = None
    options_form = ArticleidsForm
    output_type = types.ArticleidDictPerQuery

    def run(self, input=None):
        """Return a dict of article ids keyed per query (Solr only)."""
        if self.bound_form.use_solr == False: # make database query
            raise Exception('This works only for Solr searches')
        else:
            # NOTE(review): `solrlib` is not imported in this module; this
            # would raise NameError as written -- confirm the intended import.
            return solrlib.article_idsDict(self.options)
if __name__ == '__main__':
    from amcat.scripts.tools import cli
    # Run this script from the command line via the generic CLI wrapper.
    cli.run_cli(ArticleidsScript)
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
static ngx_int_t ngx_select_init(ngx_cycle_t *cycle, ngx_msec_t timer);
static void ngx_select_done(ngx_cycle_t *cycle);
static ngx_int_t ngx_select_add_event(ngx_event_t *ev, ngx_int_t event,
ngx_uint_t flags);
static ngx_int_t ngx_select_del_event(ngx_event_t *ev, ngx_int_t event,
ngx_uint_t flags);
static ngx_int_t ngx_select_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
ngx_uint_t flags);
static void ngx_select_repair_fd_sets(ngx_cycle_t *cycle);
static char *ngx_select_init_conf(ngx_cycle_t *cycle, void *conf);
/*
 * The master sets hold all currently registered descriptors; the work sets
 * are the copies actually passed to (and destructively modified by) select()
 * on each event-loop iteration.
 */
static fd_set         master_read_fd_set;
static fd_set         master_write_fd_set;
static fd_set         work_read_fd_set;
static fd_set         work_write_fd_set;

/* Highest registered descriptor; -1 means "unknown, recompute lazily". */
static ngx_int_t      max_fd;
static ngx_uint_t     nevents;

/* Dense array of registered events, indexed by ev->index. */
static ngx_event_t  **event_index;


static ngx_str_t    select_name = ngx_string("select");

static ngx_event_module_t  ngx_select_module_ctx = {
    &select_name,
    NULL,                                  /* create configuration */
    ngx_select_init_conf,                  /* init configuration */

    {
        ngx_select_add_event,              /* add an event */
        ngx_select_del_event,              /* delete an event */
        ngx_select_add_event,              /* enable an event */
        ngx_select_del_event,              /* disable an event */
        NULL,                              /* add an connection */
        NULL,                              /* delete an connection */
        NULL,                              /* trigger a notify */
        ngx_select_process_events,         /* process the events */
        ngx_select_init,                   /* init the events */
        ngx_select_done                    /* done the events */
    }

};

ngx_module_t  ngx_select_module = {
    NGX_MODULE_V1,
    &ngx_select_module_ctx,                /* module context */
    NULL,                                  /* module directives */
    NGX_EVENT_MODULE,                      /* module type */
    NULL,                                  /* init master */
    NULL,                                  /* init module */
    NULL,                                  /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};
/*
 * Initialize (or re-initialize after a reconfiguration) the select event
 * mechanism: clear the fd sets on first use and size the event index so it
 * can hold one read and one write event per connection.
 */
static ngx_int_t
ngx_select_init(ngx_cycle_t *cycle, ngx_msec_t timer)
{
    ngx_event_t  **index;

    /* First call in this process: start with empty master fd sets. */
    if (event_index == NULL) {
        FD_ZERO(&master_read_fd_set);
        FD_ZERO(&master_write_fd_set);
        nevents = 0;
    }

    /*
     * (Re)allocate the event index when it does not exist yet or the
     * connection count grew; the factor of two accounts for a possible
     * read event plus a write event per connection.
     */
    if (ngx_process >= NGX_PROCESS_WORKER
        || cycle->old_cycle == NULL
        || cycle->old_cycle->connection_n < cycle->connection_n)
    {
        index = ngx_alloc(sizeof(ngx_event_t *) * 2 * cycle->connection_n,
                          cycle->log);
        if (index == NULL) {
            return NGX_ERROR;
        }

        /* Preserve already-registered events across the reallocation. */
        if (event_index) {
            ngx_memcpy(index, event_index, sizeof(ngx_event_t *) * nevents);
            ngx_free(event_index);
        }
        event_index = index;
    }

    ngx_io = ngx_os_io;

    ngx_event_actions = ngx_select_module_ctx.actions;

    /* select() is a level-triggered interface. */
    ngx_event_flags = NGX_USE_LEVEL_EVENT;

    /* Force lazy recomputation of the highest descriptor. */
    max_fd = -1;

    return NGX_OK;
}
/* Release the event index when the select mechanism is shut down. */
static void
ngx_select_done(ngx_cycle_t *cycle)
{
    ngx_free(event_index);
    event_index = NULL;
}
/*
 * Register an event: set the descriptor in the matching master fd set and
 * append the event to the dense event index.
 */
static ngx_int_t
ngx_select_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags)
{
    ngx_connection_t  *c;

    c = ev->data;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "select add event fd:%d ev:%i", c->fd, event);

    /* An event already in the index must not be registered twice. */
    if (ev->index != NGX_INVALID_INDEX) {
        ngx_log_error(NGX_LOG_ALERT, ev->log, 0,
                      "select event fd:%d ev:%i is already set", c->fd, event);
        return NGX_OK;
    }

    /* The event's read/write flag must agree with the requested kind. */
    if ((event == NGX_READ_EVENT && ev->write)
        || (event == NGX_WRITE_EVENT && !ev->write))
    {
        ngx_log_error(NGX_LOG_ALERT, ev->log, 0,
                      "invalid select %s event fd:%d ev:%i",
                      ev->write ? "write" : "read", c->fd, event);
        return NGX_ERROR;
    }

    if (event == NGX_READ_EVENT) {
        FD_SET(c->fd, &master_read_fd_set);

    } else if (event == NGX_WRITE_EVENT) {
        FD_SET(c->fd, &master_write_fd_set);
    }

    /* Keep max_fd current unless it is already marked for recomputation. */
    if (max_fd != -1 && max_fd < c->fd) {
        max_fd = c->fd;
    }

    ev->active = 1;

    /* Append to the dense index; ev->index remembers the slot for removal. */
    event_index[nevents] = ev;
    ev->index = nevents;
    nevents++;

    return NGX_OK;
}
/*
 * Deregister an event: clear the descriptor from the master fd set and
 * remove the event from the dense index by swapping in the last entry.
 */
static ngx_int_t
ngx_select_del_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags)
{
    ngx_event_t       *e;
    ngx_connection_t  *c;

    c = ev->data;

    ev->active = 0;

    /* Not registered: nothing to do. */
    if (ev->index == NGX_INVALID_INDEX) {
        return NGX_OK;
    }

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "select del event fd:%d ev:%i", c->fd, event);

    if (event == NGX_READ_EVENT) {
        FD_CLR(c->fd, &master_read_fd_set);

    } else if (event == NGX_WRITE_EVENT) {
        FD_CLR(c->fd, &master_write_fd_set);
    }

    /* The highest fd may have gone away; recompute lazily next time. */
    if (max_fd == c->fd) {
        max_fd = -1;
    }

    /* O(1) removal: move the last index entry into the freed slot. */
    if (ev->index < --nevents) {
        e = event_index[nevents];
        event_index[ev->index] = e;
        e->index = ev->index;
    }

    ev->index = NGX_INVALID_INDEX;

    return NGX_OK;
}
/*
 * One event-loop iteration: wait in select() for readiness on all registered
 * descriptors, then post each ready event to the accept or regular queue.
 */
static ngx_int_t
ngx_select_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
    ngx_uint_t flags)
{
    int                ready, nready;
    ngx_err_t          err;
    ngx_uint_t         i, found;
    ngx_event_t       *ev;
    ngx_queue_t       *queue;
    struct timeval     tv, *tp;
    ngx_connection_t  *c;

    /* max_fd == -1 marks a stale maximum; rescan all registered events. */
    if (max_fd == -1) {
        for (i = 0; i < nevents; i++) {
            c = event_index[i]->data;
            if (max_fd < c->fd) {
                max_fd = c->fd;
            }
        }

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "change max_fd: %i", max_fd);
    }

#if (NGX_DEBUG)
    if (cycle->log->log_level & NGX_LOG_DEBUG_ALL) {
        for (i = 0; i < nevents; i++) {
            ev = event_index[i];
            c = ev->data;
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "select event: fd:%d wr:%d", c->fd, ev->write);
        }

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "max_fd: %i", max_fd);
    }
#endif

    /* NGX_TIMER_INFINITE means block indefinitely (NULL timeout). */
    if (timer == NGX_TIMER_INFINITE) {
        tp = NULL;

    } else {
        tv.tv_sec = (long) (timer / 1000);
        tv.tv_usec = (long) ((timer % 1000) * 1000);
        tp = &tv;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select timer: %M", timer);

    /* select() modifies its sets, so hand it fresh copies of the masters. */
    work_read_fd_set = master_read_fd_set;
    work_write_fd_set = master_write_fd_set;

    ready = select(max_fd + 1, &work_read_fd_set, &work_write_fd_set, NULL, tp);

    err = (ready == -1) ? ngx_errno : 0;

    if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
        ngx_time_update();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select ready %d", ready);

    if (err) {
        ngx_uint_t  level;

        /* EINTR from the timer signal is expected, not an error. */
        if (err == NGX_EINTR) {

            if (ngx_event_timer_alarm) {
                ngx_event_timer_alarm = 0;
                return NGX_OK;
            }

            level = NGX_LOG_INFO;

        } else {
            level = NGX_LOG_ALERT;
        }

        ngx_log_error(level, cycle->log, err, "select() failed");

        /* EBADF means a stale descriptor is in a set: purge it. */
        if (err == NGX_EBADF) {
            ngx_select_repair_fd_sets(cycle);
        }

        return NGX_ERROR;
    }

    if (ready == 0) {
        /* Timeout expiry is normal when a finite timer was requested. */
        if (timer != NGX_TIMER_INFINITE) {
            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "select() returned no events without timeout");
        return NGX_ERROR;
    }

    /* Walk all registered events and post the ready ones. */
    nready = 0;

    for (i = 0; i < nevents; i++) {
        ev = event_index[i];
        c = ev->data;
        found = 0;

        if (ev->write) {
            if (FD_ISSET(c->fd, &work_write_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select write %d", c->fd);
            }

        } else {
            if (FD_ISSET(c->fd, &work_read_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select read %d", c->fd);
            }
        }

        if (found) {
            ev->ready = 1;
            ev->available = -1;

            /* Accept events are processed before regular I/O events. */
            queue = ev->accept ? &ngx_posted_accept_events
                               : &ngx_posted_events;

            ngx_post_event(ev, queue);

            nready++;
        }
    }

    /* A mismatch indicates an inconsistent fd set; repair defensively. */
    if (ready != nready) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "select ready != events: %d:%d", ready, nready);

        ngx_select_repair_fd_sets(cycle);
    }

    return NGX_OK;
}
/*
 * Drop stale descriptors from the master sets.  Each descriptor is probed
 * with getsockopt(SO_TYPE); a failure means the fd is no longer valid and
 * it is cleared so select() stops failing with EBADF.
 */
static void
ngx_select_repair_fd_sets(ngx_cycle_t *cycle)
{
    int           n;
    socklen_t     len;
    ngx_err_t     err;
    ngx_socket_t  s;

    for (s = 0; s <= max_fd; s++) {

        if (FD_ISSET(s, &master_read_fd_set) == 0) {
            continue;
        }

        len = sizeof(int);

        if (getsockopt(s, SOL_SOCKET, SO_TYPE, &n, &len) == -1) {
            err = ngx_socket_errno;

            ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
                          "invalid descriptor #%d in read fd_set", s);

            FD_CLR(s, &master_read_fd_set);
        }
    }

    for (s = 0; s <= max_fd; s++) {

        if (FD_ISSET(s, &master_write_fd_set) == 0) {
            continue;
        }

        len = sizeof(int);

        if (getsockopt(s, SOL_SOCKET, SO_TYPE, &n, &len) == -1) {
            err = ngx_socket_errno;

            ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
                          "invalid descriptor #%d in write fd_set", s);

            FD_CLR(s, &master_write_fd_set);
        }
    }

    /* Some descriptors may be gone: recompute the maximum lazily. */
    max_fd = -1;
}
/*
 * Configuration check: select() can only watch descriptors below FD_SETSIZE,
 * so reject configurations with more connections than that.
 */
static char *
ngx_select_init_conf(ngx_cycle_t *cycle, void *conf)
{
    ngx_event_conf_t  *ecf;

    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    /* Only validate when select is actually the chosen event mechanism. */
    if (ecf->use != ngx_select_module.ctx_index) {
        return NGX_CONF_OK;
    }

    /* disable warning: the default FD_SETSIZE is 1024U in FreeBSD 5.x */
    if (cycle->connection_n > FD_SETSIZE) {
        ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                      "the maximum number of files "
                      "supported by select() is %ud", FD_SETSIZE);
        return NGX_CONF_ERROR;
    }

    return NGX_CONF_OK;
}
__author__ = 'kkennedy'
# System Imports
import sys
import os
import logging
import traceback
import wx
import wx.aui
import wx.lib
import wx.lib.mixins.inspection
from wx.lib.agw import genericmessagedialog
# Labtronyx
import labtronyx
# Package Relative Imports
from ..controllers import MainApplicationController
# View Imports
from . import FrameViewBase, PanelViewBase, DialogViewBase
from . import ScriptBrowserPanel, ScriptInfoPanel
from . import InterfaceInfoPanel
from . import ResourceInfoPanel
def main(controller):
    """Create the wx application around the given controller and run its event loop."""
    application = LabtronyxApp(controller)
    application.MainLoop()
class LabtronyxApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):
    """wx application that hosts the Labtronyx main window."""

    def __init__(self, controller):
        # Store the controller before wx.App.__init__ runs, because
        # wx.App.__init__ triggers OnInit(), which reads it.
        self._controller = controller
        wx.App.__init__(self)

    def OnInit(self):
        """Create and show the main window; install a global exception hook."""
        self.Init()  # initialize the widget-inspection mixin
        self.SetAppName("Labtronyx")

        main_view = MainView(self._controller)
        main_view.Show()
        self.SetTopWindow(main_view)

        # Unhandled exception handling
        sys.excepthook = self.OnException
        return True

    def OnException(self, etype, value, trace):
        """Show any unhandled exception's traceback in a modal error dialog."""
        msg = "".join(traceback.format_exception(etype, value, trace))

        msgbox = genericmessagedialog.GenericMessageDialog(None, msg, "Unhandled Exception",
                                                           wx.OK|wx.ICON_ERROR)
        msgbox.ShowModal()
class MainView(FrameViewBase):
    """
    Labtronyx Top-Level Window

    :type controller: MainApplicationController
    """
    def __init__(self, controller):
        assert(isinstance(controller, MainApplicationController))
        super(MainView, self).__init__(None, controller,
            id=-1, title="Labtronyx", size=(640, 480), style=wx.DEFAULT_FRAME_STYLE)

        self.mainPanel = wx.Panel(self)
        # AUI manages the three dockable regions: tree, content, log.
        self.aui_mgr = wx.aui.AuiManager()
        self.aui_mgr.SetManagedWindow(self.mainPanel)

        # Build Menu
        self.buildMenubar()

        # Build Left Panel
        self.pnl_left = wx.Panel(self.mainPanel, style=wx.TAB_TRAVERSAL | wx.CLIP_CHILDREN)
        # Resource Tree
        self.tree = wx.TreeCtrl(self.pnl_left, -1, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT)
        self.host = wx.Choice(self.pnl_left, -1, style=wx.CB_SORT)
        self.updateHostSelector()
        self.host.SetSelection(0)
        host_select_sizer = wx.BoxSizer(wx.HORIZONTAL)
        host_select_sizer.Add(self.host, 1, wx.EXPAND)
        leftPanelSizer = wx.BoxSizer(wx.VERTICAL)
        leftPanelSizer.Add(wx.StaticText(self.pnl_left, -1, "Select Host"), 0, wx.ALL, 5)
        leftPanelSizer.Add(host_select_sizer, 0, wx.EXPAND | wx.BOTTOM, 5)
        leftPanelSizer.Add(self.tree, 1, wx.EXPAND)
        self.pnl_left.SetSizer(leftPanelSizer)
        # self.pnl_left.Fit()
        self.buildTree()

        # Build Main Panel
        self.pnl_content = wx.Panel(self.mainPanel, size=wx.DefaultSize,
                                    style=wx.TAB_TRAVERSAL | wx.CLIP_CHILDREN | wx.FULL_REPAINT_ON_RESIZE)

        # Build Log
        self.log = wx.TextCtrl(self.mainPanel, -1, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
        self.buildLog()

        # Event bindings
        self.Bind(wx.EVT_CHOICE, self.e_OnHostSelect, self.host)
        self.Bind(wx.EVT_CLOSE, self.e_OnWindowClose)

        self.SetBackgroundColour(wx.NullColour)
        self.SetSize((800, 600))

        self.aui_mgr.AddPane(self.pnl_content, wx.aui.AuiPaneInfo().CenterPane().Name("Content"))
        self.aui_mgr.AddPane(self.log, wx.aui.AuiPaneInfo().Bottom().BestSize((-1, 200)).Caption("Log Messages").
                             Floatable(False).CloseButton(False).Name("LogPanel"))
        self.aui_mgr.AddPane(self.pnl_left, wx.aui.AuiPaneInfo().Left().BestSize((300, -1)).
                             Floatable(False).CloseButton(False).MinSize((240, -1)).Resizable(True).
                             Caption("Resources").Name("ResourceTree"))
        self.aui_mgr.Update()
        self.Fit()

    def buildMenubar(self):
        """Create the frame's menu bar (File menu: connect, exit)."""
        self.menubar = wx.MenuBar()

        # File
        self.menu_file = wx.Menu()
        item = self.menu_file.Append(-1, "C&onnect to Host")
        self.Bind(wx.EVT_MENU, self.e_MenuConnect, item)
        item = self.menu_file.Append(-1, "E&xit\tCtrl-Q", "Exit")
        self.Bind(wx.EVT_MENU, self.e_MenuExit, item)
        self.menubar.Append(self.menu_file, "&File")

        # Set frame menubar
        self.SetMenuBar(self.menubar)

    def buildTree(self):
        """Prepare the resource tree: icons, hidden root, selection handler."""
        # Build image list
        isz = (16, 16)
        self.il = wx.ImageList(*isz)
        self.art_resource = self.il.Add(wx.Image(
            os.path.join(self.controller.rootPath, "images", "hard-drive-2x.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap())
        self.art_interface = self.il.Add(wx.Image(
            os.path.join(self.controller.rootPath, "images", "fork-2x.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap())
        self.art_script = self.il.Add(wx.Image(
            os.path.join(self.controller.rootPath, "images", "file-2x.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap())
        self.tree.SetImageList(self.il)

        self.pnode_root = self.tree.AddRoot("Labtronyx") # Add hidden root item
        self.tree.SetPyData(self.pnode_root, None)

        self.updateTree()

        # self.tree.GetMainWindow().Bind(wx.EVT_RIGHT_UP, self.e_OnRightClick)
        self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.e_OnTreeSelect)

    def buildLog(self):
        """Attach a logging handler that mirrors records into the log panel."""
        # Create handler for logger
        self.logger.addHandler(WxLogHandler(self.log))
        self.logger.setLevel(logging.DEBUG)
        self.logger.info("Logger attached")

    def updateHostSelector(self):
        """Refresh the host dropdown, preserving the current selection if possible."""
        selected = self.host.GetStringSelection()
        choices = [hcon.hostname for hcon in self.controller.hosts.values()]
        self.host.Clear()
        self.host.AppendItems(choices)
        if selected in choices:
            self.host.SetStringSelection(selected)

    def get_selected_host_controller(self):
        """Return the controller for the currently selected host (or None)."""
        hostname = self.host.GetStringSelection()
        return self.controller.get_host(hostname)

    def updateTree(self):
        """Rebuild the tree from the selected host's plugin properties."""
        host_controller = self.get_selected_host_controller()
        if host_controller is None:
            return

        self.tree.DeleteChildren(self.pnode_root)

        # Resources
        self.pnode_resources = self.tree.AppendItem(self.pnode_root, 'Resources')
        self.tree.SetItemImage(self.pnode_resources, self.art_resource)
        self.nodes_resources = {}

        # Interfaces
        self.pnode_interfaces = self.tree.AppendItem(self.pnode_root, 'Interfaces')
        self.tree.SetItemImage(self.pnode_interfaces, self.art_interface)
        self.nodes_interfaces = {}

        # Scripts
        self.pnode_scripts = self.tree.AppendItem(self.pnode_root, 'Scripts')
        self.tree.SetItemImage(self.pnode_scripts, self.art_script)
        self.nodes_scripts = {}

        # Each plugin becomes a child node under its category; the node's
        # PyData carries the plugin UUID for later lookup on selection.
        for uuid, prop in host_controller.properties.items():
            if prop.get('pluginType') == 'resource':
                node_name = prop.get('resourceID')
                child = self.tree.AppendItem(self.pnode_resources, node_name)
                self.tree.SetPyData(child, uuid)
                self.tree.SetItemImage(child, self.art_resource)
                self.nodes_resources[uuid] = child

            elif prop.get('pluginType') == 'interface':
                node_name = prop.get('interfaceName')
                child = self.tree.AppendItem(self.pnode_interfaces, node_name)
                self.tree.SetPyData(child, uuid)
                self.tree.SetItemImage(child, self.art_interface)
                self.nodes_interfaces[uuid] = child

            elif prop.get('pluginType') == 'script':
                node_name = prop.get('fqn')
                child = self.tree.AppendItem(self.pnode_scripts, node_name)
                self.tree.SetPyData(child, uuid)
                self.tree.SetItemImage(child, self.art_script)
                self.nodes_scripts[uuid] = child

        self.tree.SortChildren(self.pnode_resources)
        self.tree.Expand(self.pnode_resources)

    def e_MenuConnect(self, event):
        """Menu handler: prompt for a host and connect to it."""
        diag = ManagerAddDialog(self, self.controller)
        ret_code = diag.ShowModal()

        if ret_code != wx.ID_OK:
            return

        host = diag.getHostname()
        port = int(diag.getPort())

        # NOTE(review): the return value of add_host is ignored, so a failed
        # connection is silent here -- confirm whether feedback is intended.
        ret = self.controller.add_host(host, port)
        self.updateHostSelector()

    def e_MenuExit(self, event):
        """Menu handler: close the main window."""
        self.Close(True)

    def e_OnWindowClose(self, event):
        """Close handler: destroy the frame."""
        self.Destroy()

    def e_OnTreeSelect(self, event):
        """Tree selection handler: load the content panel for the chosen node."""
        item = event.GetItem()
        item_data = self.tree.GetPyData(item)

        host_controller = self.get_selected_host_controller()
        if host_controller is not None:
            if item == self.pnode_resources:
                self.clearContentPanel()

            elif item == self.pnode_interfaces:
                self.clearContentPanel()

            elif item == self.pnode_scripts:
                self.loadScriptSummary()

            elif item_data in host_controller.properties:
                item_props = host_controller.properties.get(item_data)

                if item_props.get('pluginType') == 'resource':
                    self.loadResourcePanel(item_data)

                elif item_props.get('pluginType') == 'interface':
                    self.loadInterfacePanel(item_data)

                elif item_props.get('pluginType') == 'script':
                    self.loadScriptPanel(item_data)

                else:
                    self.clearContentPanel()

    def e_OnHostSelect(self, event):
        """Host dropdown handler: rebuild the tree for the new host."""
        self.updateTree()

    def clearContentPanel(self):
        """Remove whatever panel currently occupies the content area."""
        self.pnl_content.DestroyChildren()

    def _loadContentPanel(self, panel, title):
        """Place `panel` in the content area under a bold `title` heading."""
        self.pnl_content.Freeze()

        # Title
        lbl = wx.StaticText(self.pnl_content, -1, title)
        lbl.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))

        panelSizer = wx.BoxSizer(wx.VERTICAL)
        panelSizer.Add(lbl, 0, wx.EXPAND | wx.ALL, 5)
        panelSizer.Add(wx.StaticLine(self.pnl_content), 0, wx.EXPAND | wx.ALL, 5)
        panelSizer.Add(panel, 1, wx.EXPAND | wx.ALL, 5)

        self.pnl_content.SetSizer(panelSizer)
        self.pnl_content.Layout()

        # Force new panel to use all available space
        # panel.SetSize(self.pnl_content.GetSize())

        self.pnl_content.Thaw()

    def loadInterfacePanel(self, int_uuid):
        """Show the detail panel for the interface with the given UUID."""
        host_controller = self.get_selected_host_controller()
        int_controller = host_controller.get_interface(int_uuid)

        # Build panel
        self.clearContentPanel()
        int_panel = InterfaceInfoPanel(self.pnl_content, int_controller)
        self._loadContentPanel(int_panel, "Interface Details")

    def loadResourcePanel(self, res_uuid):
        """Show the detail panel for the resource with the given UUID."""
        host_controller = self.get_selected_host_controller()
        res_controller = host_controller.get_resource(res_uuid)

        # Build panel
        self.clearContentPanel()
        res_panel = ResourceInfoPanel(self.pnl_content, res_controller)
        self._loadContentPanel(res_panel, "Resource Details")

    def loadScriptPanel(self, script_uuid):
        """Show the detail panel for the script with the given UUID."""
        host_controller = self.get_selected_host_controller()
        scr_controller = host_controller.get_script(script_uuid)

        self.clearContentPanel()
        scr_panel = ScriptInfoPanel(self.pnl_content, scr_controller)
        self._loadContentPanel(scr_panel, "Script Details")

    def loadScriptSummary(self):
        """Show the script browser for the selected host."""
        host_controller = self.get_selected_host_controller()

        # Build panel
        self.clearContentPanel()
        new_panel = ScriptBrowserPanel(self.pnl_content, host_controller)
        self._loadContentPanel(new_panel, "Scripts")

    def _handleEvent(self, event):
        """React to events from a host: refresh the tree on create/destroy."""
        self.logger.debug("[EVENT] - %s - %s", event.hostname, event.event)

        if event.event in [labtronyx.EventCodes.manager.heartbeat]:
            pass

        elif event.event in [labtronyx.EventCodes.resource.created,
                             labtronyx.EventCodes.resource.destroyed]:
            self.updateTree()

        elif event.event in [labtronyx.EventCodes.script.created,
                             labtronyx.EventCodes.script.destroyed]:
            self.updateTree()

        elif event.event in [labtronyx.EventCodes.interface.created,
                             labtronyx.EventCodes.interface.destroyed]:
            self.updateTree()

        elif event.event in [labtronyx.EventCodes.resource.changed,
                             labtronyx.EventCodes.resource.driver_loaded,
                             labtronyx.EventCodes.resource.driver_unloaded]:
            pass
class WxLogHandler(logging.Handler):
    """Logging handler that appends formatted records to a wx.TextCtrl.

    Records are marshalled onto the GUI thread with wx.CallAfter, so the
    handler is safe to use from worker threads.
    """

    def __init__(self, text_control):
        logging.Handler.__init__(self)
        assert (isinstance(text_control, wx.TextCtrl))
        self.control = text_control

    def emit(self, record):
        # Defer the actual widget update to the GUI thread.
        wx.CallAfter(self.wx_emit, record)

    def wx_emit(self, record):
        formatted = self.format(record)
        self.control.AppendText('\n' + formatted)
class ManagerAddDialog(DialogViewBase):
    """Modal dialog that asks the user for a hostname and port to connect to."""

    def __init__(self, parent, controller):
        assert (isinstance(controller, MainApplicationController))
        super(ManagerAddDialog, self).__init__(parent, controller, id=wx.ID_ANY, title="Connect to Host")

        lbl = wx.StaticText(self, -1, "Connect to Host")
        lbl.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))

        # Two-column grid: field labels on the left, text inputs on the right.
        contentSizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=5)
        contentSizer.Add(wx.StaticText(self, -1, "Hostname"), 0, wx.ALIGN_RIGHT | wx.RIGHT, 5)
        self.txtHostname = wx.TextCtrl(self, -1, size=(150, -1))
        contentSizer.Add(self.txtHostname, 1, wx.ALIGN_LEFT | wx.EXPAND)
        contentSizer.Add(wx.StaticText(self, -1, "Port"), 0, wx.ALIGN_RIGHT | wx.RIGHT, 5)
        self.txtPort = wx.TextCtrl(self, -1, size=(150, -1))
        contentSizer.Add(self.txtPort, 1, wx.ALIGN_LEFT | wx.EXPAND)

        btnOk = wx.Button(self, wx.ID_OK, "&Ok")
        btnOk.SetDefault()
        btnCancel = wx.Button(self, wx.ID_CANCEL, "&Cancel")
        btnSizer = wx.StdDialogButtonSizer()
        btnSizer.AddButton(btnOk)
        btnSizer.AddButton(btnCancel)
        btnSizer.Realize()

        mainSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(lbl, 0, wx.EXPAND | wx.ALL, border=5)
        mainSizer.Add(wx.StaticLine(self), 0, wx.EXPAND | wx.ALL, border=5)
        mainSizer.Add(contentSizer, 0, wx.EXPAND | wx.ALL, border=5)
        mainSizer.Add(wx.StaticLine(self), 0, wx.EXPAND | wx.ALL, border=5)
        mainSizer.Add(btnSizer, 0, wx.ALL | wx.ALIGN_RIGHT, border=5)

        self.SetSizer(mainSizer)
        mainSizer.Fit(self)

    def getHostname(self):
        """Return the hostname entered by the user (raw text, unvalidated)."""
        return self.txtHostname.GetValue()

    def getPort(self):
        """Return the port entered by the user (raw text; caller converts to int)."""
        return self.txtPort.GetValue()
from __future__ import absolute_import
from builtins import object
from math import *
from proteus import *
from proteus.default_p import *
try:
    from .risingBubble import *
except:
    # Fall back to an absolute import when this file is not run as part of a
    # package.  NOTE(review): the bare except is presumably kept for Python 2
    # compatibility, where a failed relative import raises ValueError rather
    # than ImportError -- confirm, otherwise narrow to "except ImportError".
    from risingBubble import *
# Model name used by Proteus when registering this pressure-increment model.
name = "pressureincrement"

from proteus.mprans import PresInc
LevelModelType = PresInc.LevelModel
# Pressure-increment (projection scheme) coefficients.  The densities are
# shrunk by a tiny relative epsilon; presumably this keeps the computed
# minimum strictly below the true density for stability -- TODO confirm.
coefficients=PresInc.Coefficients(rho_f_min = (1.0-1.0e-8)*rho_1,
                                  rho_s_min = (1.0-1.0e-8)*rho_s,
                                  nd = nd,
                                  modelIndex=PINC_model,
                                  fluidModelIndex=V_model,
                                  fixNullSpace=True,
                                  nullSpace='NoNullSpace')#'ConstantNullSpace')
#pressure increment should be zero on any pressure dirichlet boundaries
def getDBC_phi(x, flag):
    """Dirichlet BC for the pressure increment: zero on an open top boundary.

    Returns a boundary-value function only for the open top; None (implicit)
    everywhere else, meaning no Dirichlet condition is applied there.
    """
    if openTop and flag == boundaryTags['top']:
        return lambda x, t: 0.0
#the advectiveFlux should be zero on any no-flow boundaries
def getAdvectiveFlux_qt(x, flag):
    """Zero advective flux on every boundary except an open top."""
    top_is_open = (flag == boundaryTags['top'] and openTop)
    if not top_is_open:
        return lambda x, t: 0.0
def getDiffusiveFlux_phi(x, flag):
    """Zero diffusive flux on every boundary except an open top."""
    top_is_open = (flag == boundaryTags['top'] and openTop)
    if not top_is_open:
        return lambda x, t: 0.0
class getIBC_phi(object):
    """Initial condition for the pressure increment: identically zero."""

    def __init__(self):
        pass

    def uOfXT(self, x, t):
        """Return the initial value (always 0.0) at position x and time t."""
        return 0.0
# Single unknown (the scalar increment phi), registered as component 0.
initialConditions = {0:getIBC_phi()}
dirichletConditions = {0:getDBC_phi }
advectiveFluxBoundaryConditions = {0:getAdvectiveFlux_qt}
diffusiveFluxBoundaryConditions = {0:{0:getDiffusiveFlux_phi}}
/* eslint-env node */
// CommonJS entry point: select the production (minified) or development UMD
// bundle based on NODE_ENV.  The require() arguments are kept as static
// string literals so bundlers can statically resolve both branches and
// dead-code-eliminate the unused one.
if (process.env.NODE_ENV === "production") {
  module.exports = require("./umd/react-router-dom.production.min.js");
} else {
  module.exports = require("./umd/react-router-dom.development.js");
}
use std::io::{self, Write};
use {
bstr::ByteSlice,
grep_matcher::{
LineMatchKind, LineTerminator, Match, Matcher, NoCaptures, NoError,
},
regex::bytes::{Regex, RegexBuilder},
};
use crate::{
searcher::{BinaryDetection, Searcher, SearcherBuilder},
sink::{Sink, SinkContext, SinkFinish, SinkMatch},
};
/// A simple regex matcher.
///
/// This supports setting the matcher's line terminator configuration directly,
/// which we use for testing purposes. That is, the caller explicitly
/// determines whether the line terminator optimization is enabled. (In reality
/// this optimization is detected automatically by inspecting and possibly
/// modifying the regex itself.)
#[derive(Clone, Debug)]
pub(crate) struct RegexMatcher {
    /// The underlying compiled regex.
    regex: Regex,
    /// The line terminator this matcher reports, if any (see `set_line_term`).
    line_term: Option<LineTerminator>,
    /// When true, `find_candidate_line` reports every line as a candidate.
    every_line_is_candidate: bool,
}
impl RegexMatcher {
    /// Build a matcher from `pattern`.
    ///
    /// Multi-line mode is always enabled so that `^` and `$` anchor at `\n`
    /// boundaries, which is what line-oriented searching expects.
    pub(crate) fn new(pattern: &str) -> RegexMatcher {
        let compiled = RegexBuilder::new(pattern)
            .multi_line(true)
            .build()
            .unwrap();
        RegexMatcher {
            regex: compiled,
            line_term: None,
            every_line_is_candidate: false,
        }
    }

    /// Override this matcher's line terminator.
    ///
    /// A fresh matcher has no line terminator set.
    pub(crate) fn set_line_term(
        &mut self,
        line_term: Option<LineTerminator>,
    ) -> &mut RegexMatcher {
        self.line_term = line_term;
        self
    }

    /// When enabled, report every line as a candidate match, which forces
    /// searchers to handle false positives.
    pub(crate) fn every_line_is_candidate(
        &mut self,
        yes: bool,
    ) -> &mut RegexMatcher {
        self.every_line_is_candidate = yes;
        self
    }
}
impl Matcher for RegexMatcher {
    type Captures = NoCaptures;
    type Error = NoError;

    /// Find the next match at or after `at`, converted to grep's `Match`.
    fn find_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<Match>, NoError> {
        Ok(self
            .regex
            .find_at(haystack, at)
            .map(|m| Match::new(m.start(), m.end())))
    }

    fn new_captures(&self) -> Result<NoCaptures, NoError> {
        Ok(NoCaptures::new())
    }

    fn line_terminator(&self) -> Option<LineTerminator> {
        self.line_term
    }

    /// Report a candidate line. When `every_line_is_candidate` is set, the
    /// current line is always reported (exercising false-positive handling);
    /// otherwise defer to a confirmed shortest match.
    fn find_candidate_line(
        &self,
        haystack: &[u8],
    ) -> Result<Option<LineMatchKind>, NoError> {
        if self.every_line_is_candidate {
            assert!(self.line_term.is_some());
            if haystack.is_empty() {
                return Ok(None);
            }
            // Make it interesting and return the last byte in the current
            // line. (The redundant `.map(|i| i)` identity adapter has been
            // removed; it had no effect.)
            let i = haystack
                .find_byte(self.line_term.unwrap().as_byte())
                .unwrap_or(haystack.len() - 1);
            Ok(Some(LineMatchKind::Candidate(i)))
        } else {
            Ok(self.shortest_match(haystack)?.map(LineMatchKind::Confirmed))
        }
    }
}
/// An implementation of Sink that prints all available information.
///
/// This is useful for tests because it lets us easily confirm whether data
/// is being passed to Sink correctly.
#[derive(Clone, Debug)]
// Tuple struct: field 0 accumulates every byte written to the sink.
pub(crate) struct KitchenSink(Vec<u8>);
impl KitchenSink {
    /// Construct an empty sink that records everything written to it.
    pub(crate) fn new() -> KitchenSink {
        KitchenSink(Vec::new())
    }

    /// View everything written to this sink so far.
    pub(crate) fn as_bytes(&self) -> &[u8] {
        self.0.as_slice()
    }
}
impl Sink for KitchenSink {
    type Error = io::Error;

    /// Record a matched region: for each line, an optional `N:` line-number
    /// prefix, an absolute byte-offset prefix, then the line bytes.
    fn matched(
        &mut self,
        _searcher: &Searcher,
        mat: &SinkMatch<'_>,
    ) -> Result<bool, io::Error> {
        assert!(!mat.bytes().is_empty());
        assert!(mat.lines().count() >= 1);

        let mut line_number = mat.line_number();
        let mut byte_offset = mat.absolute_byte_offset();
        for line in mat.lines() {
            if let Some(ref mut n) = line_number {
                write!(self.0, "{}:", n)?;
                *n += 1;
            }
            write!(self.0, "{}:", byte_offset)?;
            byte_offset += line.len() as u64;
            self.0.write_all(line)?;
        }
        Ok(true)
    }

    /// Record a contextual line with `-`-separated prefixes (mirroring the
    /// `:` prefixes used for matches).
    fn context(
        &mut self,
        _searcher: &Searcher,
        context: &SinkContext<'_>,
    ) -> Result<bool, io::Error> {
        assert!(!context.bytes().is_empty());
        assert!(context.lines().count() == 1);

        if let Some(line_number) = context.line_number() {
            write!(self.0, "{}-", line_number)?;
        }
        write!(self.0, "{}-", context.absolute_byte_offset)?;
        self.0.write_all(context.bytes())?;
        Ok(true)
    }

    /// Record a break between non-contiguous context regions.
    fn context_break(
        &mut self,
        _searcher: &Searcher,
    ) -> Result<bool, io::Error> {
        self.0.write_all(b"--\n")?;
        Ok(true)
    }

    /// Record end-of-search summary data (byte count, binary offset).
    fn finish(
        &mut self,
        _searcher: &Searcher,
        sink_finish: &SinkFinish,
    ) -> Result<(), io::Error> {
        // `writeln!(self.0, "")` replaced with the argument-free form; the
        // empty format string was redundant (clippy: writeln-empty-string).
        writeln!(self.0)?;
        writeln!(self.0, "byte count:{}", sink_finish.byte_count())?;
        if let Some(offset) = sink_finish.binary_byte_offset() {
            writeln!(self.0, "binary offset:{}", offset)?;
        }
        Ok(())
    }
}
/// A type for expressing tests on a searcher.
///
/// The searcher code has a lot of different code paths, mostly for the
/// purposes of optimizing a bunch of different use cases. The intent of the
/// searcher is to pick the best code path based on the configuration, which
/// means there is no obviously direct way to ask that a specific code path
/// be exercised. Thus, the purpose of this tester is to explicitly check as
/// many code paths that make sense.
///
/// The tester works by assuming you want to test all pertinent code paths.
/// These can be trimmed down as necessary via the various builder methods.
#[derive(Debug)]
pub(crate) struct SearcherTester {
    /// The corpus to search.
    haystack: String,
    /// The regex pattern to search for.
    pattern: String,
    /// If set, only configurations whose label matches this regex run.
    filter: Option<::regex::Regex>,
    /// When true, print every configuration label before running tests.
    print_labels: bool,
    /// Expected output without line numbers (always required).
    expected_no_line_number: Option<String>,
    /// Expected output with line numbers (required when `line_number`).
    expected_with_line_number: Option<String>,
    /// Slice-search override for `expected_no_line_number`.
    expected_slice_no_line_number: Option<String>,
    /// Slice-search override for `expected_with_line_number`.
    expected_slice_with_line_number: Option<String>,
    /// Test the line-by-line searcher.
    by_line: bool,
    /// Test the multi-line searcher.
    multi_line: bool,
    /// Run searches with match inversion.
    invert_match: bool,
    /// Also test configurations with line numbers enabled.
    line_number: bool,
    /// Binary detection behavior applied to all searches.
    binary: BinaryDetection,
    /// Also generate minimal-heap-limit configurations.
    auto_heap_limit: bool,
    /// Lines of trailing context.
    after_context: usize,
    /// Lines of leading context.
    before_context: usize,
    /// Treat every non-matching line as context.
    passthru: bool,
}
impl SearcherTester {
    /// Create a new tester for testing searchers.
    pub(crate) fn new(haystack: &str, pattern: &str) -> SearcherTester {
        SearcherTester {
            haystack: haystack.to_string(),
            pattern: pattern.to_string(),
            filter: None,
            print_labels: false,
            expected_no_line_number: None,
            expected_with_line_number: None,
            expected_slice_no_line_number: None,
            expected_slice_with_line_number: None,
            by_line: true,
            multi_line: true,
            invert_match: false,
            line_number: true,
            binary: BinaryDetection::none(),
            auto_heap_limit: true,
            after_context: 0,
            before_context: 0,
            passthru: false,
        }
    }

    /// Execute the test. If the test succeeds, then this returns successfully.
    /// If the test fails, then it panics with an informative message.
    pub(crate) fn test(&self) {
        // Check for configuration errors.
        if self.expected_no_line_number.is_none() {
            panic!("an 'expected' string with NO line numbers must be given");
        }
        if self.line_number && self.expected_with_line_number.is_none() {
            panic!(
                "an 'expected' string with line numbers must be given, \
                 or disable testing with line numbers"
            );
        }

        let configs = self.configs();
        if configs.is_empty() {
            panic!("test configuration resulted in nothing being tested");
        }
        if self.print_labels {
            for config in &configs {
                // Each config is exercised twice: once via a reader and once
                // via a slice, hence the two derived labels.
                let labels = vec![
                    format!("reader-{}", config.label),
                    format!("slice-{}", config.label),
                ];
                for label in &labels {
                    if self.include(label) {
                        println!("{}", label);
                    } else {
                        println!("{} (ignored)", label);
                    }
                }
            }
        }
        for config in &configs {
            let label = format!("reader-{}", config.label);
            if self.include(&label) {
                let got = config.search_reader(&self.haystack);
                assert_eq_printed!(config.expected_reader, got, "{}", label);
            }

            let label = format!("slice-{}", config.label);
            if self.include(&label) {
                let got = config.search_slice(&self.haystack);
                assert_eq_printed!(config.expected_slice, got, "{}", label);
            }
        }
    }

    /// Set a regex pattern to filter the tests that are run.
    ///
    /// By default, no filter is present. When a filter is set, only test
    /// configurations with a label matching the given pattern will be run.
    ///
    /// This is often useful when debugging tests, e.g., when you want to do
    /// printf debugging and only want one particular test configuration to
    /// execute.
    #[allow(dead_code)]
    pub(crate) fn filter(&mut self, pattern: &str) -> &mut SearcherTester {
        self.filter = Some(::regex::Regex::new(pattern).unwrap());
        self
    }

    /// When set, the labels for all test configurations are printed before
    /// executing any test.
    ///
    /// Note that in order to see these in tests that aren't failing, you'll
    /// want to use `cargo test -- --nocapture`.
    #[allow(dead_code)]
    pub(crate) fn print_labels(&mut self, yes: bool) -> &mut SearcherTester {
        self.print_labels = yes;
        self
    }

    /// Set the expected search results, without line numbers.
    pub(crate) fn expected_no_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_no_line_number = Some(exp.to_string());
        self
    }

    /// Set the expected search results, with line numbers.
    pub(crate) fn expected_with_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_with_line_number = Some(exp.to_string());
        self
    }

    /// Set the expected search results, without line numbers, when performing
    /// a search on a slice. When not present, `expected_no_line_number` is
    /// used instead.
    pub(crate) fn expected_slice_no_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_slice_no_line_number = Some(exp.to_string());
        self
    }

    /// Set the expected search results, with line numbers, when performing a
    /// search on a slice. When not present, `expected_with_line_number` is
    /// used instead.
    #[allow(dead_code)]
    pub(crate) fn expected_slice_with_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_slice_with_line_number = Some(exp.to_string());
        self
    }

    /// Whether to test search with line numbers or not.
    ///
    /// This is enabled by default. When enabled, the string that is expected
    /// when line numbers are present must be provided. Otherwise, the expected
    /// string isn't required.
    pub(crate) fn line_number(&mut self, yes: bool) -> &mut SearcherTester {
        self.line_number = yes;
        self
    }

    /// Whether to test search using the line-by-line searcher or not.
    ///
    /// By default, this is enabled.
    pub(crate) fn by_line(&mut self, yes: bool) -> &mut SearcherTester {
        self.by_line = yes;
        self
    }

    /// Whether to test search using the multi line searcher or not.
    ///
    /// By default, this is enabled.
    #[allow(dead_code)]
    pub(crate) fn multi_line(&mut self, yes: bool) -> &mut SearcherTester {
        self.multi_line = yes;
        self
    }

    /// Whether to perform an inverted search or not.
    ///
    /// By default, this is disabled.
    pub(crate) fn invert_match(&mut self, yes: bool) -> &mut SearcherTester {
        self.invert_match = yes;
        self
    }

    /// Whether to enable binary detection on all searches.
    ///
    /// By default, this is disabled.
    pub(crate) fn binary_detection(
        &mut self,
        detection: BinaryDetection,
    ) -> &mut SearcherTester {
        self.binary = detection;
        self
    }

    /// Whether to automatically attempt to test the heap limit setting or not.
    ///
    /// By default, one of the test configurations includes setting the heap
    /// limit to its minimal value for normal operation, which checks that
    /// everything works even at the extremes. However, in some cases, the heap
    /// limit can (expectedly) alter the output slightly. For example, it can
    /// impact the number of bytes searched when performing binary detection.
    /// For convenience, it can be useful to disable the automatic heap limit
    /// test.
    pub(crate) fn auto_heap_limit(
        &mut self,
        yes: bool,
    ) -> &mut SearcherTester {
        self.auto_heap_limit = yes;
        self
    }

    /// Set the number of lines to include in the "after" context.
    ///
    /// The default is `0`, which is equivalent to not printing any context.
    pub(crate) fn after_context(
        &mut self,
        lines: usize,
    ) -> &mut SearcherTester {
        self.after_context = lines;
        self
    }

    /// Set the number of lines to include in the "before" context.
    ///
    /// The default is `0`, which is equivalent to not printing any context.
    pub(crate) fn before_context(
        &mut self,
        lines: usize,
    ) -> &mut SearcherTester {
        self.before_context = lines;
        self
    }

    /// Whether to enable the "passthru" feature or not.
    ///
    /// When passthru is enabled, it effectively treats all non-matching lines
    /// as contextual lines. In other words, enabling this is akin to
    /// requesting an unbounded number of before and after contextual lines.
    ///
    /// This is disabled by default.
    pub(crate) fn passthru(&mut self, yes: bool) -> &mut SearcherTester {
        self.passthru = yes;
        self
    }

    /// Return the minimum size of a buffer required for a successful search.
    ///
    /// Generally, this corresponds to the maximum length of a line (including
    /// its terminator), but if context settings are enabled, then this must
    /// include the sum of the longest N lines.
    ///
    /// Note that this must account for whether the test is using multi line
    /// search or not, since multi line search requires being able to fit the
    /// entire haystack into memory.
    fn minimal_heap_limit(&self, multi_line: bool) -> usize {
        if multi_line {
            // Multi-line search buffers the entire haystack, plus one byte.
            1 + self.haystack.len()
        } else if self.before_context == 0 && self.after_context == 0 {
            1 + self.haystack.lines().map(|s| s.len()).max().unwrap_or(0)
        } else {
            // With context enabled, the buffer must hold the longest
            // `context_count` lines simultaneously.
            let mut lens: Vec<usize> =
                self.haystack.lines().map(|s| s.len()).collect();
            lens.sort();
            lens.reverse();

            let context_count = if self.passthru {
                self.haystack.lines().count()
            } else {
                // Why do we add 2 here? Well, we need to add 1 in order to
                // have room to search at least one line. We add another
                // because the implementation will occasionally include
                // an additional line when handling the context. There's
                // no particularly good reason, other than keeping the
                // implementation simple.
                2 + self.before_context + self.after_context
            };

            // We add 1 to each line since `str::lines` doesn't include the
            // line terminator.
            lens.into_iter()
                .take(context_count)
                .map(|len| len + 1)
                .sum::<usize>()
        }
    }

    /// Returns true if and only if the given label should be included as part
    /// of executing `test`.
    ///
    /// Inclusion is determined by the filter specified. If no filter has been
    /// given, then this always returns `true`.
    fn include(&self, label: &str) -> bool {
        let re = match self.filter {
            None => return true,
            Some(ref re) => re,
        };
        re.is_match(label)
    }

    /// Configs generates a set of all search configurations that should be
    /// tested. The configs generated are based on the configuration in this
    /// builder.
    fn configs(&self) -> Vec<TesterConfig> {
        let mut configs = vec![];

        let matcher = RegexMatcher::new(&self.pattern);
        let mut builder = SearcherBuilder::new();
        builder
            .line_number(false)
            .invert_match(self.invert_match)
            .binary_detection(self.binary.clone())
            .after_context(self.after_context)
            .before_context(self.before_context)
            .passthru(self.passthru);

        if self.by_line {
            let mut matcher = matcher.clone();
            let mut builder = builder.clone();

            let expected_reader =
                self.expected_no_line_number.as_ref().unwrap().to_string();
            let expected_slice = match self.expected_slice_no_line_number {
                None => expected_reader.clone(),
                Some(ref e) => e.to_string(),
            };
            configs.push(TesterConfig {
                label: "byline-noterm-nonumber".to_string(),
                expected_reader: expected_reader.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });

            if self.auto_heap_limit {
                builder.heap_limit(Some(self.minimal_heap_limit(false)));
                configs.push(TesterConfig {
                    label: "byline-noterm-nonumber-heaplimit".to_string(),
                    expected_reader: expected_reader.clone(),
                    expected_slice: expected_slice.clone(),
                    builder: builder.clone(),
                    matcher: matcher.clone(),
                });
                // Reset so subsequent configs are not heap-limited.
                builder.heap_limit(None);
            }

            matcher.set_line_term(Some(LineTerminator::byte(b'\n')));
            configs.push(TesterConfig {
                label: "byline-term-nonumber".to_string(),
                expected_reader: expected_reader.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });

            matcher.every_line_is_candidate(true);
            configs.push(TesterConfig {
                label: "byline-term-nonumber-candidates".to_string(),
                expected_reader: expected_reader.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });
        }
        if self.by_line && self.line_number {
            let mut matcher = matcher.clone();
            let mut builder = builder.clone();

            let expected_reader =
                self.expected_with_line_number.as_ref().unwrap().to_string();
            let expected_slice = match self.expected_slice_with_line_number {
                None => expected_reader.clone(),
                Some(ref e) => e.to_string(),
            };

            builder.line_number(true);
            configs.push(TesterConfig {
                label: "byline-noterm-number".to_string(),
                expected_reader: expected_reader.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });

            matcher.set_line_term(Some(LineTerminator::byte(b'\n')));
            configs.push(TesterConfig {
                label: "byline-term-number".to_string(),
                expected_reader: expected_reader.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });

            matcher.every_line_is_candidate(true);
            configs.push(TesterConfig {
                label: "byline-term-number-candidates".to_string(),
                expected_reader: expected_reader.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });
        }
        if self.multi_line {
            let mut builder = builder.clone();
            let expected_slice = match self.expected_slice_no_line_number {
                None => {
                    self.expected_no_line_number.as_ref().unwrap().to_string()
                }
                Some(ref e) => e.to_string(),
            };

            builder.multi_line(true);
            configs.push(TesterConfig {
                label: "multiline-nonumber".to_string(),
                expected_reader: expected_slice.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });

            if self.auto_heap_limit {
                builder.heap_limit(Some(self.minimal_heap_limit(true)));
                configs.push(TesterConfig {
                    label: "multiline-nonumber-heaplimit".to_string(),
                    expected_reader: expected_slice.clone(),
                    expected_slice: expected_slice.clone(),
                    builder: builder.clone(),
                    matcher: matcher.clone(),
                });
                builder.heap_limit(None);
            }
        }
        if self.multi_line && self.line_number {
            let mut builder = builder.clone();
            let expected_slice = match self.expected_slice_with_line_number {
                None => self
                    .expected_with_line_number
                    .as_ref()
                    .unwrap()
                    .to_string(),
                Some(ref e) => e.to_string(),
            };

            builder.multi_line(true);
            builder.line_number(true);
            configs.push(TesterConfig {
                label: "multiline-number".to_string(),
                expected_reader: expected_slice.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });

            builder.heap_limit(Some(self.minimal_heap_limit(true)));
            configs.push(TesterConfig {
                label: "multiline-number-heaplimit".to_string(),
                expected_reader: expected_slice.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });
            builder.heap_limit(None);
        }
        configs
    }
}
#[derive(Debug)]
struct TesterConfig {
    /// Human-readable label identifying this configuration.
    label: String,
    /// Expected output for the incremental (reader) search.
    expected_reader: String,
    /// Expected output for the in-memory (slice) search.
    expected_slice: String,
    /// The searcher configuration under test.
    builder: SearcherBuilder,
    /// The matcher under test.
    matcher: RegexMatcher,
}
impl TesterConfig {
    /// Run an incremental search over a reader, where the haystack is not
    /// necessarily entirely in memory at once, and return the sink's output.
    fn search_reader(&self, haystack: &str) -> String {
        let mut sink = KitchenSink::new();
        let mut searcher = self.builder.build();
        let outcome = searcher.search_reader(
            &self.matcher,
            haystack.as_bytes(),
            &mut sink,
        );
        match outcome {
            Ok(()) => String::from_utf8(sink.as_bytes().to_vec()).unwrap(),
            Err(err) => {
                let label = format!("reader-{}", self.label);
                panic!("error running '{}': {}", label, err);
            }
        }
    }

    /// Run a search over a slice, exercising the code paths that have the
    /// entire haystack in memory at once, and return the sink's output.
    fn search_slice(&self, haystack: &str) -> String {
        let mut sink = KitchenSink::new();
        let mut searcher = self.builder.build();
        let outcome = searcher.search_slice(
            &self.matcher,
            haystack.as_bytes(),
            &mut sink,
        );
        match outcome {
            Ok(()) => String::from_utf8(sink.as_bytes().to_vec()).unwrap(),
            Err(err) => {
                let label = format!("slice-{}", self.label);
                panic!("error running '{}': {}", label, err);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand for constructing a `Match`.
    fn m(start: usize, end: usize) -> Match {
        Match::new(start, end)
    }

    #[test]
    fn empty_line1() {
        let matcher = RegexMatcher::new(r"^$");
        assert_eq!(matcher.find_at(b"", 0), Ok(Some(m(0, 0))));
    }

    #[test]
    fn empty_line2() {
        let matcher = RegexMatcher::new(r"^$");
        let haystack = b"\n";
        // Both sides of the sole terminator are empty lines.
        for at in 0..=1 {
            assert_eq!(matcher.find_at(haystack, at), Ok(Some(m(at, at))));
        }
    }

    #[test]
    fn empty_line3() {
        let matcher = RegexMatcher::new(r"^$");
        let haystack = b"\n\n";
        for at in 0..=2 {
            assert_eq!(matcher.find_at(haystack, at), Ok(Some(m(at, at))));
        }
    }

    #[test]
    fn empty_line4() {
        let matcher = RegexMatcher::new(r"^$");
        let haystack = b"a\n\nb\n";
        // Starting anywhere in the first two lines finds the empty line at 2;
        // past that, the next empty line is at the end (offset 5).
        for at in 0..=2 {
            assert_eq!(matcher.find_at(haystack, at), Ok(Some(m(2, 2))));
        }
        for at in 3..=5 {
            assert_eq!(matcher.find_at(haystack, at), Ok(Some(m(5, 5))));
        }
    }

    #[test]
    fn empty_line5() {
        let matcher = RegexMatcher::new(r"^$");
        let haystack = b"a\n\nb\nc";
        // No trailing terminator after 'c', so nothing matches past offset 2.
        for at in 0..=2 {
            assert_eq!(matcher.find_at(haystack, at), Ok(Some(m(2, 2))));
        }
        for at in 3..=6 {
            assert_eq!(matcher.find_at(haystack, at), Ok(None));
        }
    }

    #[test]
    fn empty_line6() {
        let matcher = RegexMatcher::new(r"^$");
        let haystack = b"a\n";
        for at in 0..=2 {
            assert_eq!(matcher.find_at(haystack, at), Ok(Some(m(2, 2))));
        }
    }
}
/* Wrapper config: pull in the stock mbedTLS configuration, then disable the
 * AES-NI acceleration module on 32-bit x86 builds.  (Presumably the AES-NI
 * code paths here require x86-64 — confirm against the mbedTLS version in
 * use.) */
#include "mbedtls/mbedtls_config.h"

#if defined (MBEDTLS_ARCH_IS_X86)
#undef MBEDTLS_AESNI_C
#endif
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
class IndicesClient(NamespacedClient):
@query_params('analyzer', 'attributes', 'char_filter', 'explain', 'field',
    'filter', 'format', 'prefer_local', 'text', 'tokenizer')
def analyze(self, index=None, body=None, params=None):
    """
    Run the analysis process on a text and return the token breakdown.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html>`_

    :arg index: The name of the index to scope the operation
    :arg body: The text on which the analysis should be performed
    :arg analyzer: The name of the analyzer to use
    :arg attributes: Comma-separated token attributes to output; only
        effective together with `explain=true`
    :arg char_filter: Comma-separated character filters to use for the
        analysis
    :arg explain: With `true`, outputs more advanced details (default: false)
    :arg field: Use the analyzer configured for this field instead of
        passing an analyzer name
    :arg filter: Comma-separated filters to use for the analysis
    :arg format: Output format, default 'detailed', valid choices are:
        'detailed', 'text'
    :arg prefer_local: With `true`, use a local shard if available; with
        `false`, use a random shard (default: true)
    :arg text: The text to analyze (when the request body is not used)
    :arg tokenizer: The name of the tokenizer to use for the analysis
    """
    path = _make_path(index, '_analyze')
    return self.transport.perform_request('GET', path, params=params,
        body=body)
@query_params('allow_no_indices', 'expand_wildcards', 'force',
    'ignore_unavailable', 'operation_threading')
def refresh(self, index=None, params=None):
    """
    Explicitly refresh one or more indices, making every operation performed
    since the last refresh available for search.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html>`_

    :arg index: Comma-separated list of index names; use `_all` or an empty
        string to target every index
    :arg allow_no_indices: Whether to ignore a wildcard expression that
        resolves to no concrete indices (this includes `_all` or when no
        indices were specified)
    :arg expand_wildcards: Whether to expand wildcards to open, closed or
        both kinds of indices, default 'open', valid choices are: 'open',
        'closed', 'none', 'all'
    :arg force: Force a refresh even if not required, default False
    :arg ignore_unavailable: Whether concrete indices that are unavailable
        (missing or closed) should be ignored
    :arg operation_threading: TODO: ?
    """
    path = _make_path(index, '_refresh')
    return self.transport.perform_request('POST', path, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'force',
    'ignore_unavailable', 'wait_if_ongoing')
def flush(self, index=None, params=None):
    """
    Explicitly flush one or more indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html>`_

    :arg index: Comma-separated list of index names; use `_all` or an empty
        string for all indices
    :arg allow_no_indices: Whether to ignore a wildcard expression that
        resolves to no concrete indices (this includes `_all` or when no
        indices were specified)
    :arg expand_wildcards: Whether to expand wildcards to open, closed or
        both kinds of indices, default 'open', valid choices are: 'open',
        'closed', 'none', 'all'
    :arg force: Whether to force a flush even if it is not strictly needed,
        i.e. when no changes would be committed to the index.  Useful if
        transaction log IDs should be incremented even without uncommitted
        changes (can be considered internal)
    :arg ignore_unavailable: Whether concrete indices that are unavailable
        (missing or closed) should be ignored
    :arg wait_if_ongoing: If true, block until any in-flight flush finishes
        before executing (the default).  If false, the flush is skipped when
        another flush operation is already running
    """
    path = _make_path(index, '_flush')
    return self.transport.perform_request('POST', path, params=params)
@query_params('master_timeout', 'timeout', 'update_all_types',
    'wait_for_active_shards')
def create(self, index, body=None, params=None):
    """
    Create an index in Elasticsearch.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html>`_

    :arg index: The name of the index (required)
    :arg body: The configuration for the index (`settings` and `mappings`)
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    :arg update_all_types: Whether to update the mapping for all fields with
        the same name across all types or not
    :arg wait_for_active_shards: Number of active shards to wait for before
        the operation returns
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index)
    return self.transport.perform_request('PUT', path, params=params,
        body=body)
@query_params('allow_no_indices', 'expand_wildcards', 'flat_settings',
    'human', 'ignore_unavailable', 'include_defaults', 'local')
def get(self, index, feature=None, params=None):
    """
    Retrieve information about one or more indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html>`_

    :arg index: Comma-separated list of index names (required)
    :arg feature: Comma-separated list of features
    :arg allow_no_indices: Ignore if a wildcard expression resolves to no
        concrete indices (default: false)
    :arg expand_wildcards: Whether wildcards should expand to open or closed
        indices, default 'open', valid choices are: 'open', 'closed',
        'none', 'all'
    :arg flat_settings: Return settings in flat format (default: false)
    :arg human: Whether to return version and creation date values in
        human-readable format, default False
    :arg ignore_unavailable: Ignore unavailable indexes (default: false)
    :arg include_defaults: Whether to return all default settings for each
        of the indices, default False
    :arg local: Return local information; do not retrieve the state from
        the master node (default: false)
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, feature)
    return self.transport.perform_request('GET', path, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'master_timeout', 'timeout')
def open(self, index, params=None):
    """
    Open a closed index to make it available for search.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html>`_

    :arg index: The name of the index (required)
    :arg allow_no_indices: Whether to ignore a wildcard expression that
        resolves to no concrete indices (this includes `_all` or when no
        indices were specified)
    :arg expand_wildcards: Whether to expand wildcards to open, closed or
        both kinds of indices, default 'closed', valid choices are: 'open',
        'closed', 'none', 'all'
    :arg ignore_unavailable: Whether concrete indices that are unavailable
        (missing or closed) should be ignored
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, '_open')
    return self.transport.perform_request('POST', path, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'master_timeout', 'timeout')
def close(self, index, params=None):
    """
    Close an index to remove its overhead from the cluster.  A closed index
    is blocked for read/write operations.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html>`_

    :arg index: The name of the index (required)
    :arg allow_no_indices: Whether to ignore a wildcard expression that
        resolves to no concrete indices (this includes `_all` or when no
        indices were specified)
    :arg expand_wildcards: Whether to expand wildcards to open, closed or
        both kinds of indices, default 'open', valid choices are: 'open',
        'closed', 'none', 'all'
    :arg ignore_unavailable: Whether concrete indices that are unavailable
        (missing or closed) should be ignored
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, '_close')
    return self.transport.perform_request('POST', path, params=params)
@query_params('master_timeout', 'timeout')
def delete(self, index, params=None):
    """
    Delete one or more indices in Elasticsearch.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html>`_

    :arg index: Comma-separated list of indices to delete; use `_all` or
        `*` to delete all indices (required)
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index)
    return self.transport.perform_request('DELETE', path, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'local')
def exists(self, index, params=None):
    """
    Return a boolean indicating whether the given index exists.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html>`_

    :arg index: Comma-separated list of indices to check (required)
    :arg allow_no_indices: Whether to ignore a wildcard expression that
        resolves to no concrete indices (this includes `_all` or when no
        indices were specified)
    :arg expand_wildcards: Whether to expand wildcards to open, closed or
        both kinds of indices, default 'open', valid choices are: 'open',
        'closed', 'none', 'all'
    :arg ignore_unavailable: Whether concrete indices that are unavailable
        (missing or closed) should be ignored
    :arg local: Return local information; do not retrieve the state from
        the master node (default: false)
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index)
    return self.transport.perform_request('HEAD', path, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'local')
def exists_type(self, index, doc_type, params=None):
    """
    Report whether a type (or types) exists in an index (or indices).
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-types-exists.html>`_

    :arg index: A comma-separated list of index names; use `_all` to check
        the types across all indices
    :arg doc_type: A comma-separated list of document types to check
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    # Both path components are mandatory; reject any empty value.
    if any(arg in SKIP_IN_PATH for arg in (index, doc_type)):
        raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        'HEAD', _make_path(index, doc_type), params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'master_timeout', 'timeout', 'update_all_types')
def put_mapping(self, doc_type, body, index=None, params=None):
    """
    Register a mapping definition for a specific document type.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html>`_

    :arg doc_type: The name of the document type
    :arg body: The mapping definition
    :arg index: A comma-separated list of index names the mapping should be
        added to (supports wildcards); use `_all` or omit to add the mapping
        on all indices.
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    :arg update_all_types: Whether to update the mapping for all fields with
        the same name across all types or not
    """
    # `index` may be omitted (maps on all indices) but the type and body
    # are both required.
    if any(arg in SKIP_IN_PATH for arg in (doc_type, body)):
        raise ValueError("Empty value passed for a required argument.")
    url = _make_path(index, '_mapping', doc_type)
    return self.transport.perform_request('PUT', url, params=params, body=body)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'local')
def get_mapping(self, index=None, doc_type=None, params=None):
    """
    Fetch the mapping definition for an index, or for an index/type pair.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html>`_

    :arg index: A comma-separated list of index names
    :arg doc_type: A comma-separated list of document types
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    # Both path parts are optional; _make_path drops empty segments.
    url = _make_path(index, '_mapping', doc_type)
    return self.transport.perform_request('GET', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'include_defaults', 'local')
def get_field_mapping(self, fields, index=None, doc_type=None, params=None):
    """
    Fetch the mapping definition for one or more specific fields.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html>`_

    :arg fields: A comma-separated list of fields
    :arg index: A comma-separated list of index names
    :arg doc_type: A comma-separated list of document types
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg include_defaults: Whether the default mapping values should be
        returned as well
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    if fields in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'fields'.")
    url = _make_path(index, '_mapping', doc_type, 'field', fields)
    return self.transport.perform_request('GET', url, params=params)
@query_params('master_timeout', 'timeout')
def put_alias(self, index, name, body=None, params=None):
    """
    Create or update an alias pointing at one or more indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_

    :arg index: A comma-separated list of index names the alias should point
        to (supports wildcards); use `_all` to perform the operation on all
        indices.
    :arg name: The name of the alias to be created or updated
    :arg body: The settings for the alias, such as `routing` or `filter`
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit timeout for the operation
    """
    # Both the target index list and the alias name are required.
    if any(arg in SKIP_IN_PATH for arg in (index, name)):
        raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        'PUT', _make_path(index, '_alias', name), params=params, body=body)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'local')
def exists_alias(self, index=None, name=None, params=None):
    """
    Report whether the given alias exists, as a boolean.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_

    :arg index: A comma-separated list of index names to filter aliases
    :arg name: A comma-separated list of alias names to return
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default ['open', 'closed'],
        valid choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    # Neither argument is required: with both omitted this checks whether
    # any alias exists at all.
    url = _make_path(index, '_alias', name)
    return self.transport.perform_request('HEAD', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'local')
def get_alias(self, index=None, name=None, params=None):
    """
    Fetch the definition of a specified alias.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_

    :arg index: A comma-separated list of index names to filter aliases
    :arg name: A comma-separated list of alias names to return
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'all', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    url = _make_path(index, '_alias', name)
    return self.transport.perform_request('GET', url, params=params)
@query_params('master_timeout', 'timeout')
def update_aliases(self, body, params=None):
    """
    Apply a batch of alias `actions` in a single atomic operation.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_

    :arg body: The definition of `actions` to perform
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Request timeout
    """
    if body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'body'.")
    # This endpoint is fixed; only the body varies.
    return self.transport.perform_request(
        'POST', '/_aliases', params=params, body=body)
@query_params('master_timeout', 'timeout')
def delete_alias(self, index, name, params=None):
    """
    Remove a specific alias from one or more indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_

    :arg index: A comma-separated list of index names (supports wildcards);
        use `_all` for all indices
    :arg name: A comma-separated list of aliases to delete (supports
        wildcards); use `_all` to delete all aliases for the specified
        indices.
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit timeout for the operation
    """
    # Both path components are required here, unlike get/exists_alias.
    if any(arg in SKIP_IN_PATH for arg in (index, name)):
        raise ValueError("Empty value passed for a required argument.")
    url = _make_path(index, '_alias', name)
    return self.transport.perform_request('DELETE', url, params=params)
@query_params('create', 'flat_settings', 'master_timeout', 'order',
    'timeout')
def put_template(self, name, body, params=None):
    """
    Store an index template to be applied automatically to newly
    created indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_

    :arg name: The name of the template
    :arg body: The template definition
    :arg create: Whether the index template should only be added if new or
        can also replace an existing one, default False
    :arg flat_settings: Return settings in flat format (default: false)
    :arg master_timeout: Specify timeout for connection to master
    :arg order: The order for this template when merging multiple matching
        ones (higher numbers are merged later, overriding the lower numbers)
    :arg timeout: Explicit operation timeout
    """
    if any(arg in SKIP_IN_PATH for arg in (name, body)):
        raise ValueError("Empty value passed for a required argument.")
    url = _make_path('_template', name)
    return self.transport.perform_request('PUT', url, params=params, body=body)
@query_params('local', 'master_timeout')
def exists_template(self, name, params=None):
    """
    Report whether the given index template exists, as a boolean.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_

    :arg name: The name of the template
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    :arg master_timeout: Explicit operation timeout for connection to master
        node
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    url = _make_path('_template', name)
    return self.transport.perform_request('HEAD', url, params=params)
@query_params('flat_settings', 'local', 'master_timeout')
def get_template(self, name=None, params=None):
    """
    Fetch an index template by name (or all templates when omitted).
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_

    :arg name: The name of the template
    :arg flat_settings: Return settings in flat format (default: false)
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    :arg master_timeout: Explicit operation timeout for connection to master
        node
    """
    url = _make_path('_template', name)
    return self.transport.perform_request('GET', url, params=params)
@query_params('master_timeout', 'timeout')
def delete_template(self, name, params=None):
    """
    Remove an index template by name.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_

    :arg name: The name of the template
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    url = _make_path('_template', name)
    return self.transport.perform_request('DELETE', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'flat_settings',
    'human', 'ignore_unavailable', 'include_defaults', 'local')
def get_settings(self, index=None, name=None, params=None):
    """
    Fetch settings for one, several, or all indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg name: The name of the settings that should be included
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default ['open', 'closed'],
        valid choices are: 'open', 'closed', 'none', 'all'
    :arg flat_settings: Return settings in flat format (default: false)
    :arg human: Whether to return version and creation date values in human-
        readable format., default False
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg include_defaults: Whether to return all default setting for each of
        the indices., default False
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    url = _make_path(index, '_settings', name)
    return self.transport.perform_request('GET', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'flat_settings',
    'ignore_unavailable', 'master_timeout', 'preserve_existing')
def put_settings(self, body, index=None, params=None):
    """
    Update index-level settings in real time.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html>`_

    :arg body: The index settings to be updated
    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg flat_settings: Return settings in flat format (default: false)
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg master_timeout: Specify timeout for connection to master
    :arg preserve_existing: Whether to update existing settings. If set to
        `true` existing settings on an index remain unchanged, the default
        is `false`
    """
    if body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'body'.")
    return self.transport.perform_request(
        'PUT', _make_path(index, '_settings'), params=params, body=body)
@query_params('completion_fields', 'fielddata_fields', 'fields', 'groups',
    'human', 'level', 'types')
def stats(self, index=None, metric=None, params=None):
    """
    Fetch statistics about operations happening on one or more indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg metric: Limit the information returned the specific metrics.
    :arg completion_fields: A comma-separated list of fields for `fielddata`
        and `suggest` index metric (supports wildcards)
    :arg fielddata_fields: A comma-separated list of fields for `fielddata`
        index metric (supports wildcards)
    :arg fields: A comma-separated list of fields for `fielddata` and
        `completion` index metric (supports wildcards)
    :arg groups: A comma-separated list of search groups for `search` index
        metric
    :arg human: Whether to return time and byte values in human-readable
        format., default False
    :arg level: Return stats aggregated at cluster, index or shard level,
        default 'indices', valid choices are: 'cluster', 'indices', 'shards'
    :arg types: A comma-separated list of document types for the `indexing`
        index metric
    """
    url = _make_path(index, '_stats', metric)
    return self.transport.perform_request('GET', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'human',
    'ignore_unavailable', 'operation_threading', 'verbose')
def segments(self, index=None, params=None):
    """
    Fetch low-level information about the Lucene segments backing each
    shard of the given indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg human: Whether to return time and byte values in human-readable
        format., default False
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg operation_threading: TODO: ?
    :arg verbose: Includes detailed memory usage by Lucene., default False
    """
    url = _make_path(index, '_segments')
    return self.transport.perform_request('GET', url, params=params)
@query_params('allow_no_indices', 'analyze_wildcard', 'analyzer',
    'default_operator', 'df', 'expand_wildcards', 'explain',
    'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
    'operation_threading', 'q', 'rewrite')
def validate_query(self, index=None, doc_type=None, body=None, params=None):
    """
    Check whether a (potentially expensive) query is valid without
    actually executing it.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-validate.html>`_

    :arg index: A comma-separated list of index names to restrict the
        operation; use `_all` or empty string to perform the operation on
        all indices
    :arg doc_type: A comma-separated list of document types to restrict the
        operation; leave empty to perform the operation on all types
    :arg body: The query definition specified with the Query DSL
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg analyze_wildcard: Specify whether wildcard and prefix queries
        should be analyzed (default: false)
    :arg analyzer: The analyzer to use for the query string
    :arg default_operator: The default operator for query string query (AND
        or OR), default 'OR', valid choices are: 'AND', 'OR'
    :arg df: The field to use as default where no field prefix is given in
        the query string
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg explain: Return detailed information about the error
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg lenient: Specify whether format-based query failures (such as
        providing text to a numeric field) should be ignored
    :arg lowercase_expanded_terms: Specify whether query terms should be
        lowercased
    :arg operation_threading: TODO: ?
    :arg q: Query in the Lucene query string syntax
    :arg rewrite: Provide a more detailed explanation showing the actual
        Lucene query that will be executed.
    """
    url = _make_path(index, doc_type, '_validate', 'query')
    return self.transport.perform_request('GET', url, params=params, body=body)
@query_params('allow_no_indices', 'expand_wildcards', 'field_data',
    'fielddata', 'fields', 'ignore_unavailable', 'query', 'recycler',
    'request')
def clear_cache(self, index=None, params=None):
    """
    Clear all caches, or specific caches, associated with one or more
    indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html>`_

    :arg index: A comma-separated list of index name to limit the operation
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg field_data: Clear field data
    :arg fielddata: Clear field data
    :arg fields: A comma-separated list of fields to clear when using the
        `field_data` parameter (default: all)
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg query: Clear query caches
    :arg recycler: Clear the recycler cache
    :arg request: Clear request cache
    """
    url = _make_path(index, '_cache', 'clear')
    return self.transport.perform_request('POST', url, params=params)
@query_params('active_only', 'detailed', 'human')
def recovery(self, index=None, params=None):
    """
    Report on on-going shard recoveries, either for specific indices or
    cluster-wide.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg active_only: Display only those recoveries that are currently on-
        going, default False
    :arg detailed: Whether to display detailed information about shard
        recovery, default False
    :arg human: Whether to return time and byte values in human-readable
        format., default False
    """
    url = _make_path(index, '_recovery')
    return self.transport.perform_request('GET', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'only_ancient_segments', 'wait_for_completion')
def upgrade(self, index=None, params=None):
    """
    Upgrade one or more indices to the latest on-disk format.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-upgrade.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg only_ancient_segments: If true, only ancient (an older Lucene major
        release) segments will be upgraded
    :arg wait_for_completion: Specify whether the request should block until
        the all segments are upgraded (default: false)
    """
    url = _make_path(index, '_upgrade')
    return self.transport.perform_request('POST', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'human',
    'ignore_unavailable')
def get_upgrade(self, index=None, params=None):
    """
    Report how much of one or more indices has been upgraded.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-upgrade.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg human: Whether to return time and byte values in human-readable
        format., default False
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    """
    # Same endpoint as `upgrade`, but GET only reports status.
    url = _make_path(index, '_upgrade')
    return self.transport.perform_request('GET', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable')
def flush_synced(self, index=None, params=None):
    """
    Run a normal flush, then stamp every shard with a generated unique
    marker (sync_id).
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string for all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    """
    url = _make_path(index, '_flush', 'synced')
    return self.transport.perform_request('POST', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
    'operation_threading', 'status')
def shard_stores(self, index=None, params=None):
    """
    Report store information for shard copies of indices: which nodes
    hold copies, how recent each copy is, and any exceptions hit while
    opening the shard index or from an earlier engine failure.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg operation_threading: TODO: ?
    :arg status: A comma-separated list of statuses used to filter on shards
        to get store information for, valid choices are: 'green', 'yellow',
        'red', 'all'
    """
    url = _make_path(index, '_shard_stores')
    return self.transport.perform_request('GET', url, params=params)
@query_params('allow_no_indices', 'expand_wildcards', 'flush',
    'ignore_unavailable', 'max_num_segments', 'only_expunge_deletes',
    'operation_threading', 'wait_for_merge')
def forcemerge(self, index=None, params=None):
    """
    Force a merge of one or more indices, reducing the number of Lucene
    segments held by each shard.

    The call blocks until the merge is complete. If the HTTP connection
    is lost the request continues in the background, and any new request
    blocks until the previous force merge finishes.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg flush: Specify whether the index should be flushed after performing
        the operation (default: true)
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg max_num_segments: The number of segments the index should be merged
        into (default: dynamic)
    :arg only_expunge_deletes: Specify whether the operation should only
        expunge deleted documents
    :arg operation_threading: TODO: ?
    :arg wait_for_merge: Specify whether the request should block until the
        merge process is finished (default: true)
    """
    url = _make_path(index, '_forcemerge')
    return self.transport.perform_request('POST', url, params=params)
@query_params('master_timeout', 'timeout', 'wait_for_active_shards')
def shrink(self, index, target, body=None, params=None):
    """
    Shrink an existing index into a new index with fewer primary shards.

    The number of primary shards in the target index must be a factor of
    the shards in the source index: 8 primaries can shrink to 4, 2 or 1;
    15 can shrink to 5, 3 or 1. A prime shard count can only shrink to a
    single primary shard. Before shrinking, a (primary or replica) copy
    of every shard in the index must be present on the same node.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_

    :arg index: The name of the source index to shrink
    :arg target: The name of the target index to shrink into
    :arg body: The configuration for the target index (`settings` and
        `aliases`)
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    :arg wait_for_active_shards: Set the number of active shards to wait for
        on the shrunken index before the operation returns.
    """
    # Source and target names are both mandatory path segments.
    if any(arg in SKIP_IN_PATH for arg in (index, target)):
        raise ValueError("Empty value passed for a required argument.")
    url = _make_path(index, '_shrink', target)
    return self.transport.perform_request('PUT', url, params=params, body=body)
@query_params('master_timeout', 'timeout', 'wait_for_active_shards')
def rollover(self, alias, new_index=None, body=None, params=None):
    """
    Roll an alias over to a new index once the current index is
    considered too large or too old.

    Accepts a single alias name plus a list of conditions; the alias
    must point to exactly one index. When the index satisfies the
    conditions, a new index is created and the alias is switched to
    point at it.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html>`_

    :arg alias: The name of the alias to rollover
    :arg new_index: The name of the rollover index
    :arg body: The conditions that needs to be met for executing rollover
    :arg master_timeout: Specify timeout for connection to master
    :arg timeout: Explicit operation timeout
    :arg wait_for_active_shards: Set the number of active shards to wait for
        on the newly created rollover index before the operation returns.
    """
    if alias in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'alias'.")
    url = _make_path(alias, '_rollover', new_index)
    return self.transport.perform_request('POST', url, params=params, body=body)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package convert
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/hashicorp/terraform/internal/providers"
proto "github.com/hashicorp/terraform/internal/tfplugin6"
"github.com/zclconf/go-cty/cty"
)
// Shared go-cmp options used by the schema round-trip tests in this file.
var (
// equateEmpty makes cmp treat nil and empty slices/maps as equal.
equateEmpty = cmpopts.EquateEmpty()
// typeComparer compares cty types via cty.Type.Equals.
typeComparer = cmp.Comparer(cty.Type.Equals)
// valueComparer compares cty values via cty.Value.RawEquals.
valueComparer = cmp.Comparer(cty.Value.RawEquals)
)
// Test that we can convert a protobuf schema block into the equivalent
// configschema representation.
func TestConvertSchemaBlocks(t *testing.T) {
	tests := map[string]struct {
		Block *proto.Schema_Block // protobuf schema to convert
		Want  *configschema.Block // expected result of the conversion
	}{
		// Attribute conversion, including nested (object) attribute types.
		"attributes": {
			&proto.Schema_Block{
				Attributes: []*proto.Schema_Attribute{
					{
						Name:     "computed",
						Type:     []byte(`["list","bool"]`),
						Computed: true,
					},
					{
						Name:     "optional",
						Type:     []byte(`"string"`),
						Optional: true,
					},
					{
						Name:     "optional_computed",
						Type:     []byte(`["map","bool"]`),
						Optional: true,
						Computed: true,
					},
					{
						Name:     "required",
						Type:     []byte(`"number"`),
						Required: true,
					},
					{
						Name: "nested_type",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_SINGLE,
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "computed",
									Type:     []byte(`["list","bool"]`),
									Computed: true,
								},
								{
									Name:     "optional",
									Type:     []byte(`"string"`),
									Optional: true,
								},
								{
									Name:     "optional_computed",
									Type:     []byte(`["map","bool"]`),
									Optional: true,
									Computed: true,
								},
								{
									Name:     "required",
									Type:     []byte(`"number"`),
									Required: true,
								},
							},
						},
						Required: true,
					},
					{
						Name: "deeply_nested_type",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_SINGLE,
							Attributes: []*proto.Schema_Attribute{
								{
									Name: "first_level",
									NestedType: &proto.Schema_Object{
										Nesting: proto.Schema_Object_SINGLE,
										Attributes: []*proto.Schema_Attribute{
											{
												Name:     "computed",
												Type:     []byte(`["list","bool"]`),
												Computed: true,
											},
											{
												Name:     "optional",
												Type:     []byte(`"string"`),
												Optional: true,
											},
											{
												Name:     "optional_computed",
												Type:     []byte(`["map","bool"]`),
												Optional: true,
												Computed: true,
											},
											{
												Name:     "required",
												Type:     []byte(`"number"`),
												Required: true,
											},
											{
												Name:      "write_only",
												Type:      []byte(`"string"`),
												Optional:  true,
												WriteOnly: true,
											},
										},
									},
									Computed: true,
								},
							},
						},
						Required: true,
					},
					{
						Name: "nested_list",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_LIST,
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "required",
									Type:     []byte(`"string"`),
									Computed: true,
								},
							},
						},
						Required: true,
					},
					{
						Name: "nested_set",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_SET,
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "required",
									Type:     []byte(`"string"`),
									Computed: true,
								},
							},
						},
						Required: true,
					},
					{
						Name: "nested_map",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_MAP,
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "required",
									Type:     []byte(`"string"`),
									Computed: true,
								},
							},
						},
						Required: true,
					},
				},
			},
			&configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"computed": {
						Type:     cty.List(cty.Bool),
						Computed: true,
					},
					"optional": {
						Type:     cty.String,
						Optional: true,
					},
					"optional_computed": {
						Type:     cty.Map(cty.Bool),
						Optional: true,
						Computed: true,
					},
					"required": {
						Type:     cty.Number,
						Required: true,
					},
					"nested_type": {
						NestedType: &configschema.Object{
							Attributes: map[string]*configschema.Attribute{
								"computed": {
									Type:     cty.List(cty.Bool),
									Computed: true,
								},
								"optional": {
									Type:     cty.String,
									Optional: true,
								},
								"optional_computed": {
									Type:     cty.Map(cty.Bool),
									Optional: true,
									Computed: true,
								},
								"required": {
									Type:     cty.Number,
									Required: true,
								},
							},
							Nesting: configschema.NestingSingle,
						},
						Required: true,
					},
					"deeply_nested_type": {
						NestedType: &configschema.Object{
							Attributes: map[string]*configschema.Attribute{
								"first_level": {
									NestedType: &configschema.Object{
										Nesting: configschema.NestingSingle,
										Attributes: map[string]*configschema.Attribute{
											"computed": {
												Type:     cty.List(cty.Bool),
												Computed: true,
											},
											"optional": {
												Type:     cty.String,
												Optional: true,
											},
											"optional_computed": {
												Type:     cty.Map(cty.Bool),
												Optional: true,
												Computed: true,
											},
											"required": {
												Type:     cty.Number,
												Required: true,
											},
											"write_only": {
												Type:      cty.String,
												Optional:  true,
												WriteOnly: true,
											},
										},
									},
									Computed: true,
								},
							},
							Nesting: configschema.NestingSingle,
						},
						Required: true,
					},
					"nested_list": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingList,
							Attributes: map[string]*configschema.Attribute{
								"required": {
									Type:     cty.String,
									Computed: true,
								},
							},
						},
						Required: true,
					},
					"nested_map": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingMap,
							Attributes: map[string]*configschema.Attribute{
								"required": {
									Type:     cty.String,
									Computed: true,
								},
							},
						},
						Required: true,
					},
					"nested_set": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingSet,
							Attributes: map[string]*configschema.Attribute{
								"required": {
									Type:     cty.String,
									Computed: true,
								},
							},
						},
						Required: true,
					},
				},
			},
		},
		// Nested block conversion for every nesting mode.
		"blocks": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "list",
						Nesting:  proto.Schema_NestedBlock_LIST,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "map",
						Nesting:  proto.Schema_NestedBlock_MAP,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "set",
						Nesting:  proto.Schema_NestedBlock_SET,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "single",
						Nesting:  proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "foo",
									Type:     []byte(`"dynamic"`),
									Required: true,
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list": &configschema.NestedBlock{
						Nesting: configschema.NestingList,
					},
					"map": &configschema.NestedBlock{
						Nesting: configschema.NestingMap,
					},
					"set": &configschema.NestedBlock{
						Nesting: configschema.NestingSet,
					},
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"foo": {
									Type:     cty.DynamicPseudoType,
									Required: true,
								},
							},
						},
					},
				},
			},
		},
		// Blocks nested inside blocks must convert recursively.
		"deep block nesting": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "single",
						Nesting:  proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							BlockTypes: []*proto.Schema_NestedBlock{
								{
									TypeName: "list",
									Nesting:  proto.Schema_NestedBlock_LIST,
									Block: &proto.Schema_Block{
										BlockTypes: []*proto.Schema_NestedBlock{
											{
												TypeName: "set",
												Nesting:  proto.Schema_NestedBlock_SET,
												Block:    &proto.Schema_Block{},
											},
										},
									},
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							BlockTypes: map[string]*configschema.NestedBlock{
								"list": &configschema.NestedBlock{
									Nesting: configschema.NestingList,
									Block: configschema.Block{
										BlockTypes: map[string]*configschema.NestedBlock{
											"set": &configschema.NestedBlock{
												Nesting: configschema.NestingSet,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			converted := ProtoToConfigSchema(tc.Block)
			if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) {
				t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty))
			}
		})
	}
}
// Test that we can convert a configschema block into the equivalent
// protobuf schema representation.
func TestConvertProtoSchemaBlocks(t *testing.T) {
	tests := map[string]struct {
		Want  *proto.Schema_Block // expected protobuf schema
		Block *configschema.Block // configschema input to convert
	}{
		// Attribute conversion, including nested (object) attribute types
		// and write-only attributes.
		"attributes": {
			&proto.Schema_Block{
				Attributes: []*proto.Schema_Attribute{
					{
						Name:     "computed",
						Type:     []byte(`["list","bool"]`),
						Computed: true,
					},
					{
						Name: "object",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_SINGLE,
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "optional",
									Type:     []byte(`"string"`),
									Optional: true,
								},
								{
									Name:      "write_only",
									Type:      []byte(`"string"`),
									Optional:  true,
									WriteOnly: true,
								},
							},
						},
					},
					{
						Name:     "optional",
						Type:     []byte(`"string"`),
						Optional: true,
					},
					{
						Name:     "optional_computed",
						Type:     []byte(`["map","bool"]`),
						Optional: true,
						Computed: true,
					},
					{
						Name:     "required",
						Type:     []byte(`"number"`),
						Required: true,
					},
					{
						Name:      "write_only",
						Type:      []byte(`"string"`),
						Optional:  true,
						WriteOnly: true,
					},
				},
			},
			&configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"computed": {
						Type:     cty.List(cty.Bool),
						Computed: true,
					},
					"object": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingSingle,
							Attributes: map[string]*configschema.Attribute{
								"optional": {
									Type:     cty.String,
									Optional: true,
								},
								"write_only": {
									Type:      cty.String,
									Optional:  true,
									WriteOnly: true,
								},
							},
						},
					},
					"optional": {
						Type:     cty.String,
						Optional: true,
					},
					"optional_computed": {
						Type:     cty.Map(cty.Bool),
						Optional: true,
						Computed: true,
					},
					"required": {
						Type:     cty.Number,
						Required: true,
					},
					"write_only": {
						Type:      cty.String,
						Optional:  true,
						WriteOnly: true,
					},
				},
			},
		},
		// Nested block conversion for every nesting mode.
		"blocks": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "list",
						Nesting:  proto.Schema_NestedBlock_LIST,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "map",
						Nesting:  proto.Schema_NestedBlock_MAP,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "set",
						Nesting:  proto.Schema_NestedBlock_SET,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "single",
						Nesting:  proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "foo",
									Type:     []byte(`"dynamic"`),
									Required: true,
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list": &configschema.NestedBlock{
						Nesting: configschema.NestingList,
					},
					"map": &configschema.NestedBlock{
						Nesting: configschema.NestingMap,
					},
					"set": &configschema.NestedBlock{
						Nesting: configschema.NestingSet,
					},
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"foo": {
									Type:     cty.DynamicPseudoType,
									Required: true,
								},
							},
						},
					},
				},
			},
		},
		// Blocks nested inside blocks must convert recursively.
		"deep block nesting": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "single",
						Nesting:  proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							BlockTypes: []*proto.Schema_NestedBlock{
								{
									TypeName: "list",
									Nesting:  proto.Schema_NestedBlock_LIST,
									Block: &proto.Schema_Block{
										BlockTypes: []*proto.Schema_NestedBlock{
											{
												TypeName: "set",
												Nesting:  proto.Schema_NestedBlock_SET,
												Block:    &proto.Schema_Block{},
											},
										},
									},
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							BlockTypes: map[string]*configschema.NestedBlock{
								"list": &configschema.NestedBlock{
									Nesting: configschema.NestingList,
									Block: configschema.Block{
										BlockTypes: map[string]*configschema.NestedBlock{
											"set": &configschema.NestedBlock{
												Nesting: configschema.NestingSet,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			converted := ConfigSchemaToProto(tc.Block)
			if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) {
				t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported))
			}
		})
	}
}
// Test that protobuf resource identity attributes convert into the
// equivalent configschema object. RequiredForImport maps to Required.
func TestProtoToResourceIdentitySchema(t *testing.T) {
	tests := map[string]struct {
		Attributes []*proto.ResourceIdentitySchema_IdentityAttribute // protobuf input
		Want       *configschema.Object                              // expected conversion
	}{
		"simple": {
			[]*proto.ResourceIdentitySchema_IdentityAttribute{
				{
					Name:              "id",
					Type:              []byte(`"string"`),
					RequiredForImport: true,
					OptionalForImport: false,
					Description:       "Something",
				},
			},
			&configschema.Object{
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:        cty.String,
						Description: "Something",
						Required:    true,
					},
				},
				Nesting: configschema.NestingSingle,
			},
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			converted := ProtoToIdentitySchema(tc.Attributes)
			if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) {
				t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty))
			}
		})
	}
}
// Test that a providers.IdentitySchema converts into the equivalent
// protobuf resource identity schema. Optional maps to OptionalForImport
// and Required maps to RequiredForImport.
func TestResourceIdentitySchemaToProto(t *testing.T) {
	tests := map[string]struct {
		Want   *proto.ResourceIdentitySchema // expected protobuf schema
		Schema providers.IdentitySchema      // input to convert
	}{
		"attributes": {
			&proto.ResourceIdentitySchema{
				Version: 1,
				IdentityAttributes: []*proto.ResourceIdentitySchema_IdentityAttribute{
					{
						Name:              "optional",
						Type:              []byte(`"string"`),
						OptionalForImport: true,
					},
					{
						Name:              "required",
						Type:              []byte(`"number"`),
						RequiredForImport: true,
					},
				},
			},
			providers.IdentitySchema{
				Version: 1,
				Body: &configschema.Object{
					Attributes: map[string]*configschema.Attribute{
						"optional": {
							Type:     cty.String,
							Optional: true,
						},
						"required": {
							Type:     cty.Number,
							Required: true,
						},
					},
				},
			},
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			converted := ResourceIdentitySchemaToProto(tc.Schema)
			if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) {
				t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported))
			}
		})
	}
}
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""GnomeVFS backend for Virtual File System.
"""
import os
from twisted.internet.defer import succeed
from twisted.spread.flavors import Copyable, RemoteCopy
from twisted.spread.jelly import setUnjellyableForClass
from zope.interface import implements
from flumotion.common import log
from flumotion.common.errors import AccessDeniedError
from flumotion.common.interfaces import IDirectory, IFile
# gnomevfs is only imported inside nested scopes so that
# pychecker can ignore them, If pychecker ever gets fixed,
# move it back where it belongs
__pychecker__ = 'keepgoing'
class GnomeVFSFile(Copyable, RemoteCopy):
    """GnomeVFS-backed file object implementing L{IFile}.

    Instances are copyable over Twisted's perspective broker; the jelly
    registration is done by registerGnomeVFSJelly below.
    """
    implements(IFile)

    def __init__(self, parent, fileInfo):
        # State that travels over the wire: parent dir, name, icon hints.
        self.iconNames = ['gnome-fs-regular']
        self.filename = fileInfo.name
        self.parent = parent

    # IFile

    def getPath(self):
        """Return the full path of this file."""
        return os.path.join(self.parent, self.filename)
class GnomeVFSDirectory(Copyable, RemoteCopy):
    """I am object implementing L{IDirectory} on top of GnomeVFS,
    see L{IDirectory} for more information.
    """
    implements(IDirectory)

    def __init__(self, path, name=None):
        # gnomevfs is imported locally so pychecker can ignore it
        # (see the module-level note above).
        import gnomevfs
        # Fall back to the filesystem root when the path does not exist.
        if not os.path.exists(path):
            self.path = '/'
        else:
            self.path = os.path.abspath(path)
        if name is None:
            # Derive the display name from the file info when not given.
            fileInfo = gnomevfs.get_file_info(self.path)
            name = fileInfo.name
        self.filename = name
        self.iconNames = ['gnome-fs-directory']
        # Populated by cacheFiles(); None until then.
        self._cachedFiles = None

    # IFile

    def getPath(self):
        return self.path

    # IDirectory

    def getFiles(self):
        # Returns whatever cacheFiles() stored; call cacheFiles() first
        # on the worker side or this will fire with None.
        return succeed(self._cachedFiles)

    def cacheFiles(self):
        """
        Fetch and cache the contents of this directory.

        Must be called on the worker side (where GnomeVFS can actually
        see the filesystem) before getFiles() is used; otherwise the
        cached listing will not reflect the worker's files.

        @raise AccessDeniedError: if the directory cannot be opened.
        """
        import gnomevfs
        log.debug('vfsgnome', 'getting files for %s' % (self.path, ))
        retval = []
        try:
            fileInfos = gnomevfs.open_directory(self.path)
        except gnomevfs.AccessDeniedError:
            # Translate the GnomeVFS error into Flumotion's own error type.
            raise AccessDeniedError
        # Offer a '..' entry so callers can navigate upwards, except at root.
        if self.path != '/':
            retval.append(GnomeVFSDirectory(os.path.dirname(self.path),
                                            name='..'))
        for fileInfo in fileInfos:
            filename = fileInfo.name
            # Skip hidden files and directories.
            if filename.startswith('.'):
                continue
            if fileInfo.type == gnomevfs.FILE_TYPE_DIRECTORY:
                obj = GnomeVFSDirectory(os.path.join(self.path,
                                                     fileInfo.name))
            else:
                obj = GnomeVFSFile(self.path, fileInfo)
            retval.append(obj)
        log.log('vfsgnome', 'returning %r' % (retval, ))
        self._cachedFiles = retval
def registerGnomeVFSJelly():
    """Tell Twisted's jelly layer how to unserialize the GnomeVFS
    VFS classes received over the wire.
    """
    for klass in (GnomeVFSFile, GnomeVFSDirectory):
        setUnjellyableForClass(klass, klass)
    log.info('jelly', 'GnomeVFS registered')
# -*- coding: utf-8 -*-
"""
S3 Shapefile codec
@copyright: 2013-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SHP",)
import os
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.contenttype import contenttype
from gluon.storage import Storage
from gluon.streamer import DEFAULT_CHUNK_SIZE
from ..s3codec import S3Codec
from ..s3utils import s3_unicode, s3_strip_markup
# =============================================================================
class S3SHP(S3Codec):
    """
        Simple Shapefile format codec

        Encodes a resource as a zipped set of ESRI Shapefiles (separate
        point, line and polygon layers) by writing a CSV + OGR VRT pair
        and shelling out to the ogr2ogr command-line tool.
    """

    # -------------------------------------------------------------------------
    def __init__(self):
        """
            Constructor
        """

        pass

    # -------------------------------------------------------------------------
    def extractResource(self, resource, list_fields):
        """
            Extract the items from the resource

            @param resource: the resource
            @param list_fields: fields to include in list views

            @return: tuple (title, types, colnames, heading, items)
        """

        title = self.crud_string(resource.tablename, "title_list")

        get_vars = Storage(current.request.get_vars)
        get_vars["iColumns"] = len(list_fields)
        # Apply any datatable filter from the request before selecting
        query, orderby, left = resource.datatable_filter(list_fields, get_vars)
        resource.add_filter(query)

        data = resource.select(list_fields,
                               left=left,
                               limit=None,
                               orderby=orderby,
                               represent=True,
                               show_links=False)

        rfields = data["rfields"]
        types = []
        colnames = []
        heading = {}
        for rfield in rfields:
            if rfield.show:
                colnames.append(rfield.colname)
                heading[rfield.colname] = rfield.label
                # Virtual fields have no DB type - treat them as strings
                if rfield.virtual:
                    types.append("string")
                else:
                    types.append(rfield.ftype)

        items = data["rows"]

        return (title, types, colnames, heading, items)

    # -------------------------------------------------------------------------
    def encode(self, data_source, **attr):
        """
            Export data as a Shapefile

            @param data_source: the source of the data that is to be encoded
                                as a shapefile. This may be:
                                resource: the resource
                                item: a list of pre-fetched values
                                      the headings are in the first row
                                      the data types are in the second row
            @param attr: dictionary of parameters:
                 * title: The export filename
                 * list_fields: Fields to include in list views

            @return: a zipped stream of the generated shapefiles
        """

        # Get the attributes
        title = attr.get("title")

        # Extract the data from the data_source
        # NOTE(review): in the pre-fetched (list/tuple) branch, lfields and
        # types are never assigned, so that path would raise NameError
        # below -- confirm whether it is ever exercised.
        if isinstance(data_source, (list, tuple)):
            headers = data_source[0]
            #types = data_source[1]
            items = data_source[2:]
        else:
            # Disable the WKT representer so we get raw geometry strings
            current.s3db.gis_location.wkt.represent = None
            list_fields = attr.get("list_fields")
            if not list_fields:
                list_fields = data_source.list_fields()
            # Ensure the WKT geometry column is part of the extract
            if data_source.tablename == "gis_location":
                wkt_field = "wkt"
            else:
                wkt_field = "location_id$wkt"
            if wkt_field not in list_fields:
                list_fields.append(wkt_field)
            (_title, types, lfields, headers, items) = self.extractResource(data_source,
                                                                            list_fields)
            if not title:
                title = _title

        # Create the data structure
        output = []
        oappend = output.append

        # Header row
        headers["gis_location.wkt"] = "WKT"
        fields = []
        fappend = fields.append
        header = []
        happend = header.append
        for selector in lfields:
            # Column labels become attribute names: no spaces allowed
            h = s3_unicode(headers[selector].replace(" ", "_"))
            happend(h)
            if selector != "gis_location.wkt":
                # Don't include the WKT field as an Attribute in the Shapefile
                fappend(h)
        oappend('"%s"' % '","'.join(header))
        fields = ",".join(fields)

        # Data rows (all values quoted, markup stripped)
        for item in items:
            row = []
            rappend = row.append
            for selector in lfields:
                represent = s3_strip_markup(s3_unicode(item[selector]))
                rappend(represent)
            oappend('"%s"' % '","'.join(row))

        # Write out as CSV
        import tempfile
        web2py_path = os.getcwd()
        if os.path.exists(os.path.join(web2py_path, "temp")): # use web2py/temp
            TEMP = os.path.join(web2py_path, "temp")
        else:
            TEMP = tempfile.gettempdir()
        os_handle_temp, temp_filepath = tempfile.mkstemp(dir=TEMP, suffix=".csv")
        with open(temp_filepath, "w") as f:
            for line in output:
                f.write("%s\n" % line.encode("utf-8"))

        # Convert to Shapefile
        # @ToDo: migrate to GDAL Python bindings
        # Write out VRT file describing the CSV layer for OGR
        temp_filename = temp_filepath.rsplit(os.path.sep, 1)[1]
        vrt = \
'''<OGRVRTDataSource>
    <OGRVRTLayer name="%s">
        <SrcDataSource>%s</SrcDataSource>
        <GeometryType>wkbGeometryCollection</GeometryType>
        <TargetSRS>EPSG:4326</TargetSRS>
        <GeometryField encoding="WKT" field="WKT"/>
    </OGRVRTLayer>
</OGRVRTDataSource>''' % (temp_filename.rsplit(".", 1)[0], temp_filename)
        os_handle_vrt, vrt_filename = tempfile.mkstemp(dir=TEMP, suffix=".vrt")
        with open(vrt_filename, "w") as f:
            f.write(vrt)
        # @ToDo: Check that the data exists before writing out file
        # NOTE(review): title and fields are interpolated into a shell
        # command without quoting/escaping - a title containing spaces or
        # shell metacharacters would break (or abuse) the command; confirm
        # that upstream sanitizes these values.
        # Write Points
        os.chdir(TEMP)
        # Use + not %s as % within string
        cmd = 'ogr2ogr -a_srs "EPSG:4326" -f "ESRI Shapefile" ' + title + '_point.shp ' + vrt_filename + ' -select ' + fields + ' -skipfailures -nlt POINT -where "WKT LIKE \'%POINT%\'"'
        #os.system("rm %s_point.*" % title)
        os.system(cmd)
        # Write Lines
        cmd = 'ogr2ogr -a_srs "EPSG:4326" -f "ESRI Shapefile" ' + title + '_line.shp ' + vrt_filename + ' -select ' + fields + ' -skipfailures -nlt MULTILINESTRING -where "WKT LIKE \'%LINESTRING%\'"'
        #os.system("rm %s_line.*" % title)
        os.system(cmd)
        # Write Polygons
        cmd = 'ogr2ogr -a_srs "EPSG:4326" -f "ESRI Shapefile" ' + title + '_polygon.shp ' + vrt_filename + ' -select ' + fields + ' -skipfailures -nlt MULTIPOLYGON -where "WKT LIKE \'%POLYGON%\'"'
        #os.system("rm %s_polygon.*" % title)
        os.system(cmd)
        # Clean up the intermediate CSV/VRT files
        os.close(os_handle_temp)
        os.unlink(temp_filepath)
        os.close(os_handle_vrt)
        os.unlink(vrt_filename)
        # Zip up the generated shapefile components
        import zipfile
        request = current.request
        filename = "%s_%s.zip" % (request.env.server_name, title)
        fzip = zipfile.ZipFile(filename, "w")
        for item in ("point", "line", "polygon"):
            for exten in ("shp", "shx", "prj", "dbf"):
                tfilename = "%s_%s.%s" % (title, item, exten)
                fzip.write(tfilename)
                os.unlink(tfilename)
        fzip.close()
        # Restore path
        os.chdir(web2py_path)

        # Response headers
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".zip")
        response.headers["Content-disposition"] = disposition

        stream = open(os.path.join(TEMP, filename), "rb")
        return response.stream(stream, chunk_size=DEFAULT_CHUNK_SIZE,
                               request=request)

    # -------------------------------------------------------------------------
    def decode(self, resource, source, **attr):
        """
            Import data from a Shapefile

            @param resource: the S3Resource
            @param source: the source

            @return: an S3XML ElementTree

            @ToDo: Handle encodings within Shapefiles other than UTF-8
        """

        # @ToDo: Complete this!
        # Sample code coming from this working script:
        # http://eden.sahanafoundation.org/wiki/BluePrint/GIS/ShapefileLayers#ImportintonativeTables
        # We also have sample code to read SHP from GDAL in:
        # gis_layer_shapefile_onaccept() & import_admin_areas() [GADM]
        # NOTE: everything below the raise is unreachable sample code kept
        # as a reference for the eventual implementation.
        raise NotImplementedError

        try:
            from lxml import etree
        except ImportError:
            import sys
            print >> sys.stderr, "ERROR: lxml module needed for XML handling"
            raise
        try:
            from osgeo import ogr
        except ImportError:
            import sys
            print >> sys.stderr, "ERROR: GDAL module needed for Shapefile handling"
            raise

        # @ToDo: Check how this would happen
        shapefilename = source
        layername = os.path.splitext(os.path.basename(shapefilename))[0]

        # Create the datasource
        ds = ogr.Open(shapefilename)

        # Open the shapefile
        if ds is None:
            # @ToDo: Bail gracefully
            raise

        # Get the layer and iterate through the features
        lyr = ds.GetLayer(0)

        root = etree.Element("shapefile", name=layername)

        OFTInteger = ogr.OFTInteger
        OFTReal = ogr.OFTReal
        OFTString = ogr.OFTString
        for feat in lyr:
            featurenode = etree.SubElement(root, "feature")
            feat_defn = lyr.GetLayerDefn()
            GetFieldDefn = feat_defn.GetFieldDefn
            for i in range(feat_defn.GetFieldCount()):
                field_defn = GetFieldDefn(i)
                fieldnode = etree.SubElement(featurenode, field_defn.GetName())
                if field_defn.GetType() == OFTInteger:
                    fieldnode.text = str(feat.GetFieldAsInteger(i))
                elif field_defn.GetType() == OFTReal:
                    fieldnode.text = str(feat.GetFieldAsDouble(i))
                elif field_defn.GetType() == OFTString:
                    FieldString = str(feat.GetFieldAsString(i))
                    # @ToDo: Don't assume UTF-8
                    fieldnode.text = FieldString.decode(encoding="UTF-8",
                                                        errors="strict")
            wktnode = etree.SubElement(featurenode, "wkt")
            geom = feat.GetGeometryRef()
            wktnode.text = geom.ExportToWkt()

        # @ToDo: Convert using XSLT

        # Debug: Write out the etree
        #xmlString = etree.tostring(root, pretty_print=True)
        #f = open("test.xml","w")
        #f.write(xmlString)

        return root
# End ========================================================================= | unknown | codeparrot/codeparrot-clean | ||
#
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
import re
import socket
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.iosxr.providers.providers import CliProvider
class Neighbors(CliProvider):
    """Render ``router bgp`` neighbor configuration commands for IOS-XR.

    Each entry under ``config.neighbors`` becomes a ``neighbor`` context
    (for IPv4 addresses) or a ``neighbor-group`` context (for names),
    populated by the matching ``_render_<option>`` method for every
    non-None option on the entry.
    """

    def render(self, config=None):
        """Generate the list of CLI commands for all configured neighbors.

        :param config: existing device configuration text; used both to
            suppress commands that are already present and to negate
            neighbors no longer in the desired configuration
        :returns: list of CLI command strings
        """
        commands = list()
        safe_list = list()

        router_context = 'router bgp %s' % self.get_value('config.bgp_as')
        context_config = None

        for item in self.get_value('config.neighbors'):
            context_commands = list()

            neighbor = item['neighbor']
            # A value that parses as an IPv4 address is a real neighbor;
            # anything else is treated as a neighbor-group name.
            try:
                socket.inet_aton(neighbor)
                context = 'neighbor %s' % neighbor
            except socket.error:
                context = 'neighbor-group %s' % neighbor

            if config:
                context_path = [router_context, context]
                context_config = self.get_config_context(config, context_path, indent=1)

            # Dispatch each configured option to its _render_* handler.
            for key, value in iteritems(item):
                if value is not None:
                    meth = getattr(self, '_render_%s' % key, None)
                    if meth:
                        resp = meth(item, context_config)
                        if resp:
                            context_commands.extend(to_list(resp))

            if context_commands:
                commands.append(context)
                commands.extend(context_commands)
                commands.append('exit')
            # Track every desired context so it is not negated below.
            safe_list.append(context)

        if config and safe_list:
            commands.extend(self._negate_config(config, safe_list))

        return commands

    def _negate_config(self, config, safe_list=None):
        """Return ``no neighbor ...`` commands for neighbors present on the
        device but absent from the desired configuration.

        NOTE(review): the pattern only matches plain ``neighbor`` contexts,
        so stale ``neighbor-group`` entries are never negated -- confirm
        this is intended.
        """
        commands = list()
        matches = re.findall(r'(neighbor \S+)', config, re.M)
        for item in set(matches).difference(safe_list):
            commands.append('no %s' % item)
        return commands

    # Each _render_* method returns its command only when it is not already
    # present in the current context config (idempotent behaviour).

    def _render_remote_as(self, item, config=None):
        cmd = 'remote-as %s' % item['remote_as']
        if not config or cmd not in config:
            return cmd

    def _render_description(self, item, config=None):
        cmd = 'description %s' % item['description']
        if not config or cmd not in config:
            return cmd

    def _render_enabled(self, item, config=None):
        cmd = 'shutdown'
        if item['enabled'] is True:
            cmd = 'no %s' % cmd
        if not config or cmd not in config:
            return cmd

    def _render_update_source(self, item, config=None):
        cmd = 'update-source %s' % item['update_source'].replace(' ', '')
        if not config or cmd not in config:
            return cmd

    def _render_password(self, item, config=None):
        cmd = 'password %s' % item['password']
        if not config or cmd not in config:
            return cmd

    def _render_ebgp_multihop(self, item, config=None):
        cmd = 'ebgp-multihop %s' % item['ebgp_multihop']
        if not config or cmd not in config:
            return cmd

    def _render_tcp_mss(self, item, config=None):
        cmd = 'tcp mss %s' % item['tcp_mss']
        if not config or cmd not in config:
            return cmd

    def _render_advertisement_interval(self, item, config=None):
        cmd = 'advertisement-interval %s' % item['advertisement_interval']
        if not config or cmd not in config:
            return cmd

    def _render_neighbor_group(self, item, config=None):
        cmd = 'use neighbor-group %s' % item['neighbor_group']
        if not config or cmd not in config:
            return cmd

    # Consistency fix: `config` now defaults to None like every other
    # _render_* method (callers always pass it, so behaviour is unchanged).
    def _render_timers(self, item, config=None):
        """Generate bgp timer related configuration.

        Requires both keepalive and holdtime; min_neighbor_holdtime is
        optional and appended when present.
        """
        keepalive = item['timers']['keepalive']
        holdtime = item['timers']['holdtime']
        min_neighbor_holdtime = item['timers']['min_neighbor_holdtime']
        if keepalive and holdtime:
            cmd = 'timers %s %s' % (keepalive, holdtime)
            if min_neighbor_holdtime:
                cmd += ' %s' % min_neighbor_holdtime
            if not config or cmd not in config:
                return cmd
        else:
            raise ValueError("required both options for timers: keepalive and holdtime")
"""
Module for formatting output data in Latex.
"""
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
caption: Optional[Union[str, Tuple[str, str]]]
) -> Tuple[str, str]:
"""Extract full and short captions from caption string/tuple.
Parameters
----------
caption : str or tuple, optional
Either table caption string or tuple (full_caption, short_caption).
If string is provided, then it is treated as table full caption,
while short_caption is considered an empty string.
Returns
-------
full_caption, short_caption : tuple
Tuple of full_caption, short_caption strings.
"""
if caption:
if isinstance(caption, str):
full_caption = caption
short_caption = ""
else:
try:
full_caption, short_caption = caption
except ValueError as err:
msg = "caption must be either a string or a tuple of two strings"
raise ValueError(msg) from err
else:
full_caption = ""
short_caption = ""
return full_caption, short_caption
class RowStringConverter(ABC):
r"""Converter for dataframe rows into LaTeX strings.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
multicolumn: bool, optional
Whether to use \multicolumn macro.
multicolumn_format: str, optional
Multicolumn format.
multirow: bool, optional
Whether to use \multirow macro.
"""
    def __init__(
        self,
        formatter: DataFrameFormatter,
        multicolumn: bool = False,
        multicolumn_format: Optional[str] = None,
        multirow: bool = False,
    ):
        """Store the options and precompute the string grid for the frame.

        See the class docstring for parameter descriptions.
        """
        self.fmt = formatter
        self.frame = self.fmt.frame
        self.multicolumn = multicolumn
        self.multicolumn_format = multicolumn_format
        self.multirow = multirow
        # Buffer of pending \cline positions, filled while formatting
        # multirow cells (presumably [row, col] pairs consumed by
        # _compose_cline -- TODO confirm against that method).
        self.clinebuf: List[List[int]] = []
        # strcols: list of columns, each a list of cell strings;
        # strrows: the same data transposed into per-row tuples.
        self.strcols = self._get_strcols()
        self.strrows = list(zip(*self.strcols))
    def get_strrow(self, row_num: int) -> str:
        """Return the LaTeX source line for row ``row_num``: the cells
        joined by ``&``, a trailing ``\\\\`` and any required ``\\cline``.
        """
        row = self.strrows[row_num]

        # Multicolumn formatting applies only to header rows (the rows
        # carrying column labels) and only when enabled.
        is_multicol = (
            row_num < self.column_levels and self.fmt.header and self.multicolumn
        )

        # Multirow formatting applies to body rows of a MultiIndex frame.
        is_multirow = (
            row_num >= self.header_levels
            and self.fmt.index
            and self.multirow
            and self.index_levels > 1
        )

        # \cline separators can only be needed between body rows, never
        # after the final row.
        is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1

        crow = self._preprocess_row(row)

        if is_multicol:
            crow = self._format_multicolumn(crow)
        if is_multirow:
            crow = self._format_multirow(crow, row_num)

        lst = []
        lst.append(" & ".join(crow))
        lst.append(" \\\\")
        if is_cline_maybe_required:
            cline = self._compose_cline(row_num, len(self.strcols))
            lst.append(cline)
        return "".join(lst)
@property
def _header_row_num(self) -> int:
"""Number of rows in header."""
return self.header_levels if self.fmt.header else 0
@property
def index_levels(self) -> int:
"""Integer number of levels in index."""
return self.frame.index.nlevels
@property
def column_levels(self) -> int:
return self.frame.columns.nlevels
@property
def header_levels(self) -> int:
nlevels = self.column_levels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
return nlevels
def _get_strcols(self) -> List[List[str]]:
"""String representation of the columns."""
if self.fmt.frame.empty:
strcols = [[self._empty_info_line]]
else:
strcols = self.fmt.get_strcols()
# reestablish the MultiIndex that has been joined by get_strcols()
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
gen = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in gen]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
return strcols
@property
def _empty_info_line(self):
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {self.frame.columns}\n"
f"Index: {self.frame.index}"
)
def _preprocess_row(self, row: Sequence[str]) -> List[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
crow = _escape_symbols(row)
else:
crow = [x if x else "{}" for x in row]
if self.fmt.bold_rows and self.fmt.index:
crow = _convert_to_bold(crow, self.index_levels)
return crow
def _format_multicolumn(self, row: List[str]) -> List[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = row[: self.index_levels]
ncol = 1
coltext = ""
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}"
f"{{{coltext.strip()}}}"
)
# don't modify where not needed
else:
row2.append(coltext)
for c in row[self.index_levels :]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row: List[str], i: int) -> List[str]:
r"""
Check following rows, whether row should be a multirow
e.g.: becomes:
a & 0 & \multirow{2}{*}{a} & 0 &
& 1 & & 1 &
b & 0 & \cline{1-2}
b & 0 &
"""
for j in range(self.index_levels):
if row[j].strip():
nrow = 1
for r in self.strrows[i + 1 :]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _compose_cline(self, i: int, icol: int) -> str:
"""
Create clines after multirow-blocks are finished.
"""
lst = []
for cl in self.clinebuf:
if cl[0] == i:
lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
return "".join(lst)
class RowStringIterator(RowStringConverter):
    """Iterator over rows of the header or the body of the table."""
    # Abstract base: concrete subclasses decide which row numbers to yield
    # (header rows vs body rows) while reusing RowStringConverter state.
    @abstractmethod
    def __iter__(self) -> Iterator[str]:
        """Iterate over LaTeX string representations of rows."""
class RowHeaderIterator(RowStringIterator):
    """Iterator for the table header rows."""
    def __iter__(self) -> Iterator[str]:
        """Yield rendered rows whose row number lies within the header."""
        header_count = self._header_row_num
        for num in range(len(self.strrows)):
            if num >= header_count:
                continue
            yield self.get_strrow(num)
class RowBodyIterator(RowStringIterator):
    """Iterator for the table body rows."""
    def __iter__(self) -> Iterator[str]:
        """Yield rendered rows whose row number lies past the header."""
        yield from (
            self.get_strrow(num)
            for num in range(len(self.strrows))
            if num >= self._header_row_num
        )
class TableBuilderAbstract(ABC):
    """
    Abstract table builder producing string representation of LaTeX table.
    Parameters
    ----------
    formatter : `DataFrameFormatter`
        Instance of `DataFrameFormatter`.
    column_format: str, optional
        Column format, for example, 'rcl' for three columns.
    multicolumn: bool, optional
        Use multicolumn to enhance MultiIndex columns.
    multicolumn_format: str, optional
        The alignment for multicolumns, similar to column_format.
    multirow: bool, optional
        Use multirow to enhance MultiIndex rows.
    caption: str, optional
        Table caption.
    short_caption: str, optional
        Table short caption.
    label: str, optional
        LaTeX label.
    position: str, optional
        Float placement specifier, for example, 'htb'.
    """
    def __init__(
        self,
        formatter: DataFrameFormatter,
        column_format: Optional[str] = None,
        multicolumn: bool = False,
        multicolumn_format: Optional[str] = None,
        multirow: bool = False,
        caption: Optional[str] = None,
        short_caption: Optional[str] = None,
        label: Optional[str] = None,
        position: Optional[str] = None,
    ):
        self.fmt = formatter
        self.column_format = column_format
        self.multicolumn = multicolumn
        self.multicolumn_format = multicolumn_format
        self.multirow = multirow
        self.caption = caption
        self.short_caption = short_caption
        self.label = label
        self.position = position
    def get_result(self) -> str:
        """String representation of LaTeX table."""
        # Assemble the table from its parts; empty parts are dropped so no
        # blank lines appear between sections.
        elements = [
            self.env_begin,
            self.top_separator,
            self.header,
            self.middle_separator,
            self.env_body,
            self.bottom_separator,
            self.env_end,
        ]
        result = "\n".join([item for item in elements if item])
        trailing_newline = "\n"
        result += trailing_newline
        return result
    @property
    @abstractmethod
    def env_begin(self) -> str:
        """Beginning of the environment."""
    @property
    @abstractmethod
    def top_separator(self) -> str:
        """Top level separator."""
    @property
    @abstractmethod
    def header(self) -> str:
        """Header lines."""
    @property
    @abstractmethod
    def middle_separator(self) -> str:
        """Middle level separator."""
    @property
    @abstractmethod
    def env_body(self) -> str:
        """Environment body."""
    @property
    @abstractmethod
    def bottom_separator(self) -> str:
        """Bottom level separator."""
    @property
    @abstractmethod
    def env_end(self) -> str:
        """End of the environment."""
class GenericTableBuilder(TableBuilderAbstract):
    """Table builder producing string representation of LaTeX table."""
    @property
    def header(self) -> str:
        """Header lines rendered through the header-row iterator."""
        rows = self._create_row_iterator(over="header")
        return "\n".join(rows)
    @property
    def top_separator(self) -> str:
        """Separator placed above the header."""
        return "\\toprule"
    @property
    def middle_separator(self) -> str:
        """Separator between header and body, emitted only when both exist."""
        if self._is_separator_required():
            return "\\midrule"
        return ""
    @property
    def env_body(self) -> str:
        """Body lines rendered through the body-row iterator."""
        rows = self._create_row_iterator(over="body")
        return "\n".join(rows)
    def _is_separator_required(self) -> bool:
        # A separator only makes sense when header and body are both non-empty.
        return bool(self.header) and bool(self.env_body)
    @property
    def _position_macro(self) -> str:
        r"""Position macro, extracted from self.position, like [h]."""
        if not self.position:
            return ""
        return f"[{self.position}]"
    @property
    def _caption_macro(self) -> str:
        r"""Caption macro, extracted from self.caption.
        With short caption:
            \caption[short_caption]{caption_string}.
        Without short caption:
            \caption{caption_string}.
        """
        if not self.caption:
            return ""
        short = f"[{self.short_caption}]" if self.short_caption else ""
        return f"\\caption{short}{{{self.caption}}}"
    @property
    def _label_macro(self) -> str:
        r"""Label macro, extracted from self.label, like \label{ref}."""
        if not self.label:
            return ""
        return f"\\label{{{self.label}}}"
    def _create_row_iterator(self, over: str) -> RowStringIterator:
        """Create iterator over header or body of the table.
        Parameters
        ----------
        over : {'body', 'header'}
            Over what to iterate.
        Returns
        -------
        RowStringIterator
            Iterator over body or header.
        """
        iterator_cls = self._select_iterator(over)
        return iterator_cls(
            formatter=self.fmt,
            multicolumn=self.multicolumn,
            multicolumn_format=self.multicolumn_format,
            multirow=self.multirow,
        )
    def _select_iterator(self, over: str) -> Type[RowStringIterator]:
        """Select proper iterator over table rows."""
        iterators = {"header": RowHeaderIterator, "body": RowBodyIterator}
        if over not in iterators:
            msg = f"'over' must be either 'header' or 'body', but {over} was provided"
            raise ValueError(msg)
        return iterators[over]
class LongTableBuilder(GenericTableBuilder):
    """Concrete table builder for longtable.
    >>> from pandas import DataFrame
    >>> from pandas.io.formats import format as fmt
    >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
    >>> formatter = fmt.DataFrameFormatter(df)
    >>> builder = LongTableBuilder(formatter, caption='a long table',
    ...                            label='tab:long', column_format='lrl')
    >>> table = builder.get_result()
    >>> print(table)
    \\begin{longtable}{lrl}
    \\caption{a long table}
    \\label{tab:long}\\\\
    \\toprule
    {} &  a &   b \\\\
    \\midrule
    \\endfirsthead
    \\caption[]{a long table} \\\\
    \\toprule
    {} &  a &   b \\\\
    \\midrule
    \\endhead
    \\midrule
    \\multicolumn{3}{r}{{Continued on next page}} \\\\
    \\midrule
    \\endfoot
    <BLANKLINE>
    \\bottomrule
    \\endlastfoot
    0 &  1 &  b1 \\\\
    1 &  2 &  b2 \\\\
    \\end{longtable}
    <BLANKLINE>
    """
    @property
    def env_begin(self) -> str:
        # \begin{longtable}[pos]{colspec} followed by caption/label (if any).
        first_row = (
            f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
        )
        elements = [first_row, f"{self._caption_and_label()}"]
        return "\n".join([item for item in elements if item])
    def _caption_and_label(self) -> str:
        # In longtable the caption/label belong inside the environment and
        # must be terminated by \\ to end the row they occupy.
        if self.caption or self.label:
            double_backslash = "\\\\"
            elements = [f"{self._caption_macro}", f"{self._label_macro}"]
            caption_and_label = "\n".join([item for item in elements if item])
            caption_and_label += double_backslash
            return caption_and_label
        else:
            return ""
    @property
    def middle_separator(self) -> str:
        iterator = self._create_row_iterator(over="header")
        # the content between \endfirsthead and \endhead commands
        # mitigates repeated List of Tables entries in the final LaTeX
        # document when dealing with longtable environments; GH #34360
        elements = [
            "\\midrule",
            "\\endfirsthead",
            f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
            self.top_separator,
            self.header,
            "\\midrule",
            "\\endhead",
            "\\midrule",
            f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}"
            "{{Continued on next page}} \\\\",
            "\\midrule",
            "\\endfoot\n",
            "\\bottomrule",
            "\\endlastfoot",
        ]
        if self._is_separator_required():
            return "\n".join(elements)
        return ""
    @property
    def bottom_separator(self) -> str:
        # longtable puts \bottomrule in the \endlastfoot block above instead.
        return ""
    @property
    def env_end(self) -> str:
        return "\\end{longtable}"
class RegularTableBuilder(GenericTableBuilder):
    """Concrete table builder for regular table.
    >>> from pandas import DataFrame
    >>> from pandas.io.formats import format as fmt
    >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
    >>> formatter = fmt.DataFrameFormatter(df)
    >>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
    ...                               column_format='lrc')
    >>> table = builder.get_result()
    >>> print(table)
    \\begin{table}
    \\centering
    \\caption{caption}
    \\label{lab}
    \\begin{tabular}{lrc}
    \\toprule
    {} &  a &   b \\\\
    \\midrule
    0 &  1 &  b1 \\\\
    1 &  2 &  b2 \\\\
    \\bottomrule
    \\end{tabular}
    \\end{table}
    <BLANKLINE>
    """
    @property
    def env_begin(self) -> str:
        # tabular nested in a table float so caption/label/position work.
        elements = [
            f"\\begin{{table}}{self._position_macro}",
            "\\centering",
            f"{self._caption_macro}",
            f"{self._label_macro}",
            f"\\begin{{tabular}}{{{self.column_format}}}",
        ]
        return "\n".join([item for item in elements if item])
    @property
    def bottom_separator(self) -> str:
        return "\\bottomrule"
    @property
    def env_end(self) -> str:
        # close both nested environments
        return "\n".join(["\\end{tabular}", "\\end{table}"])
class TabularBuilder(GenericTableBuilder):
    """Concrete table builder for tabular environment.
    >>> from pandas import DataFrame
    >>> from pandas.io.formats import format as fmt
    >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
    >>> formatter = fmt.DataFrameFormatter(df)
    >>> builder = TabularBuilder(formatter, column_format='lrc')
    >>> table = builder.get_result()
    >>> print(table)
    \\begin{tabular}{lrc}
    \\toprule
    {} &  a &   b \\\\
    \\midrule
    0 &  1 &  b1 \\\\
    1 &  2 &  b2 \\\\
    \\bottomrule
    \\end{tabular}
    <BLANKLINE>
    """
    @property
    def env_begin(self) -> str:
        # bare tabular: no float wrapper, so no caption/label/position macros
        return f"\\begin{{tabular}}{{{self.column_format}}}"
    @property
    def bottom_separator(self) -> str:
        return "\\bottomrule"
    @property
    def env_end(self) -> str:
        return "\\end{tabular}"
class LatexFormatter:
    r"""
    Used to render a DataFrame to a LaTeX tabular/longtable environment output.
    Parameters
    ----------
    formatter : `DataFrameFormatter`
    longtable : bool, default False
        Use longtable environment.
    column_format : str, default None
        The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
    multicolumn : bool, default False
        Use \multicolumn to enhance MultiIndex columns.
    multicolumn_format : str, default 'l'
        The alignment for multicolumns, similar to `column_format`
    multirow : bool, default False
        Use \multirow to enhance MultiIndex rows.
    caption : str or tuple, optional
        Tuple (full_caption, short_caption),
        which results in \caption[short_caption]{full_caption};
        if a single string is passed, no short caption will be set.
    label : str, optional
        The LaTeX label to be placed inside ``\label{}`` in the output.
    position : str, optional
        The LaTeX positional argument for tables, to be placed after
        ``\begin{}`` in the output.
    See Also
    --------
    HTMLFormatter
    """
    def __init__(
        self,
        formatter: DataFrameFormatter,
        longtable: bool = False,
        column_format: Optional[str] = None,
        multicolumn: bool = False,
        multicolumn_format: Optional[str] = None,
        multirow: bool = False,
        caption: Optional[Union[str, Tuple[str, str]]] = None,
        label: Optional[str] = None,
        position: Optional[str] = None,
    ):
        self.fmt = formatter
        # NOTE: self.frame must be set before self.column_format below,
        # because the column_format setter may derive a default from it.
        self.frame = self.fmt.frame
        self.longtable = longtable
        self.column_format = column_format
        self.multicolumn = multicolumn
        self.multicolumn_format = multicolumn_format
        self.multirow = multirow
        self.caption, self.short_caption = _split_into_full_short_caption(caption)
        self.label = label
        self.position = position
    def to_string(self) -> str:
        """
        Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
        environment output.
        """
        return self.builder.get_result()
    @property
    def builder(self) -> TableBuilderAbstract:
        """Concrete table builder.
        Returns
        -------
        TableBuilder
        """
        builder = self._select_builder()
        return builder(
            formatter=self.fmt,
            column_format=self.column_format,
            multicolumn=self.multicolumn,
            multicolumn_format=self.multicolumn_format,
            multirow=self.multirow,
            caption=self.caption,
            short_caption=self.short_caption,
            label=self.label,
            position=self.position,
        )
    def _select_builder(self) -> Type[TableBuilderAbstract]:
        """Select proper table builder."""
        if self.longtable:
            return LongTableBuilder
        # caption/label/position require the table float wrapper
        if any([self.caption, self.label, self.position]):
            return RegularTableBuilder
        return TabularBuilder
    @property
    def column_format(self) -> Optional[str]:
        """Column format."""
        return self._column_format
    @column_format.setter
    def column_format(self, input_column_format: Optional[str]) -> None:
        """Setter for column format."""
        if input_column_format is None:
            # derive a default: 'l' per index level, then per-dtype alignment
            self._column_format = (
                self._get_index_format() + self._get_column_format_based_on_dtypes()
            )
        elif not isinstance(input_column_format, str):
            raise ValueError(
                f"column_format must be str or unicode, "
                f"not {type(input_column_format)}"
            )
        else:
            self._column_format = input_column_format
    def _get_column_format_based_on_dtypes(self) -> str:
        """Get column format based on data type.
        Right alignment for numbers and left - for strings.
        """
        def get_col_type(dtype):
            if issubclass(dtype.type, np.number):
                return "r"
            return "l"
        dtypes = self.frame.dtypes._values
        return "".join(map(get_col_type, dtypes))
    def _get_index_format(self) -> str:
        """Get index column format."""
        return "l" * self.frame.index.nlevels if self.fmt.index else ""
def _escape_symbols(row: Sequence[str]) -> List[str]:
"""Carry out string replacements for special symbols.
Parameters
----------
row : list
List of string, that may contain special symbols.
Returns
-------
list
list of strings with the special symbols replaced.
"""
return [
(
x.replace("\\", "\\textbackslash ")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde ")
.replace("^", "\\textasciicircum ")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
def _convert_to_bold(crow: Sequence[str], ilevels: int) -> List[str]:
"""Convert elements in ``crow`` to bold."""
return [
f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x
for j, x in enumerate(crow)
]
if __name__ == "__main__":
    # Run the doctest examples embedded in the builder docstrings
    # (LongTableBuilder, RegularTableBuilder, TabularBuilder).
    # Fix: removed stray non-Python residue that trailed doctest.testmod()
    # and made the line a syntax error.
    import doctest

    doctest.testmod()
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Opus(AutotoolsPackage):
    """Opus is a totally open, royalty-free, highly versatile audio codec."""

    homepage = "http://opus-codec.org/"
    url = "http://downloads.xiph.org/releases/opus/opus-1.1.4.tar.gz"

    version('1.1.4', sha256='9122b6b380081dd2665189f97bfd777f04f92dc3ab6698eea1dbb27ad59d8692')
    version('1.1.3', sha256='58b6fe802e7e30182e95d0cde890c0ace40b6f125cffc50635f0ad2eef69b633')
    version('1.1.2', sha256='0e290078e31211baa7b5886bcc8ab6bc048b9fc83882532da4a1a45e58e907fd')
    version('1.1.1', sha256='9b84ff56bd7720d5554103c557664efac2b8b18acc4bbcc234cb881ab9a3371e')
    version('1.1', sha256='b9727015a58affcf3db527322bf8c4d2fcf39f5f6b8f15dbceca20206cbe1d95')
    version('1.0.3', sha256='191a089c92dbc403de6980463dd3604b65beb12d283c607e246c8076363cb49c')
    version('1.0.2', sha256='da615edbee5d019c1833071d69a4782c19f178cf9ca1401375036ecef25cd78a')
    version('1.0.1', sha256='80fa5c3caf2ac0fd68f8a22cce1564fc46b368c773a17554887d0066fe1841ef')
    version('1.0.0', sha256='9250fcc74472d45c1e14745542ec9c8d09982538aefed56962495614be3e0d2d')
    version('0.9.14', sha256='b1cad6846a8f819a141009fe3f8f10c946e8eff7e9c2339cd517bb136cc59eae')
    version('0.9.10', sha256='4e379a98ba95bbbfe9087ef10fdd05c8ac9060b6d695f587ea82a7b43a0df4fe')
    version('0.9.9', sha256='2f62359f09151fa3b242040dc9b4c5b6bda15557c5daea59c8420f1a2ff328b7')
    version('0.9.8', sha256='4aa30d2e0652ffb4a7a22cc8a29c4ce78267626f560a2d9213b1d2d4e618cf36')
    version('0.9.7', sha256='1b69772c31c5cbaa43d1dfa5b1c495fc29712e8e0ff69d6f8ad46459e5c6715f')
    version('0.9.6', sha256='3bfaeb25f4b4a625a0bc994d6fc6f6776a05193f60099e0a99f7530c6b256309')
    version('0.9.5', sha256='53801066fa97329768e7b871fd1495740269ec46802e1c9051aa7e78c6edee5b')
    version('0.9.3', sha256='d916e34c18a396eb7dffc47af754f441af52a290b761e20db9aedb65928c699e')
    version('0.9.2', sha256='6e85c1b57e1d7b7dfe2928bf92586b96b73a9067e054ede45bd8e6d24bd30582')
    version('0.9.1', sha256='206221afc47b87496588013bd4523e1e9f556336c0813f4372773fc536dd4293')
    version('0.9.0', sha256='b2f75c4ac5ab837845eb028413fae2a28754bfb0a6d76416e2af1441ef447649')

    # Fix: removed stray non-Python residue that trailed this line and broke
    # the file.
    # NOTE(review): opus is a self-contained codec; confirm it really needs
    # libvorbis -- this dependency looks suspicious.
    depends_on('libvorbis')
# Copyright 2017 Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains classes relevant to the main adaptation function of the Controller
Adaptation Sublayer
"""
import pprint
import time
import urlparse
import weakref
from escape.adapt import log as log, LAYER_NAME
from escape.adapt.adapters import UnifyRESTAdapter
from escape.adapt.managers import UnifyDomainManager, BaseResultEvent
from escape.adapt.virtualization import DomainVirtualizer
from escape.nffg_lib.nffg import NFFG, NFFGToolBox
from escape.util.com_logger import MessageDumper
from escape.util.config import CONFIG
from escape.util.config import ConfigurationError
from escape.util.conversion import NFFGConverter
from escape.util.domain import DomainChangedEvent, AbstractDomainManager, \
AbstractRemoteDomainManager
from escape.util.misc import notify_remote_visualizer, VERBOSE
from escape.util.stat import stats
from escape.util.virtualizer_helper import get_nfs_from_info, \
strip_info_by_nfs, get_bb_nf_from_path
from pox.lib.recoco import Timer
from virtualizer_info import Info
class InstallationFinishedEvent(BaseResultEvent):
  """
  Event for signalling end of mapping process.
  """

  def __init__ (self, id, result):
    """
    Init.

    :param id: identifier of the related service request
    :param result: result of the installation
    :type: result: str
    """
    super(InstallationFinishedEvent, self).__init__()
    self.id = id
    self.result = result

  @classmethod
  def get_result_from_status (cls, deploy_status):
    """
    Convert deploy status to overall result.

    :param deploy_status: deploy status object
    :type deploy_status: :any:`DomainRequestStatus`
    :return: overall service status
    :type: str
    """
    # Check status flags in priority order; getattr keeps the evaluation
    # lazy so only the flags up to the first truthy one are read.
    ordered_checks = (("success", cls.DEPLOYED),
                      ("still_pending", cls.IN_PROGRESS),
                      ("failed", cls.DEPLOY_ERROR),
                      ("reset_failed", cls.RESET_ERROR),
                      ("reset", cls.RESET))
    for flag, outcome in ordered_checks:
      if getattr(deploy_status, flag):
        return outcome
    return cls.UNKNOWN
class InfoRequestFinishedEvent(BaseResultEvent):
  """
  Event for signalling end of Info request processing.
  """

  def __init__ (self, result, status=None):
    """
    Init.

    :param result: overall result
    :type result: str
    :param status: deploy status
    :type status: :any:`DomainRequestStatus`
    """
    super(InfoRequestFinishedEvent, self).__init__()
    self.result = result
    self.status = status

  @classmethod
  def get_result_from_status (cls, req_status):
    """
    Convert request status to overall result.

    :param req_status: deploy status
    :type req_status: :any:`DomainRequestStatus`
    :return: overall result
    :rtype: str
    """
    # Flags are inspected in priority order; evaluation stays lazy via
    # getattr so later flags are untouched once one matches.
    for flag, outcome in (("success", cls.SUCCESS),
                          ("still_pending", cls.IN_PROGRESS),
                          ("failed", cls.ERROR)):
      if getattr(req_status, flag):
        return outcome
    return cls.UNKNOWN
class ComponentConfigurator(object):
  """
  Initialize, configure and store DomainManager objects.
  Use global config to create managers and adapters.
  Follows Component Configurator design pattern.
  """

  def __init__ (self, ca, lazy_load=True):
    """
    For domain adapters the configurator checks the CONFIG first.
    .. warning::
      Adapter classes must be subclass of AbstractESCAPEAdapter!
    .. note::
      Arbitrary domain adapters is searched in
      :mod:`escape.adapt.domain_adapters`
    :param ca: ControllerAdapter instance
    :type ca: :any:`ControllerAdapter`
    :param lazy_load: load adapters only at first reference (default: True)
    :type lazy_load: bool
    """
    log.debug("Init DomainConfigurator - lazy load: %s" % lazy_load)
    super(ComponentConfigurator, self).__init__()
    # Mapping of component name -> initiated DomainManager instance
    self.__repository = dict()
    self._lazy_load = lazy_load
    self._ca = ca
    if not lazy_load:
      # Initiate adapters from CONFIG
      self.load_default_mgrs()

  @property
  def components (self):
    """
    Return the dict of initiated Domain managers.
    :return: container of initiated DomainManagers
    :rtype: dict
    """
    return self.__repository

  @property
  def domains (self):
    """
    Return the list of domain_names which have been managed by DomainManagers.
    :return: list of already managed domains
    :rtype: list
    """
    return [mgr.domain_name for mgr in self.__repository.itervalues()]

  @property
  def initiated (self):
    """
    Return an iterator over the names of the initiated components.
    :return: iterator of component names
    """
    return self.__repository.iterkeys()

  def __len__ (self):
    """
    Return the number of initiated components.
    :return: number of initiated components:
    :rtype: int
    """
    return len(self.__repository)

  def __iter__ (self):
    """
    Return with an iterator over the (name, DomainManager) items.
    """
    return self.__repository.iteritems()

  def __getitem__ (self, item):
    """
    Return with the DomainManager given by name: ``item``.
    :param item: component name
    :type item: str
    :return: component
    :rtype: :any:`AbstractDomainManager`
    """
    return self.get_mgr(item)

  ##############################################################################
  # General DomainManager handling functions: create/start/stop/get
  ##############################################################################

  def get_mgr (self, name):
    """
    Return the DomainManager with given name and create+start if needed.
    :param name: name of domain manager
    :type name: str
    :return: None
    """
    try:
      return self.__repository[name]
    except KeyError:
      # EAFP: only create on-demand when lazy loading is enabled
      if self._lazy_load:
        return self.start_mgr(name)
      else:
        raise AttributeError(
          "No component is registered with the name: %s" % name)

  def start_mgr (self, name, mgr_params=None, autostart=True):
    """
    Create, initialize and start a DomainManager with given name and start
    the manager by default.
    :param name: name of domain manager
    :type name: str
    :param mgr_params: mgr parameters
    :type mgr_params: dict
    :param autostart: also start the domain manager (default: True)
    :type autostart: bool
    :return: domain manager
    :rtype: :any:`AbstractDomainManager`
    """
    # If not started
    if not self.is_started(name):
      # Load from CONFIG
      mgr = self.load_component(name, params=mgr_params)
      # Save into repository
      self.__repository[name] = mgr
      if mgr is not None:
        # Call init - give self for the DomainManager to initiate the
        # necessary DomainAdapters itself
        mgr.init(self)
        # Autostart if needed
        if autostart:
          mgr.run()
        log.debug("Register DomainManager: %s into repository..." % name)
    else:
      log.warning("%s domain component has been already started! Skip "
                  "reinitialization..." % name)
    # Return with manager
    return self.__repository[name]

  def register_mgr (self, name, mgr, autostart=False):
    """
    Initialize the given manager object and with init() call and store it in
    the ComponentConfigurator with the given name.
    :param name: name of the component, must be unique
    :type name: str
    :param mgr: created DomainManager object
    :type mgr: :any:`AbstractDomainManager`
    :param autostart: also start the DomainManager (default: False)
    :type autostart: bool
    :return: None
    """
    if self.is_started(name=name):
      # FIX: the format string had a %s placeholder but the name argument
      # was never supplied, so the placeholder leaked into the log output
      log.warning("DomainManager with name: %s has already exist! Skip init..."
                  % name)
      return
    # Save into repository
    self.__repository[name] = mgr
    # Call init - give self for the DomainManager to initiate the
    # necessary DomainAdapters itself
    mgr.init(self)
    # Autostart if needed
    if autostart:
      mgr.run()

  def stop_mgr (self, name):
    """
    Finalize the DomainManager with given name. The manager is kept in the
    repository; use :func:`remove_mgr` to also remove it.
    :param name: name of domain manager
    :type name: str
    :return: None
    """
    # If started
    if self.is_started(name):
      # Call finalize
      self.__repository[name].finit()
    else:
      log.warning(
        "Missing domain component: %s! Skipping stop task..." % name)

  def remove_mgr (self, name):
    """
    Stop and derefer a DomainManager with given name and remove from the
    repository also.
    :param name: name of domain manager
    :type name: str
    :return: None
    """
    # If started
    if self.is_started(name):
      # Call finalize
      self.__repository[name].finit()
      # Remove from repository
      log.debug("Remove DomainManager: %s from repository..." % name)
      del self.__repository[name]
    else:
      log.warning(
        "Missing domain component: %s! Skipping stop task..." % name)

  def is_started (self, name):
    """
    Return with the value the given domain manager is started or not.
    :param name: name of domain manager
    :type name: str
    :return: is loaded or not
    :rtype: bool
    """
    return name in self.__repository

  def get_component_by_domain (self, domain_name):
    """
    Return with the initiated Domain Manager configured with the given
    domain_name.
    :param domain_name: name of the domain used in :class:`NFFG` descriptions
    :type domain_name: str
    :return: the initiated domain Manager
    :rtype: AbstractDomainManager
    """
    for component in self.__repository.itervalues():
      if component.domain_name == domain_name:
        return component

  def get_component_name_by_domain (self, domain_name):
    """
    Return with the initiated Domain Manager name configured with the given
    domain_name.
    :param domain_name: name of the domain used in :class:`NFFG` descriptions
    :type domain_name: str
    :return: the initiated domain Manager name
    :rtype: str
    """
    for name, component in self.__repository.iteritems():
      if component.domain_name == domain_name:
        return name

  ##############################################################################
  # High-level configuration-related functions
  ##############################################################################

  def load_component (self, component_name, params=None, parent=None):
    """
    Load given component (DomainAdapter/DomainManager) from config.
    Initiate the given component class, pass the additional attributes,
    register the event listeners and return with the newly created object.
    :param component_name: component's config name
    :type component_name: str
    :param params: component parameters
    :type params: dict
    :param parent: define the parent of the actual component's configuration
    :type parent: dict
    :return: initiated component
    :rtype: :any:`AbstractESCAPEAdapter` or :any:`AbstractDomainManager`
    """
    try:
      # Get component class
      component_class = CONFIG.get_component(component=component_name,
                                             parent=parent)
      # If it's found
      if component_class is not None:
        # Get optional parameters of this component
        if not params:
          params = CONFIG.get_component_params(component=component_name,
                                               parent=parent)
        # Initialize component
        component = component_class(**params)
        # Set up listeners for e.g. DomainChangedEvents
        component.addListeners(self._ca)
        # Set up listeners for DeployNFFGEvent
        component.addListeners(self._ca._layer_API)
        # Return the newly created object
        return component
      else:
        log.error("Configuration of '%s' is missing. Skip initialization!" %
                  component_name)
        raise ConfigurationError("Missing component configuration!")
    except TypeError as e:
      # Missing mandatory kwargs of the component constructor surface as
      # TypeError; report the used params for easier debugging
      if "takes at least" in e.message:
        log.error("Mandatory configuration field is missing from:\n%s" %
                  pprint.pformat(params))
      raise
    except AttributeError:
      log.error("%s is not found. Skip component initialization!" %
                component_name)
      raise
    except ImportError:
      log.error("Could not import module: %s. Skip component initialization!" %
                component_name)
      raise

  def load_default_mgrs (self):
    """
    Initiate and start default DomainManagers defined in CONFIG.
    :return: None
    """
    log.info("Initialize additional DomainManagers from config...")
    # very dummy initialization
    enabled_mgrs = CONFIG.get_managers()
    if not enabled_mgrs:
      log.info("No DomainManager has been configured!")
      return
    for mgr_name in enabled_mgrs:
      # Get manager parameters from config
      mgr_cfg = CONFIG.get_component_params(component=mgr_name)
      if 'domain_name' in mgr_cfg:
        if mgr_cfg['domain_name'] in self.domains:
          log.warning("Domain name collision! Domain Manager: %s has already "
                      "initiated with the domain name: %s" % (
                        self.get_component_by_domain(
                          domain_name=mgr_cfg['domain_name']),
                        mgr_cfg['domain_name']))
      else:
        # If no domain name was given, use the manager config name by default
        mgr_cfg['domain_name'] = mgr_name
      # Get manager class
      mgr_class = CONFIG.get_component(component=mgr_name)
      if mgr_class is None:
        log.fatal("Missing DomainManager config: %s" % mgr_name)
        raise ConfigurationError(
          "Missing configuration for added DomainManager: %s" % mgr_name)
      if mgr_class.IS_INTERNAL_MANAGER:
        loaded_local_mgr = [name for name, mgr in self.__repository.iteritems()
                            if mgr.IS_INTERNAL_MANAGER]
        if loaded_local_mgr:
          log.warning("A local DomainManager has already been initiated with "
                      "the name: %s! Skip initiating DomainManager: %s" %
                      (loaded_local_mgr, mgr_name))
          # FIX: was `return`, which aborted loading of ALL remaining
          # managers; the log message states only this one is skipped
          continue
      log.debug("Load DomainManager based on config: %s" % mgr_name)
      # Start domain manager
      self.start_mgr(name=mgr_name, mgr_params=mgr_cfg, autostart=True)

  def load_local_domain_mgr (self):
    """
    Initiate the DomainManager for the internal domain.
    :return: None
    """
    from escape.infr.topo_manager import InternalDomainManager
    loaded_local_mgr = [name for name, mgr in self.__repository.iteritems() if
                        mgr.IS_INTERNAL_MANAGER]
    if loaded_local_mgr:
      log.warning("A local DomainManager has already been initiated with the "
                  "name: %s! Skip initiation of default local DomainManager: %s"
                  % (loaded_local_mgr, InternalDomainManager.name))
      return
    log.debug("Init DomainManager for local domain based on config: %s" %
              InternalDomainManager.name)
    # Internal domain is hard coded with the name: INTERNAL
    self.start_mgr(name=InternalDomainManager.name)

  def reset_initiated_mgrs (self):
    """
    Reset initiated DomainManagers based on the first received config.
    :return: None
    """
    log.info("Resetting detected domains before shutdown...")
    for name, mgr in self:
      if not mgr.IS_EXTERNAL_MANAGER:
        try:
          mgr.reset_domain()
        except:
          log.exception("Got exception during domain resetting!")

  def clear_initiated_mgrs (self):
    """
    Clear initiated DomainManagers based on the first received config.
    :return: None
    """
    log.info("Cleanup detected domains before shutdown...")
    for name, mgr in self:
      if not mgr.IS_EXTERNAL_MANAGER:
        try:
          mgr.clear_domain()
        except:
          log.exception("Got exception during domain cleanup!")

  def stop_initiated_mgrs (self):
    """
    Stop initiated DomainManagers.
    :return: None
    """
    log.info("Shutdown initiated DomainManagers...")
    for name, mgr in self:
      try:
        self.stop_mgr(name=name)
      except:
        log.exception("Got exception during domain resetting!")
    # Do not del mgr in for loop because of the iterator use
    self.__repository.clear()
class ControllerAdapter(object):
"""
Higher-level class for :class:`NFFG` adaptation between multiple domains.
"""
EXTERNAL_MDO_META_NAME = 'unify-slor'
"""Attribute name used topology from TADS to identify external MdO URL"""
EXTERNAL_DOMAIN_NAME_JOINER = '-'
  def __init__ (self, layer_API, with_infr=False):
    """
    Initialize Controller adapter.

    For domain components the ControllerAdapter checks the CONFIG first.
    Initialization order matters: the DoV manager, domain configurator and
    status manager must exist before ``init_managers`` starts DomainManagers.

    :param layer_API: layer API instance
    :type layer_API: :any:`ControllerAdaptationAPI`
    :param with_infr: using emulated infrastructure (default: False)
    :type with_infr: bool
    """
    log.debug("Init ControllerAdapter - with IL: %s" % with_infr)
    super(ControllerAdapter, self).__init__()
    # Set a weak reference to avoid circular dependencies
    self._layer_API = weakref.proxy(layer_API)
    self._with_infr = with_infr
    # Timer for VNFM
    self.__vnfm_timer = None
    # Set virtualizer-related components
    self.DoVManager = GlobalResourceManager()
    self.domains = ComponentConfigurator(self)
    self.status_mgr = DomainRequestManager()
    # Start the configured DomainManagers (and the local one when the
    # Infrastructure Layer is also running)
    self.init_managers(with_infr=with_infr)
    # Here every domainManager is up and running
    # Notify the remote visualizer about collected data if it's needed
    notify_remote_visualizer(data=self.DoVManager.dov.get_resource_info(),
                             unique_id="DOV",
                             params={"event": "create"})
def init_managers (self, with_infr=False):
"""
:param with_infr: using emulated infrastructure (default: False)
:type with_infr: bool
:return: None
"""
try:
if with_infr:
# Init internal domain manager if Infrastructure Layer is started
self.domains.load_local_domain_mgr()
# Init default domain managers
self.domains.load_default_mgrs()
except (ImportError, AttributeError, ConfigurationError) as e:
from escape.util.misc import quit_with_error
quit_with_error(msg="Shutting down ESCAPEv2 due to an unexpected error!",
logger=log, exception=e)
def shutdown (self):
"""
Shutdown ControllerAdapter, related components and stop DomainManagers.
:return: None
"""
# Clear DomainManagers config if needed
if CONFIG.reset_domains_after_shutdown():
self.domains.reset_initiated_mgrs()
elif CONFIG.clear_domains_after_shutdown():
self.domains.clear_initiated_mgrs()
# Stop initiated DomainManagers
self.domains.stop_initiated_mgrs()
  def install_nffg (self, mapped_nffg, original_request=None,
                    direct_deploy=False):
    """
    Start NF-FG installation.

    Process given :class:`NFFG`, slice information self.__global_nffg on
    domains and invoke DomainManagers to install domain specific parts.

    High-level flow: register a deploy status, optionally hand the request
    over to an external VNFM, back up the DoV, split the request by domain
    and delegate each part to the matching DomainManager, then evaluate the
    collected per-domain results (and roll back on failure if configured).

    :param mapped_nffg: mapped NF-FG instance which need to be installed
    :type mapped_nffg: :class:`NFFG`
    :param original_request: top level, original :class:`NFFG` request
    :type original_request: :class:`NFFG`
    :param direct_deploy: skip external hook call before deploy (default: False)
    :type direct_deploy: bool
    :return: deploy result
    :rtype: DomainRequestStatus
    """
    log.debug("Invoke %s to install NF-FG(%s)" % (
      self.__class__.__name__, mapped_nffg.name))
    self.collate_deploy_request(request=mapped_nffg)
    # Register mapped NFFG for managing statuses of install steps
    log.debug("Store mapped NFFG for domain status tracking...")
    deploy_status = self.status_mgr.register_service(nffg=mapped_nffg)
    if deploy_status is None:
      log.error("Missing deploy status for request: %s. Skip deployment..."
                % mapped_nffg.id)
      return
    # Optional external VNFM pre-deploy hook
    if not direct_deploy:
      if CONFIG.get_vnfm_enabled():
        log.info("VNFM is enabled! Skip deploy process and call external "
                 "component...")
        if self.forward_to_vnfm(nffg=mapped_nffg, deploy_status=deploy_status):
          log.info("Waiting for external component...")
          deploy_status.set_standby()
        else:
          log.debug("Clear deploy status...")
          deploy_status.clear()
        log.debug("Deploy status: %s" % deploy_status)
        return deploy_status
      else:
        log.debug("VNFM is disabled! Proceed with deploy...")
    else:
      log.debug("Direct deploy is set! "
                "Bypass external VNFM and proceed with deploy...")
    # Snapshot the DoV so a failed deploy can be rolled back
    self.DoVManager.backup_dov_state()
    # If DoV update is based on status updates, rewrite the whole DoV as the
    # first step
    if self.DoVManager.status_updates:
      log.debug("Status-based update is enabled! "
                "Rewrite DoV with mapping result...")
      self.DoVManager.rewrite_global_view_with_status(nffg=mapped_nffg)
      notify_remote_visualizer(data=mapped_nffg,
                               unique_id="DOV",
                               params={"event": "datastore"})
    # Split the mapped NFFG into slices based on domains
    slices = NFFGToolBox.split_into_domains(nffg=mapped_nffg, log=log)
    # If no Infranode in the NFFG, no domain can be detected and slicing by it
    if slices is None:
      log.warning("Given mapped NFFG: %s can not be sliced! "
                  "Skip domain notification steps" % mapped_nffg)
      # Return with deploy result: fail
      return deploy_status
    NFFGToolBox.rewrite_interdomain_tags(slices=slices,
                                         flowrule_stitching=CONFIG.flowrule_stitching())
    log.info("Notify initiated domains: %s" %
             [d for d in self.domains.initiated])
    # Perform domain installations
    for domain, part in slices:
      stats.add_measurement_start_entry(type=stats.TYPE_DEPLOY_DOMAIN,
                                        info=domain)
      log.debug("Search DomainManager for domain: %s" % domain)
      # Get Domain Manager
      domain_mgr = self.domains.get_component_by_domain(domain_name=domain)
      if domain_mgr is None:
        log.warning("No DomainManager has been initialized for domain: %s! "
                    "Skip install domain part..." % domain)
        deploy_status.set_domain_failed(domain=domain)
        continue
      log.log(VERBOSE, "Splitted domain: %s part:\n%s" % (domain, part.dump()))
      # Check if need to reset domain before install
      if CONFIG.reset_domains_before_install():
        log.debug("Reset %s domain before deploying mapped NFFG..." %
                  domain_mgr.domain_name)
        domain_mgr.reset_domain()
      log.info("Delegate splitted part: %s to %s" % (part, domain_mgr))
      # Invoke DomainAdapter's install
      domain_install_result = domain_mgr.install_nffg(part)
      # Update the DoV based on the mapping result covering some corner case
      # None --> failure; 0 --> skipped; anything else --> success
      if domain_install_result is None:
        log.error("Installation of %s in %s was unsuccessful!" % (part, domain))
        log.debug("Update installed part with collective result: %s" %
                  NFFG.STATUS_FAIL)
        deploy_status.set_domain_failed(domain=domain)
        log.debug("Installation status: %s" % deploy_status)
        if CONFIG.rollback_on_failure():
          # Stop deploying remained nffg_parts and initiate delayed rollback
          log.info("Rollback mode is enabled! Skip installation process...")
          break
        # Update failed status info of mapped elements in NFFG part for DoV
        # update
        if self.DoVManager.status_updates:
          NFFGToolBox.update_status_info(nffg=part, status=NFFG.STATUS_FAIL,
                                         log=log)
        else:
          log.warning("Skip DoV update with domain: %s! Cause: "
                      "Domain installation was unsuccessful!" % domain)
        continue
      if domain_install_result == 0:
        log.info("Installation of %s in %s was skipped!" % (part, domain))
        deploy_status.set_domain_ok(domain=domain)
        log.debug("Installation status: %s" % deploy_status)
        continue
      log.info("Installation of %s in %s was successful!" % (part, domain))
      if self.DoVManager.status_updates:
        log.debug("Update installed part with collective result: %s" %
                  NFFG.STATUS_DEPLOY)
        # Update successful status info of mapped elements in NFFG part for
        # DoV update
        NFFGToolBox.update_status_info(nffg=part, status=NFFG.STATUS_DEPLOY,
                                       log=log)
      # If the domain manager does not poll the domain update here
      # else polling takes care of domain updating
      if isinstance(domain_mgr,
                    AbstractRemoteDomainManager) and domain_mgr.polling:
        log.info("Skip explicit DoV update for domain: %s. "
                 "Cause: polling enabled!" % domain)
        if isinstance(domain_mgr,
                      UnifyDomainManager) and domain_mgr.callback_manager:
          log.debug("Callback is enabled for domain: %s!" % domain)
        else:
          log.debug("Consider deploy into a polled domain OK...")
          deploy_status.set_domain_ok(domain=domain)
          log.debug("Installation status: %s" % deploy_status)
        continue
      if isinstance(domain_mgr,
                    UnifyDomainManager) and domain_mgr.callback_manager:
        log.info("Skip explicit DoV update for domain: %s. "
                 "Cause: callback registered!" % domain)
        deploy_status.set_domain_waiting(domain=domain)
        log.debug("Installation status: %s" % deploy_status)
        continue
      if domain_mgr.IS_INTERNAL_MANAGER:
        self.__perform_internal_mgr_update(mapped_nffg=mapped_nffg,
                                           domain=domain)
        # In case of Local manager skip the rest of the update
        continue
      if CONFIG.one_step_update():
        log.debug("One-step-update is enabled. Skip explicit domain update!")
      else:
        # Explicit domain update
        self.DoVManager.update_domain(domain=domain, nffg=part)
      # self.status_mgr.get_status(mapped_nffg.id).set_domain_ok(domain)
      deploy_status.set_domain_ok(domain=domain)
      log.debug("Installation status: %s" % deploy_status)
      if CONFIG.domain_deploy_delay():
        log.warning("Delay next deploy with %ss" % CONFIG.domain_deploy_delay())
        time.sleep(CONFIG.domain_deploy_delay())
    # END of domain deploy loop
    log.info("NF-FG installation is finished by %s" % self.__class__.__name__)
    log.debug("Overall installation status: %s" % deploy_status)
    stats.add_measurement_end_entry(type=stats.TYPE_DEPLOY,
                                    info=LAYER_NAME)
    # Post-mapping steps
    if deploy_status.success:
      log.info("All installation processes have been finished with success!")
      if CONFIG.one_step_update():
        log.debug("One-step-update is enabled. Update DoV now...")
        self.DoVManager.set_global_view(nffg=deploy_status.data)
    elif deploy_status.still_pending:
      log.warning("Installation process is still pending! "
                  "Waiting for results...")
    elif deploy_status.failed:
      log.error("%s installation was not successful!" % mapped_nffg)
      # No pending install part here
      if CONFIG.rollback_on_failure():
        self.__do_rollback(status=deploy_status,
                           previous_state=self.DoVManager.get_backup_state())
    else:
      log.info("All installation processes have been finished!")
    return deploy_status
def collate_deploy_request (self, request):
"""
Collate request BiSBiS node IDs to the existent nodes in DoV and correct
domain name in request.
:param request: service request
:type request: :class:`NFFG`
:return: corrected request
:rtype: :class:`NFFG`
"""
log.debug("Collate deploy request node IDs...")
dov = self.DoVManager.dov.get_resource_info()
for node in request.infras:
if node.id not in dov.network:
log.warning("Found non-existent infra node: %s in deploy request: %s!"
% (node.id, request))
continue
domain_name = dov[node.id].domain
if node.domain != domain_name:
log.debug("Collating domain id for node: %s --> %s" % (node.id,
domain_name))
node.domain = domain_name
return request
def forward_to_vnfm (self, nffg, deploy_status):
"""
Send given NFFG to an external component using its REST-API.
:param nffg: un-deployed request
:type nffg: :class:`NFFG`
:param deploy_status: deploy status
:type deploy_status: :any:`DomainRequestStatus`
:return: result of REST call
:rtype: bool
"""
try:
vnfm_config = CONFIG.get_vnfm_config()
log.debug("Acquired external component config: %s" % vnfm_config)
rest_adapter = UnifyRESTAdapter(url=vnfm_config.get('url'),
prefix=vnfm_config.get('prefix'),
domain_name="VNFM")
# Skip removing domain name from ids
rest_adapter.converter.disable_unique_bb_id()
if 'timeout' in vnfm_config:
log.debug("Set explicit timeout: %s" % vnfm_config['timeout'])
rest_adapter.CONNECTION_TIMEOUT = vnfm_config['timeout']
log.debug("Convert deploy request to Virtualizer...")
virtualizer = rest_adapter.converter.dump_to_Virtualizer(nffg=nffg)
virtualizer.id.set_value("DoV")
status = rest_adapter.edit_config(data=virtualizer,
diff=vnfm_config.get('diff', False))
if status is None:
log.error("External VNFM component call was unsuccessful!")
return False
else:
timeout = vnfm_config.get('timeout', 30)
log.debug("Using timeout for external VNFM: %ss!" % timeout)
self.__vnfm_timer = Timer(timeout,
self._vnfm_timeout_expired,
kw=dict(deploy_id=deploy_status.id,
timeout=timeout),
started=True)
return True
except:
log.error("Something went wrong during external VNFM component call!")
  def _vnfm_timeout_expired (self, deploy_id, timeout):
    """
    Handle expired timeout of external VNFM call and raise event.

    :param deploy_id: deploy status ID
    :type deploy_id: str or int
    :param timeout: timeout value in sec
    :type timeout: int
    :return: None
    """
    log.warning("External VNFM timeout: %ss has expired!" % timeout)
    # Signal the upper layer that the deploy failed due to the missing answer
    self._layer_API.raiseEventNoErrors(InstallationFinishedEvent,
                                       id=deploy_id,
                                       result=InstallationFinishedEvent.DEPLOY_ERROR)
def cancel_vnfm_timer (self):
"""
Cancel timer defined for external VNFM call.
:return: None
"""
if self.__vnfm_timer:
self.__vnfm_timer.cancel()
  def __perform_internal_mgr_update (self, mapped_nffg, domain):
    """
    Update DoV state if ESCAPE is a local Domain Orchestrator.

    Distinguishes three cases: a SingleBiSBiS view (cleanup vs. non-cleanup),
    a full GLOBAL view (status-based vs. full override), and a virtualized
    view (no update possible).

    :param mapped_nffg: mapped NFFG
    :type mapped_nffg: :class:`NFFG`
    :param domain: domain name
    :type domain: str
    :return: None
    """
    # If the internalDM is the only initiated mgr, we can override the
    # whole DoV
    if mapped_nffg.is_SBB():
      # If the request was a cleanup request, we can simply clean the DOV
      if mapped_nffg.is_bare():
        log.debug("Detected cleanup topology (no NF/Flowrule/SG_hop)! "
                  "Clean DoV...")
        self.DoVManager.clean_domain(domain=domain)
        self.status_mgr.get_status(mapped_nffg.id).set_domain_ok(domain)
      # If the reset contains some VNF, cannot clean or override
      else:
        log.warning(
          "Detected SingleBiSBiS topology! Local domain has been already "
          "cleared, skip DoV update...")
    # If the the topology was a GLOBAL view
    elif not mapped_nffg.is_virtualized():
      if self.DoVManager.status_updates:
        # In case of status updates, the DOV update has been done
        # In role of Local Orchestrator each element is up and running
        # update DoV with status RUNNING
        if mapped_nffg.is_bare():
          log.debug("Detected cleanup topology! "
                    "No need for status update...")
        else:
          log.debug("Detected new deployment!")
          self.DoVManager.update_global_view_status(status=NFFG.STATUS_RUN)
          self.status_mgr.get_status(mapped_nffg.id).set_domain_ok(domain)
      else:
        # Override the whole DoV by default
        self.DoVManager.set_global_view(nffg=mapped_nffg)
        self.status_mgr.get_status(mapped_nffg.id).set_domain_ok(domain)
    else:
      log.warning("Detected virtualized Infrastructure node in mapped NFFG!"
                  " Skip DoV update...")
def __do_rollback (self, status, previous_state):
"""
Initiate and perform the rollback feature.
:param status: deploy status object
:type status: :class:`DomainRequestStatus`
:param previous_state: previous state stored before deploy
:type previous_state: :class:`NFFG`
:return: None
"""
if not CONFIG.rollback_on_failure():
return
log.info("Rollback mode is enabled! Resetting previous state....")
status.set_mapping_result(data=previous_state)
log.debug("Current status: %s" % status)
for domain in status.domains:
domain_mgr = self.domains.get_component_by_domain(domain_name=domain)
if domain_mgr is None:
log.error("DomainManager for domain: %s is not found!" % domain)
continue
if isinstance(domain_mgr, UnifyDomainManager):
ds = status.get_domain_status(domain=domain)
# Skip rollback if the domain skipped by rollback interrupt
if ds != status.INITIALIZED:
result = domain_mgr.rollback_install(request_id=status.id)
if not result:
log.debug("RESET request has been failed!")
status.set_domain_failed(domain=domain)
continue
if isinstance(domain_mgr,
UnifyDomainManager) and domain_mgr.callback_manager:
status.set_domain_waiting(domain=domain)
elif isinstance(domain_mgr,
AbstractRemoteDomainManager) and domain_mgr.polling:
log.debug("Polling in domain: %s is enabled! "
"Set rollback status to RESET" % domain)
status.set_domain_reset(domain=domain)
else:
status.set_domain_reset(domain=domain)
if not CONFIG.one_step_update():
log.debug("Extract domain state from previous state...")
reset_state = NFFGToolBox.extract_domain(domain=domain,
nffg=previous_state)
self.DoVManager.update_domain(domain=domain,
nffg=reset_state)
else:
log.debug("Domain: %s is not affected. Skip rollback..." % domain)
else:
log.warning("%s does not support rollback! Skip rollback step...")
log.debug("Installation status: %s" % status)
if status.reset and CONFIG.one_step_update():
log.debug("One-step-update is enabled. Restore DoV state now...")
self.DoVManager.set_global_view(nffg=previous_state)
log.info("Rollback process has been finished!")
  def _handle_DomainChangedEvent (self, event):
    """
    Handle DomainChangedEvents, dispatch event according to the cause to
    store and enforce changes into DoV.

    External-manager events are delegated to
    :func:`_manage_external_domain_changes`; DOMAIN_UP/DOWN/CHANGED causes
    are mapped onto the corresponding DoV operations. For polled domains the
    install status bookkeeping is also updated here.

    :param event: event object
    :type event: :class:`DomainChangedEvent`
    :return: None
    """
    if isinstance(event.source, AbstractDomainManager) \
       and event.source.IS_EXTERNAL_MANAGER:
      log.debug("Received DomainChanged event from ExternalDomainManager with "
                "cause: %s! Skip implicit domain update from domain: %s" %
                (DomainChangedEvent.TYPE.reversed[event.cause], event.domain))
      # Handle external domains
      return self._manage_external_domain_changes(event)
    log.debug("Received DomainChange event from domain: %s, cause: %s"
              % (event.domain, DomainChangedEvent.TYPE.reversed[event.cause]))
    # If new domain detected
    if event.cause == DomainChangedEvent.TYPE.DOMAIN_UP:
      self.DoVManager.add_domain(domain=event.domain,
                                 nffg=event.data)
    # If domain has got down
    elif event.cause == DomainChangedEvent.TYPE.DOMAIN_DOWN:
      self.DoVManager.remove_domain(domain=event.domain)
    # If domain has changed
    elif event.cause == DomainChangedEvent.TYPE.DOMAIN_CHANGED:
      if isinstance(event.data, NFFG):
        log.log(VERBOSE, "Changed topology:\n%s" % event.data.dump())
      self.DoVManager.update_domain(domain=event.domain,
                                    nffg=event.data)
      # Handle install status in case the DomainManager is polling the domain
      if isinstance(event.source,
                    AbstractRemoteDomainManager) and not event.source.polling:
        return
      deploy_status = self.status_mgr.get_last_status()
      if deploy_status:
        if deploy_status.get_domain_status(event.domain) == deploy_status.OK:
          log.debug("Domain: %s is already set OK. "
                    "Skip overall status check..." % event.domain)
          return
        if isinstance(event.source,
                      UnifyDomainManager) and event.source.callback_manager:
          log.debug("Callback is enabled for domain: %s! "
                    "Skip overall status check..." % event.domain)
          return
        deploy_status.set_domain_ok(event.domain)
        log.debug("Installation status: %s" % deploy_status)
        if not deploy_status.still_pending:
          if deploy_status.success:
            log.info("All installation process has been finished for request:"
                     " %s! Result: %s" % (deploy_status.id,
                                          deploy_status.status))
          else:
            log.error("All installation process has been finished for request: "
                      "%s! Result: %s" % (deploy_status.id,
                                          deploy_status.status))
            if CONFIG.one_step_update():
              log.warning("One-step-update is enabled with domain polling! "
                          "Skip update...")
            elif deploy_status.failed and CONFIG.rollback_on_failure():
              self.__do_rollback(status=deploy_status,
                                 previous_state=self.DoVManager.get_backup_state())
          result = InstallationFinishedEvent.get_result_from_status(
            deploy_status)
          log.info("Overall installation result: %s" % result)
          self._layer_API.raiseEventNoErrors(InstallationFinishedEvent,
                                             id=deploy_status.id,
                                             result=result)
      else:
        log.debug("No service under deployment: deploy-status is missing!")
  def _handle_EditConfigHookEvent (self, event):
    """
    Handle event raised by received callback of a standard edit-config request.

    Updates the per-domain deploy status from the callback result, refreshes
    the DoV (unless polling/one-step-update handles it), and when no domain
    is pending any more, finalizes the request and raises the finished event.

    :param event: raised event
    :type event: :class:`EditConfigHookEvent`
    :return: None
    """
    log.debug("Received %s event..." % event.__class__.__name__)
    request_id = event.callback.request_id
    deploy_status = self.status_mgr.get_status(id=request_id)
    if event.was_error():
      log.debug("Update failed status for service request: %s..." %
                request_id)
      deploy_status.set_domain_failed(domain=event.domain)
    else:
      log.debug("Update success status for service request: %s..." % request_id)
      deploy_status.set_domain_ok(domain=event.domain)
      if isinstance(event.callback.data, NFFG):
        log.log(VERBOSE, "Changed topology:\n%s" % event.callback.data.dump())
      domain_mgr = self.domains.get_component_by_domain(event.domain)
      if domain_mgr is None:
        log.error("DomainManager for domain: %s is not found!" % event.domain)
        return
      if isinstance(domain_mgr, UnifyDomainManager) and domain_mgr.polling:
        log.debug("Polling in domain: %s is enabled! Skip explicit update..."
                  % event.domain)
        domain_mgr.update_topology_cache()
      if CONFIG.one_step_update():
        log.debug("One-step-update is enabled. Skip explicit domain update!")
      else:
        self.DoVManager.update_domain(domain=event.domain,
                                      nffg=event.callback.data)
    log.debug("Installation status: %s" % deploy_status)
    if not deploy_status.still_pending:
      if deploy_status.success:
        log.info("All installation process has been finished for request: %s! "
                 "Result: %s" % (deploy_status.id, deploy_status.status))
        if CONFIG.one_step_update():
          log.info("One-step-update is enabled. Update DoV now...")
          self.DoVManager.set_global_view(nffg=deploy_status.data)
      elif deploy_status.failed:
        log.error("All installation process has been finished for request: %s! "
                  "Result: %s" % (deploy_status.id, deploy_status.status))
        if CONFIG.one_step_update():
          log.warning("One-step-update is enabled. "
                      "Skip update due to failed request...")
        if CONFIG.rollback_on_failure():
          self.__do_rollback(status=deploy_status,
                             previous_state=self.DoVManager.get_backup_state())
      result = InstallationFinishedEvent.get_result_from_status(deploy_status)
      log.info("Overall installation result: %s" % result)
      # Rollback set back the domains to WAITING status
      if not deploy_status.still_pending:
        is_fail = InstallationFinishedEvent.is_error(result)
        self._layer_API._process_mapping_result(nffg_id=request_id,
                                                fail=is_fail)
        self._layer_API.raiseEventNoErrors(InstallationFinishedEvent,
                                           id=request_id,
                                           result=result)
    else:
      log.debug("Installation process is still pending! Waiting for results...")
  def _handle_ResetHookEvent (self, event):
    """
    Handle event raised by received callback of a rollback request.

    Updates the per-domain reset status from the callback result, restores
    the domain state in the DoV (unless polling/one-step-update handles it),
    and finalizes the rollback once no domain is pending.

    :param event: raised event
    :type event: :class:`ResetHookEvent`
    :return: None
    """
    log.debug("Received %s event..." % event.__class__.__name__)
    request_id = event.callback.request_id
    deploy_status = self.status_mgr.get_status(id=request_id)
    if event.was_error():
      log.error("ROLLBACK request: %s has been failed!" % request_id)
      deploy_status.set_domain_reset_failed(domain=event.domain)
    else:
      log.debug("Update success status for ROLLBACK request: %s..."
                % request_id)
      deploy_status.set_domain_reset(domain=event.domain)
      domain_mgr = self.domains.get_component_by_domain(event.domain)
      if isinstance(domain_mgr, UnifyDomainManager) and domain_mgr.polling:
        log.debug("Polling in domain: %s is enabled! Skip explicit update..."
                  % event.domain)
        domain_mgr.update_topology_cache()
      if CONFIG.one_step_update():
        log.debug("One-step-update is enabled. Skip explicit domain update!")
      else:
        log.debug("Extract domain state from previous state...")
        previous_state = self.DoVManager.get_backup_state()
        reset_state = NFFGToolBox.extract_domain(domain=event.domain,
                                                 nffg=previous_state)
        self.DoVManager.update_domain(domain=event.domain,
                                      nffg=reset_state)
    log.debug("Rollback status: %s" % deploy_status)
    if not deploy_status.still_pending:
      if deploy_status.reset:
        log.info("All ROLLBACK process has been finished! Result: %s" %
                 deploy_status.status)
        if CONFIG.one_step_update():
          log.debug("One-step-update is enabled. Restore DoV state now...")
          backup = self.DoVManager.get_backup_state()
          self.DoVManager.set_global_view(nffg=backup)
      elif deploy_status.failed:
        log.error("All ROLLBACK process has been finished! Result: %s" %
                  deploy_status.status)
        if CONFIG.one_step_update():
          log.warning("One-step-update is enabled. "
                      "Skip restore state due to failed request...")
      result = InstallationFinishedEvent.get_result_from_status(deploy_status)
      log.info("Overall installation result: %s" % result)
      self._layer_API.raiseEventNoErrors(InstallationFinishedEvent,
                                         id=request_id,
                                         result=result)
  def _handle_InfoHookEvent (self, event):
    """
    Handle event raised by received callback of an Info request.

    Merges the per-domain answer into the originally stored Info structure;
    when every domain has answered, node paths are mapped back to the
    request's original IDs and the finished event is raised.

    :param event: raised event
    :type event: :class:`InfoHookEvent`
    :return: None
    """
    log.debug("Received %s event..." % event.__class__.__name__)
    request_id = event.callback.request_id
    req_status = self.status_mgr.get_status(id=request_id)
    # data was stored as (original Info object, reverse node-ID binding)
    original_info, binding = req_status.data
    log.log(VERBOSE, "Original Info:\n%s" % original_info.xml())
    if event.was_error():
      log.warning("Update failed status for info request: %s..." % request_id)
      req_status.set_domain_failed(domain=event.domain)
    else:
      log.debug("Update success status for info request: %s..." % request_id)
      req_status.set_domain_ok(domain=event.domain)
      # Update Info XML with the received callback body
      try:
        log.debug("Parsing received callback data...")
        body = event.callback.body if event.callback.body else ""
        new_info = Info.parse_from_text(body)
        log.log(VERBOSE, "Received data:\n%s" % new_info.xml())
        log.debug("Update collected info with parsed data...")
        log.debug("Merging received data...")
        original_info.merge(new_info)
        log.log(VERBOSE, "Updated Info data:\n%s" % original_info.xml())
      except Exception:
        log.exception("Got error while processing Info data!")
        req_status.set_domain_failed(domain=event.domain)
    log.debug("Info request status: %s" % req_status)
    if not req_status.still_pending:
      log.info("All info processes have been finished!")
      self.__reset_node_ids(info=original_info, binding=binding)
      result = InfoRequestFinishedEvent.get_result_from_status(req_status)
      log.debug("Overall info result: %s" % result)
      self._layer_API.raiseEventNoErrors(InfoRequestFinishedEvent,
                                         result=result,
                                         status=req_status)
def collect_domain_urls (self, mapping):
"""
Extend the given mapping info structure with related domain URLs.
:param mapping: collected mapping info
:type mapping: dict
:return: updated mapping info structure
:rtype: dict
"""
for m in mapping:
try:
domain = m['bisbis']['domain']
except KeyError:
log.error("Missing domain from mapping:\n%s" % m)
continue
url = self.get_domain_url(domain=domain)
if url:
log.debug("Found URL: %s for domain: %s" % (url, domain))
else:
log.error("URL is missing from domain: %s!" % domain)
url = "N/A"
m['bisbis']['url'] = url
return mapping
def get_domain_url (self, domain):
"""
Return the configured domain URL based on given `domain` name.
:param domain: domain name
:type domain: str
:return: URL
:rtype: str
"""
mgr = self.domains.get_component_by_domain(domain_name=domain)
if not mgr:
log.error("Domain Manager for domain: %s is not found!" % domain)
return
elif not isinstance(mgr, AbstractRemoteDomainManager):
log.warning("Domain Manager for domain %s is not a remote domain manager!"
% domain)
return
else:
return mgr.get_domain_url()
  def __resolve_nodes_in_info (self, info):
    """
    Resolve the node path in given `info` structure using the full topology
    view. Return with the collected path binding in reverse ordered way.

    :param info: info request structure
    :type info: :class:`Info`
    :return: reverse ordered path binding
    :rtype: dict
    """
    log.debug("Resolve NF paths...")
    reverse_binding = {}
    dov = self.DoVManager.dov.get_resource_info()
    for attr in (getattr(info, e) for e in info._sorted_children):
      rewrite = []
      for element in attr:
        if hasattr(element, "object"):
          old_path = element.object.get_value()
          bb, nf = get_bb_nf_from_path(path=old_path)
          # The NF's actual BiSBiS is its single infra neighbour in the DoV
          new_bb = [node.id for node in dov.infra_neighbors(node_id=nf)]
          if len(new_bb) != 1:
            log.warning("Original BiSBiS for NF: %s was not found "
                        "in neighbours: %s" % (nf, new_bb))
            continue
          # Strip the unique-ID suffix from the DoV node ID
          sep = NFFGConverter.UNIQUE_ID_DELIMITER
          new_bb = str(new_bb.pop()).rsplit(sep, 1)[0]
          reverse_binding[new_bb] = bb
          old_bb, new_bb = "/node[id=%s]" % bb, "/node[id=%s]" % new_bb
          log.debug("Find BiSBiS node remapping: %s --> %s" % (old_bb, new_bb))
          new_path = str(old_path).replace(old_bb, new_bb)
          rewrite.append((element, new_path))
      # Tricky override because object is key in yang -> del and re-add
      for e, p in rewrite:
        attr.remove(e)
        e.object.set_value(p)
        attr.add(e)
        log.debug("Overrided new path for NF --> %s" % e.object.get_value())
    return reverse_binding
  @staticmethod
  def __reset_node_ids (info, binding):
    """
    Reset node path in given `info` structure with a previously collected
    reverse path binding structure.

    :param info: received Info object
    :type info: :class:`Info`
    :param binding: reversed node path bindings
    :type binding: dict
    :return: updated info object
    :rtype: :class:`Info`
    """
    log.debug("Reset NF paths...")
    for attr in (getattr(info, e) for e in info._sorted_children):
      rewrite = []
      for element in attr:
        if hasattr(element, "object"):
          old_path = element.object.get_value()
          bb, nf = get_bb_nf_from_path(path=old_path)
          if bb not in binding:
            log.warning("Missing binding for node: %s" % bb)
            continue
          new_bb = binding.get(bb)
          log.debug("Find BiSBiS node remapping: %s --> %s" % (bb, new_bb))
          old_bb, new_bb = "/node[id=%s]" % bb, "/node[id=%s]" % new_bb
          new_path = str(old_path).replace(old_bb, new_bb)
          rewrite.append((element, new_path))
      # Tricky override because object is key in yang -> del and re-add
      for e, p in rewrite:
        attr.remove(e)
        e.object.set_value(p)
        attr.add(e)
        log.debug("Overrided new path for NF --> %s" % e.object.get_value())
    log.log(VERBOSE, info.xml())
    return info
def __split_info_request_by_domain (self, info):
"""
Split the given `info` structure based on domains.
:param info: received Info object
:type info: :class:`Info`
:return: splitted info dict keyed by domain names
:rtype: dict
"""
dov = self.DoVManager.dov.get_resource_info()
vnfs = get_nfs_from_info(info=info)
if not vnfs:
log.debug("No NF has been detected from info request!")
return {}
splitted = NFFGToolBox.split_nfs_by_domain(nffg=dov, nfs=vnfs, log=log)
for domain, nfs in splitted.items():
log.debug("Splitted domain: %s --> %s" % (domain, nfs))
info_part = strip_info_by_nfs(info, nfs)
log.log(VERBOSE, "Splitted info part:\n%s" % info_part.xml())
splitted[domain] = info_part
return splitted
def propagate_info_requests (self, id, info):
"""
Process the received Info request and propagate the relevant part to the
domain orchestrators.
:param id: Info request ID
:type id: str or int
:param info: received Info object
:type info: :class:`Info`
:return: request status
:rtype: :class:`DomainRequestStatus`
"""
binding = self.__resolve_nodes_in_info(info=info)
splitted = self.__split_info_request_by_domain(info=info)
status = self.status_mgr.register_request(id=id,
domains=splitted.keys(),
data=(info, binding))
if not splitted:
log.warning("No valid request has been remained after splitting!")
return status
for domain, info_part in splitted.iteritems():
log.debug("Search DomainManager for domain: %s" % domain)
# Get Domain Manager
domain_mgr = self.domains.get_component_by_domain(domain_name=domain)
if domain_mgr is None:
log.warning("No DomainManager has been initialized for domain: %s! "
"Skip install domain part..." % domain)
status.set_domain_failed(domain=domain)
continue
if not isinstance(domain_mgr, UnifyDomainManager):
log.warning("Domain manager: %s does not support info request! Skip...")
status.set_domain_failed(domain=domain)
continue
log.log(VERBOSE, "Splitted info request: %s part:\n%s"
% (domain, info_part.xml()))
success = domain_mgr.request_info_from_domain(req_id=id,
info_part=info_part)
if not success:
log.warning("Info request: %s in domain: %s was unsuccessful!"
% (status.id, domain))
status.set_domain_failed(domain=domain)
if status.success:
log.info("All 'info' sub-requests were successful!")
elif status.failed:
log.error("Top Info request: %s was unsuccessful!" % status.id)
elif status.still_pending:
log.info("All 'info' sub-requests have been finished! "
"Waiting for results...")
log.debug("Info request status: %s" % status)
return status
def _handle_GetLocalDomainViewEvent (self, event):
"""
Handle GetLocalDomainViewEvent and set the domain view for the external
DomainManager.
:param event: event object
:type event: :any:`DomainChangedEvent`
:return: None
"""
# TODO implement
pass
def remove_external_domain_managers (self, domain):
"""
Shutdown adn remove ExternalDomainManager.
:param domain: domain name
:type domain: str
:return: None
"""
log.warning("Connection has been lost with external domain client! "
"Shutdown successor DomainManagers for domain: %s..." %
domain)
# Get removable DomainManager names
ext_mgrs = [name for name, mgr in self.domains
if '@' in mgr.domain_name and
mgr.domain_name.endswith(domain)]
# Remove DomainManagers one by one
for mgr_name in ext_mgrs:
log.debug("Found DomainManager: %s for ExternalDomainManager: %s" %
(mgr_name, domain))
self.domains.remove_mgr(name=mgr_name)
return
def get_external_domain_ids (self, domain, topo_nffg):
"""
Get the IDs of nodes from the detected external topology.
:param domain: domain name
:type domain: str
:param topo_nffg: topology description
:type topo_nffg: :class:`NFFG`
:return: external domain IDs
:rtype: set
"""
domain_mgr = self.domains.get_component_by_domain(domain_name=domain)
new_ids = {infra.id for infra in topo_nffg.infras}
if domain_mgr is None:
log.error("No manager has been found for domain %s in %s"
% (domain, self.domains.domains))
return new_ids
try:
if new_ids:
# Remove oneself from domains
new_ids.remove(domain_mgr.bgp_domain_id)
except KeyError:
log.warning("Detected domains does not include own BGP ID: %s" %
domain_mgr.bgp_domain_id)
return new_ids
  def _manage_external_domain_changes (self, event):
    """
    Handle DomainChangedEvents came from an :any:`ExternalDomainManager`.

    Detects lost and newly appeared domains by diffing the manager's
    ``managed_domain_ids`` against the node IDs of the reported topology,
    then stops/initiates the matching DomainManagers accordingly.

    :param event: event object
    :type event: :any:`DomainChangedEvent`
    :return: None
    """
    # BGP-LS client is up
    if event.cause == DomainChangedEvent.TYPE.DOMAIN_UP:
      log.debug("Detect remote domains from external DomainManager...")
    # New topology received from BGP-LS client
    elif event.cause == DomainChangedEvent.TYPE.DOMAIN_CHANGED:
      log.debug("Detect domain changes from external DomainManager...")
    # BGP-LS client is down
    elif event.cause == DomainChangedEvent.TYPE.DOMAIN_DOWN:
      return self.remove_external_domain_managers(domain=event.domain)
    topo_nffg = event.data
    if topo_nffg is None:
      log.warning("Topology description is missing!")
      return
    # Get domain Ids
    new_ids = self.get_external_domain_ids(domain=event.domain,
                                           topo_nffg=topo_nffg)
    # Get the main ExternalDomainManager
    domain_mgr = self.domains.get_component_by_domain(domain_name=event.domain)
    # Check lost domain
    for id in (domain_mgr.managed_domain_ids - new_ids):
      log.info("Detected disconnected domain from external DomainManager! "
               "BGP id: %s" % id)
      MessageDumper().dump_to_file(data=topo_nffg.dump(),
                                   unique="%s-changed" % event.domain)
      # Remove lost domain
      if id in domain_mgr.managed_domain_ids:
        domain_mgr.managed_domain_ids.remove(id)
      else:
        log.warning("Lost domain is missing from managed domains: %s!" %
                    domain_mgr.managed_domain_ids)
      # Get DomainManager name by domain name
      ext_domain_name = "%s@%s" % (id, domain_mgr.domain_name)
      ext_mgr_name = self.domains.get_component_name_by_domain(
        domain_name=ext_domain_name)
      # Stop DomainManager and remove object from register
      self.domains.stop_mgr(name=ext_mgr_name)
    # Check new domains
    for id in (new_ids - domain_mgr.managed_domain_ids):
      orchestrator_url = topo_nffg[id].metadata.get(self.EXTERNAL_MDO_META_NAME)
      if orchestrator_url is None:
        log.warning("MdO URL is not found in the Node: %s with the name: %s! "
                    "Skip initialization..." % (
                      id, self.EXTERNAL_MDO_META_NAME))
        # NOTE(review): this 'return' aborts processing of ALL remaining new
        # domains; a 'continue' may have been intended - confirm
        return
      log.info("New domain detected from external DomainManager! "
               "BGP id: %s, Orchestrator URL: %s" % (id, orchestrator_url))
      MessageDumper().dump_to_file(data=topo_nffg.dump(),
                                   unique="%s-changed" % event.domain)
      # Track new domain
      domain_mgr.managed_domain_ids.add(id)
      # Get RemoteDM config
      mgr_cfg = CONFIG.get_component_params(component=domain_mgr.prototype)
      if mgr_cfg is None:
        # NOTE(review): the format string below has a '%s' placeholder but no
        # argument, so the literal '%s' is logged; same 'return' vs 'continue'
        # concern as above
        log.warning("DomainManager: %s configurations is not found! "
                    "Skip initialization...")
        return
      # Set domain name
      mgr_cfg['domain_name'] = "%s%s%s" % (id,
                                           self.EXTERNAL_DOMAIN_NAME_JOINER,
                                           domain_mgr.domain_name)
      log.debug("Generated domain name: %s" % mgr_cfg['domain_name'])
      # Set URL and prefix
      try:
        url = urlparse.urlsplit(orchestrator_url)
        mgr_cfg['adapters']['REMOTE']['url'] = "http://%s" % url.netloc
        mgr_cfg['adapters']['REMOTE']['prefix'] = url.path
      except KeyError as e:
        log.warning("Missing required config entry %s from "
                    "RemoteDomainManager: %s" % (e, domain_mgr.prototype))
      log.log(VERBOSE, "Used configuration:\n%s" % pprint.pformat(mgr_cfg))
      log.info("Initiate DomainManager for detected external domain: %s, "
               "URL: %s" % (mgr_cfg['domain_name'], orchestrator_url))
      # Initialize DomainManager for detected domain
      ext_mgr = self.domains.load_component(component_name=domain_mgr.prototype,
                                            params=mgr_cfg)
      log.debug("Use domain name: %s for external DomainManager name!" %
                ext_mgr.domain_name)
      # Start the DomainManager
      self.domains.register_mgr(name=ext_mgr.domain_name,
                                mgr=ext_mgr,
                                autostart=True)
class DomainRequestStatus(object):
  """
  Container class for storing related information about a service request.

  Tracks one deploy status value per affected domain and derives the overall
  request state from the per-domain states.
  """
  # Possible per-domain status values
  INITIALIZED = "INITIALIZED"
  OK = "OK"
  WAITING = "WAITING"
  FAILED = "FAILED"
  RESET = "RESET"
  RESET_FAILED = "RESET_FAILED"

  def __init__ (self, id, domains, data=None):
    """
    Init.

    :param id: request ID
    :type id: str or int
    :param domains: domains affected by the request
    :type domains: set
    :param data: service request under deploy (optional)
    :type data: :class:`NFFG`
    """
    self.__id = id
    # Every tracked domain starts out as INITIALIZED
    self.__statuses = dict.fromkeys(domains, self.INITIALIZED)
    self.__standby = False
    self.__data = data

  @property
  def id (self):
    """
    Return service ID.

    :return: service ID
    :rtype: str or int
    """
    return self.__id

  @property
  def data (self):
    """
    Return data.

    :return: data
    :rtype: :class:`NFFG` or None
    """
    return self.__data

  def set_mapping_result (self, data):
    """
    Overwrite the stored service request under deploy.

    :param data: new service request (or None to clear the stored request)
    :type data: :class:`NFFG`
    :return: None
    """
    # Fix: guard against data=None -- reset_status() may be invoked without
    # data, and the original unconditionally accessed data.id here
    log.debug("Set mapping result: %s for service request: %s"
              % (data.id if data is not None else None, self.__id))
    self.__data = data

  def reset_status (self, data=None):
    """
    Reset the object state and use the given data as service request.

    :param data: optional service request
    :type data: :class:`NFFG`
    :return: None
    """
    log.debug("Clear domain status...")
    for domain in self.__statuses:
      self.__statuses[domain] = self.INITIALIZED
    self.set_mapping_result(data=data)
    self.reset_standby()

  def set_standby (self):
    """
    Set deploy status object in `standby` state.

    :return: None
    """
    log.debug("Put request: %s in standby mode" % self.__id)
    self.__standby = True

  @property
  def standby (self):
    """
    Return standby state.

    :return: standby
    :rtype: bool
    """
    return self.__standby

  def set_active (self):
    """
    Set deploy status object in `active` state.

    :return: None
    """
    if self.__standby:
      log.debug("Continue request: %s " % self.__id)
      self.__standby = False

  def reset_standby (self):
    """
    Reset standby state to default value.

    :return: None
    """
    if self.__standby:
      log.debug("Reset request to active mode")
      self.__standby = False

  def clear (self):
    """
    Clear tracked domain statuses.

    :return: None
    """
    self.__statuses.clear()

  @property
  def still_pending (self):
    """
    :return: Return True if the deployment is still pending
    :rtype: bool
    """
    # values() instead of itervalues(): identical result here, Py2/Py3-safe
    if self.__statuses:
      return any(map(lambda s: s in (self.INITIALIZED, self.WAITING),
                     self.__statuses.values()))
    else:
      return False

  @property
  def success (self):
    """
    :return: Return True if the deployment was successful
    :rtype: bool
    """
    if self.__statuses:
      return all(map(lambda s: s == self.OK,
                     self.__statuses.values()))
    else:
      return False

  @property
  def reset (self):
    """
    :return: Return True if the service was successfully reset
    :rtype: bool
    """
    if self.__statuses:
      return all(map(lambda s: s == self.RESET,
                     self.__statuses.values()))
    else:
      return False

  @property
  def failed (self):
    """
    :return: Return True if the deployment was failed
    :rtype: bool
    """
    return any(map(lambda s: s == self.FAILED,
                   self.__statuses.values()))

  @property
  def reset_failed (self):
    """
    :return: Return True if the service was unsuccessfully reset
    :rtype: bool
    """
    return any(map(lambda s: s == self.RESET_FAILED,
                   self.__statuses.values()))

  @property
  def status (self):
    """
    Return the overall deploy status.

    Any FAILED / RESET_FAILED / WAITING domain dominates; otherwise the
    aggregated RESET / OK / INITIALIZED state is reported.

    :return: deploy status
    :rtype: str
    """
    for s in self.statuses:
      if s == self.FAILED:
        return self.FAILED
      elif s == self.RESET_FAILED:
        return self.RESET_FAILED
      elif s == self.WAITING:
        return self.WAITING
    if self.reset:
      return self.RESET
    elif self.success:
      return self.OK
    else:
      return self.INITIALIZED

  @property
  def domains (self):
    """
    :return: Tracked domains names
    :rtype: tuple
    """
    return self.__statuses.keys()

  @property
  def statuses (self):
    """
    :return: Tracked domain statuses
    :rtype: tuple
    """
    return self.__statuses.values()

  def __str__ (self):
    return "%s(id=%s) => %s" % (self.__class__.__name__,
                                self.__id, str(self.__statuses))

  def get_domain_status (self, domain):
    """
    Return with the given domain deploy status.

    :param domain: domain name
    :type domain: str
    :return: deploy status
    :rtype: str
    """
    return self.__statuses.get(domain)

  def set_domain (self, domain, status):
    """
    Set the given domain with the given status value.

    :param domain: domain name
    :type domain: str
    :param status: deploy status
    :type status: str
    :return: domain status object
    :rtype: :class:`DomainRequestStatus`
    """
    if domain not in self.__statuses:
      raise RuntimeError("Updated domain: %s is not registered!" % domain)
    self.__statuses[domain] = status
    # Terminal states close the per-domain deploy measurement
    if status in (self.OK, self.FAILED, self.RESET):
      stats.add_measurement_end_entry(type=stats.TYPE_DEPLOY_DOMAIN,
                                      info="%s-->%s" % (domain, status))
    return self

  def set_domain_ok (self, domain):
    """
    Set successful domain status for given domain.

    :param domain: domain name
    :type domain: str
    :return: domain status object
    :rtype: :class:`DomainRequestStatus`
    """
    log.debug("Set install status: %s for domain: %s" % (self.OK, domain))
    return self.set_domain(domain=domain, status=self.OK)

  def set_domain_waiting (self, domain):
    """
    Set pending domain status for given domain.

    :param domain: domain name
    :type domain: str
    :return: domain status object
    :rtype: :class:`DomainRequestStatus`
    """
    log.debug("Set install status: %s for domain: %s" % (self.WAITING, domain))
    return self.set_domain(domain=domain, status=self.WAITING)

  def set_domain_failed (self, domain):
    """
    Set failed domain status for given domain.

    :param domain: domain name
    :type domain: str
    :return: domain status object
    :rtype: :class:`DomainRequestStatus`
    """
    log.debug("Set install status: %s for domain: %s" % (self.FAILED, domain))
    return self.set_domain(domain=domain, status=self.FAILED)

  def set_domain_reset (self, domain):
    """
    Set reset domain status for given domain.

    :param domain: domain name
    :type domain: str
    :return: domain status object
    :rtype: :class:`DomainRequestStatus`
    """
    log.debug("Set install status: %s for domain: %s" % (self.RESET, domain))
    return self.set_domain(domain=domain, status=self.RESET)

  def set_domain_reset_failed (self, domain):
    """
    Set failed reset domain status for given domain.

    :param domain: domain name
    :type domain: str
    :return: domain status object
    :rtype: :class:`DomainRequestStatus`
    """
    log.debug("Set install status: %s for domain: %s" % (self.RESET_FAILED,
                                                         domain))
    return self.set_domain(domain=domain, status=self.RESET_FAILED)
class DomainRequestManager(object):
  """
  Manager class to register service requests for managing deployment.
  """

  def __init__ (self):
    """
    Init.
    """
    self._services = []
    self._last = None

  def register_request (self, id, domains, data=None):
    """
    Register a service request.

    If a request with the same ID is already tracked, its deploy status is
    reset instead of registering a duplicate entry.

    :param id: request ID
    :type id: str or int
    :param domains: domains affected by the request
    :type domains: set
    :param data: service request under deploy (optional)
    :type data: :class:`NFFG`
    :return: created deploy status object
    :rtype: :class:`DomainRequestStatus`
    """
    for registered in self._services:
      if registered.id != id:
        continue
      log.warning("Detected already registered service request: %s in %s! "
                  "Reset deploy status..." % (id, self.__class__.__name__))
      registered.reset_status(data=data)
      self._last = registered
      return registered
    status = DomainRequestStatus(id=id, domains=domains, data=data)
    self._services.append(status)
    self._last = status
    log.info("Request with id: %s is registered for status management!" % id)
    log.debug("Status: %s" % status)
    return status

  def get_last_status (self):
    """
    :return: Last registered deploy status object
    :rtype: :class:`DomainRequestStatus`
    """
    return self._last

  def register_service (self, nffg):
    """
    Wrapper function to register a service request using the request object.

    :param nffg: service request
    :type nffg: :class:`NFFG`
    :return: created deploy status object
    :rtype: :class:`DomainRequestStatus`
    """
    affected_domains = NFFGToolBox.detect_domains(nffg=nffg)
    return self.register_request(id=nffg.id, domains=affected_domains,
                                 data=nffg)

  def get_status (self, id):
    """
    Return the deploy status object of the given ID.

    :param id: service request ID
    :type id: str or int
    :return: created deploy status object
    :rtype: :class:`DomainRequestStatus`
    """
    matches = [s for s in self._services if s.id == id]
    if matches:
      return matches[0]
    log.error("Service status for service: %s is missing!" % id)
class GlobalResourceManager(object):
  """
  Handle and store the Global Resources view as known as the DoV.
  """

  def __init__ (self):
    """
    Init.
    """
    super(GlobalResourceManager, self).__init__()
    log.debug("Init DomainResourceManager")
    self.__dov = DomainVirtualizer(self)  # Domain Virtualizer
    self.__tracked_domains = set()  # Cache for detected and stored domains
    self.status_updates = CONFIG.use_status_based_update()
    self.remerge_strategy = CONFIG.use_remerge_update_strategy()
    self.__backup = None

  @property
  def dov (self):
    """
    Getter for :class:`DomainVirtualizer`.

    :return: global infrastructure view as the Domain Virtualizer
    :rtype: :any:`DomainVirtualizer`
    """
    return self.__dov

  @property
  def tracked (self):
    """
    Getter for tuple of detected domains.

    :return: detected domains
    :rtype: tuple
    """
    # Fix: the original returned ``tuple(self.tracked)`` which recursed into
    # this property forever (RecursionError on first access); the cached
    # domain set is the intended source
    return tuple(self.__tracked_domains)

  def backup_dov_state (self):
    """
    Backup current state of DoV.

    :return: None
    """
    log.debug("Backup current DoV state...")
    self.__backup = self.dov.get_resource_info()
    # Tag the stashed copy so it cannot be confused with the live DoV
    self.__backup.id = (self.__backup.id + "-backup")

  def get_backup_state (self):
    """
    Return with the stored backup.

    :return: stashed DoV
    :rtype: :class:`NFFG`
    """
    log.debug("Acquire previous DoV state...")
    return self.__backup

  def set_global_view (self, nffg):
    """
    Replace the global view with the given topology.

    :param nffg: new global topology
    :type nffg: :class:`NFFG`
    :return: None
    """
    log.debug("Update the whole Global view (DoV) with the NFFG: %s..." % nffg)
    self.__dov.update_full_global_view(nffg=nffg)
    # Re-detect the tracked domains from the new global view
    self.__tracked_domains.clear()
    self.__tracked_domains.update(NFFGToolBox.detect_domains(nffg))
    notify_remote_visualizer(data=self.__dov.get_resource_info(),
                             unique_id="DOV",
                             params={"event": "datastore"})

  def update_global_view_status (self, status):
    """
    Update the status of the elements in DoV with the given status.

    :param status: status
    :type status: str
    :return: None
    """
    log.debug("Update Global view (DoV) mapping status with: %s" % status)
    NFFGToolBox.update_status_info(nffg=self.__dov.get_resource_info(),
                                   status=status, log=log)

  def rewrite_global_view_with_status (self, nffg):
    """
    Replace the global view with the given topology and add status for the
    elements.

    :param nffg: new global topology
    :type nffg: :class:`NFFG`
    :return: None
    """
    if not nffg.is_infrastructure():
      # Fix: the original message was garbled ("is not contains no ...")
      log.error("New topology does not contain any infrastructure node! "
                "Skip DoV update...")
      return
    if nffg.is_virtualized():
      log.debug("Update NFFG contains virtualized node(s)!")
      # Rewriting with virtualized nodes is only allowed when the DoV itself
      # is virtualized as well
      if self.__dov.get_resource_info().is_virtualized():
        log.debug("DoV also contains virtualized node(s)! "
                  "Enable DoV rewriting!")
      else:
        log.warning("Detected unexpected virtualized node(s) in update NFFG! "
                    "Skip DoV update...")
        return
    log.debug("Migrate status info of deployed elements from DoV...")
    NFFGToolBox.update_status_by_dov(nffg=nffg,
                                     dov=self.__dov.get_resource_info(),
                                     log=log)
    self.set_global_view(nffg=nffg)
    log.log(VERBOSE,
            "Updated DoV:\n%s" % self.__dov.get_resource_info().dump())

  def add_domain (self, domain, nffg):
    """
    Update the global view data with the specific domain info.

    :param domain: domain name
    :type domain: str
    :param nffg: infrastructure info collected from the domain
    :type nffg: :class:`NFFG`
    :return: None
    """
    # If the domain is not tracked
    if domain not in self.__tracked_domains:
      if nffg:
        log.info("Append %s domain to DoV..." % domain)
        # If DoV is empty
        if not self.__dov.is_empty():
          # Merge domain topo into global view
          self.__dov.merge_new_domain_into_dov(nffg=nffg)
        else:
          # No other domain detected, set NFFG as the whole Global view
          log.debug(
            "DoV is empty! Add new domain: %s as the global view!" % domain)
          self.__dov.set_domain_as_global_view(domain=domain, nffg=nffg)
      else:
        log.warning("Got empty data. Add uninitialized domain...")
      # Add detected domain to cached domains
      self.__tracked_domains.add(domain)
      notify_remote_visualizer(data=self.__dov.get_resource_info(),
                               unique_id="DOV",
                               params={"event": "datastore"})
    else:
      log.error("New domain: %s has already tracked in domains: %s! "
                "Abort adding..." % (domain, self.__tracked_domains))

  def update_domain (self, domain, nffg):
    """
    Update the detected domain in the global view with the given info.

    :param domain: domain name
    :type domain: str
    :param nffg: changed infrastructure info
    :type nffg: :class:`NFFG`
    :return: None
    """
    if domain in self.__tracked_domains:
      log.info("Update domain: %s in DoV..." % domain)
      # Strategy precedence: status-based > remerge > plain update
      if self.status_updates:
        log.debug("Update status info for domain: %s in DoV..." % domain)
        self.__dov.update_domain_status_in_dov(domain=domain, nffg=nffg)
      elif self.remerge_strategy:
        log.debug("Using REMERGE strategy for DoV update...")
        self.__dov.remerge_domain_in_dov(domain=domain, nffg=nffg)
      else:
        log.debug("Using UPDATE strategy for DoV update...")
        self.__dov.update_domain_in_dov(domain=domain, nffg=nffg)
      notify_remote_visualizer(data=self.__dov.get_resource_info(),
                               unique_id="DOV",
                               params={"event": "datastore"})
    else:
      log.error(
        "Detected domain: %s is not included in tracked domains: %s! Abort "
        "updating..." % (domain, self.__tracked_domains))

  def remove_domain (self, domain):
    """
    Remove the detected domain from the global view.

    :param domain: domain name
    :type domain: str
    :return: None
    """
    if domain in self.__tracked_domains:
      log.info("Remove domain: %s from DoV..." % domain)
      self.__dov.remove_domain_from_dov(domain=domain)
      self.__tracked_domains.remove(domain)
      notify_remote_visualizer(data=self.__dov.get_resource_info(),
                               unique_id="DOV",
                               params={"event": "datastore"})
    else:
      log.warning("Removing domain: %s is not included in tracked domains: %s! "
                  "Skip removing..." % (domain, self.__tracked_domains))

  def clean_domain (self, domain):
    """
    Clean given domain.

    :param domain: domain name
    :type domain: str
    :return: None
    """
    if domain in self.__tracked_domains:
      log.info(
        "Remove initiated VNFs and flowrules from the domain: %s" % domain)
      self.__dov.clean_domain_from_dov(domain=domain)
      notify_remote_visualizer(data=self.__dov.get_resource_info(),
                               unique_id="DOV",
                               params={"event": "datastore"})
    else:
      log.error(
        "Detected domain: %s is not included in tracked domains: %s! Abort "
        "cleaning..." % (domain, self.__tracked_domains))
# Copyright (c) 2012-2015, Eucalyptus Systems, Inc.
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import base64
import calendar
import datetime
import email.utils
import hashlib
import hmac
import os
import re
import tempfile
import time
import warnings
import six
import six.moves.urllib_parse as urlparse
from requestbuilder import Arg
from requestbuilder.auth import BaseAuth
from requestbuilder.exceptions import AuthError
# UTC timestamp formats used when signing requests: the standard ISO 8601
# form and the separator-free "basic" variant
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_BASIC = '%Y%m%dT%H%M%SZ'
class HmacKeyAuth(BaseAuth):
    '''
    Basis for AWS HMAC-based authentication

    Resolves an access key ID, secret key, and optional security token from
    (in precedence order) explicit arguments, the process environment, an
    AWS credential file, and the tool configuration.
    '''
    ARGS = [Arg('-I', '--access-key-id', dest='key_id', metavar='KEY_ID'),
            Arg('-S', '--secret-key', dest='secret_key', metavar='KEY'),
            Arg('--security-token', dest='security_token', metavar='TOKEN')]
    @classmethod
    def from_other(cls, other, **kwargs):
        '''
        Build and configure a new auth instance, copying credential info
        from another auth object unless overridden via kwargs.
        '''
        kwargs.setdefault('loglevel', other.log.level)
        kwargs.setdefault('key_id', other.args.get('key_id'))
        kwargs.setdefault('secret_key', other.args.get('secret_key'))
        kwargs.setdefault('security_token', other.args.get('security_token'))
        kwargs.setdefault('credential_expiration',
                          other.args.get('credential_expiration'))
        new = cls(other.config, **kwargs)
        new.configure()
        return new
    def configure(self):
        '''
        Resolve credentials and validate them, raising AuthError when the
        access key ID or secret key is missing or when a supplied
        credential expiration time has already passed.
        '''
        self.__populate_auth_args()
        if not self.args.get('key_id'):
            raise AuthError('missing access key ID; please supply one with -I')
        if not self.args.get('secret_key'):
            raise AuthError('missing secret key; please supply one with -S')
        if self.args.get('credential_expiration'):
            expiration = None
            # The expiration may appear with or without fractional seconds
            for fmt in ('%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ'):
                try:
                    expiration = datetime.datetime.strptime(
                        self.args['credential_expiration'], fmt)
                    break
                except ValueError:
                    continue
            else:
                # Neither format matched (the loop never hit ``break``)
                self.log.warn(
                    'failed to parse credential expiration time '
                    '\'{0}\'; proceeding without validation'
                    .format(self.args['credential_expiration']))
            if expiration and expiration < datetime.datetime.utcnow():
                raise AuthError('credentials have expired')
    def configure_from_aws_credential_file(self):
        '''
        Fill in missing key_id/secret_key values from the file named by the
        AWS_CREDENTIAL_FILE environment variable and return that file's
        path; return None when the variable is unset.
        '''
        if 'AWS_CREDENTIAL_FILE' in os.environ:
            path = os.getenv('AWS_CREDENTIAL_FILE')
            path = os.path.expandvars(path)
            path = os.path.expanduser(path)
            with open(path) as credfile:
                for line in credfile:
                    # Strip trailing '#' comments before parsing
                    line = line.split('#', 1)[0]
                    if '=' in line:
                        (key, val) = line.split('=', 1)
                        if (key.strip() == 'AWSAccessKeyId' and
                                not self.args.get('key_id')):
                            # There's probably a better way to do this, but it
                            # seems to work for me.  Patches are welcome.  :)
                            self.args['key_id'] = val.strip()
                        elif (key.strip() == 'AWSSecretKey' and
                              not self.args.get('secret_key')):
                            self.args['secret_key'] = val.strip()
            return path
    def __populate_auth_args(self):
        """
        Try to get auth info from each source in turn until one provides
        both a key ID and a secret key.  After each time a source fails
        to provide enough info we wipe self.args out so we don't wind up
        mixing info from multiple sources.
        """
        # self.args gets highest precedence
        if self.args.get('key_id') and not self.args.get('secret_key'):
            # __reset_unless_ready will wipe out key_id and result in
            # the wrong error message
            raise AuthError('missing secret key; please supply one with -S')
        if self.args.get('secret_key') and not self.args.get('key_id'):
            # If only one is supplied at the command line we should
            # immediately blow up
            raise AuthError('missing access key ID; please supply one with -I')
        if self.__reset_unless_ready():
            self.log.debug('using auth info provided directly')
            return
        # Environment comes next
        self.args['key_id'] = (os.getenv('AWS_ACCESS_KEY_ID') or
                               os.getenv('AWS_ACCESS_KEY'))
        self.args['secret_key'] = (os.getenv('AWS_SECRET_ACCESS_KEY') or
                                   os.getenv('AWS_SECRET_KEY'))
        self.args['security_token'] = os.getenv('AWS_SECURITY_TOKEN')
        self.args['credential_expiration'] = \
            os.getenv('AWS_CREDENTIAL_EXPIRATION')
        if self.__reset_unless_ready():
            self.log.debug('using auth info from environment')
            return
        # See if an AWS credential file was given in the environment
        aws_credfile_path = self.configure_from_aws_credential_file()
        if aws_credfile_path and self.__reset_unless_ready():
            self.log.debug('using auth info from AWS credential file %s',
                           aws_credfile_path)
            return
        # Try the config file
        self.args['key_id'] = self.config.get_user_option('key-id')
        self.args['secret_key'] = self.config.get_user_option('secret-key',
                                                              redact=True)
        if self.__reset_unless_ready():
            self.log.debug('using auth info from configuration')
            return
    def __reset_unless_ready(self):
        """
        If both an access key ID and a secret key are set in self.args
        return True.  Otherwise, clear auth info from self.args and
        return False.
        """
        if self.args.get('key_id') and self.args.get('secret_key'):
            return True
        for arg in ('key_id', 'secret_key', 'security_token',
                    'credential_expiration'):
            self.args[arg] = None
        return False
class HmacV1Auth(HmacKeyAuth):
    '''
    S3 REST authentication
    http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
    '''
    # This list comes from the CanonicalizedResource section of the above page
    HASHED_PARAMS = set((
        'acl', 'lifecycle', 'location', 'logging', 'notification',
        'partNumber', 'policy', 'requestPayment', 'torrent', 'uploadId',
        'uploads', 'versionId', 'versioning', 'versions', 'website'))
    def apply_to_request(self, req, service):
        '''
        Sign the request in place: add Date/Host (and security token)
        headers, then attach an ``Authorization: AWS key:signature``
        header.  Returns the signed request.
        '''
        self._update_request_before_signing(req)
        c_headers = self.get_canonicalized_headers(req)
        c_resource = self.get_canonicalized_resource(req, service)
        to_sign = self._get_string_to_sign(req, c_headers, c_resource)
        self.log.debug('string to sign: %s', repr(to_sign))
        signature = self.sign_string(to_sign.encode('utf-8'))
        self.log.debug('b64-encoded signature: %s', signature)
        self._apply_signature(req, signature)
        return req
    def apply_to_request_params(self, req, service, expiration_datetime):
        '''
        Deprecated query-string signing: put AWSAccessKeyId, Expires, and
        Signature into req.params instead of using headers.
        '''
        # This does not implement security tokens.
        msg = ('S3RestAuth.apply_to_request_params is deprecated; use '
               'requestbuilder.auth.aws.QueryHmacV1Auth instead')
        self.log.warn(msg)
        warnings.warn(msg, DeprecationWarning)
        # Drop any leftover auth params from a previous signing attempt
        for param in ('AWSAccessKeyId', 'Expires', 'Signature'):
            req.params.pop(param, None)
        expiration = calendar.timegm(expiration_datetime.utctimetuple())
        delta_t = expiration_datetime - datetime.datetime.utcnow()
        # Equivalent to delta_t.total_seconds() (kept for old-Python compat)
        delta_t_sec = ((delta_t.microseconds +
                        (delta_t.seconds + delta_t.days * 24 * 3600) * 10**6)
                       / 10**6)
        self.log.debug('expiration: %i (%f seconds from now)',
                       expiration, delta_t_sec)
        c_headers = self.get_canonicalized_headers(req)
        self.log.debug('canonicalized headers: %s', repr(c_headers))
        c_resource = self.get_canonicalized_resource(req, service)
        self.log.debug('canonicalized resource: %s', repr(c_resource))
        # In the query-string variant the Expires value replaces the Date
        # header in the string to sign
        to_sign = '\n'.join((req.method,
                             req.headers.get('Content-MD5', ''),
                             req.headers.get('Content-Type', ''),
                             six.text_type(expiration),
                             c_headers + c_resource))
        self.log.debug('string to sign: %s', repr(to_sign))
        signature = self.sign_string(to_sign.encode('utf-8'))
        self.log.debug('b64-encoded signature: %s', signature)
        req.params['AWSAccessKeyId'] = self.args['key_id']
        req.params['Expires'] = six.text_type(expiration)
        req.params['Signature'] = signature
        if self.args.get('security_token'):
            # This is a guess.  I have no evidence that this actually works.
            req.params['SecurityToken'] = self.args['security_token']
    def _update_request_before_signing(self, req):
        '''Set the Date/Host/token headers and drop any stale signature.'''
        if not req.headers:
            req.headers = {}
        req.headers['Date'] = email.utils.formatdate()
        req.headers['Host'] = urlparse.urlparse(req.url).netloc
        if self.args.get('security_token'):
            req.headers['x-amz-security-token'] = self.args['security_token']
        req.headers.pop('Signature', None)
    def _get_string_to_sign(self, req, c_headers, c_resource):
        '''Assemble the canonical string to sign for header-based auth.'''
        return '\n'.join((req.method.upper(),
                          req.headers.get('Content-MD5', ''),
                          req.headers.get('Content-Type', ''),
                          req.headers.get('Date'),
                          c_headers + c_resource))
    def _apply_signature(self, req, signature):
        '''Attach the signature via the Authorization header.'''
        req.headers['Authorization'] = 'AWS {0}:{1}'.format(
            self.args['key_id'], signature)
    def get_canonicalized_resource(self, req, service):
        '''
        Build the CanonicalizedResource element ("/bucket/keyname" plus any
        signed sub-resource query parameters).
        '''
        # /bucket/keyname
        parsed_req_path = urlparse.urlparse(req.url).path
        assert service.endpoint is not None
        parsed_svc_path = urlparse.urlparse(service.endpoint).path
        # IMPORTANT: this only supports path-style requests
        assert parsed_req_path.startswith(parsed_svc_path)
        resource = parsed_req_path[len(parsed_svc_path):]
        if parsed_svc_path.endswith('/'):
            # The leading / got stripped off
            resource = '/' + resource
        if not resource:
            # This resource does not address a bucket
            resource = '/'
        # Now append sub-resources, a.k.a. query string parameters
        if getattr(req, 'params', None):
            # A regular Request
            params = req.params
        else:
            # A PreparedRequest
            params = _get_params_from_url(req.url)
        if params:
            subresources = []
            # Only the parameters in HASHED_PARAMS participate in the
            # signature, sorted by key
            for key, val in sorted(params.iteritems()):
                if key in self.HASHED_PARAMS:
                    if val is None:
                        subresources.append(key)
                    else:
                        subresources.append(key + '=' + val)
            if subresources:
                resource += '?' + '&'.join(subresources)
        self.log.debug('canonicalized resource: %s', repr(resource))
        return resource
    def get_canonicalized_headers(self, req):
        '''
        Build the CanonicalizedAmzHeaders element: lower-cased x-amz-*
        headers, values whitespace-collapsed, duplicates comma-joined,
        sorted by header name, one "name:value\n" entry per header.
        '''
        headers_dict = {}
        for key, val in req.headers.iteritems():
            if key.lower().startswith('x-amz-'):
                headers_dict.setdefault(key.lower(), [])
                headers_dict[key.lower()].append(' '.join(val.split()))
        headers_strs = []
        for key, vals in sorted(headers_dict.iteritems()):
            headers_strs.append('{0}:{1}'.format(key, ','.join(vals)))
        if headers_strs:
            c_headers = '\n'.join(headers_strs) + '\n'
        else:
            c_headers = ''
        self.log.debug('canonicalized headers: %s', repr(c_headers))
        return c_headers
    def sign_string(self, to_sign):
        '''Return the base64-encoded HMAC-SHA1 of to_sign (a byte string).'''
        # NOTE: hmac.new requires byte-string arguments (Python 2 str here)
        req_hmac = hmac.new(self.args['secret_key'], digestmod=hashlib.sha1)
        req_hmac.update(to_sign)
        return base64.b64encode(req_hmac.digest())
class QueryHmacV1Auth(HmacV1Auth):
    """
    Query-string ("pre-signed URL") variant of the S3 REST authentication
    scheme: AWSAccessKeyId, Expires, and Signature are carried as URL
    parameters instead of headers.
    """
    DEFAULT_TIMEOUT = 600  # 10 minutes
    def _update_request_before_signing(self, req):
        """
        Add the AWSAccessKeyId and Expires query parameters and drop any
        stale Signature parameter before signing.
        """
        # Fix: the original ``int(self.args.get('timeout'))`` raised
        # TypeError when no 'timeout' argument was supplied at all; fall
        # back to DEFAULT_TIMEOUT in that case (a zero timeout also falls
        # back, matching the original's ``int(...) or DEFAULT_TIMEOUT``)
        timeout = int(self.args.get('timeout') or 0) or self.DEFAULT_TIMEOUT
        assert timeout > 0
        params = _get_params_from_url(req.url)
        params['AWSAccessKeyId'] = self.args['key_id']
        params['Expires'] = int(time.time() + timeout)
        params.pop('Signature', None)
        req.prepare_url(_remove_params_from_url(req.url), params)
    def _get_string_to_sign(self, req, c_headers, c_resource):
        """
        Assemble the string to sign; the Expires query parameter takes the
        place of the Date header used by the header-based scheme.
        """
        params = _get_params_from_url(req.url)
        return '\n'.join((req.method.upper(),
                          req.headers.get('Content-MD5', ''),
                          req.headers.get('Content-Type', ''),
                          params['Expires'],
                          c_headers + c_resource))
    def _apply_signature(self, req, signature):
        """Attach the computed signature as a query parameter."""
        req.prepare_url(req.url, {'Signature': signature})
class QueryHmacV2Auth(HmacKeyAuth):
    '''
    AWS signature version 2
    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
    '''
    def apply_to_request(self, req, service):
        '''
        Sign the request in place with sigv2: collect the query/body
        parameters, add the auth parameters, and attach the computed
        Signature.  Returns the signed request.
        '''
        parsed = urlparse.urlparse(req.url)
        if req.method == 'POST':
            # This is probably going to break when given multipart data.
            params = urlparse.parse_qs(req.body or '', keep_blank_values=True)
        else:
            params = urlparse.parse_qs(parsed.query, keep_blank_values=True)
        # parse_qs yields lists; only the first value of each key is signed
        params = dict((key, vals[0]) for key, vals in params.iteritems())
        params['AWSAccessKeyId'] = self.args['key_id']
        params['SignatureVersion'] = 2
        params['SignatureMethod'] = 'HmacSHA256'
        params['Timestamp'] = time.strftime(ISO8601, time.gmtime())
        if self.args.get('security_token'):
            params['SecurityToken'] = self.args['security_token']
        # Needed for retries so old signatures aren't included in to_sign
        params.pop('Signature', None)
        to_sign = '{method}\n{host}\n{path}\n'.format(
            method=req.method, host=parsed.netloc.lower(),
            path=(parsed.path or '/'))
        # Parameters are signed sorted by key, percent-encoded with the
        # RFC 3986 unreserved characters left intact
        quoted_params = []
        for key in sorted(params):
            val = six.text_type(params[key])
            quoted_params.append(urlparse.quote(key, safe='') + '=' +
                                 urlparse.quote(val, safe='-_~'))
        query_string = '&'.join(quoted_params)
        to_sign += query_string
        # Redact passwords
        redacted_to_sign = re.sub('assword=[^&]*', 'assword=<redacted>',
                                  to_sign)
        self.log.debug('string to sign: %s', repr(redacted_to_sign))
        signature = self.sign_string(to_sign)
        self.log.debug('b64-encoded signature: %s', signature)
        params['Signature'] = signature
        if req.method == 'POST':
            req.prepare_body(params, {})
        else:
            req.prepare_url(_remove_params_from_url(req.url), params)
        return req
    def sign_string(self, to_sign):
        '''Return the base64-encoded HMAC-SHA256 of to_sign.'''
        # NOTE: hmac.new requires byte-string arguments (Python 2 str here)
        req_hmac = hmac.new(self.args['secret_key'], digestmod=hashlib.sha256)
        req_hmac.update(to_sign)
        return base64.b64encode(req_hmac.digest())
class HmacV4Auth(HmacKeyAuth):
    """
    AWS signature version 4
    http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
    """

    def apply_to_request(self, req, service):
        """Sign *req* for *service* with sigv4 and return it.

        Raises AuthError when the service class has no NAME or when no
        region can be determined.
        """
        if not service.NAME:
            self.log.critical('service class %s must have a NAME attribute '
                              'to use sigv4', service.__class__.__name__)
            raise AuthError('BUG: service class {0} does not have a name'
                            .format(service.__class__.__name__))
        payload_hash = self._hash_payload(req)  # large files will be slow here
        now = time.time()
        date_header = time.strftime(ISO8601_BASIC, time.gmtime(now))
        scope = self._build_scope(service, now)
        credential = '/'.join((self.args['key_id'],) + scope)
        self._update_request_before_signing(req, credential, payload_hash,
                                            date_header)
        c_uri = self._get_canonical_uri(req)
        c_query = self._get_canonical_query(req)
        c_headers = self._get_canonical_headers(req)
        s_headers = self._get_signed_headers(req)
        c_request = '\n'.join((req.method.upper(), c_uri, c_query, c_headers,
                               '', s_headers, payload_hash))
        self.log.debug('canonical request: %s', repr(c_request))
        to_sign = '\n'.join(('AWS4-HMAC-SHA256', date_header, '/'.join(scope),
                             hashlib.sha256(c_request).hexdigest()))
        # Redact passwords
        redacted_to_sign = re.sub('assword=[^&]*', 'assword=<redacted>',
                                  to_sign)
        self.log.debug('string to sign: %s', repr(redacted_to_sign))
        # Derive the signing key as a *chain* of HMACs, one per scope
        # chunk (date -> region -> service -> "aws4_request"), as the
        # sigv4 spec requires.  Feeding the concatenated scope into a
        # single HMAC yields a key the service will not accept.
        derived_key = 'AWS4{0}'.format(self.args['secret_key'])
        for chunk in scope:
            derived_key = hmac.new(derived_key, chunk,
                                   digestmod=hashlib.sha256).digest()
        signature = hmac.new(derived_key, to_sign,
                             digestmod=hashlib.sha256).hexdigest()
        self.log.debug('signature: %s', signature)
        self._apply_signature(req, credential, signature)
        return req

    def _update_request_before_signing(self, req, credential, payload_sha256,
                                       date_header):
        """Add/refresh the headers that participate in signing."""
        parsed = urlparse.urlparse(req.url)
        req.headers['Host'] = parsed.netloc
        # A stale Authorization header must not leak into the new
        # canonical headers (e.g. on retries).
        req.headers.pop('Authorization', None)
        req.headers['X-Amz-Content-SHA256'] = payload_sha256
        req.headers['X-Amz-Date'] = date_header
        if self.args.get('security_token'):
            req.headers['X-Amz-Security-Token'] = self.args['security_token']

    def _apply_signature(self, req, credential, signature):
        """Attach the final Authorization header to *req*."""
        auth_header = ', '.join((
            'AWS4-HMAC-SHA256 Credential={0}'.format(credential),
            'SignedHeaders={0}'.format(self._get_signed_headers(req)),
            'Signature={0}'.format(signature)))
        req.headers['Authorization'] = auth_header

    def _build_scope(self, service, timestamp):
        """Return the (date, region, service, 'aws4_request') scope tuple.

        Raises AuthError when no region is configured or in the
        environment.
        """
        if service.region_name:
            region = service.region_name
        elif os.getenv('AWS_AUTH_REGION'):
            region = os.getenv('AWS_AUTH_REGION')
        else:
            self.log.error('a region name is required to use sigv4')
            raise AuthError(
                "region name is required; either use a config file "
                "to supply the service's URL or set AWS_AUTH_REGION "
                "in the environment")
        scope = (time.strftime('%Y%m%d', time.gmtime(timestamp)),
                 region, service.NAME, 'aws4_request')
        self.log.debug('scope: %s', '/'.join(scope))
        return scope

    def _get_canonical_uri(self, req):
        """Return the quoted request path for the canonical request."""
        path = urlparse.urlsplit(req.url).path or '/'
        # TODO: Normalize stuff like ".."
        c_uri = urlparse.quote(path, safe='/~')
        self.log.debug('canonical URI: %s', c_uri)
        return c_uri

    def _get_canonical_query(self, req):
        """Return the sorted, re-quoted canonical query string."""
        req_params = urlparse.parse_qsl(urlparse.urlparse(req.url).query,
                                        keep_blank_values=True)
        params = []
        for key, val in sorted(req_params or []):
            params.append('='.join((urlparse.quote(key, safe='~-_.'),
                                    urlparse.quote(val, safe='~-_.'))))
        c_params = '&'.join(params)
        self.log.debug('canonical query: %s', c_params)
        return c_params

    def _get_normalized_headers(self, req):
        """Return lowercased, stripped headers eligible for signing."""
        # This doesn't currently support multi-value headers.
        headers = {}
        # items() rather than iteritems() so this also runs on python 3.
        for key, val in req.headers.items():
            if key.lower() not in ('connection', 'user-agent'):
                # Reverse proxies like to rewrite Connection headers.
                # Ignoring User-Agent lets us generate storable query URLs
                headers[key.lower().strip()] = val.strip()
        return headers

    def _get_canonical_headers(self, req):
        """Return the newline-joined "key:value" canonical header block."""
        headers = []
        normalized_headers = self._get_normalized_headers(req)
        for key, val in sorted(normalized_headers.items()):
            headers.append(':'.join((key, val)))
        self.log.debug('canonical headers: %s', str(headers))
        return '\n'.join(headers)

    def _get_signed_headers(self, req):
        """Return the semicolon-joined, sorted signed header names."""
        normalized_headers = self._get_normalized_headers(req)
        s_headers = ';'.join(sorted(normalized_headers))
        self.log.debug('signed headers: %s', s_headers)
        return s_headers

    def _hash_payload(self, req):
        """Return the hex SHA256 of the request body.

        Seekable bodies are rewound afterward; non-seekable streams are
        spooled to a temp file and req.body is re-pointed at the spool.
        """
        if self.args.get('payload_hash'):
            return self.args['payload_hash']
        digest = hashlib.sha256()
        if not req.body:
            pass
        elif hasattr(req.body, 'seek'):
            # Remember where the body stream is so it can be rewound;
            # this must read the position from req.body (the stream we
            # read and seek below), not req.data.
            body_position = req.body.tell()
            self.log.debug('payload hashing starting')
            while True:
                chunk = req.body.read(16384)
                if not chunk:
                    break
                digest.update(chunk)
            req.body.seek(body_position)
            self.log.debug('payload hashing done')
        elif hasattr(req.body, 'read'):
            self.log.debug('payload spooling/hashing starting')
            # 10M happens to be the size of a bundle part, the thing we upload
            # most frequently.
            spool = tempfile.SpooledTemporaryFile(max_size=(10 * 1024 * 1024))
            while True:
                chunk = req.body.read(16384)
                if not chunk:
                    break
                digest.update(chunk)
                spool.write(chunk)
            self.log.debug('payload spooling/hashing done')
            spool.seek(0)
            self.log.info('re-pointing request body at spooled payload')
            req.body = spool
            # Should we close the original req.body here?
        else:
            digest.update(req.body)
        self.log.debug('payload hash: %s', digest.hexdigest())
        return digest.hexdigest()
class QueryHmacV4Auth(HmacV4Auth):
    """Sigv4 variant that carries the auth data in the query string."""

    def _update_request_before_signing(self, req, credential, payload_sha256,
                                       date_header):
        # We don't do anything with payload_sha256.  Is that bad?
        content_type = req.headers.get('Content-Type', '')
        if req.method.upper() == 'POST' and 'form-urlencoded' in content_type:
            self.log.warn('Query string authentication and POST form data '
                          'are generally mutually exclusive; GET is '
                          'recommended instead')
        req.headers['Host'] = urlparse.urlparse(req.url).netloc
        req.headers.pop('Authorization', None)
        params = {'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
                  'X-Amz-Credential': credential,
                  'X-Amz-Date': date_header,
                  'X-Amz-SignedHeaders': self._get_signed_headers(req)}
        timeout = self.args.get('timeout')
        if timeout:
            params['X-Amz-Expires'] = timeout
        token = self.args.get('security_token')
        if token:
            params['X-Amz-Security-Token'] = token
        req.prepare_url(req.url, params)

    def _apply_signature(self, req, credential, signature):
        """Append the final signature to the query string."""
        req.prepare_url(req.url, {'X-Amz-Signature': signature})
def _get_params_from_url(url):
"""
Given a URL, return a dict of parameters and their values. If a
parameter appears more than once all but the first value will be lost.
"""
parsed = urlparse.urlparse(url)
params = urlparse.parse_qs(parsed.query, keep_blank_values=True)
return dict((key, vals[0]) for key, vals in params.iteritems())
def _remove_params_from_url(url):
"""
Return a copy of a URL with its parameters, fragments, and query
string removed.
"""
parsed = urlparse.urlparse(url)
return urlparse.urlunparse((parsed[0], parsed[1], parsed[2], '', '', '')) | unknown | codeparrot/codeparrot-clean | ||
"""
Created on 23 Mar 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import optparse
from scs_core.data.str import Str
# --------------------------------------------------------------------------------------------------------------------
class CmdMQTTClient(object):
    """unix command line handler"""

    def __init__(self):
        """
        Constructor
        """
        # Subscriptions come either from a single channel (-c, plus an
        # optional UDS when -s is set) or from positional topic/UDS args.
        self.__parser = optparse.OptionParser(usage="%prog [-p UDS_PUB] "
                                                    "[-s] { -c { C | G | P | S | X } (UDS_SUB_1) | "
                                                    "[SUB_TOPIC_1 (UDS_SUB_1) .. SUB_TOPIC_N (UDS_SUB_N)] } "
                                                    "[-e] [-l LED_UDS] [-v]", version="%prog 1.0")

        # optional...
        self.__parser.add_option("--pub", "-p", type="string", nargs=1, action="store", dest="uds_pub",
                                 default=None, help="read publications from UDS instead of stdin")

        self.__parser.add_option("--sub", "-s", action="store_true", dest="uds_sub",
                                 help="write subscriptions to UDS instead of stdout")

        self.__parser.add_option("--channel", "-c", type="string", nargs=1, action="store", dest="channel",
                                 help="subscribe to channel")

        self.__parser.add_option("--echo", "-e", action="store_true", dest="echo", default=False,
                                 help="echo input to stdout (if not writing subscriptions to stdout)")

        self.__parser.add_option("--led", "-l", type="string", nargs=1, action="store", dest="led_uds",
                                 help="send LED commands to LED_UDS")

        self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
                                 help="report narrative to stderr")

        # Parses sys.argv at construction time.
        self.__opts, self.__args = self.__parser.parse_args()


    # ----------------------------------------------------------------------------------------------------------------

    def is_valid(self):
        # Echoing to stdout clashes with writing subscriptions to stdout.
        if self.echo and self.subscriptions and not self.__opts.uds_sub:
            return False

        # In positional mode with -s, args must come in (topic, uds) pairs.
        if self.channel is None:
            if self.__opts.uds_sub and len(self.__args) % 2 != 0:
                return False

        # else:
        #     if self.__opts.uds_sub and len(self.__args) != 1:
        #         return False
        #
        #     if not self.__opts.uds_sub and len(self.__args) != 0:
        #         return False

        return True


    # ----------------------------------------------------------------------------------------------------------------

    @property
    def subscriptions(self):
        # Built fresh from the positional args on every access.
        subscriptions = []

        # Channel mode carries no positional subscriptions.
        if self.channel:
            return subscriptions

        if self.__opts.uds_sub:
            # Positional args are (topic, uds) pairs; is_valid() checks
            # the arg count is even before this is relied upon.
            for i in range(0, len(self.__args), 2):
                subscriptions.append(Subscription(self.__args[i], self.__args[i + 1]))
        else:
            for i in range(len(self.__args)):
                subscriptions.append(Subscription(self.__args[i]))

        return subscriptions

    @property
    def channel(self):
        return self.__opts.channel

    @property
    def channel_uds(self):
        if self.channel is None or not self.__opts.uds_sub:
            return None

        # NOTE(review): assumes a UDS path was supplied as the first
        # positional argument -- raises IndexError otherwise; confirm.
        return self.__args[0]

    @property
    def uds_pub(self):
        return self.__opts.uds_pub

    @property
    def echo(self):
        return self.__opts.echo

    @property
    def led_uds(self):
        return self.__opts.led_uds

    @property
    def verbose(self):
        return self.__opts.verbose


    # ----------------------------------------------------------------------------------------------------------------

    def print_help(self, file):
        self.__parser.print_help(file)

    def __str__(self, *args, **kwargs):
        return "CmdMQTTClient:{subscriptions:%s, channel:%s, channel_uds:%s, uds_pub:%s, echo:%s, " \
               "led:%s, verbose:%s}" % \
               (Str.collection(self.subscriptions), self.channel, self.channel_uds, self.uds_pub, self.echo,
                self.led_uds, self.verbose)
# --------------------------------------------------------------------------------------------------------------------
class Subscription(object):
    """
    A single MQTT subscription: a topic path plus an optional
    DomainSocket address to deliver received messages to.
    """

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, topic, address=None):
        """
        Constructor
        """
        self.__topic = topic                            # string topic path
        self.__address = address                        # string DomainSocket address


    # ----------------------------------------------------------------------------------------------------------------

    @property
    def topic(self):
        return self.__topic


    @property
    def address(self):
        return self.__address


    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        description = "Subscription:{topic:%s, address:%s}"
        return description % (self.__topic, self.__address)
"""
Configuration parameters:
path.internal.bin.upstream
"""
# pylint: disable=relative-import
from __future__ import print_function
import os
import re
from subprocess import Popen, PIPE
from polyglot.detect import Detector
from polyglot.detect.base import UnknownLanguage
from config import CONFIG
from languages_data import SO_NAME
from .upstream import UpstreamAdapter
# Plain-text body returned when the external answer-finding helper yields
# nothing for the requested topic.
NOT_FOUND_MESSAGE = """404 NOT FOUND
Unknown cheat sheet. Please try to reformulate your query.
Query format:
/LANG/QUESTION
Examples:
/python/read+json
/golang/run+external+program
/js/regex+search
See /:help for more info.
If the problem persists, file a GitHub issue at
github.com/chubin/cheat.sh or ping @igor_chubin
"""
class Question(UpstreamAdapter):

    """
    Answer to a programming language question, using Stackoverflow
    as the main data source. Heavy lifting is done by an external
    program `CONFIG["path.internal.bin.upstream"]`.
    If the program is not found, fallback to the superclass `UpstreamAdapter`,
    which queries the upstream server (by default https://cheat.sh/)
    for the answer
    """

    _adapter_name = "question"
    _output_format = "text+code"
    _cache_needed = True

    def _get_page(self, topic, request_options=None):
        """
        Find answer for the `topic` question.

        Returns the helper program's stdout (decoded as UTF-8) or
        NOT_FOUND_MESSAGE when the helper produced no output.
        """
        if not os.path.exists(CONFIG["path.internal.bin.upstream"]):
            # if the upstream program is not found, use normal upstream adapter
            self._output_format = "ansi"
            return UpstreamAdapter._get_page(self, topic, request_options=request_options)

        topic = topic.replace('+', ' ')

        # if there is a language name in the section name,
        # cut it off (de:python => python)
        if '/' in topic:
            section_name, topic = topic.split('/', 1)
            if ':' in section_name:
                _, section_name = section_name.split(':', 1)
            section_name = SO_NAME.get(section_name, section_name)
            topic = "%s/%s" % (section_name, topic)

        # some clients send queries with - instead of + so we have to
        # rewrite them to spaces (only dashes not preceded by another
        # dash are replaced, so "--" sequences keep their second dash)
        topic = re.sub(r"(?<!-)-", ' ', topic)

        topic_words = topic.split()
        topic = " ".join(topic_words)

        lang = 'en'
        try:
            query_text = topic # " ".join(topic)
            query_text = re.sub('^[^/]*/+', '', query_text.rstrip('/'))
            # NOTE(review): this substitution is applied twice, so up to
            # two trailing "/<number>" segments are stripped before
            # language detection -- presumably intentional; confirm.
            query_text = re.sub('/[0-9]+$', '', query_text)
            query_text = re.sub('/[0-9]+$', '', query_text)
            detector = Detector(query_text)
            supposed_lang = detector.languages[0].code
            # Only trust the detector for longer queries or for a fixed
            # whitelist of language codes.
            if len(topic_words) > 2 \
                or supposed_lang in ['az', 'ru', 'uk', 'de', 'fr', 'es', 'it', 'nl']:
                lang = supposed_lang
            if supposed_lang.startswith('zh_') or supposed_lang == 'zh':
                lang = 'zh'
            elif supposed_lang.startswith('pt_'):
                lang = 'pt'
            if supposed_lang in ['ja', 'ko']:
                lang = supposed_lang
        except UnknownLanguage:
            print("Unknown language (%s)" % query_text)

        if lang != 'en':
            topic = ['--human-language', lang, topic]
        else:
            topic = [topic]

        cmd = [CONFIG["path.internal.bin.upstream"]] + topic
        proc = Popen(cmd, stdin=open(os.devnull, "r"), stdout=PIPE, stderr=PIPE)
        answer = proc.communicate()[0].decode('utf-8')

        if not answer:
            return NOT_FOUND_MESSAGE

        return answer

    def get_list(self, prefix=None):
        # This adapter cannot enumerate its topics.
        return []

    def is_found(self, topic):
        # Assume anything can be asked; _get_page handles misses itself.
        return True
# Copyright 2012 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
import ldap
except ImportError:
# This module needs to be importable despite ldap not being a requirement
ldap = None
import time
from oslo.config import cfg
from nova import exception
from nova.network import dns_driver
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Tunables for the LDAP-backed DNS driver: server location, bind
# credentials, the tree's base DN, and the SOA timing fields.  Registered
# on the global CONF object below.
ldap_dns_opts = [
    cfg.StrOpt('ldap_dns_url',
               default='ldap://ldap.example.com:389',
               help='URL for LDAP server which will store DNS entries'),
    cfg.StrOpt('ldap_dns_user',
               default='uid=admin,ou=people,dc=example,dc=org',
               help='User for LDAP DNS'),
    cfg.StrOpt('ldap_dns_password',
               default='password',
               help='Password for LDAP DNS',
               secret=True),
    cfg.StrOpt('ldap_dns_soa_hostmaster',
               default='hostmaster@example.org',
               help='Hostmaster for LDAP DNS driver Statement of Authority'),
    cfg.MultiStrOpt('ldap_dns_servers',
                    default=['dns.example.org'],
                    help='DNS Servers for LDAP DNS driver'),
    cfg.StrOpt('ldap_dns_base_dn',
               default='ou=hosts,dc=example,dc=org',
               help='Base DN for DNS entries in LDAP'),
    cfg.StrOpt('ldap_dns_soa_refresh',
               default='1800',
               help='Refresh interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
    cfg.StrOpt('ldap_dns_soa_retry',
               default='3600',
               help='Retry interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
    cfg.StrOpt('ldap_dns_soa_expiry',
               default='86400',
               help='Expiry interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
    cfg.StrOpt('ldap_dns_soa_minimum',
               default='7200',
               help='Minimum interval (in seconds) for LDAP DNS driver '
                    'Statement of Authority'),
]

CONF.register_opts(ldap_dns_opts)
# Importing ldap.modlist breaks the tests for some reason,
# so this is an abbreviated version of a function from
# there.
def create_modlist(newattrs):
    """Convert an attribute dict into an ldap modlist.

    Every value is utf8-encoded; *newattrs* is updated in place with the
    encoded values as well.
    """
    modlist = []
    for attrtype in newattrs.keys():
        encoded_vals = [utils.utf8(val) for val in newattrs[attrtype]]
        newattrs[attrtype] = encoded_vals
        modlist.append((attrtype, encoded_vals))
    return modlist
class DNSEntry(object):
    """Base wrapper around one (dn, attrs) entry in the DNS LDAP tree."""

    def __init__(self, ldap_object):
        """ldap_object is an instance of ldap.LDAPObject.

        It should already be initialized and bound before
        getting passed in here.
        """
        self.lobj = ldap_object
        self.ldap_tuple = None
        self.qualified_domain = None

    @classmethod
    def _get_tuple_for_domain(cls, lobj, domain):
        """Return the first (dn, attrs) tuple matching *domain*, or None."""
        entry = lobj.search_s(CONF.ldap_dns_base_dn, ldap.SCOPE_SUBTREE,
                              '(associatedDomain=%s)' % utils.utf8(domain))
        if not entry:
            return None
        if len(entry) > 1:
            # Named placeholders require a mapping; interpolating with a
            # tuple raised TypeError instead of logging the warning.
            LOG.warn(_("Found multiple matches for domain "
                       "%(domain)s.\n%(entry)s") %
                     {'domain': domain, 'entry': entry})
        return entry[0]

    @classmethod
    def _get_all_domains(cls, lobj):
        """Return the associatedDomain of every SOA-bearing entry."""
        entries = lobj.search_s(CONF.ldap_dns_base_dn,
                                ldap.SCOPE_SUBTREE, '(sOARecord=*)')
        domains = []
        for entry in entries:
            domain = entry[1].get('associatedDomain')
            if domain:
                domains.append(domain[0])
        return domains

    def _set_tuple(self, tuple):
        self.ldap_tuple = tuple

    def _qualify(self, name):
        """Append the qualified domain to a relative *name*."""
        return '%s.%s' % (name, self.qualified_domain)

    def _dequalify(self, name):
        """Strip the qualified-domain suffix; None if it is absent."""
        z = ".%s" % self.qualified_domain
        if name.endswith(z):
            dequalified = name[0:name.rfind(z)]
        else:
            LOG.warn(_("Unable to dequalify. %(name)s is not in "
                       "%(domain)s.\n") %
                     {'name': name,
                      'domain': self.qualified_domain})
            dequalified = None

        return dequalified

    def _dn(self):
        return self.ldap_tuple[0]
    dn = property(_dn)

    def _rdn(self):
        # First component of the dn, e.g. 'dc=foo'.
        return self.dn.partition(',')[0]
    rdn = property(_rdn)
class DomainEntry(DNSEntry):
    """A DNSEntry wrapping one domain (SOA-bearing) object in the tree."""

    @classmethod
    def _soa(cls):
        # The serial is the current timestamp, so every rewrite sorts as
        # "newer" for secondaries.
        date = time.strftime('%Y%m%d%H%M%S')
        soa = '%s %s %s %s %s %s %s' % (
            CONF.ldap_dns_servers[0],
            CONF.ldap_dns_soa_hostmaster,
            date,
            CONF.ldap_dns_soa_refresh,
            CONF.ldap_dns_soa_retry,
            CONF.ldap_dns_soa_expiry,
            CONF.ldap_dns_soa_minimum)
        return utils.utf8(soa)

    @classmethod
    def create_domain(cls, lobj, domain):
        """Create a new domain entry, and return an object that wraps it."""
        entry = cls._get_tuple_for_domain(lobj, domain)
        if entry:
            raise exception.FloatingIpDNSExists(name=domain, domain='')

        newdn = 'dc=%s,%s' % (domain, CONF.ldap_dns_base_dn)
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'sOARecord': [cls._soa()],
                 'associatedDomain': [domain],
                 'dc': [domain]}
        lobj.add_s(newdn, create_modlist(attrs))
        return DomainEntry(lobj, domain)

    def __init__(self, ldap_object, domain):
        """Wrap the existing entry for *domain*; raises NotFound if absent."""
        super(DomainEntry, self).__init__(ldap_object)
        entry = self._get_tuple_for_domain(self.lobj, domain)
        if not entry:
            raise exception.NotFound()
        self._set_tuple(entry)
        # NOTE(review): assert is stripped under python -O; this relies on
        # the search filter guaranteeing the match -- confirm.
        assert(entry[1]['associatedDomain'][0] == domain)
        self.qualified_domain = domain

    def delete(self):
        """Delete the domain that this entry refers to."""
        # Remove every host (aRecord) entry below the domain first.
        entries = self.lobj.search_s(self.dn,
                                     ldap.SCOPE_SUBTREE,
                                     '(aRecord=*)')
        for entry in entries:
            self.lobj.delete_s(entry[0])
        self.lobj.delete_s(self.dn)

    def update_soa(self):
        # Rewrite the SOA record so its serial reflects the change.
        mlist = [(ldap.MOD_REPLACE, 'sOARecord', self._soa())]
        self.lobj.modify_s(self.dn, mlist)

    def subentry_with_name(self, name):
        """Return the HostEntry for *name* under this domain, or None."""
        entry = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
                                   '(associatedDomain=%s.%s)' %
                                   (utils.utf8(name),
                                    utils.utf8(self.qualified_domain)))
        if entry:
            return HostEntry(self, entry[0])
        else:
            return None

    def subentries_with_ip(self, ip):
        """Return HostEntry objects for every record pointing at *ip*."""
        entries = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
                                     '(aRecord=%s)' % utils.utf8(ip))
        objs = []
        for entry in entries:
            if 'associatedDomain' in entry[1]:
                objs.append(HostEntry(self, entry))
        return objs

    def add_entry(self, name, address):
        """Map *name* to *address*, reusing an existing record for the IP."""
        if self.subentry_with_name(name):
            raise exception.FloatingIpDNSExists(name=name,
                                                domain=self.qualified_domain)

        entries = self.subentries_with_ip(address)
        if entries:
            # We already have an ldap entry for this IP, so we just
            # need to add the new name.
            existingdn = entries[0].dn
            self.lobj.modify_s(existingdn, [(ldap.MOD_ADD,
                                             'associatedDomain',
                                             utils.utf8(self._qualify(name)))])

            return self.subentry_with_name(name)
        else:
            # We need to create an entirely new entry.
            newdn = 'dc=%s,%s' % (name, self.dn)
            attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                     'domain', 'dcobject', 'top'],
                     'aRecord': [address],
                     'associatedDomain': [self._qualify(name)],
                     'dc': [name]}
            self.lobj.add_s(newdn, create_modlist(attrs))
            return self.subentry_with_name(name)

    def remove_entry(self, name):
        """Remove *name* from the domain and bump the SOA serial."""
        entry = self.subentry_with_name(name)
        if not entry:
            raise exception.NotFound()
        entry.remove_name(name)
        self.update_soa()
class HostEntry(DNSEntry):
    """A DNSEntry wrapping one host (aRecord) object under a DomainEntry."""

    def __init__(self, parent, tuple):
        super(HostEntry, self).__init__(parent.lobj)
        self.parent_entry = parent
        self._set_tuple(tuple)
        self.qualified_domain = parent.qualified_domain

    def remove_name(self, name):
        """Remove *name* from the entry; delete the whole entry when it
        was the last associated domain."""
        names = self.ldap_tuple[1]['associatedDomain']
        if not names:
            raise exception.NotFound()
        if len(names) > 1:
            # We just have to remove the requested domain.
            self.lobj.modify_s(self.dn, [(ldap.MOD_DELETE, 'associatedDomain',
                                          self._qualify(utils.utf8(name)))])
            # Compare against the value part of the rdn ('dc=<value>');
            # indexing the rdn string ([1]) only ever yielded 'c', so the
            # rename branch below was unreachable.
            if self.rdn.partition('=')[2] == name:
                # We just removed the rdn, so we need to move this entry.
                names.remove(self._qualify(name))
                newrdn = 'dc=%s' % self._dequalify(names[0])
                self.lobj.modrdn_s(self.dn, [newrdn])
        else:
            # We should delete the entire record.
            self.lobj.delete_s(self.dn)

    def modify_address(self, name, address):
        """Point *name* at *address*, splitting off a fresh entry when
        other names share this record."""
        names = self.ldap_tuple[1]['associatedDomain']
        if not names:
            raise exception.NotFound()
        if len(names) == 1:
            self.lobj.modify_s(self.dn, [(ldap.MOD_REPLACE, 'aRecord',
                                          [utils.utf8(address)])])
        else:
            self.remove_name(name)
            self.parent.add_entry(name, address)

    def _names(self):
        # Dequalified (relative) names associated with this record.
        names = []
        for domain in self.ldap_tuple[1]['associatedDomain']:
            names.append(self._dequalify(domain))
        return names
    names = property(_names)

    def _ip(self):
        # The single A record held by this entry.
        ip = self.ldap_tuple[1]['aRecord'][0]
        return ip
    ip = property(_ip)

    def _parent(self):
        return self.parent_entry
    parent = property(_parent)
class LdapDNS(dns_driver.DNSDriver):
    """Driver for PowerDNS using ldap as a back end.

    This driver assumes ldap-method=strict, with all domains
    in the top-level, aRecords only.
    """

    def __init__(self):
        if not ldap:
            raise ImportError(_('ldap not installed'))

        self.lobj = ldap.initialize(CONF.ldap_dns_url)
        self.lobj.simple_bind_s(CONF.ldap_dns_user,
                                CONF.ldap_dns_password)

    def get_domains(self):
        """Return the names of all domains in the tree."""
        return DomainEntry._get_all_domains(self.lobj)

    def create_entry(self, name, address, type, domain):
        """Create an A record for *name* in *domain*."""
        if type.lower() != 'a':
            raise exception.InvalidInput(_("This driver only supports "
                                           "type 'a' entries."))

        dEntry = DomainEntry(self.lobj, domain)
        dEntry.add_entry(name, address)

    def delete_entry(self, name, domain):
        """Delete the record for *name* in *domain*."""
        dEntry = DomainEntry(self.lobj, domain)
        dEntry.remove_entry(name)

    def get_entries_by_address(self, address, domain):
        """Return every name in *domain* that resolves to *address*."""
        try:
            dEntry = DomainEntry(self.lobj, domain)
        except exception.NotFound:
            return []
        entries = dEntry.subentries_with_ip(address)
        names = []
        for entry in entries:
            names.extend(entry.names)
        return names

    def get_entries_by_name(self, name, domain):
        """Return the addresses of *name* in *domain* ([] when absent)."""
        try:
            dEntry = DomainEntry(self.lobj, domain)
        except exception.NotFound:
            return []
        nEntry = dEntry.subentry_with_name(name)
        if nEntry:
            return [nEntry.ip]
        # Mirror get_entries_by_address: a miss yields an empty list
        # rather than an implicit None, which callers cannot iterate.
        return []

    def modify_address(self, name, address, domain):
        """Re-point *name* in *domain* at *address*."""
        dEntry = DomainEntry(self.lobj, domain)
        nEntry = dEntry.subentry_with_name(name)
        nEntry.modify_address(name, address)

    def create_domain(self, domain):
        """Create *domain* at the top of the tree."""
        DomainEntry.create_domain(self.lobj, domain)

    def delete_domain(self, domain):
        """Delete *domain* and every record beneath it."""
        dEntry = DomainEntry(self.lobj, domain)
        dEntry.delete()

    def delete_dns_file(self):
        LOG.warn(_("This shouldn't be getting called except during testing."))
        pass
{
"name": "@playground/framework-express",
"version": "0.0.0",
"private": true,
"sideEffects": false,
"type": "module",
"scripts": {
"build": "react-router build",
"dev": "node ./server.js",
"start": "cross-env NODE_ENV=production node ./server.js",
"typecheck": "react-router typegen && tsc"
},
"dependencies": {
"@react-router/express": "workspace:*",
"@react-router/node": "workspace:*",
"compression": "^1.8.1",
"express": "^4.19.2",
"isbot": "^5.1.11",
"morgan": "^1.10.1",
"react": "catalog:",
"react-dom": "catalog:",
"react-router": "workspace:*"
},
"devDependencies": {
"@react-router/dev": "workspace:*",
"@types/compression": "^1.8.1",
"@types/express": "^4.17.20",
"@types/morgan": "^1.9.10",
"@types/react": "catalog:",
"@types/react-dom": "catalog:",
"cross-env": "^7.0.3",
"typescript": "catalog:",
"vite": "^6.3.0",
"vite-tsconfig-paths": "^4.2.1"
},
"engines": {
"node": ">=20.0.0"
}
} | json | github | https://github.com/remix-run/react-router | playground/framework-express/package.json |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.pybind11.staging.variables.
This file is a copy of clif/testing/python/variables_test.py.
"""
import unittest
from clif.pybind11.staging import variables
class VariableTest(unittest.TestCase):
    """Check that the wrapped module constants round-trip into Python."""

    def test_const_int(self):
        self.assertEqual(variables.kMyConstInt, 42)

    def test_const_int_renamed(self):
        self.assertEqual(variables.const_int, 123)

    def test_const_float(self):
        self.assertEqual(variables.kMyConstFloat, 15.0)

    def test_const_bool(self):
        self.assertEqual(variables.kMyConstBool, True)

    def test_const_complex(self):
        self.assertEqual(variables.kMyConstComplex, complex(1))

    def test_const_array(self):
        expected = [0, 10, 20, 30, 40]
        self.assertSequenceEqual(expected, variables.kMyConstIntArray)

    def test_const_pair(self):
        expected = [0, 10]
        self.assertSequenceEqual(expected, variables.kMyConstPair)

    def test_const_dict(self):
        expected = {1: 10, 2: 20, 3: 30}
        self.assertDictEqual(expected, variables.kMyConstMap)

    def test_const_set(self):
        expected = {1, 2, 3}
        self.assertSetEqual(expected, variables.kMyConstSet)
# Allow running this test module directly (e.g. "python variables_test.py").
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
# encoding: utf-8
"""
xray_background.py - remove background from small angle x-ray scattering imgs
Created by Dave Williams on 2014-10-09
"""
# System imports
import numpy as np
from scipy import optimize
import cv2
import matplotlib.pyplot as plt
# Local module imports
import support
import fake_img
## Find the background profile and fit it
def background_collapse(center, img, peaks, plot=False):
    """Collapse the image background, ignoring the peak regions.

    Good ideas to be had here: http://goo.gl/2xEApw
    Args:
        center: x,y center of blocked image
        img: from which background is extracted
        peaks: row,col locations of peaks; don't want these in the background
        plot: if we should plot the exclusion regions and profile (True/False)
            or a list of two axes to plot onto
    Gives:
        background_dists: pixel distances of background from center
        background: profile of background
    """
    ## Find peak angles
    cx, cy = center
    m_thetas = [np.arctan2(pt[0] - cy, pt[1] - cx) for pt in peaks]
    ## With shifting, find the masking region
    # np.float/np.int were removed in numpy 1.24; the builtin types are
    # what those aliases always meant.
    mask = np.ones((img.shape[0], img.shape[1]*2), dtype=float)
    m_center = (int(round(center[0] + img.shape[1])), int(round(center[1])))
    m_thetas = np.round(np.degrees(m_thetas)).astype(int)
    theta_pm = 12  # amount to block on either side
    m_angles = [(t-theta_pm, t+theta_pm) for t in m_thetas]  # angles to block
    m_axes = (img.shape[1], img.shape[1])  # should always fill screen
    for angle in m_angles:
        cv2.ellipse(mask, m_center, m_axes, 180, angle[0], angle[1], 0, -1)
    mask = mask[:, img.shape[1]:]
    # Construct a radial distance img
    row, col = np.indices(img.shape)
    r = np.sqrt((col-center[0])**2 + (row-center[1])**2)
    # Coerce into ints for bincount
    r = r.astype(int)
    img = img.astype(int)
    img = img*mask
    # Do the counting
    flat_count = np.bincount(r.ravel(), img.ravel())
    rad_occurances = np.bincount(r.ravel())
    radial_profile = flat_count/rad_occurances
    # Kill the blocked region
    highest_ind = radial_profile.argmax()
    background = radial_profile[highest_ind:]
    background_dists = np.arange(highest_ind, len(radial_profile))
    # Plot if passed
    if plot is not False:
        if plot is True:
            fig, (ax1, ax2) = plt.subplots(2, 1, figsize=[6, 6])
        else:
            ax1, ax2 = plot
        ax1.scatter(center[0], center[1], color='m')
        ax1.imshow(mask*img)
        colors = list(np.tile(
            ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w', '.25', '.5', '.75'], 5))
        for peak in peaks:
            c = colors.pop(0)
            ax1.scatter(peak[1], peak[0], c=c, s=40)
        ax2.plot(background, linewidth=3)
        ax1.set_title("Masked image for profiling")
        ax2.set_title("Resulting radial profile")
        plt.draw()
        plt.tight_layout()
        plt.show()
    return background_dists, background
def _fit_double_exp(trace_y, trace_x, plot=False):
    """Fit a double exponential function to the passed trace.

    Ignore the region to the left of the peak.
    Takes:
        trace_y: a nx1 data trace
        trace_x: the x indices that go with trace_y
        plot: whether or not to plot the fit (True/False or an axis)
    Gives:
        vals: optimized parameters for a double exp
    """
    # A residual function to test how good our fits are
    dexp = support.double_exponential_1d
    diff = lambda i, j: np.sum(np.abs(np.subtract(i, j)))
    resi = lambda g: diff(dexp(trace_x, g[0], g[1], g[2], g[3], g[4]), trace_y)
    # Guess some values then optimize
    guess = [1.0, 1000.0, 0.01, 5000.0, 0.1]
    opt_res = optimize.minimize(
        resi, guess, jac=False,
        bounds=((0, np.inf), (0, np.inf), (0, 1), (0, np.inf), (0, 1)))
    vals = opt_res['x']
    # Plot if desired
    if plot is not False:
        if plot is True:
            fig, ax = plt.subplots(figsize=[6, 3])
        else:
            ax = plot
        # Unpack the fitted parameters directly; *zip(vals) handed the
        # curve function one-element tuples instead of scalars.
        plt.plot(trace_x, dexp(trace_x, *vals), 'c', linewidth=3)
        plt.plot(trace_x, trace_y, 'r', linewidth=3)
        ax.set_title("Real (r) and fitted (c) values")
        plt.draw()
        plt.tight_layout()
        plt.show()
    return vals
## Generate a fake background and subtract it from a passed image
def _fake_background(size, mask_center, mask_rad, diff_center, back_vals):
    """Generate a fake background image from the passed (fitted) values.

    Args:
        size (tuple): (row, col) size of image to generate
        mask_center: the center of the masked region
        mask_rad: the radius of the masked region
        diff_center: the center of the diffraction (and background) pattern
        back_vals (iter): the (a,b,c,d,e) values of the double exponential
    Returns:
        img: the fake background image
    """
    a, b, c, d, e = back_vals
    # Flip the coordinate order of both centers before handing them off.
    flipped_mask_cen = (mask_center[1], mask_center[0])
    flipped_diff_cen = (diff_center[1], diff_center[0])
    background = fake_img.background(size, flipped_diff_cen, a, b, c, d, e)
    mask = fake_img.masking(size, flipped_mask_cen, mask_rad)
    return background * mask
def find_and_remove_background(mask_cen, mask_rad, diff_cen, img, peaks,
                               plot=False):
    """Fit/subtract the background of an image and the peaks of its angles.
    Args:
        mask_cen: the center of the masking region
        mask_rad: the radius of the masking region
        diff_cen: the center of the diffraction pattern
        img: the image whose background we're interested in
        peaks: the peaks we want to exclude (at least one)
        plot: to plot the masks and fit or not (True/False) or a list of
            three axes to plot onto
    Returns:
        img: img-background, to best of abilities
    """
    # Resolve plotting targets: disabled, freshly created, or unpacked
    # from the caller-supplied list of three axes
    if plot is False:
        ax12, ax3 = False, False
    elif plot is True:
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=[6, 9])
        ax12 = (ax1, ax2)
    else:
        ax12 = plot[0], plot[1]
        ax3 = plot[2]
    # Collapse to a radial profile, fit it, then synthesize and subtract
    # the fitted background image
    back_x, back_y = background_collapse(diff_cen, img, peaks, plot=ax12)
    fit_params = _fit_double_exp(back_y, back_x, plot=ax3)
    synthetic_back = _fake_background(img.shape, mask_cen, mask_rad,
                                      diff_cen, fit_params)
    return img - synthetic_back
## Use peaks to find info about image
def find_diffraction_center(pairs, which_list='longest'):
    """ Find the diffraction center based off of pairs of points.
    By default, use the longest list of pairs.
    Takes:
        pairs: lists of point pairs, output of extract_pairs
        which_list: "longest" or index location in pairs
    Gives:
        center: row,col center of the diffraction image
    """
    # Which pair list to use
    if which_list == 'longest':
        # BUG FIX: np.argmax(map(len, pairs)) breaks under Python 3 —
        # the map object becomes a 0-d object array and argmax silently
        # yields 0, always picking the first list. Materialize the
        # lengths before taking the argmax.
        which_list = np.argmax([len(pair_list) for pair_list in pairs])
    # Midpoint of a pair: second point plus half the vector to the first
    mid = lambda pair: np.add(np.subtract(pair[0], pair[1]) / 2.0, pair[1])
    center = np.mean([mid(p) for p in pairs[which_list]], 0)
    return center
## Test if run directly
def main():
    """Smoke-test the pipeline on a bundled sample image."""
    import support
    import peak_finder
    sample_path = 'sampleimg1.tif'
    image = support.image_as_numpy(sample_path)  # load
    blocked = find_blocked_region(image, True)  # find blocker
    raw_peaks = peak_finder.peaks_from_image(image, blocked, plot=True)
    success, thetas, peaks = peak_finder.optimize_thetas(blocked[0], raw_peaks)
    dists, profile = background_collapse(blocked[0], image, thetas, True)
    fit_params = _fit_double_exp(profile, dists, True)

if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""A registry of :class:`Schema <marshmallow.Schema>` classes. This allows for string
lookup of schemas, which may be used with
:class:`fields.Nested <marshmallow.fields.Nested>`.
.. warning::
This module is treated as private API.
Users should not need to use this module directly.
"""
from __future__ import unicode_literals
from marshmallow.exceptions import RegistryError
# Maps both bare class names and fully module-qualified paths to lists
# of the class objects registered under them:
# {
#     <class_name>: <list of class objects>
#     <module_path_to_class>: <list of class objects>
# }
_registry = {}


def register(classname, cls):
    """Add a class to the registry of serializer classes. When a class is
    registered, an entry for both its classname and its full, module-qualified
    path are added to the registry.
    Example: ::
        class MyClass:
            pass
        register('MyClass', MyClass)
        # Registry:
        # {
        #     'MyClass': [path.to.MyClass],
        #     'path.to.MyClass': [path.to.MyClass],
        # }
    """
    module = cls.__module__
    # Full module path to the class, e.g. user.schemas.UserSchema
    fullpath = '.'.join([module, classname])
    # Append under the bare name only when every class already stored
    # there lives in a different module; otherwise replace the list, so
    # the registry never holds two copies of the same class.
    existing = _registry.get(classname)
    if existing is not None and all(entry.__module__ != module
                                    for entry in existing):
        existing.append(cls)
    else:
        _registry[classname] = [cls]
    # The fully-qualified key simply accumulates entries
    _registry.setdefault(fullpath, []).append(cls)
    return None
def get_class(classname, all=False):
    """Retrieve a class from the registry.
    :raises: marshmallow.exceptions.RegistryError if the class cannot be found
        or if there are multiple entries for the given class name.
    """
    try:
        entries = _registry[classname]
    except KeyError:
        raise RegistryError('Class with name {0!r} was not found. You may need '
                            'to import the class.'.format(classname))
    if len(entries) > 1:
        # Ambiguous bare name: hand back every match only when the
        # caller explicitly asked for all of them
        if not all:
            raise RegistryError('Multiple classes with name {0!r} '
                                'were found. Please use the full, '
                                'module-qualified path.'.format(classname))
        return entries
    return entries[0]
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
},
{
"datasource": {
"uid": "grafana"
},
"enable": true,
"name": "Deployments"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"id": 1,
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Panel with transformations remains unchanged",
"transformations": [
{
"id": "labelsToFields",
"options": {
"keepLabels": [
"job",
"instance"
],
"mode": "rows"
}
},
{
"id": "merge",
"options": {}
}
],
"type": "timeseries"
},
{
"autoMigrateFrom": "graph",
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"id": 2,
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Graph panel remains unchanged",
"type": "timeseries",
"yAxes": [
{
"show": true
}
]
},
{
"collapsed": false,
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"id": 3,
"panels": [
{
"fieldConfig": {
"defaults": {
"unit": "bytes"
}
},
"id": 4,
"title": "Nested stat panel",
"type": "stat"
}
],
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Row with nested panels",
"type": "row"
}
],
"refresh": "",
"schemaVersion": 42,
"tags": [],
"templating": {
"list": [
{
"datasource": "prometheus",
"name": "environment",
"options": [],
"type": "query"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "V32 No-Op Migration Test Dashboard",
"weekStart": ""
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/testdata/output/latest_version/v32.no_op_migration.v42.json |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.