code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30
values | license stringclasses 15
values | size int64 3 1.01M |
|---|---|---|---|---|---|
/*!
* Copyright 2014 Apereo Foundation (AF) Licensed under the
* Educational Community License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS"
* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
define(['jquery', 'oae.core'], function ($, oae) {
return function (uid) {
// The widget container
var $rootel = $('#' + uid);
// Holds the current state of the user profile as it is updated
var profile = _.extend({}, oae.data.me);
// Holds an email address that is pending verification from the user, if
// applicable
var unverifiedEmail = null;
/**
 * Check whether the profile currently held in widget state carries a
 * display name that passes the platform's validation rules
 *
 * @return {Boolean}    Whether or not the profile has a valid display name
 */
var isValidDisplayName = function() {
    var validation = oae.api.util.validation();
    return validation.isValidDisplayName(profile.displayName);
};
/**
 * Check whether the profile currently held in widget state carries an
 * email address that passes the platform's validation rules
 *
 * @return {Boolean}    Whether or not the profile has a valid email
 */
var isValidEmail = function() {
    var validation = oae.api.util.validation();
    return validation.isValidEmail(profile.email);
};
/**
 * Determine if the current profile is valid, such that it would allow a user
 * to dismiss the user profile modal. A profile is valid when both its email
 * and its display name are valid
 *
 * @return {Boolean}    Whether or not the profile is valid in its current state
 */
var isValidProfile = function() {
    // Keep the original evaluation order: email first, then display name
    if (!isValidEmail()) {
        return false;
    }
    return isValidDisplayName();
};
/**
 * Show the main panel, hiding the email panel
 */
var showMainPanel = function() {
    // Scope the selectors to the widget container, consistent with the rest of
    // this widget, so multiple widget instances on a page don't affect each other
    $('#editprofile-panel-email-container', $rootel).hide();
    $('#editprofile-panel-main-container', $rootel).show();
};
/**
 * Show the email panel, hiding the main panel
 */
var showEmailPanel = function() {
    // Scope the selectors to the widget container, consistent with the rest of
    // this widget, so multiple widget instances on a page don't affect each other
    $('#editprofile-panel-main-container', $rootel).hide();
    $('#editprofile-panel-email-container', $rootel).show();
};
/**
 * Show the appropriate panel based on the user's profile state. When the only
 * outstanding issue with the profile is an unverified email address, the user
 * is taken straight to the email verification panel; otherwise the main edit
 * panel is shown
 */
var showDefaultPanel = function() {
    // Look up whether an email verification is pending for this user
    oae.api.user.getEmailVerificationStatus(oae.data.me.id, function(err, email) {
        // Errors while checking for a pending email verification are deliberately
        // ignored, as there being no pending verification is the 99.999% use-case
        // and we wouldn't want to annoy uninterested users with an error
        // notification or anything
        unverifiedEmail = email;

        var awaitingVerificationOnly = (isValidDisplayName() && !isValidEmail() && unverifiedEmail);
        if (awaitingVerificationOnly) {
            // The profile is fine apart from the pending email verification, so
            // take the user directly to the panel that tells them to verify it
            renderEditProfileEmailPanel();
        } else {
            renderEditProfileMainPanel();
        }
    });
};
/**
 * Render the edit profile "main" panel with validation and switch the current modal view to
 * the "main" panel
 */
var renderEditProfileMainPanel = function() {
    // If the display name is not valid, clear it to inform the template that the user
    // has no real display name
    if (!isValidDisplayName()) {
        profile.displayName = null;

        // Profiles with invalid display names will have had visibility set to private, so we
        // reset it to the tenant's default visibility
        // @see https://github.com/oaeproject/3akai-ux/pull/4100
        var tenantVisibility = oae.api.config.getValue('oae-principals', 'user', 'visibility');
        profile.visibility = tenantVisibility;
    }

    // Render the form elements into the main panel container
    oae.api.util.template().render($('#editprofile-panel-main-template', $rootel), {
        'isValidProfile': isValidProfile(),
        'profile': profile,
        'unverifiedEmail': unverifiedEmail
    }, $('#editprofile-panel-main-container', $rootel));

    // Detect changes in the form and enable the submit button. The handler is bound to
    // the freshly rendered form element, so re-rendering does not stack handlers
    $('#editprofile-form', $rootel).on(oae.api.util.getFormChangeEventNames(), function() {
        $('#editprofile-panel-main-container button[type="submit"]', $rootel).prop('disabled', false);
    });

    // Initialize jQuery validate on the form. `editProfile` is only invoked once
    // client-side validation passes
    var validateOpts = {
        'submitHandler': editProfile,
        'methods': {
            'displayname': {
                'method': oae.api.util.validation().isValidDisplayName,
                'text': oae.api.i18n.translate('__MSG__PLEASE_ENTER_A_VALID_NAME__')
            }
        }
    };
    oae.api.util.validation().validate($('#editprofile-form', $rootel), validateOpts);

    // Switch the view to the main panel
    showMainPanel();
};
/**
 * Render the edit profile "email" panel that instructs the user how to proceed with
 * verifying their email. It will also switch the view to the "email" panel.
 */
var renderEditProfileEmailPanel = function() {
    // Data the email verification instruction template is rendered with
    var templateData = {
        'isValidProfile': isValidProfile(),
        'profile': profile,
        'unverifiedEmail': unverifiedEmail
    };
    oae.api.util.template().render(
        $('#editprofile-panel-email-template', $rootel),
        templateData,
        $('#editprofile-panel-email-container', $rootel));

    // Switch the view to the email panel
    showEmailPanel();
};
/**
 * Perform the edit profile action. Collects the display name, email and
 * visibility from the form, submits the update, and either closes the modal
 * (no email change) or shows the email verification panel (email changed).
 *
 * @return {Boolean}    Always `false`, to avoid the default form submit behavior
 */
var editProfile = function() {
    // Disable the form while the update request is in flight
    $('#editprofile-form *', $rootel).prop('disabled', true);

    var newDisplayName = $.trim($('#editprofile-name', $rootel).val());
    // Emails are normalized to lower case before comparison and storage
    var newEmail = $.trim($('#editprofile-email', $rootel).val()).toLowerCase();
    var newVisibility = $('.oae-large-options-container input[type="radio"]:checked', $rootel).val();
    var params = {
        'displayName': newDisplayName,
        'email': newEmail,
        'visibility': newVisibility
    };

    // Determine if this update constitutes a change in email. If so we will need to notify
    // the user that the new email is pending verification
    var isEmailChange = (newEmail !== oae.data.me.email);

    oae.api.user.updateUser(params, function (err, data) {
        if (!err) {
            // Update the user profile in state
            profile = data;

            // Notify the rest of the UI widgets that the profile has been updated
            $(document).trigger('oae.editprofile.done', data);

            if (!isEmailChange) {
                // If the update succeeded and didn't have an email change, close the modal
                // while showing a notification
                closeModal();
                oae.api.util.notification(
                    oae.api.i18n.translate('__MSG__PROFILE_EDITED__'),
                    oae.api.i18n.translate('__MSG__PROFILE_DETAILS_EDIT_SUCCESS__', 'editprofile'));
            } else {
                // Since the email is updated, a verification email will be sent. We should
                // tell the user that they must validate their email address from their
                // email inbox
                unverifiedEmail = newEmail;
                renderEditProfileEmailPanel();
            }
        } else {
            // If the update failed, enable the form and show an error notification
            oae.api.util.notification(
                oae.api.i18n.translate('__MSG__PROFILE_NOT_EDITED__'),
                oae.api.i18n.translate('__MSG__PROFILE_DETAILS_EDIT_FAIL__', 'editprofile'),
                'error');

            // Enable the form
            $('#editprofile-form *', $rootel).prop('disabled', false);
        }
    });

    // Avoid default form submit behavior
    return false;
};
/**
 * Reset the widget to its original state when the modal dialog is opened and closed.
 * Ideally this would only be necessary when the modal is hidden, but IE10+ fires `input`
 * events while Bootstrap is rendering the modal, and those events can "undo" parts of the
 * reset. Hooking into the `shown` event provides the chance to compensate.
 */
var setUpReset = function() {
    var $modal = $('#editprofile-modal', $rootel);
    $modal.on('shown.bs.modal', showDefaultPanel);
    $modal.on('hidden.bs.modal', function (evt) {
        // Reset the form and clear any validation messages
        var $form = $('#editprofile-form', $rootel);
        $form[0].reset();
        oae.api.util.validation().clear($form);

        // Re-enable all form elements, but keep the submit button disabled until
        // the user actually changes something
        $('#editprofile-form *', $rootel).prop('disabled', false);
        $('#editprofile-form button[type="submit"]', $rootel).prop('disabled', true);
        showMainPanel();
    });
};
/**
 * Apply the listeners to the document that will launch the editprofile modal,
 * either through a click on a trigger element or through a custom event
 */
var setUpModalListeners = function() {
    $(document)
        .on('click', '.oae-trigger-editprofile', showModal)
        .on('oae.trigger.editprofile', showModal);
};
/**
 * Show the edit profile modal and render the appropriate panel
 */
var showModal = function() {
    // A static backdrop prevents dismissing the modal by clicking outside it
    var modalOptions = {
        'backdrop': 'static'
    };
    $('#editprofile-modal', $rootel).modal(modalOptions);
    showDefaultPanel();
};
/**
 * Close the edit profile modal, segueing into the terms and conditions widget
 * when the user still needs to accept them
 */
var closeModal = function() {
    $('#editprofile-modal', $rootel).modal('hide');

    if (!oae.data.me.needsToAcceptTC) {
        return;
    }

    // It is possible that we entered the edit profile modal to clean up our
    // user profile before accepting the terms and conditions (see `oae.api.js`
    // function `setupPreUseActions`). Therefore we need to ensure we segue to
    // the terms and conditions widget after we close the editprofile modal
    oae.api.widget.insertWidget('termsandconditions', null, null, true);
};
/**
 * Bind all the action listeners needed for the user to interact with the "main" panel in
 * the edit profile modal: focusing the name field, tracking the visibility radio group,
 * and the resend / cancel email-verification actions
 */
var bindEditProfileMainPanelListeners = function() {
    $('#editprofile-modal', $rootel).on('shown.bs.modal', function() {
        // Set focus to the display name field
        $('#editprofile-name', $rootel).focus();
    });

    // Catch changes in the visibility radio group and move the `checked` style
    // to the label of the newly selected option
    $rootel.on('change', '#editprofile-panel-main-container .oae-large-options-container input[type="radio"]', function() {
        $('.oae-large-options-container label', $rootel).removeClass('checked');
        $(this).parents('label').addClass('checked');
    });

    // When the "Resend Verification" button is clicked, resend the email verification
    $rootel.on('click', '#editprofile-email-verification .editprofile-email-verification-action button', function() {
        // Disable all actions in the modal while the resend request is in flight
        $('#editprofile-form *', $rootel).prop('disabled', true);
        oae.api.user.resendEmailToken(oae.data.me.id, function(err) {
            if (!err) {
                // If the token resent successfully show a notification
                oae.api.util.notification(
                    oae.api.i18n.translate('__MSG__VERIFICATION_EMAIL_SENT__', 'editprofile'),
                    oae.api.i18n.translate('__MSG__A_VERIFICATION_EMAIL_HAS_BEEN_SENT_TO_UNVERIFIED_EMAIL__', 'editprofile', {
                        'unverifiedEmail': unverifiedEmail
                    }));
            } else {
                // If the token failed to resend, show a notification
                oae.api.util.notification(
                    oae.api.i18n.translate('__MSG__VERIFICATION_EMAIL_FAILED__', 'editprofile'),
                    oae.api.i18n.translate('__MSG__A_VERIFICATION_EMAIL_FAILED_TO_BE_SENT_TO_UNVERIFIED_EMAIL__', 'editprofile', {
                        'unverifiedEmail': unverifiedEmail
                    }),
                    'error');
            }

            // Re-enable the form regardless of the outcome
            $('#editprofile-form *', $rootel).prop('disabled', false);
        });
    });

    // When the "Cancel Verification" button is clicked, delete the pending email verification
    // and close the container that indicates there is a pending verification
    $rootel.on('click', '#editprofile-email-verification .editprofile-email-verification-cancel button', function(evt) {
        // Allow the modal to be saved now
        $('#editprofile-panel-main-container button[type="submit"]', $rootel).prop('disabled', false);
        oae.api.user.deletePendingEmailVerification(function(err) {
            if (!err) {
                unverifiedEmail = null;

                // If cancelling succeeded, simply remove the email verification panel
                $('#editprofile-email-verification', $rootel).slideUp();
            } else {
                // If cancelling failed, show an error notification
                oae.api.util.notification(
                    oae.api.i18n.translate('__MSG__CANCEL_EMAIL_VERIFICATION_FAILED__', 'editprofile'),
                    oae.api.i18n.translate('__MSG__AN_ERROR_OCCURRED_WHILE_CANCELLING_THE_EMAIL_VERIFICATION__', 'editprofile'),
                    'error');
            }
        });

        // This button lives inside the form, so suppress the default submit
        evt.preventDefault();
    });
};
/**
 * Bind all the action listeners needed for the user to interact with the "email" panel in
 * the edit profile modal
 */
var bindEditProfileEmailPanelListeners = function() {
    // The "Done" (primary) button simply closes the modal
    $rootel.on('click', '#editprofile-panel-email-container .modal-footer button.btn-primary', function() {
        closeModal();
    });

    // The "back" (link) button re-renders and enables the main panel
    $rootel.on('click', '#editprofile-panel-email-container .modal-footer button.btn-link', function() {
        renderEditProfileMainPanel();
    });
};
// Initialize the widget: wire up the open/close reset behavior, the document-level
// modal launchers, and the listeners for both the "main" and "email" panels
setUpReset();
setUpModalListeners();
bindEditProfileMainPanelListeners();
bindEditProfileEmailPanelListeners();
};
});
| nicolaasmatthijs/3akai-ux | node_modules/oae-core/editprofile/js/editprofile.js | JavaScript | apache-2.0 | 16,154 |
# ./darwinpush/xb/raw/sm.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:8eb48f8f0e727f488907a816c69d6ed98ba221c7
# Generated 2015-04-23 16:42:14.513978 by PyXB version 1.2.4 using Python 3.4.1.final.0
# Namespace http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1 [xmlns:sm]
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5049f1de-e9cf-11e4-bb50-a0481ca50ab0')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import darwinpush.xb.ct as _ImportedBinding_darwinpush_xb_ct
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document. This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser. You might pass the URI from which
    the document was obtained.
    """
    # When the SAX parsing style is not selected, fall back to DOM parsing
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    # The SAX parser consumes bytes; encode text input with the configured encoding
    xmld = xml_text
    if isinstance(xmld, _six.text_type):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.
    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    # Fall back to this module's namespace when no default is supplied
    ns = default_namespace
    if ns is None:
        ns = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, ns)
# Atomic simple type: {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}MsgCategoryType
class MsgCategoryType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """The category of operator message (string enumeration)"""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'MsgCategoryType')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 15, 1)
    _Documentation = 'The category of operator message'
# Register the permitted enumeration values for MsgCategoryType; each becomes a
# class-level constant (e.g. MsgCategoryType.Train)
MsgCategoryType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=MsgCategoryType, enum_prefix=None)
MsgCategoryType.Train = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Train', tag='Train')
MsgCategoryType.Station = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Station', tag='Station')
MsgCategoryType.Connections = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Connections', tag='Connections')
MsgCategoryType.System = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='System', tag='System')
MsgCategoryType.Misc = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Misc', tag='Misc')
MsgCategoryType.PriorTrains = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='PriorTrains', tag='PriorTrains')
MsgCategoryType.PriorOther = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='PriorOther', tag='PriorOther')
MsgCategoryType._InitializeFacetMap(MsgCategoryType._CF_enumeration)
# Make the type discoverable through the namespace's category registry
Namespace.addCategoryObject('typeBinding', 'MsgCategoryType', MsgCategoryType)
# Atomic simple type: {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}MsgSeverityType
class MsgSeverityType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """The severity of operator message (string enumeration of '0'..'3')"""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'MsgSeverityType')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 29, 1)
    _Documentation = 'The severity of operator message'
# Register the permitted enumeration values; numeric strings get an 'n' tag
# prefix so they are valid Python identifiers (MsgSeverityType.n0 == '0')
MsgSeverityType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=MsgSeverityType, enum_prefix=None)
MsgSeverityType.n0 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='0', tag='n0')
MsgSeverityType.n1 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='1', tag='n1')
MsgSeverityType.n2 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='2', tag='n2')
MsgSeverityType.n3 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='3', tag='n3')
MsgSeverityType._InitializeFacetMap(MsgSeverityType._CF_enumeration)
# Make the type discoverable through the namespace's category registry
Namespace.addCategoryObject('typeBinding', 'MsgSeverityType', MsgSeverityType)
# Complex type [anonymous] with content type MIXED
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
    """The content of the message: mixed text with optional embedded <p> and <a>
    elements (a constrained HTML subset)"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 58, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}p uses Python identifier p
    __p = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'p'), 'p', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_httpwww_thalesgroup_comrttiPushPortStationMessagesv1p', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1), )

    p = property(__p.value, __p.set, None, 'Defines an HTML paragraph')

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}a uses Python identifier a
    __a = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'a'), 'a', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_httpwww_thalesgroup_comrttiPushPortStationMessagesv1a', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1), )

    a = property(__a.value, __a.set, None, 'Defines an HTML anchor')

    _ElementMap.update({
        __p.name() : __p,
        __a.name() : __a
    })
    _AttributeMap.update({

    })
# Complex type [anonymous] with content type MIXED
class CTD_ANON_ (pyxb.binding.basis.complexTypeDefinition):
    """Defines an HTML paragraph: mixed text content with optional embedded
    <a> anchors"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 88, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}a uses Python identifier a
    __a = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'a'), 'a', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON__httpwww_thalesgroup_comrttiPushPortStationMessagesv1a', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1), )

    a = property(__a.value, __a.set, None, 'Defines an HTML anchor')

    _ElementMap.update({
        __a.name() : __a
    })
    _AttributeMap.update({

    })
# Complex type [anonymous] with content type SIMPLE
class CTD_ANON_2 (pyxb.binding.basis.complexTypeDefinition):
    """Defines an HTML anchor: simple string content with a required `href`
    attribute"""
    _TypeDefinition = pyxb.binding.datatypes.string
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 98, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.string

    # Attribute href uses Python identifier href (required)
    __href = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'href'), 'href', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_2_href', pyxb.binding.datatypes.string, required=True)
    __href._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 101, 5)
    __href._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 101, 5)

    href = property(__href.value, __href.set, None, None)

    _ElementMap.update({

    })
    _AttributeMap.update({
        __href.name() : __href
    })
# Complex type {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}StationMessage with content type ELEMENT_ONLY
class StationMessage (pyxb.binding.basis.complexTypeDefinition):
    """Darwin Workstation Station Message: a message applied to zero or more
    stations, with category, severity and suppression attributes"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'StationMessage')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 41, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}Station uses Python identifier Station
    __Station = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Station'), 'Station', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_httpwww_thalesgroup_comrttiPushPortStationMessagesv1Station', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3), )

    Station = property(__Station.value, __Station.set, None, 'The Stations the message is being applied to')

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}Msg uses Python identifier Msg
    __Msg = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Msg'), 'Msg', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_httpwww_thalesgroup_comrttiPushPortStationMessagesv1Msg', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3), )

    Msg = property(__Msg.value, __Msg.set, None, 'The content of the message')

    # Attribute id uses Python identifier id (required int)
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_id', pyxb.binding.datatypes.int, required=True)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 66, 2)
    __id._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 66, 2)

    id = property(__id.value, __id.set, None, None)

    # Attribute cat uses Python identifier cat (required MsgCategoryType)
    __cat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'cat'), 'cat', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_cat', MsgCategoryType, required=True)
    __cat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 67, 2)
    __cat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 67, 2)

    cat = property(__cat.value, __cat.set, None, 'The category of message')

    # Attribute sev uses Python identifier sev (required MsgSeverityType)
    __sev = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'sev'), 'sev', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_sev', MsgSeverityType, required=True)
    __sev._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 72, 2)
    __sev._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 72, 2)

    sev = property(__sev.value, __sev.set, None, 'The severity of the message')

    # Attribute suppress uses Python identifier suppress (optional boolean, defaults to 'false')
    __suppress = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'suppress'), 'suppress', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_suppress', pyxb.binding.datatypes.boolean, unicode_default='false')
    __suppress._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 77, 2)
    __suppress._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 77, 2)

    suppress = property(__suppress.value, __suppress.set, None, 'Whether the train running information is suppressed to the public')

    _ElementMap.update({
        __Station.name() : __Station,
        __Msg.name() : __Msg
    })
    _AttributeMap.update({
        __id.name() : __id,
        __cat.name() : __cat,
        __sev.name() : __sev,
        __suppress.name() : __suppress
    })
# Make the type discoverable through the namespace's category registry
Namespace.addCategoryObject('typeBinding', 'StationMessage', StationMessage)
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_3 (pyxb.binding.basis.complexTypeDefinition):
    """The Stations the message is being applied to: an empty element carrying
    a single required `crs` (station code) attribute"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 50, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute crs uses Python identifier crs (required CrsType from the imported ct bindings)
    __crs = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'crs'), 'crs', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_3_crs', _ImportedBinding_darwinpush_xb_ct.CrsType, required=True)
    __crs._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 51, 5)
    __crs._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 51, 5)

    crs = property(__crs.value, __crs.set, None, None)

    _ElementMap.update({

    })
    _AttributeMap.update({
        __crs.name() : __crs
    })
# Top-level element bindings for the <p> and <a> HTML-subset elements, plus
# their registration as members of the CTD_ANON (message content) complex type
p = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'p'), CTD_ANON_, documentation='Defines an HTML paragraph', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1))
Namespace.addCategoryObject('elementBinding', p.name().localName(), p)

a = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1))
Namespace.addCategoryObject('elementBinding', a.name().localName(), a)

CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'p'), CTD_ANON_, scope=CTD_ANON, documentation='Defines an HTML paragraph', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, scope=CTD_ANON, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1)))
def _BuildAutomaton ():
    """Build the finite-automaton content model for CTD_ANON: any number of
    <p> and <a> child elements in any order (mixed content)."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    # Both child elements are optional and unbounded (min=0, max=None)
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 60, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 61, 6))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'p')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 60, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'a')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 61, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    # Each state loops on itself, incrementing its counter
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
# Attach the content-model automaton to the binding class
CTD_ANON._Automaton = _BuildAutomaton()
# Register <a> as a member element of the paragraph type
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, scope=CTD_ANON_, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1)))
def _BuildAutomaton_ ():
    """Build the finite-automaton content model for CTD_ANON_ (the paragraph
    type): any number of <a> child elements."""
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac

    # The <a> child is optional and unbounded (min=0, max=None)
    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 90, 4))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'a')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 90, 4))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # The single state loops on itself, incrementing its counter
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
# Attach the content-model automaton to the binding class
CTD_ANON_._Automaton = _BuildAutomaton_()
StationMessage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Station'), CTD_ANON_3, scope=StationMessage, documentation='The Stations the message is being applied to', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3)))
StationMessage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Msg'), CTD_ANON, scope=StationMessage, documentation='The content of the message', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3)))
def _BuildAutomaton_2 ():
    # Build the finite-automaton content model for StationMessage
    # (PyXB-generated): zero or more 'Station' elements followed by one 'Msg'.
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac
    counters = set()
    # Counter for the repeatable 'Station' particle (min=0, unbounded).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3))
    counters.add(cc_0)
    states = []
    # final_update=None: the 'Station' state is not an accepting state --
    # the document cannot end before 'Msg' has been seen.
    final_update = None
    symbol = pyxb.binding.content.ElementUse(StationMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Station')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # Empty final_update set: the 'Msg' state is accepting with no counter
    # conditions left to satisfy.
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(StationMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Msg')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    # From 'Station': loop on another 'Station' (increment cc_0) ...
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    # ... or advance to 'Msg' (release cc_0).
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    # 'Msg' is terminal: no outgoing transitions.
    transitions = []
    st_1._set_transitionSet(transitions)
    # Third positional argument False presumably marks the automaton as
    # non-nullable (content may not be empty; 'Msg' is required) -- per
    # pyxb.utils.fac.Automaton; confirm against the pyxb API if modifying.
    return fac.Automaton(states, counters, False, containing_state=None)
# Attach the content-model automaton for StationMessage (built just above).
StationMessage._Automaton = _BuildAutomaton_2()
| HackTrain/darwinpush | darwinpush/xb/raw/sm.py | Python | apache-2.0 | 22,743 |
/*
* Copyright (c) 2020, Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef NRFX_CONFIG_NRF52820_H__
#define NRFX_CONFIG_NRF52820_H__
#ifndef NRFX_CONFIG_H__
#error "This file should not be included directly. Include nrfx_config.h instead."
#endif
// <<< Use Configuration Wizard in Context Menu >>>\n
// <h> nRF_Drivers
// <e> NRFX_CLOCK_ENABLED - nrfx_clock - CLOCK peripheral driver
//==========================================================
#ifndef NRFX_CLOCK_ENABLED
#define NRFX_CLOCK_ENABLED 0
#endif
// <o> NRFX_CLOCK_CONFIG_LF_SRC - LF Clock Source
// <0=> RC
// <1=> XTAL
// <2=> Synth
// <131073=> External Low Swing
// <196609=> External Full Swing
#ifndef NRFX_CLOCK_CONFIG_LF_SRC
#define NRFX_CLOCK_CONFIG_LF_SRC 1
#endif
// <q> NRFX_CLOCK_CONFIG_LF_CAL_ENABLED - Enables LF Clock Calibration Support
#ifndef NRFX_CLOCK_CONFIG_LF_CAL_ENABLED
#define NRFX_CLOCK_CONFIG_LF_CAL_ENABLED 0
#endif
// <q> NRFX_CLOCK_CONFIG_LFXO_TWO_STAGE_ENABLED - Enables two-stage LFXO start procedure
// <i> If set to a non-zero value, LFRC will be started before LFXO and corresponding
// <i> event will be generated. It means that CPU will be woken up when LFRC
// <i> oscillator starts, but user callback will be invoked only after LFXO
// <i> finally starts.
#ifndef NRFX_CLOCK_CONFIG_LFXO_TWO_STAGE_ENABLED
#define NRFX_CLOCK_CONFIG_LFXO_TWO_STAGE_ENABLED 0
#endif
// <o> NRFX_CLOCK_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_CLOCK_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_CLOCK_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_CLOCK_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_CLOCK_CONFIG_LOG_ENABLED
#define NRFX_CLOCK_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_CLOCK_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_CLOCK_CONFIG_LOG_LEVEL
#define NRFX_CLOCK_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_CLOCK_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_CLOCK_CONFIG_INFO_COLOR
#define NRFX_CLOCK_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_CLOCK_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_CLOCK_CONFIG_DEBUG_COLOR
#define NRFX_CLOCK_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_COMP_ENABLED - nrfx_comp - COMP peripheral driver
//==========================================================
#ifndef NRFX_COMP_ENABLED
#define NRFX_COMP_ENABLED 0
#endif
// <o> NRFX_COMP_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_COMP_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_COMP_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_COMP_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_COMP_CONFIG_LOG_ENABLED
#define NRFX_COMP_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_COMP_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_COMP_CONFIG_LOG_LEVEL
#define NRFX_COMP_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_COMP_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_COMP_CONFIG_INFO_COLOR
#define NRFX_COMP_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_COMP_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_COMP_CONFIG_DEBUG_COLOR
#define NRFX_COMP_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_EGU_ENABLED - nrfx_egu - EGU peripheral driver.
//==========================================================
#ifndef NRFX_EGU_ENABLED
#define NRFX_EGU_ENABLED 0
#endif
// <q> NRFX_EGU0_ENABLED - Enable EGU0 instance.
#ifndef NRFX_EGU0_ENABLED
#define NRFX_EGU0_ENABLED 0
#endif
// <q> NRFX_EGU1_ENABLED - Enable EGU1 instance.
#ifndef NRFX_EGU1_ENABLED
#define NRFX_EGU1_ENABLED 0
#endif
// <q> NRFX_EGU2_ENABLED - Enable EGU2 instance.
#ifndef NRFX_EGU2_ENABLED
#define NRFX_EGU2_ENABLED 0
#endif
// <q> NRFX_EGU3_ENABLED - Enable EGU3 instance.
#ifndef NRFX_EGU3_ENABLED
#define NRFX_EGU3_ENABLED 0
#endif
// <q> NRFX_EGU4_ENABLED - Enable EGU4 instance.
#ifndef NRFX_EGU4_ENABLED
#define NRFX_EGU4_ENABLED 0
#endif
// <q> NRFX_EGU5_ENABLED - Enable EGU5 instance.
#ifndef NRFX_EGU5_ENABLED
#define NRFX_EGU5_ENABLED 0
#endif
// <o> NRFX_EGU_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority.
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_EGU_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_EGU_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// </e>
// <e> NRFX_GPIOTE_ENABLED - nrfx_gpiote - GPIOTE peripheral driver
//==========================================================
#ifndef NRFX_GPIOTE_ENABLED
#define NRFX_GPIOTE_ENABLED 0
#endif
// <o> NRFX_GPIOTE_CONFIG_NUM_OF_LOW_POWER_EVENTS - Number of lower power input pins
#ifndef NRFX_GPIOTE_CONFIG_NUM_OF_LOW_POWER_EVENTS
#define NRFX_GPIOTE_CONFIG_NUM_OF_LOW_POWER_EVENTS 1
#endif
// <o> NRFX_GPIOTE_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_GPIOTE_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_GPIOTE_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_GPIOTE_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_GPIOTE_CONFIG_LOG_ENABLED
#define NRFX_GPIOTE_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_GPIOTE_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_GPIOTE_CONFIG_LOG_LEVEL
#define NRFX_GPIOTE_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_GPIOTE_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_GPIOTE_CONFIG_INFO_COLOR
#define NRFX_GPIOTE_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_GPIOTE_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_GPIOTE_CONFIG_DEBUG_COLOR
#define NRFX_GPIOTE_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_NVMC_ENABLED - nrfx_nvmc - NVMC peripheral driver
//==========================================================
#ifndef NRFX_NVMC_ENABLED
#define NRFX_NVMC_ENABLED 0
#endif
// </e>
// <e> NRFX_POWER_ENABLED - nrfx_power - POWER peripheral driver
//==========================================================
#ifndef NRFX_POWER_ENABLED
#define NRFX_POWER_ENABLED 0
#endif
// <o> NRFX_POWER_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_POWER_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_POWER_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// </e>
// <e> NRFX_PPI_ENABLED - nrfx_ppi - PPI peripheral allocator
//==========================================================
#ifndef NRFX_PPI_ENABLED
#define NRFX_PPI_ENABLED 0
#endif
// <e> NRFX_PPI_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_PPI_CONFIG_LOG_ENABLED
#define NRFX_PPI_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_PPI_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_PPI_CONFIG_LOG_LEVEL
#define NRFX_PPI_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_PPI_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_PPI_CONFIG_INFO_COLOR
#define NRFX_PPI_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_PPI_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_PPI_CONFIG_DEBUG_COLOR
#define NRFX_PPI_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_PRS_ENABLED - nrfx_prs - Peripheral Resource Sharing module
//==========================================================
#ifndef NRFX_PRS_ENABLED
#define NRFX_PRS_ENABLED 0
#endif
// <q> NRFX_PRS_BOX_0_ENABLED - Enables box 0 in the module.
#ifndef NRFX_PRS_BOX_0_ENABLED
#define NRFX_PRS_BOX_0_ENABLED 0
#endif
// <q> NRFX_PRS_BOX_1_ENABLED - Enables box 1 in the module.
#ifndef NRFX_PRS_BOX_1_ENABLED
#define NRFX_PRS_BOX_1_ENABLED 0
#endif
// <q> NRFX_PRS_BOX_2_ENABLED - Enables box 2 in the module.
#ifndef NRFX_PRS_BOX_2_ENABLED
#define NRFX_PRS_BOX_2_ENABLED 0
#endif
// <e> NRFX_PRS_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_PRS_CONFIG_LOG_ENABLED
#define NRFX_PRS_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_PRS_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_PRS_CONFIG_LOG_LEVEL
#define NRFX_PRS_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_PRS_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_PRS_CONFIG_INFO_COLOR
#define NRFX_PRS_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_PRS_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_PRS_CONFIG_DEBUG_COLOR
#define NRFX_PRS_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_QDEC_ENABLED - nrfx_qdec - QDEC peripheral driver
//==========================================================
#ifndef NRFX_QDEC_ENABLED
#define NRFX_QDEC_ENABLED 0
#endif
// <o> NRFX_QDEC_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_QDEC_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_QDEC_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_QDEC_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_QDEC_CONFIG_LOG_ENABLED
#define NRFX_QDEC_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_QDEC_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_QDEC_CONFIG_LOG_LEVEL
#define NRFX_QDEC_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_QDEC_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_QDEC_CONFIG_INFO_COLOR
#define NRFX_QDEC_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_QDEC_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_QDEC_CONFIG_DEBUG_COLOR
#define NRFX_QDEC_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_RNG_ENABLED - nrfx_rng - RNG peripheral driver
//==========================================================
#ifndef NRFX_RNG_ENABLED
#define NRFX_RNG_ENABLED 0
#endif
// <o> NRFX_RNG_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_RNG_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_RNG_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_RNG_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_RNG_CONFIG_LOG_ENABLED
#define NRFX_RNG_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_RNG_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_RNG_CONFIG_LOG_LEVEL
#define NRFX_RNG_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_RNG_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_RNG_CONFIG_INFO_COLOR
#define NRFX_RNG_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_RNG_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_RNG_CONFIG_DEBUG_COLOR
#define NRFX_RNG_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_RTC_ENABLED - nrfx_rtc - RTC peripheral driver
//==========================================================
#ifndef NRFX_RTC_ENABLED
#define NRFX_RTC_ENABLED 0
#endif
// <q> NRFX_RTC0_ENABLED - Enable RTC0 instance
#ifndef NRFX_RTC0_ENABLED
#define NRFX_RTC0_ENABLED 0
#endif
// <q> NRFX_RTC1_ENABLED - Enable RTC1 instance
#ifndef NRFX_RTC1_ENABLED
#define NRFX_RTC1_ENABLED 0
#endif
// <o> NRFX_RTC_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_RTC_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_RTC_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_RTC_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_RTC_CONFIG_LOG_ENABLED
#define NRFX_RTC_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_RTC_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_RTC_CONFIG_LOG_LEVEL
#define NRFX_RTC_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_RTC_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_RTC_CONFIG_INFO_COLOR
#define NRFX_RTC_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_RTC_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_RTC_CONFIG_DEBUG_COLOR
#define NRFX_RTC_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_SPIM_ENABLED - nrfx_spim - SPIM peripheral driver
//==========================================================
#ifndef NRFX_SPIM_ENABLED
#define NRFX_SPIM_ENABLED 0
#endif
// <q> NRFX_SPIM0_ENABLED - Enable SPIM0 instance
#ifndef NRFX_SPIM0_ENABLED
#define NRFX_SPIM0_ENABLED 0
#endif
// <q> NRFX_SPIM1_ENABLED - Enable SPIM1 instance
#ifndef NRFX_SPIM1_ENABLED
#define NRFX_SPIM1_ENABLED 0
#endif
// <o> NRFX_SPIM_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_SPIM_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_SPIM_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_SPIM_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_SPIM_CONFIG_LOG_ENABLED
#define NRFX_SPIM_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_SPIM_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_SPIM_CONFIG_LOG_LEVEL
#define NRFX_SPIM_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_SPIM_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_SPIM_CONFIG_INFO_COLOR
#define NRFX_SPIM_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_SPIM_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_SPIM_CONFIG_DEBUG_COLOR
#define NRFX_SPIM_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_SPIS_ENABLED - nrfx_spis - SPIS peripheral driver
//==========================================================
#ifndef NRFX_SPIS_ENABLED
#define NRFX_SPIS_ENABLED 0
#endif
// <q> NRFX_SPIS0_ENABLED - Enable SPIS0 instance
#ifndef NRFX_SPIS0_ENABLED
#define NRFX_SPIS0_ENABLED 0
#endif
// <q> NRFX_SPIS1_ENABLED - Enable SPIS1 instance
#ifndef NRFX_SPIS1_ENABLED
#define NRFX_SPIS1_ENABLED 0
#endif
// <o> NRFX_SPIS_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_SPIS_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_SPIS_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_SPIS_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_SPIS_CONFIG_LOG_ENABLED
#define NRFX_SPIS_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_SPIS_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_SPIS_CONFIG_LOG_LEVEL
#define NRFX_SPIS_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_SPIS_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_SPIS_CONFIG_INFO_COLOR
#define NRFX_SPIS_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_SPIS_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_SPIS_CONFIG_DEBUG_COLOR
#define NRFX_SPIS_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_SPI_ENABLED - nrfx_spi - SPI peripheral driver
//==========================================================
#ifndef NRFX_SPI_ENABLED
#define NRFX_SPI_ENABLED 0
#endif
// <q> NRFX_SPI0_ENABLED - Enable SPI0 instance
#ifndef NRFX_SPI0_ENABLED
#define NRFX_SPI0_ENABLED 0
#endif
// <q> NRFX_SPI1_ENABLED - Enable SPI1 instance
#ifndef NRFX_SPI1_ENABLED
#define NRFX_SPI1_ENABLED 0
#endif
// <o> NRFX_SPI_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_SPI_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_SPI_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_SPI_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_SPI_CONFIG_LOG_ENABLED
#define NRFX_SPI_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_SPI_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_SPI_CONFIG_LOG_LEVEL
#define NRFX_SPI_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_SPI_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_SPI_CONFIG_INFO_COLOR
#define NRFX_SPI_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_SPI_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_SPI_CONFIG_DEBUG_COLOR
#define NRFX_SPI_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <q> NRFX_SYSTICK_ENABLED - nrfx_systick - ARM(R) SysTick driver
#ifndef NRFX_SYSTICK_ENABLED
#define NRFX_SYSTICK_ENABLED 0
#endif
// <e> NRFX_TEMP_ENABLED - nrfx_temp - TEMP peripheral driver
//==========================================================
#ifndef NRFX_TEMP_ENABLED
#define NRFX_TEMP_ENABLED 0
#endif
// <o> NRFX_TEMP_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_TEMP_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_TEMP_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// </e>
// <e> NRFX_TIMER_ENABLED - nrfx_timer - TIMER peripheral driver
//==========================================================
#ifndef NRFX_TIMER_ENABLED
#define NRFX_TIMER_ENABLED 0
#endif
// <q> NRFX_TIMER0_ENABLED - Enable TIMER0 instance
#ifndef NRFX_TIMER0_ENABLED
#define NRFX_TIMER0_ENABLED 0
#endif
// <q> NRFX_TIMER1_ENABLED - Enable TIMER1 instance
#ifndef NRFX_TIMER1_ENABLED
#define NRFX_TIMER1_ENABLED 0
#endif
// <q> NRFX_TIMER2_ENABLED - Enable TIMER2 instance
#ifndef NRFX_TIMER2_ENABLED
#define NRFX_TIMER2_ENABLED 0
#endif
// <q> NRFX_TIMER3_ENABLED - Enable TIMER3 instance
#ifndef NRFX_TIMER3_ENABLED
#define NRFX_TIMER3_ENABLED 0
#endif
// <o> NRFX_TIMER_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_TIMER_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_TIMER_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_TIMER_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_TIMER_CONFIG_LOG_ENABLED
#define NRFX_TIMER_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_TIMER_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_TIMER_CONFIG_LOG_LEVEL
#define NRFX_TIMER_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_TIMER_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TIMER_CONFIG_INFO_COLOR
#define NRFX_TIMER_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_TIMER_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TIMER_CONFIG_DEBUG_COLOR
#define NRFX_TIMER_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_TWIM_ENABLED - nrfx_twim - TWIM peripheral driver
//==========================================================
#ifndef NRFX_TWIM_ENABLED
#define NRFX_TWIM_ENABLED 0
#endif
// <q> NRFX_TWIM0_ENABLED - Enable TWIM0 instance
#ifndef NRFX_TWIM0_ENABLED
#define NRFX_TWIM0_ENABLED 0
#endif
// <q> NRFX_TWIM1_ENABLED - Enable TWIM1 instance
#ifndef NRFX_TWIM1_ENABLED
#define NRFX_TWIM1_ENABLED 0
#endif
// <o> NRFX_TWIM_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_TWIM_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_TWIM_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_TWIM_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_TWIM_CONFIG_LOG_ENABLED
#define NRFX_TWIM_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_TWIM_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_TWIM_CONFIG_LOG_LEVEL
#define NRFX_TWIM_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_TWIM_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TWIM_CONFIG_INFO_COLOR
#define NRFX_TWIM_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_TWIM_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TWIM_CONFIG_DEBUG_COLOR
#define NRFX_TWIM_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_TWIS_ENABLED - nrfx_twis - TWIS peripheral driver
//==========================================================
#ifndef NRFX_TWIS_ENABLED
#define NRFX_TWIS_ENABLED 0
#endif
// <q> NRFX_TWIS0_ENABLED - Enable TWIS0 instance
#ifndef NRFX_TWIS0_ENABLED
#define NRFX_TWIS0_ENABLED 0
#endif
// <q> NRFX_TWIS1_ENABLED - Enable TWIS1 instance
#ifndef NRFX_TWIS1_ENABLED
#define NRFX_TWIS1_ENABLED 0
#endif
// <q> NRFX_TWIS_ASSUME_INIT_AFTER_RESET_ONLY - Assume that any instance would be initialized only once
// <i> Optimization flag. Registers used by TWIS are shared by other peripherals. Normally, during initialization driver tries to clear all registers to known state before doing the initialization itself. This gives initialization safe procedure, no matter when it would be called. If you activate TWIS only once and do never uninitialize it - set this flag to 1 what gives more optimal code.
#ifndef NRFX_TWIS_ASSUME_INIT_AFTER_RESET_ONLY
#define NRFX_TWIS_ASSUME_INIT_AFTER_RESET_ONLY 0
#endif
// <q> NRFX_TWIS_NO_SYNC_MODE - Remove support for synchronous mode
// <i> Synchronous mode would be used in specific situations. And it uses some additional code and data memory to safely process state machine by polling it in status functions. If this functionality is not required it may be disabled to free some resources.
#ifndef NRFX_TWIS_NO_SYNC_MODE
#define NRFX_TWIS_NO_SYNC_MODE 0
#endif
// <o> NRFX_TWIS_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_TWIS_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_TWIS_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_TWIS_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_TWIS_CONFIG_LOG_ENABLED
#define NRFX_TWIS_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_TWIS_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_TWIS_CONFIG_LOG_LEVEL
#define NRFX_TWIS_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_TWIS_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TWIS_CONFIG_INFO_COLOR
#define NRFX_TWIS_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_TWIS_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TWIS_CONFIG_DEBUG_COLOR
#define NRFX_TWIS_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_TWI_ENABLED - nrfx_twi - TWI peripheral driver
//==========================================================
#ifndef NRFX_TWI_ENABLED
#define NRFX_TWI_ENABLED 0
#endif
// <q> NRFX_TWI0_ENABLED - Enable TWI0 instance
#ifndef NRFX_TWI0_ENABLED
#define NRFX_TWI0_ENABLED 0
#endif
// <q> NRFX_TWI1_ENABLED - Enable TWI1 instance
#ifndef NRFX_TWI1_ENABLED
#define NRFX_TWI1_ENABLED 0
#endif
// <o> NRFX_TWI_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_TWI_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_TWI_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_TWI_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_TWI_CONFIG_LOG_ENABLED
#define NRFX_TWI_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_TWI_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_TWI_CONFIG_LOG_LEVEL
#define NRFX_TWI_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_TWI_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TWI_CONFIG_INFO_COLOR
#define NRFX_TWI_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_TWI_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_TWI_CONFIG_DEBUG_COLOR
#define NRFX_TWI_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_UARTE_ENABLED - nrfx_uarte - UARTE peripheral driver
//==========================================================
#ifndef NRFX_UARTE_ENABLED
#define NRFX_UARTE_ENABLED 0
#endif
// <q> NRFX_UARTE0_ENABLED - Enable UARTE0 instance
#ifndef NRFX_UARTE0_ENABLED
#define NRFX_UARTE0_ENABLED 0
#endif
// <o> NRFX_UARTE_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_UARTE_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_UARTE_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_UARTE_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_UARTE_CONFIG_LOG_ENABLED
#define NRFX_UARTE_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_UARTE_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_UARTE_CONFIG_LOG_LEVEL
#define NRFX_UARTE_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_UARTE_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_UARTE_CONFIG_INFO_COLOR
#define NRFX_UARTE_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_UARTE_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_UARTE_CONFIG_DEBUG_COLOR
#define NRFX_UARTE_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_UART_ENABLED - nrfx_uart - UART peripheral driver
//==========================================================
#ifndef NRFX_UART_ENABLED
#define NRFX_UART_ENABLED 0
#endif
// <q> NRFX_UART0_ENABLED - Enable UART0 instance
#ifndef NRFX_UART0_ENABLED
#define NRFX_UART0_ENABLED 0
#endif
// <o> NRFX_UART_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_UART_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_UART_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_UART_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_UART_CONFIG_LOG_ENABLED
#define NRFX_UART_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_UART_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_UART_CONFIG_LOG_LEVEL
#define NRFX_UART_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_UART_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_UART_CONFIG_INFO_COLOR
#define NRFX_UART_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_UART_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_UART_CONFIG_DEBUG_COLOR
#define NRFX_UART_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_USBD_ENABLED - nrfx_usbd - USBD peripheral driver
//==========================================================
#ifndef NRFX_USBD_ENABLED
#define NRFX_USBD_ENABLED 0
#endif
// <o> NRFX_USBD_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_USBD_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_USBD_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <q> USBD_CONFIG_DMASCHEDULER_ISO_BOOST - Give priority to isochronous transfers
// <i> This option gives priority to isochronous transfers.
// <i> Enabling it assures that isochronous transfers are always processed,
// <i> even if multiple other transfers are pending.
// <i> Isochronous endpoints are prioritized before the usbd_dma_scheduler_algorithm
// <i> function is called, so the option is independent of the algorithm chosen.
#ifndef NRFX_USBD_CONFIG_DMASCHEDULER_ISO_BOOST
#define NRFX_USBD_CONFIG_DMASCHEDULER_ISO_BOOST 1
#endif
// <q> USBD_CONFIG_ISO_IN_ZLP - Respond to an IN token on ISO IN endpoint with ZLP when no data is ready
// <i> If set, ISO IN endpoint will respond to an IN token with ZLP when no data is ready to be sent.
// <i> Else, there will be no response.
#ifndef NRFX_USBD_CONFIG_ISO_IN_ZLP
#define NRFX_USBD_CONFIG_ISO_IN_ZLP 0
#endif
// <e> NRFX_USBD_CONFIG_LOG_ENABLED - Enable logging in the module
//==========================================================
#ifndef NRFX_USBD_CONFIG_LOG_ENABLED
#define NRFX_USBD_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_USBD_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_USBD_CONFIG_LOG_LEVEL
#define NRFX_USBD_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_USBD_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_USBD_CONFIG_INFO_COLOR
#define NRFX_USBD_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_USBD_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_USBD_CONFIG_DEBUG_COLOR
#define NRFX_USBD_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// <e> NRFX_WDT_ENABLED - nrfx_wdt - WDT peripheral driver
//==========================================================
#ifndef NRFX_WDT_ENABLED
#define NRFX_WDT_ENABLED 0
#endif
// <q> NRFX_WDT0_ENABLED - Enable WDT0 instance
#ifndef NRFX_WDT0_ENABLED
#define NRFX_WDT0_ENABLED 0
#endif
// <o> NRFX_WDT_CONFIG_NO_IRQ - Remove WDT IRQ handling from WDT driver
// <0=> Include WDT IRQ handling
// <1=> Remove WDT IRQ handling
#ifndef NRFX_WDT_CONFIG_NO_IRQ
#define NRFX_WDT_CONFIG_NO_IRQ 0
#endif
// <o> NRFX_WDT_DEFAULT_CONFIG_IRQ_PRIORITY - Interrupt priority
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef NRFX_WDT_DEFAULT_CONFIG_IRQ_PRIORITY
#define NRFX_WDT_DEFAULT_CONFIG_IRQ_PRIORITY 7
#endif
// <e> NRFX_WDT_CONFIG_LOG_ENABLED - Enables logging in the module.
//==========================================================
#ifndef NRFX_WDT_CONFIG_LOG_ENABLED
#define NRFX_WDT_CONFIG_LOG_ENABLED 0
#endif
// <o> NRFX_WDT_CONFIG_LOG_LEVEL - Default Severity level
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#ifndef NRFX_WDT_CONFIG_LOG_LEVEL
#define NRFX_WDT_CONFIG_LOG_LEVEL 3
#endif
// <o> NRFX_WDT_CONFIG_INFO_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_WDT_CONFIG_INFO_COLOR
#define NRFX_WDT_CONFIG_INFO_COLOR 0
#endif
// <o> NRFX_WDT_CONFIG_DEBUG_COLOR - ANSI escape code prefix.
// <0=> Default
// <1=> Black
// <2=> Red
// <3=> Green
// <4=> Yellow
// <5=> Blue
// <6=> Magenta
// <7=> Cyan
// <8=> White
#ifndef NRFX_WDT_CONFIG_DEBUG_COLOR
#define NRFX_WDT_CONFIG_DEBUG_COLOR 0
#endif
// </e>
// </e>
// </h>
#endif // NRFX_CONFIG_NRF52820_H__
| Vudentz/zephyr | modules/hal_nordic/nrfx/nrfx_config_nrf52820.h | C | apache-2.0 | 35,566 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.waveprotocol.wave.client.editor.content.paragraph;
import static org.waveprotocol.wave.client.editor.Editor.ROOT_HANDLER_REGISTRY;
import com.google.gwt.dom.client.Element;
import junit.framework.TestCase;
import org.waveprotocol.wave.client.editor.Editor;
import org.waveprotocol.wave.client.editor.EditorTestingUtil;
import org.waveprotocol.wave.client.editor.content.CMutableDocument;
import org.waveprotocol.wave.client.editor.content.ContentDocElement;
import org.waveprotocol.wave.client.editor.content.ContentDocument;
import org.waveprotocol.wave.client.editor.content.ContentDocument.PermanentMutationHandler;
import org.waveprotocol.wave.client.editor.content.ContentElement;
import org.waveprotocol.wave.client.editor.content.ContentNode;
import org.waveprotocol.wave.client.editor.content.HasImplNodelets;
import org.waveprotocol.wave.client.editor.content.paragraph.OrderedListRenumberer.LevelNumbers;
import org.waveprotocol.wave.client.editor.content.paragraph.Paragraph.Alignment;
import org.waveprotocol.wave.client.editor.content.paragraph.Paragraph.Direction;
import org.waveprotocol.wave.client.scheduler.FinalTaskRunner;
import org.waveprotocol.wave.client.scheduler.Scheduler.Task;
import org.waveprotocol.wave.model.document.indexed.IndexedDocumentImpl;
import org.waveprotocol.wave.model.document.operation.Attributes;
import org.waveprotocol.wave.model.document.operation.impl.DocInitializationBuilder;
import org.waveprotocol.wave.model.document.util.Point;
import org.waveprotocol.wave.model.schema.conversation.ConversationSchemas;
import org.waveprotocol.wave.model.util.CollectionUtils;
import java.io.PrintStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
/**
* Utilities for testing ordered list numbering.
*
* A bunch of methods refer to lines by "index". This is index into the
* conceptual list of lines, so, 0 for the first line, 1 for the second line,
* and so forth.
*
* @author danilatos@google.com (Daniel Danilatos)
*/
public abstract class RenumbererTestBase extends TestCase {
/**
* Simple enum for representing a style of line, that maps to the type and
* li-style type attributes. Contains a representative sample of the types of
* lines that could possibly have different effects on renumbering.
*/
enum Type {
/** No attributes */
NONE,
/** t=h1 */
HEADING,
/** t=li without listyle */
LIST,
/** t=li with listyle = decimal */
DECIMAL // DECIMAL must come last
}
/**
* Fake renderer that doesn't depend on any DOM stuff.
*/
ParagraphHtmlRenderer renderer = new ParagraphHtmlRenderer() {
@Override
public Element createDomImpl(Renderable element) {
return null;
}
@Override
public void updateRendering(HasImplNodelets element, String type, String listStyle, int indent,
Alignment alignment, Direction direction) {
}
@Override
public void updateListValue(HasImplNodelets element, int value) {
assertEquals(Line.fromParagraph(((ContentElement) element)).getCachedNumberValue(), value);
}
};
/**
* Renumberer being tested.
*/
final OrderedListRenumberer renumberer = new OrderedListRenumberer(renderer);
/**
* Batch render task that will get scheduled.
*/
Task scheduledTask;
/**
* Simple fake take runner that just sets {@link #scheduledTask}
*/
final FinalTaskRunner runner = new FinalTaskRunner() {
@Override public void scheduleFinally(Task task) {
assertTrue(scheduledTask == null || scheduledTask == task);
scheduledTask = task;
}
};
/**
* Same as a regular ParagraphRenderer but tagged with
* {@link PermanentMutationHandler} so that it gets used even in POJO document mode.
*/
static class Renderer extends ParagraphRenderer implements PermanentMutationHandler {
Renderer(ParagraphHtmlRenderer htmlRenderer, OrderedListRenumberer renumberer,
FinalTaskRunner finalRaskRunner) {
super(htmlRenderer, renumberer, finalRaskRunner);
// TODO Auto-generated constructor stub
}
}
ContentDocument content1;
ContentDocument content2;
CMutableDocument doc1;
CMutableDocument doc2;
/**
* Current doc being used. For some tests we render more than one doc to test
* the sharing of a single renumberer between multiple documents.
*/
CMutableDocument doc;
/** Number of lines in test documents */
final int SIZE = 10;
@Override
protected void setUp() {
EditorTestingUtil.setupTestEnvironment();
ContentDocElement.register(ROOT_HANDLER_REGISTRY, ContentDocElement.DEFAULT_TAGNAME);
Paragraph.register(ROOT_HANDLER_REGISTRY);
LineRendering.registerLines(ROOT_HANDLER_REGISTRY);
LineRendering.registerParagraphRenderer(Editor.ROOT_HANDLER_REGISTRY,
new Renderer(renderer, renumberer, runner));
renumberer.updateHtmlEvenWhenNullImplNodelet = true;
DocInitializationBuilder builder = new DocInitializationBuilder();
builder.elementStart("body", Attributes.EMPTY_MAP);
for (int i = 0; i < SIZE; i++) {
builder.elementStart("line", Attributes.EMPTY_MAP).elementEnd();
}
builder.elementEnd();
content1 = new ContentDocument(ConversationSchemas.BLIP_SCHEMA_CONSTRAINTS);
content1.setRegistries(Editor.ROOT_REGISTRIES);
content1.consume(builder.build());
doc1 = content1.getMutableDoc();
content2 = new ContentDocument(ConversationSchemas.BLIP_SCHEMA_CONSTRAINTS);
content2.setRegistries(Editor.ROOT_REGISTRIES);
content2.consume(builder.build());
doc2 = content2.getMutableDoc();
doc = doc1;
runTask();
}
/**
* Performs a randomized test of renumbering logic.
*
* @param testIterations number of test iterations on the same document. Each
* iteration does a substantial amount of work (depending on document
* size).
* @param seed initial random seed.
*/
void doRandomTest(int testIterations, int seed) {
ContentDocument.performExpensiveChecks = false;
ContentDocument.validateLocalOps = false;
IndexedDocumentImpl.performValidation = false;
final int LEVELS = 4;
final int MAX_RUN = 3;
final int ITERS_PER_BATCH_RENDER = 6;
final int DECIMALS_TO_OTHERS = 4; // ratio of decimal bullets to other stuff
final int UPDATE_TO_ADD_REMOVE = 4; // ratio of updates to node adds/removals
assertNull(scheduledTask);
int maxRand = 5;
Random r = new Random(seed);
// For each iteration
for (int iter = 0; iter < testIterations; iter++) {
info("Iter: " + iter);
// Repeat several times for a single batch render, to make sure we are
// able to handle multiple overlapping, redundant updates.
// Times two because we are alternating between two documents to test
// the ability of the renumberer to handle more than one document
// correctly.
int innerIters = (r.nextInt(ITERS_PER_BATCH_RENDER) + 1) * 2;
for (int inner = 0; inner < innerIters; inner++) {
doc = doc1; // (inner % 2 == 0) ? doc1 : doc2;
int totalLines = (doc.size() - 2) / 2;
Line line = getFirstLine();
// Pick a random section of the document to perform a bunch of random
// changes to
int i = 0;
int a = r.nextInt(totalLines);
int b = r.nextInt(totalLines);
int startSection = Math.min(a, b);
int endSection = Math.max(a, b);
while (i < startSection) {
i++;
line = line.next();
}
while (i < endSection && line != null) {
// Pick a random indentation to set
int level = r.nextInt(LEVELS);
// Length of run of elements to update
int length;
// Whether we are making them numbered items or doing something else
boolean decimal;
if (r.nextInt(DECIMALS_TO_OTHERS) == 0) {
// No need making it a long run for non-numbered items.
length = r.nextInt(2);
decimal = false;
} else {
decimal = true;
length = r.nextInt(MAX_RUN - 1) + 1;
}
while (length > 0 && i < endSection && line != null) {
boolean fiftyFifty = i % 2 == 0;
// If we're numbering these lines, then DECIMAL, otherwise choose a
// random other type.
Type type = decimal ? Type.DECIMAL : Type.values()[r.nextInt(Type.values().length - 1)];
// Randomly decide to add/remove, or to update
if (r.nextInt(UPDATE_TO_ADD_REMOVE) == 0) {
int index = index(line);
// Randomly decide to add or remove.
// Include some constraints to ensure the document doesn't get too small or too large.
boolean add = index == 0 ||
totalLines < SIZE / 2 ? true : (totalLines > SIZE * 2 ? false : r.nextBoolean());
if (add) {
line = create(index, type, level, r.nextBoolean());
} else {
line = delete(index);
if (line == null) {
// We just deleted the last line.
continue;
}
}
assert line != null;
} else {
update(index(line), type, level, fiftyFifty);
}
length--;
i++;
line = line.next();
}
}
}
check(iter);
}
}
/**
* @return index for the given line object (0 for the first line, etc).
*/
int index(Line line) {
return (doc.getLocation(line.getLineElement()) - 1) / 2;
}
/**
* @return the line element for the given index.
*/
ContentElement getLineElement(int index) {
return doc.locate(index * 2 + 1).getNodeAfter().asElement();
}
/**
* @return the first line object
*/
Line getFirstLine() {
return Line.getFirstLineOfContainer(doc.getDocumentElement().getFirstChild().asElement());
}
/**
* Creates and returns a new line.
*
* @param createAndUpdateSeparately if true, creates a line, then sets the
* attributes as a separate operation. Otherwise, sets them all at
* once. We want to test both scenarios.
*/
Line create(int index, Type type, int indent, boolean createAndUpdateSeparately) {
// info("Creating @" + index + " " +
// type + " " + indent + " " + createAndUpdateSeparately);
Point<ContentNode> loc = doc.locate(index * 2 + 1);
Line l;
if (createAndUpdateSeparately) {
l = Line.fromLineElement(
doc.createElement(loc, "line", Attributes.EMPTY_MAP));
update(index, type, indent);
} else {
l = Line.fromLineElement(
doc.createElement(loc, "line", attributes(type, indent, false, true)));
}
assertNotNull(l);
return l;
}
/**
* Deletes the line at the specified index.
*/
Line delete(int index) {
// info("Deleting @" + index);
assert index != 0 : "Code doesn't (yet) support killing the initial line";
ContentElement e = getLineElement(index);
Line line = Line.fromLineElement(e).next();
doc.deleteNode(e);
return line;
}
/**
* Updates the attributes of the line at the specified index.
*/
void update(int index, Type type, int indent) {
update(index, type, indent, true);
}
/**
* Updates the attributes of the line at the specified index.
*
* @param alwaysSetRedundant if true, always set the listyle attribute even if it
* is not necessary. For example, if the listyle attribute was
* "decimal", but the type is "HEADING", the listyle attribute should
* normally be ignored and has no meaning. It won't make a difference
* if it is set or not. We want to test both scenarios.
*/
void update(int index, Type type, int indent, boolean alwaysSetRedundant) {
ContentElement e = getLineElement(index);
// info("Making @" + ((doc.getLocation(e) - 1)/2) + " " +
// type + " " + indent + " " + alwaysSetStyle);
Map<String, String> updates = attributes(type, indent, alwaysSetRedundant, false);
for (Map.Entry<String, String> pair : updates.entrySet()) {
doc.setElementAttribute(e, pair.getKey(), pair.getValue());
}
}
/**
* Creates the map of element attributes for the given parameters.
*
* @param alwaysSetStyle see {@link #update(int, Type, int, boolean)}
* @param noNulls eliminate keys that would have null values. We want nulls
* for updates, but no nulls for creates.
*/
Map<String, String> attributes(Type type, int indent, boolean alwaysSetStyle, boolean noNulls) {
Map<String, String> updates = new HashMap<String, String>();
String levelStr = (indent > 0 ? "" + indent : null);
maybePut(updates, Paragraph.INDENT_ATTR, levelStr, noNulls);
String t = null;
String lt = null;
switch (type) {
case HEADING: t = "h1"; break;
case LIST: t = Paragraph.LIST_TYPE; break;
case DECIMAL: t = Paragraph.LIST_TYPE; lt = Paragraph.LIST_STYLE_DECIMAL; break;
}
maybePut(updates, Paragraph.SUBTYPE_ATTR, t, noNulls);
if (alwaysSetStyle || type == Type.LIST || type == Type.DECIMAL) {
maybePut(updates, Paragraph.LIST_STYLE_ATTR, lt, noNulls);
}
return updates;
}
void maybePut(Map<String, String> map, String key, String val, boolean noNull) {
if (val != null || !noNull) {
map.put(key, val);
}
}
/**
* Check the current line numbering is consistent with the document state.
*/
void check() {
check(-1);
}
/**
* Check the current line numbering is consistent with the document state.
*
* @param iter current test iteration, for debugging/logging purposes.
*/
void check(int iter) {
runTask();
// if (iter >= 1740) {
// info("\n\nCHECKING\n");
// printInfo(null, "XX");
// info("---");
// }
LevelNumbers numbers = new LevelNumbers(0, 1);
Line line = getFirstLine();
while (line != null) {
int indent = line.getIndent();
numbers.setLevel(indent);
if (line.isDecimalListItem()) {
int num = numbers.getNumberAndIncrement();
assertFalse(line.getCachedNumberValue() == Line.DIRTY);
if (num != line.getCachedNumberValue()) {
String msg = "Expected: " + num + ", got: " + line.getCachedNumberValue();
printInfo(line, msg);
fail("Wrong number on iteration " + iter + ". " + msg +
". See stdout & stderr for debug details");
}
} else {
numbers.setNumber(1);
}
line = line.next();
}
// info("^^^");
}
void runTask() {
if (scheduledTask != null) {
scheduledTask.execute();
}
scheduledTask = null;
}
void printInfo(Line badLine, String msg) {
Line line = getFirstLine();
PrintStream stream = System.out;
int i = 0;
while (line != null) {
int indent = line.getIndent();
stream.println(
CollectionUtils.repeat('.', line.getIndent()) +
line.toString() +
" indent:" + indent +
CollectionUtils.repeat(' ', 20) + line.getCachedNumberValue() + " (" + i + ")");
if (line == badLine) {
stream.println("\n\n\n");
stream = System.err;
stream.println(msg);
stream.println(">>>>>>>>>>>>>>>>>>>>>>>>> DIED ON LINE ABOVE <<<<<<<<<<<<<<<<<<\n\n");
}
line = line.next();
i++;
}
}
void info(Object msg) {
// Uncomment for debugging
// System.out.println(msg == null ? "null" : msg.toString());
}
}
| vega113/incubator-wave | wave/src/test/java/org/waveprotocol/wave/client/editor/content/paragraph/RenumbererTestBase.java | Java | apache-2.0 | 16,472 |
FROM java:openjdk-8-jre
MAINTAINER Martijn Koster "mak-docker@greenhills.co.uk"
ENV SOLR_VERSION 5.2.1
ENV SOLR solr-$SOLR_VERSION
ENV SOLR_USER solr
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
apt-get -y install lsof && \
groupadd -r $SOLR_USER && \
useradd -r -g $SOLR_USER $SOLR_USER && \
mkdir -p /opt && \
wget -nv --output-document=/opt/$SOLR.tgz http://www.us.apache.org/dist/lucene/solr/$SOLR_VERSION/$SOLR.tgz && \
tar -C /opt --extract --file /opt/$SOLR.tgz && \
rm /opt/$SOLR.tgz && \
ln -s /opt/$SOLR /opt/solr && \
chown -R $SOLR_USER:$SOLR_USER /opt/solr /opt/$SOLR
EXPOSE 8983
WORKDIR /opt/solr
USER $SOLR_USER
CMD ["/bin/bash", "-c", "/opt/solr/bin/solr -f"]
| JStrittmatter/docker-solr | 5.2/Dockerfile | Dockerfile | apache-2.0 | 726 |
SELECT * FROM tbl1_ts;
\c db_ts;
\l+;
SELECT * FROM tbl2_ts;
select spcname from pg_tablespace where spcname='ts';
| Chibin/gpdb | gpMgmt/test/behave/mgmt_utils/steps/data/gptransfer_verify/filespace_tablespace_verify.sql | SQL | apache-2.0 | 119 |
from urlparse import urlparse
from api_tests.nodes.views.test_node_contributors_list import NodeCRUDTestCase
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from tests.base import fake
from osf_tests.factories import (
ProjectFactory,
CommentFactory,
RegistrationFactory,
WithdrawnRegistrationFactory,
)
class TestWithdrawnRegistrations(NodeCRUDTestCase):
def setUp(self):
super(TestWithdrawnRegistrations, self).setUp()
self.registration = RegistrationFactory(creator=self.user, project=self.public_project)
self.withdrawn_registration = WithdrawnRegistrationFactory(registration=self.registration, user=self.registration.creator)
self.public_pointer_project = ProjectFactory(is_public=True)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.withdrawn_url = '/{}registrations/{}/?version=2.2'.format(API_BASE, self.registration._id)
self.withdrawn_registration.justification = 'We made a major error.'
self.withdrawn_registration.save()
def test_can_access_withdrawn_contributors(self):
url = '/{}registrations/{}/contributors/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
def test_cannot_access_withdrawn_children(self):
url = '/{}registrations/{}/children/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_comments(self):
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_comment = CommentFactory(node=self.public_project, user=self.user)
url = '/{}registrations/{}/comments/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_can_access_withdrawn_contributor_detail(self):
url = '/{}registrations/{}/contributors/{}/'.format(API_BASE, self.registration._id, self.user._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
def test_cannot_return_a_withdrawn_registration_at_node_detail_endpoint(self):
url = '/{}nodes/{}/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_cannot_delete_a_withdrawn_registration(self):
url = '/{}registrations/{}/'.format(API_BASE, self.registration._id)
res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
self.registration.reload()
assert_equal(res.status_code, 405)
def test_cannot_access_withdrawn_files_list(self):
url = '/{}registrations/{}/files/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_node_links_detail(self):
url = '/{}registrations/{}/node_links/{}/'.format(API_BASE, self.registration._id, self.public_pointer._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_node_links_list(self):
url = '/{}registrations/{}/node_links/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_node_logs(self):
self.public_project = ProjectFactory(is_public=True, creator=self.user)
url = '/{}registrations/{}/logs/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_access_withdrawn_registrations_list(self):
self.registration.save()
url = '/{}registrations/{}/registrations/'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_withdrawn_registrations_display_limited_fields(self):
registration = self.registration
res = self.app.get(self.withdrawn_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
attributes = res.json['data']['attributes']
registration.reload()
expected_attributes = {
'title': registration.title,
'description': registration.description,
'date_created': registration.date_created.isoformat().replace('+00:00', 'Z'),
'date_registered': registration.registered_date.isoformat().replace('+00:00', 'Z'),
'date_modified': registration.date_modified.isoformat().replace('+00:00', 'Z'),
'withdrawal_justification': registration.retraction.justification,
'public': None,
'category': None,
'registration': True,
'fork': None,
'collection': None,
'tags': None,
'withdrawn': True,
'pending_withdrawal': None,
'pending_registration_approval': None,
'pending_embargo_approval': None,
'embargo_end_date': None,
'registered_meta': None,
'current_user_permissions': None,
'registration_supplement': registration.registered_schema.first().name
}
for attribute in expected_attributes:
assert_equal(expected_attributes[attribute], attributes[attribute])
contributors = urlparse(res.json['data']['relationships']['contributors']['links']['related']['href']).path
assert_equal(contributors, '/{}registrations/{}/contributors/'.format(API_BASE, registration._id))
assert_not_in('children', res.json['data']['relationships'])
assert_not_in('comments', res.json['data']['relationships'])
assert_not_in('node_links', res.json['data']['relationships'])
assert_not_in('registrations', res.json['data']['relationships'])
assert_not_in('parent', res.json['data']['relationships'])
assert_not_in('forked_from', res.json['data']['relationships'])
assert_not_in('files', res.json['data']['relationships'])
assert_not_in('logs', res.json['data']['relationships'])
assert_not_in('registered_by', res.json['data']['relationships'])
assert_not_in('registered_from', res.json['data']['relationships'])
assert_not_in('root', res.json['data']['relationships'])
def test_field_specific_related_counts_ignored_if_hidden_field_on_withdrawn_registration(self):
url = '/{}registrations/{}/?related_counts=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_not_in('children', res.json['data']['relationships'])
assert_in('contributors', res.json['data']['relationships'])
def test_field_specific_related_counts_retrieved_if_visible_field_on_withdrawn_registration(self):
url = '/{}registrations/{}/?related_counts=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['relationships']['contributors']['links']['related']['meta']['count'], 1)
| monikagrabowska/osf.io | api_tests/registrations/views/test_withdrawn_registrations.py | Python | apache-2.0 | 7,865 |
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.server.initialization.jetty;
import com.google.inject.Binder;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.Module;
import com.google.inject.Provides;
import com.metamx.common.lifecycle.Lifecycle;
import com.metamx.common.logger.Logger;
import io.druid.guice.JsonConfigProvider;
import io.druid.guice.LazySingleton;
import io.druid.guice.LifecycleModule;
import io.druid.guice.annotations.RemoteChatHandler;
import io.druid.guice.annotations.Self;
import io.druid.server.DruidNode;
import io.druid.server.initialization.ServerConfig;
import org.eclipse.jetty.server.Server;
import java.util.Properties;
/**
*/
public class ChatHandlerServerModule implements Module
{
private static final Logger log = new Logger(ChatHandlerServerModule.class);
@Inject
private Properties properties;
@Override
public void configure(Binder binder)
{
/** If "druid.indexer.task.chathandler.port" property is set then we assume that a
* separate Jetty Server with it's own {@link ServerConfig} is required for ingestion apart from the query server
* otherwise we bind {@link DruidNode} annotated with {@link RemoteChatHandler} to {@literal @}{@link Self} {@link DruidNode}
* so that same Jetty Server is used for querying as well as ingestion
*/
if (properties.containsKey("druid.indexer.task.chathandler.port")) {
log.info("Spawning separate ingestion server at port [%s]", properties.get("druid.indexer.task.chathandler.port"));
JsonConfigProvider.bind(binder, "druid.indexer.task.chathandler", DruidNode.class, RemoteChatHandler.class);
JsonConfigProvider.bind(binder, "druid.indexer.server.chathandler.http", ServerConfig.class, RemoteChatHandler.class);
LifecycleModule.register(binder, Server.class, RemoteChatHandler.class);
} else {
binder.bind(DruidNode.class).annotatedWith(RemoteChatHandler.class).to(Key.get(DruidNode.class, Self.class));
binder.bind(ServerConfig.class).annotatedWith(RemoteChatHandler.class).to(Key.get(ServerConfig.class));
}
}
@Provides
@LazySingleton
@RemoteChatHandler
public Server getServer(Injector injector, Lifecycle lifecycle, @RemoteChatHandler DruidNode node, @RemoteChatHandler ServerConfig config)
{
final Server server = JettyServerModule.makeJettyServer(node, config);
JettyServerModule.initializeServer(injector, lifecycle, server);
return server;
}
}
| fjy/druid | server/src/main/java/io/druid/server/initialization/jetty/ChatHandlerServerModule.java | Java | apache-2.0 | 3,297 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.cache.lucene;
import static org.apache.geode.cache.lucene.test.LuceneTestUtilities.INDEX_NAME;
import static org.apache.geode.cache.lucene.test.LuceneTestUtilities.REGION_NAME;
import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.List;
import java.util.stream.IntStream;
import junitparams.JUnitParamsRunner;
import junitparams.Parameters;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.internal.cache.GemFireCacheImpl;
import org.apache.geode.internal.cache.PartitionedRegion;
import org.apache.geode.internal.cache.control.HeapMemoryMonitor;
import org.apache.geode.test.dunit.SerializableRunnableIF;
import org.apache.geode.test.junit.categories.LuceneTest;
/**
 * Distributed tests for the interaction between Lucene indexes and region
 * eviction:
 * <ul>
 *   <li>regions evicting with action LOCAL_DESTROY must refuse to create a
 *       Lucene index (entries would vanish from the index's view);</li>
 *   <li>regions evicting with action OVERFLOW_TO_DISK must still answer
 *       Lucene queries for overflowed entries.</li>
 * </ul>
 */
@Category({LuceneTest.class})
@RunWith(JUnitParamsRunner.class)
public class EvictionDUnitTest extends LuceneQueriesAccessorBase {
  // Eviction threshold installed before the fake heap notification is raised.
  protected static final float INITIAL_EVICTION_HEAP_PERCENTAGE = 50.9f;
  // Threshold used while faking a memory event; chosen so the fake usage below
  // exceeds it and eviction kicks in.
  protected static final float EVICTION_HEAP_PERCENTAGE_FAKE_NOTIFICATION = 85.0f;
  // Fake "max heap" handed to the heap monitor under test.
  protected static final int TEST_MAX_MEMORY = 100;
  // Fake "used memory" reported to the monitor; 90/100 > 85% => eviction.
  protected static final int MEMORY_USED_FAKE_NOTIFICATION = 90;

  /** JUnitParams source: region types that evict by overflowing to disk. */
  protected RegionTestableType[] getPartitionRedundantOverflowEvictionRegionType() {
    return new RegionTestableType[] {
        RegionTestableType.PARTITION_PERSISTENT_REDUNDANT_EVICTION_OVERFLOW};
  }

  /** JUnitParams source: region types that evict with action LOCAL_DESTROY. */
  protected RegionTestableType[] getPartitionRedundantLocalDestroyEvictionRegionType() {
    return new RegionTestableType[] {RegionTestableType.PARTITION_REDUNDANT_EVICTION_LOCAL_DESTROY,
        RegionTestableType.PARTITION_REDUNDANT_PERSISTENT_EVICTION_LOCAL_DESTROY,
        RegionTestableType.PARTITION_EVICTION_LOCAL_DESTROY,
        RegionTestableType.PARTITION_PERSISTENT_EVICTION_LOCAL_DESTROY};
  }

  /**
   * Creating a Lucene index on a region whose eviction action is LOCAL_DESTROY
   * must fail with UnsupportedOperationException, and the region must not be
   * left behind half-created.
   */
  @Test
  @Parameters(method = "getPartitionRedundantLocalDestroyEvictionRegionType")
  public void regionWithEvictionWithLocalDestroyMustNotbeAbleToCreateLuceneIndexes(
      RegionTestableType regionTestType) {
    SerializableRunnableIF createIndex = getSerializableRunnableIFCreateIndex();
    dataStore1.invoke(() -> {
      try {
        initDataStore(createIndex, regionTestType);
      } catch (UnsupportedOperationException e) {
        assertEquals(
            "Lucene indexes on regions with eviction and action local destroy are not supported",
            e.getMessage());
        // Region creation must have been rolled back.
        assertNull(getCache().getRegion(REGION_NAME));
      }
    });
  }

  /** Serializable index-creation step shipped to the remote VM. */
  private SerializableRunnableIF getSerializableRunnableIFCreateIndex() {
    return () -> {
      LuceneService luceneService = LuceneServiceProvider.get(getCache());
      luceneService.createIndexFactory().setFields("text").create(INDEX_NAME, REGION_NAME);
    };
  }

  /**
   * With overflow eviction, entries pushed to disk must remain queryable:
   * populate the region, force eviction via a fake heap notification on the
   * data store, then verify a Lucene query from the accessor still finds all
   * NUM_BUCKETS entries.
   */
  @Test
  @Parameters(method = "getPartitionRedundantOverflowEvictionRegionType")
  public void regionsWithEvictionWithOverflowMustBeAbleToCreateLuceneIndexes(
      RegionTestableType regionTestType) {
    SerializableRunnableIF createIndex = () -> {
      LuceneService luceneService = LuceneServiceProvider.get(getCache());
      luceneService.createIndexFactory().setFields("text").create(INDEX_NAME, REGION_NAME);
    };
    dataStore1.invoke(() -> initDataStore(createIndex, regionTestType));
    accessor.invoke(() -> initDataStore(createIndex, regionTestType));
    accessor.invoke(() -> {
      Cache cache = getCache();
      Region region = cache.getRegion(REGION_NAME);
      IntStream.range(0, NUM_BUCKETS).forEach(i -> region.put(i, new TestObject("hello world")));
    });
    // Wait for async Lucene indexing to drain before forcing eviction.
    waitForFlushBeforeExecuteTextSearch(accessor, 60000);
    dataStore1.invoke(() -> {
      try {
        getCache().getResourceManager().setEvictionHeapPercentage(INITIAL_EVICTION_HEAP_PERCENTAGE);
        final PartitionedRegion partitionedRegion = (PartitionedRegion) getRootRegion(REGION_NAME);
        raiseFakeNotification();
        // Eviction is asynchronous; wait until at least one entry overflowed.
        await().untilAsserted(() -> {
          assertTrue(partitionedRegion.getDiskRegionStats().getNumOverflowOnDisk() > 0);
        });
      } finally {
        cleanUpAfterFakeNotification();
      }
    });
    accessor.invoke(() -> {
      LuceneService luceneService = LuceneServiceProvider.get(getCache());
      LuceneQuery<Integer, TestObject> query = luceneService.createLuceneQueryFactory()
          .setLimit(100).create(INDEX_NAME, REGION_NAME, "world", "text");
      List<LuceneResultStruct<Integer, TestObject>> resultList = query.findResults();
      // Every entry must still be found even though some live only on disk.
      assertEquals(NUM_BUCKETS, resultList.size());
    });
  }

  /** Fake a heap-usage event high enough to trigger eviction on this member. */
  protected void raiseFakeNotification() {
    ((GemFireCacheImpl) getCache()).getHeapEvictor().setTestAbortAfterLoopCount(1);
    HeapMemoryMonitor.setTestDisableMemoryUpdates(true);
    getCache().getResourceManager()
        .setEvictionHeapPercentage(EVICTION_HEAP_PERCENTAGE_FAKE_NOTIFICATION);
    HeapMemoryMonitor heapMemoryMonitor =
        ((GemFireCacheImpl) getCache()).getInternalResourceManager().getHeapMonitor();
    heapMemoryMonitor.setTestMaxMemoryBytes(TEST_MAX_MEMORY);
    heapMemoryMonitor.updateStateAndSendEvent(MEMORY_USED_FAKE_NOTIFICATION, "test");
  }

  /** Undo the test hooks installed by {@link #raiseFakeNotification()}. */
  protected void cleanUpAfterFakeNotification() {
    ((GemFireCacheImpl) getCache()).getHeapEvictor().setTestAbortAfterLoopCount(Integer.MAX_VALUE);
    HeapMemoryMonitor.setTestDisableMemoryUpdates(false);
  }
}
| masaki-yamakawa/geode | geode-lucene/src/distributedTest/java/org/apache/geode/cache/lucene/EvictionDUnitTest.java | Java | apache-2.0 | 6,321 |
{#
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#}
{# Base page layout shared by all views: pulls in Flask-AppBuilder's init
   template and layers app-specific CSS, navbar, flash messages and footer. #}
{% extends 'appbuilder/init.html' %}
{% import 'appbuilder/baselib.html' as baselib %}

{% block head_css %}
  {{ super() }}
  {# App styles layered on top of FAB's defaults #}
  <link href="{{ url_for_asset('main.css') }}" rel="stylesheet">
  {% if not appbuilder.app_theme %}
    {# airflowDefaultTheme.css file contains the styles from local bootstrap-theme.css #}
    <link href="{{ url_for_asset('airflowDefaultTheme.css') }}" rel="stylesheet">
  {% endif %}
  <link rel="icon" type="image/png" href="{{ url_for('static', filename='pin_30.png') }}">
{% endblock %}

{% block body %}
  {% include 'appbuilder/general/confirm.html' %}
  {% include 'appbuilder/general/alert.html' %}
  {% block navbar %}
    <header class="top" role="header">
      {% include 'appbuilder/navbar.html' %}
    </header>
  {% endblock %}
  <div class="container">
    <div class="row">
      {# Flash/confirm messages render above the page content #}
      {% block messages %}
        {% include 'appbuilder/flash.html' %}
      {% endblock %}
      {% block content %}
      {% endblock %}
    </div>
  </div>
  {% block footer %}
    <footer>
      <div class="img-rounded nav-fixed-bottom">
        <div class="container">
          {% include 'appbuilder/footer.html' %}
        </div>
      </div>
    </footer>
  {% endblock %}
{% endblock %}

{% block tail_js %}
  {{ super() }}
  <script type="text/javascript">
    // below variables are used in clock.js
    var hostName = '{{ hostname }}';
    var csrfToken = '{{ csrf_token() }}';
  </script>
  <script src="{{ url_for_asset('clock.js') }}" type="text/javascript"></script>
{% endblock %}
| edgarRd/incubator-airflow | airflow/www_rbac/templates/appbuilder/baselayout.html | HTML | apache-2.0 | 2,373 |
// Use the host environment's "output" function if one is defined (browser
// harness); otherwise fall back to the JS shell's print() (Rhino/SpiderMonkey).
if (!this["output"]) output = print;
// Global pass/fail counters, updated by is() and reported by complete().
var passed = 0;
var failed = 0;
/**
 * Loose-equality assertion: logs OK/FAIL with `msg` and bumps the global
 * `passed`/`failed` counters. Expected/actual values are echoed on failure.
 */
function is(got, expected, msg) {
  if (got != expected) {
    output("FAIL: " + msg);
    output("Expected |" + expected + "|");
    output(" Got |" + got + "|");
    ++failed;
    return;
  }
  output("OK: " + msg);
  ++passed;
}
/** Print the final summary using the global pass/fail counters. */
function complete() {
  var lines = ["\nTests Complete",
               "--------------",
               "Passed: " + passed,
               "Failed: " + failed,
               "\n"];
  for (var i = 0; i < lines.length; ++i) {
    output(lines[i]);
  }
}
//
// test our internal functions
//
// Unit tests for the library's isObjectOrArray() type predicate: containers
// are detected, primitives (including null/undefined) are not.
function test_isObjectOrArray() {
  is(isObjectOrArray({}), true, "isObjectOrArray detects an object");
  is(isObjectOrArray([]), true, "isObjectOrArray detects an array");
  is(isObjectOrArray("foo"), false, "isObjectOrArray shouldn't detect string");
  is(isObjectOrArray(4), false, "isObjectOrArray shouldn't detect integer");
  is(isObjectOrArray(5.5), false, "isObjectOrArray shouldn't detect float");
  is(isObjectOrArray(undefined), false,
     "isObjectOrArray shouldn't detect undefined");
  is(isObjectOrArray(null), false, "isObjectOrArray shouldn't detect null");
}

// Unit tests for identifySuspects(): keys whose values may have diverged
// between a snapshot and a replica are reported as "suspects".
function test_identifySuspects() {
  var suspects = identifySuspects({"a": 1, "b": 2, "c": 3, "d": "hmm"},
                                  {"a": 1, "b": 2, "c": 3, "d": "hmm"});
  is(suspects.length, 0, "shouldn't be any suspects for matching primitives");
  suspects = identifySuspects({"a": 1, "b": 2, "c": 3, "d": "hmm"},
                              {"a": 1, "b": 3, "c": 3, "d": "hmm"});
  is(suspects.length, 1, "should detect edited primitives");
  // NOTE(review): object-valued keys appear to always be flagged as suspects,
  // even when both sides hold an (empty) object — hence length 1 here.
  suspects = identifySuspects({"a": 1, "b": {}, "c": 3, "d": "hmm"},
                              {"a": 1, "b": {}, "c": 3, "d": "hmm"});
  is(suspects.length, 1, "should detect matching objects");
  suspects = identifySuspects({"a": 1, "b": 2, "c": 3, "d": "hmm"},
                              {"a": "1", "b": 2, "c": 3, "d": "hmm"});
  is(suspects.length, 1, "should detect primitive type change");
  suspects = identifySuspects({"a": 1, "b": 2, "c": 3, "d": "hmmm"},
                              {"a": 1, "b": 2, "c": 3, "d": "hmm"});
  is(suspects.length, 1, "should detect string edit");
  // A renamed key counts as two suspects: one removed, one added.
  suspects = identifySuspects({"xxx": 1, "b": 2, "c": 3, "d": "hmm"},
                              {"yyy": 1, "b": 2, "c": 3, "d": "hmm"});
  is(suspects.length, 2, "should detect differing keys");
  // Object keys are always strings, so "0" and 0 denote the same key.
  suspects = identifySuspects({"0": 1, "b": 2, "c": 3, "d": "hmm"},
                              {0: 1, "b": 2, "c": 3, "d": "hmm"});
  is(suspects.length, 0, "should not detect key type changes");
}
// Unit tests for created(): one "create" command per node, in preorder
// (parents before children) so parents exist when children are applied.
function test_created() {
  var record = created(["foo"], {});
  is(record.length, 1, "created empty object is length 1");
  record = created(["foo"], 1);
  is(record.length, 1, "created primitive is length 1");
  record = created(["foo"], {"bar":"baz"});
  is(record.length, 2, "created populated object is length 2");
  record = created(["foo"], {"bar":"baz", "qux":"baz"});
  is(record.length, 3, "created populated object is length 3");
  is(record[0].action, "create", "create action is correct");
  is(record[0].path.length, 1, "creation paths in preorder");
  is(record[1].path.length, 2, "creation paths in preorder");
  is(record[1].value, "baz", "create has correct value");
  is(record[2].path.length, 2, "creation paths in preorder");
  is(record[2].value, "baz", "create has correct value");
}

// Unit tests for removed(): one "remove" command per node, in postorder
// (children before parents) so removals never orphan live children.
function test_removed() {
  var record = removed(["foo"], {});
  is(record.length, 1, "removed empty object is length 1");
  record = removed(["foo"], 1);
  is(record.length, 1, "removed primitive is length 1");
  record = removed(["foo"], {"bar":"baz"});
  is(record.length, 2, "removed populated object is length 2");
  record = removed(["foo"], {"bar":"baz", "qux":"baz"});
  is(record.length, 3, "removed populated object is length 3");
  is(record[0].action, "remove", "remove action is correct");
  is(record[0].path.length, 2, "removal paths in postorder");
  is(record[1].path.length, 2, "removal paths in postorder");
  is(record[2].path.length, 1, "removal paths in postorder");
}

// Unit tests for edited(): a primitive<->object change expands into an edit
// plus the creations/removals needed for the object side.
function test_edited() {
  var record = edited(["foo"], 5, 3);
  is(record.length, 1, "primitive edit is length 1");
  is(record[0].action, "edit", "edit action is correct");
  is(record[0].value, 3, "edit has correct value");
  is(record[0].path.length, 1, "edit path is correct");
  record = edited(["foo"], {"bar": "baz"}, 3);
  is(record.length, 2, "obj2primitive contains removals");
  is(record[0].action, "edit", "edits precede removals");
  is(record[1].action, "remove", "remove action is there");
  record = edited(["foo"], 3, {"bar": "baz"});
  is(record.length, 2, "primitive2object contains creations");
  is(record[0].action, "edit", "edits precede creations");
  is(record[1].action, "create", "create action is there");
};
// A snapshot, followed by four non-conflicting replicas
var snap = { "foo": 1, "bar": 1, "baz": 1, "qux": 1 }
var replica1 = { "foo": 0, "bar": 1, "baz": 1, "qux": 1 }
var replica2 = { "foo": 1, "bar": 0, "baz": 1, "qux": 1 }
var replica3 = { "foo": 1, "bar": 1, "baz": 0, "qux": 1 }
var replica4 = { "foo": 1, "bar": 1, "baz": 1, "qux": 0 }
// Each replica above differs from the snapshot by exactly one edit; verify
// detectUpdates() reports exactly that edit for each of them.
function test_detectUpdates() {
  function checkReplica(name, replica) {
    var updateList = detectUpdates(snap, replica);
    is(updateList.length, 1, name + " has correct number of updates");
    is(updateList[0].action, "edit", name + " should have an edit");
    is(updateList[0].value, 0, name + " should have value 0");
  }
  checkReplica("replica1", replica1);
  checkReplica("replica2", replica2);
  checkReplica("replica3", replica3);
  checkReplica("replica4", replica4);
}

// TODO: no coverage for orderUpdates() yet — intentionally empty placeholder.
function test_orderUpdates() {
}

// Unit tests for the Command value type: equality semantics and the
// parent/child path relationship.
function test_Command() {
  var x = new Command("edit", ["foo"], 5);
  var y = new Command("edit", ["foo"], 5);
  is(x instanceof Command, true, "instanceof");
  is(x.equals(y), true, "equals method of Command works");
  x.value = "5";
  y.value = 5;
  is(x.equals(y), false, "equals method of Command detects type changes");
  // Object/array values compare by kind, not identity.
  x.value = {};
  y.value = {};
  is(x.equals(y), true, "equals method of Command matches {} values");
  x.value = [];
  y.value = [];
  is(x.equals(y), true, "equals method of Command matches [] values");
  x.value = "5";
  y.value = [];
  is(x.equals(y), false, "equals method of Command detects obj vs. primitive");
  var z = new Command("edit", ["foo","bar"], 5);
  is(x.isParentOf(z), true, "check for parents");
}

// Unit tests for commandInList(): membership by structural equality.
function test_commandInList() {
  var x = new Command("edit", ["foo"], 5);
  var y = new Command("remove", ["bar"]);
  var commandList = [new Command("edit", ["foo"], 5),
                     new Command("remove", ["bar"])];
  is(commandInList(x, commandList), true, "commandInList matches identical");
  is(commandInList(y, commandList), true, "commandInList matches removes");
  is(commandInList(new Command("edit", ["foo"], "bar"),
                   [new Command("edit", ["foo"], "bar")]),
     true, "edits match");
  is(commandInList(new Command("edit", ["foo"], 6), commandList), false,
     "commandInList fails differing values");
}

// Unit tests for doesConflict(): same-path different-value edits conflict,
// and an edit beneath a removed parent conflicts in either order.
function test_doesConflict() {
  var x = new Command("edit", ["foo"], 1);
  var y = new Command("edit", ["foo"], 2);
  is(doesConflict(x, y), true, "doesConflict finds identical paths with different values");
  y.path = ["bar"];
  is(doesConflict(x, y), false, "doesConflict ignores mismatched paths");
  var a = new Command("remove", ["foo"]);
  var b = new Command("edit", ["foo","bar"], 42);
  is(doesConflict(a, b), true, "doesConflict catches edit under remove");
  is(doesConflict(b, a), true, "doesConflict catches edit under remove");
}

// Unit test for applyCommand(): an edit command mutates the target in place.
function test_applyCommand() {
  var c = new Command("edit", ["foo"], "bar");
  var target = {foo: "qux"};
  applyCommand(target, c);
  is(target.foo, "bar", "applying edit commands works");
}
// End-to-end merge of the four non-conflicting replicas: after applying each
// replica's propagation list, all four must converge to all-zero fields.
function test_reconcileWithNoConflicts() {
  var syncdata = reconcile([detectUpdates(snap, replica1),
                            detectUpdates(snap, replica2),
                            detectUpdates(snap, replica3),
                            detectUpdates(snap, replica4)]);
  is(syncdata.propagations.length, 4, "correct number of propogation arrays");
  // Each replica needs the three edits it didn't make itself.
  is(syncdata.propagations[0].length, 3, "correct number of commands to exec");
  is(syncdata.propagations[0][0].action, "edit", "is it an edit?");
  applyCommands(replica1, syncdata.propagations[0]);
  applyCommands(replica2, syncdata.propagations[1]);
  applyCommands(replica3, syncdata.propagations[2]);
  applyCommands(replica4, syncdata.propagations[3]);
  forEach([replica1, replica2, replica3, replica4],
    function (replica) {
      // Fix: the original checked replica.baz with the "replica.bar" message
      // and vice versa; each field is now checked with its own message.
      is(replica.foo, 0, "replica.foo is zero");
      is(replica.bar, 0, "replica.bar is zero");
      is(replica.baz, 0, "replica.baz is zero");
      is(replica.qux, 0, "replica.qux is zero");
    }
  );
}
/**
 * Convert a slash-delimited path string (e.g. "/a/b") into its component
 * array (["a", "b"]); the root path "/" maps to the empty array.
 */
function pathToArray(path) {
  if (path == "/") {
    return [];
  }
  var segments = path.split("/");
  return segments.slice(1);
}
/**
 * Find the first command in `array` whose path, rendered as a slash-delimited
 * string, equals `key`; returns undefined when there is no match.
 */
function commandFromArray(key, array) {
  for (var i = 0; i < array.length; ++i) {
    var rendered = "/" + array[i].path.join("/");
    if (rendered == key) {
      return array[i];
    }
  }
  return undefined;
}
// Assert that `list` contains a command at `path` with the expected action
// and value; object/array values are compared by constructor only.
function checkUpdate(list, path, expectAction, expectValue) {
  var x = commandFromArray(path, list);
  is(x.action, expectAction, path + " action");
  if (isObjectOrArray(expectValue))
    is(x.value.constructor, expectValue.constructor, path + " value");
  else
    is(x.value, expectValue, path + " value");
  is(arrayEqual(x.path, pathToArray(path)), true, path + " path");
}
// Assert that `path` resolves to the same value in both objects and that the
// value equals `value`.
// Fix: field1/field2 were assigned without declarations, leaking implicit
// globals (and throwing outright in strict mode / ES modules).
function checkSync(obj1, obj2, path, value) {
  var field1 = pathToReference(obj1, pathToArray(path));
  var field2 = pathToReference(obj2, pathToArray(path));
  is(field1, field2, path + " in sync");
  is(field1, value, path + " correct value");
}
// the README examples: a snapshot and a "current" state exercising every kind
// of change (edit, create, remove, array append, object<->primitive swap).
// (Fix: the original declarations were missing statement terminators.)
var snapshotJSON =
{
  "x": 42,
  "a": 1,
  "b":
  {
    "c": 2,
    "d":
    {
      "e": 3,
      "f": 4
    },
    "g": 5
  },
  "h": 6.6,
  "i": [7, 8, 9],
  "j": 10,
  "k": { "m": 11 },
  "n": 66
};
var currentJSON =
{
  "x": 43, /* edited */
  "a": 1,
  "new": 11, /* created */
  "b":
  {
    "c": 2,
    "new2": 22, /* created */
    "d":
    {
      "e": 3
      /*"f": 4*/ /* removed */
    },
    "g": 55 /* edited */
  },
  /* "h": 6.6, */ /* removed */
  "i": [7, 8, 9, 99], /* added array element */
  "j": 10,
  "k": 42, /* replaced object with primitive */
  "n": { "new3": 77 } /* replaced primitive with object */
};
// Full-coverage reconcile test using the README fixtures: verifies the exact
// update list against the snapshot, then reconciles against a second replica
// whose changes overlap only compatibly, and checks both sides converge.
function test_complexReconcileWithNoConflicts() {
  var updates = detectUpdates(snapshotJSON, currentJSON);
  is(updates.length, 11, "detect correct number of updates");
  checkUpdate(updates, "/x", "edit", 43);
  checkUpdate(updates, "/new", "create", 11);
  checkUpdate(updates, "/b/new2", "create", 22);
  checkUpdate(updates, "/b/d/f", "remove", undefined);
  checkUpdate(updates, "/b/g", "edit", 55);
  checkUpdate(updates, "/h", "remove", undefined);
  checkUpdate(updates, "/i/3", "create", 99);
  checkUpdate(updates, "/k/m", "remove", undefined);
  checkUpdate(updates, "/k", "edit", 42);
  checkUpdate(updates, "/n", "edit", {});
  checkUpdate(updates, "/n/new3", "create", 77);
  // now we check against an object that contains edits to other
  // fields, or identical edits.
  var otherJSON =
  {
    "x": 43, /* edited to the same value */
    "a": 100, /* non-conflicting edit */
    "new": 11, /* created to the same value */
    "b":
    {
      /*"c": 2,*/ /* non-conflicting remove */
      "d":
      {
        "e": 3,
        /*"f": 4*/ /* removed same value */
      },
      "g": 5, /* didn't edit */
      "foo": 555 /* non-conflicting create */
    },
    "h": 6.6, /* didn't remove */
    "i": [7, 8, 9, 99], /* added same array element */
    "j": 10,
    "k": { "m": 11 }, /* didn't touch */
    "n": 66 /* didn't touch */
  }
  var otherUpdates = detectUpdates(snapshotJSON, otherJSON);
  checkUpdate(otherUpdates, "/x", "edit", 43);
  checkUpdate(otherUpdates, "/a", "edit", 100);
  checkUpdate(otherUpdates, "/b/c", "remove", undefined);
  checkUpdate(otherUpdates, "/b/d/f", "remove", undefined);
  checkUpdate(otherUpdates, "/i/3", "create", 99);
  // Reconcile the two update streams and apply each side's propagations.
  var syncdata = reconcile([updates, otherUpdates]);
  applyCommands(currentJSON, syncdata.propagations[0]);
  applyCommands(otherJSON, syncdata.propagations[1]);
  // Both replicas must now agree on every touched path.
  checkSync(currentJSON, otherJSON, "/x", 43);
  checkSync(currentJSON, otherJSON, "/a", 100);
  checkSync(currentJSON, otherJSON, "/new", 11);
  checkSync(currentJSON, otherJSON, "/b/c", undefined);
  checkSync(currentJSON, otherJSON, "/b/new2", 22);
  checkSync(currentJSON, otherJSON, "/b/d/f", undefined);
  checkSync(currentJSON, otherJSON, "/b/g", 55);
  checkSync(currentJSON, otherJSON, "/b/foo", 555);
  checkSync(currentJSON, otherJSON, "/h", undefined);
  checkSync(currentJSON, otherJSON, "/i/3", 99);
  checkSync(currentJSON, otherJSON, "/k", 42);
  checkSync(currentJSON, otherJSON, "/n/new3", 77);
  checkSync(currentJSON, otherJSON, "/j", 10);
}
// Two successive reconcile rounds: after the first round converges, a fresh
// snapshot is taken and a second round of edits must also propagate cleanly.
function test_repeatedSyncsWithNoConflicts() {
  var originalJSON = {"foo": {"bar": "baz"}, "toBeRemoved":"goner",
                      "someArray":["tobeEdited"]};
  var clientJSON = {"foo": {"bar": "baz"}, "toBeRemoved":"goner",
                    "someArray":["tobeEdited"]};
  var serverJSON = {"foo": {"bar": "baz"}, "toBeRemoved":"goner",
                    "someArray":["tobeEdited"]};
  // Divergent-but-compatible changes: distinct creates, identical removes,
  // identical array edits.
  clientJSON["foo"]["clientAddition"] = "the client added this";
  serverJSON["foo"]["serverAddition"] = "the server added this";
  delete clientJSON["toBeRemoved"];
  delete serverJSON["toBeRemoved"];
  clientJSON["someArray"][0] = "been edited";
  serverJSON["someArray"][0] = "been edited";
  var syncdata = reconcile([detectUpdates(originalJSON, clientJSON),
                            detectUpdates(originalJSON, serverJSON)]);
  applyCommands(clientJSON, syncdata.propagations[0]);
  applyCommands(serverJSON, syncdata.propagations[1]);
  is(clientJSON["foo"]["bar"] == serverJSON["foo"]["bar"],
     true, "unchanged fields remain");
  is(serverJSON["foo"]["clientAddition"], clientJSON["foo"]["clientAddition"],
     "server has client addition");
  is(clientJSON["foo"]["serverAddition"], serverJSON["foo"]["serverAddition"],
     "client has server addition");
  is(clientJSON["toBeRemoved"] == undefined, true, "removed from client");
  is(serverJSON["toBeRemoved"] == undefined, true, "removed from server");
  is(clientJSON["someArray"][0] == serverJSON["someArray"][0], true,
     "identically edited array ok");
  /* now all the fields are the same */
  originalJSON = { "foo": {"bar":"baz",
                           "clientAddition":"the client added this",
                           "serverAddition":"the server added this"},
                   "someArray":["been edited"]} ;
  // Second round: one new edit on each side, to different paths.
  clientJSON["someArray"][0] = "edited again";
  serverJSON["foo"]["bar"] = "edit some other field";
  syncdata = reconcile([detectUpdates(originalJSON, clientJSON),
                        detectUpdates(originalJSON, serverJSON)]);
  applyCommands(clientJSON, syncdata.propagations[0]);
  applyCommands(serverJSON, syncdata.propagations[1]);
  is(serverJSON["someArray"][0], "edited again", "repeated edit works");
  is(serverJSON["foo"]["bar"], "edit some other field", "repeated edit works");
}
// Identical commands on both sides are convergent, not conflicting.
function test_conflictsFromReplica() {
  var conflictList = conflictsFromReplica(new Command("edit", ["f"], "b"),
                                          [new Command("edit", ["f"], "b")]);
  is(conflictList.conflicts.length, 0, "identical commands don't conflict");
}
// Conflict-detection scenarios: whenever two replicas make incompatible
// changes, reconcile() must produce zero propagations and report the
// conflicting commands to each side.
function test_basicConflicts() {
  // conflicting edits
  var snap = {"foo":"bar"}
  var clientJSON = {"foo":"baz"}
  var serverJSON = {"foo":"qux"}
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  // should have zero propagations and one conflict
  is(syncdata.propagations[0].length, 0,
     "complete edit conflict should have no propagations");
  is(syncdata.propagations[1].length, 0,
     "complete edit conflict should have no propagations");
  is(syncdata.conflicts[0].length, 1,
     "single edit field conflicting should have one conflict");
  is(syncdata.conflicts[1].length, 1,
     "single edit field conflicting should have one conflict");
  // conflicting creates
  var snap = {"foo":"bar"}
  var clientJSON = {"foo":"bar","baz":"qux"}
  var serverJSON = {"foo":"bar","baz":"quux"}
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  // should have zero propagations and one conflict
  is(syncdata.propagations[0].length, 0,
     "complete create conflict should have no propagations");
  is(syncdata.propagations[1].length, 0,
     "complete create conflict should have no propagations");
  is(syncdata.conflicts[0].length, 1,
     "single create field conflicting should have one conflict");
  is(syncdata.conflicts[1].length, 1,
     "single create field conflicting should have one conflict");
  // edit that conflicts with a remove of its parent
  var snap = {"foo":{"bar":"baz"}, "xuq":"xuuq"}
  var clientJSON = {"xuq":"xuuq"}
  var serverJSON = {"foo":{"bar":"qux"}, "xuq":"xuuq"}
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  is(syncdata.propagations[0].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.propagations[1].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.conflicts[0].length, 1,
     "the client gets one conflict: the edit to /foo/bar");
  is(syncdata.conflicts[1].length, 2,
     "the server gets two conflicts: both of the removals");
  // edit that conflicts with an empty object
  var snap = {"foo":{"bar":"baz"}}
  var clientJSON = {}
  var serverJSON = {"foo":{"bar":"qux"}}
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  is(syncdata.propagations[0].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.propagations[1].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.conflicts[0].length, 1,
     "the client gets one conflict: the edit to /foo/bar");
  is(syncdata.conflicts[1].length, 2,
     "the server gets two conflicts: both of the removals");
  // hierarchical create conflict
  var snap = {"foo":"bar"}
  var clientJSON = {"foo":"bar", "baz":{"qux":"quux"}}
  var serverJSON = {"foo":"bar", "baz":"b"}
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  is(syncdata.propagations[0].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.propagations[1].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.conflicts[0].length, 1,
     "the client gets one conflict: creation of /baz");
  is(syncdata.conflicts[1].length, 2,
     "the server gets two conflicts: both of the client creates");
  // edited to primitive
  var snap = {"foo":"bar", "baz":{}}
  var clientJSON = {"foo":"bar", "baz":{"qux":"quux"}}
  var serverJSON = {"foo":"bar", "baz":"b"}
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  is(syncdata.propagations[0].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.propagations[1].length, 0,
     "complete remove conflict should have no propagations");
  is(syncdata.conflicts[0].length, 1,
     "the client gets one conflict: edit of /baz");
  is(syncdata.conflicts[1].length, 1,
     "the server gets one conflict: the creation of /baz/qux");
}
// Array merging without identifier keys: insertions at different indexes are
// reconciled positionally. (Fix: the second conflict assertion checked
// conflicts[0] twice; it now checks conflicts[1] as intended.)
function test_arrayMerging() {
  var snap =
  {
    "foo":
    [
      {"a": "1"},
      {"b": "2"},
      {"c": "3"}
    ]
  };
  var clientJSON =
  {
    "foo":
    [
      {"a": "1"},
      {"b": "2"},
      {"b2": "2b"},
      {"c": "3"}
    ]
  };
  var serverJSON =
  {
    "foo":
    [
      {"a": "1"},
      {"a1": "1a"},
      {"b": "2"},
      {"c": "3"}
    ]
  };
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  // The result we end up with here is a little counter-intuitive.
  // The object with keys "b" and "b2" both end up being creates
  // underneath /foo/2. This illustrates the need to let clients
  // define properties that serve as identifiers.
  is(syncdata.propagations[0].length, 3, "move the indexes up");
  is(syncdata.propagations[1].length, 1, "apply a create inside /foo/2");
  is(syncdata.conflicts[0].length, 0, "no conflicts in array merging");
  is(syncdata.conflicts[1].length, 0, "no conflicts in array merging");
  applyCommands(clientJSON, syncdata.propagations[0]);
  applyCommands(serverJSON, syncdata.propagations[1]);
  checkSync(clientJSON, serverJSON, "/foo/0/a", "1");
  checkSync(clientJSON, serverJSON, "/foo/1/a1", "1a");
  checkSync(clientJSON, serverJSON, "/foo/2/b", "2");
  checkSync(clientJSON, serverJSON, "/foo/2/b2", "2b");
  checkSync(clientJSON, serverJSON, "/foo/3/c", "3");
}
// Diagnostic-only scenario (disabled in runTests): same fixtures as
// test_arrayMerging, but instead of asserting it just dumps the resulting
// propagations/conflicts for inspection. No is() checks are performed.
function test_arrayMergingWithIDs() {
  var snap =
  {
    "foo":
    [
      {"a": "1"},
      {"b": "2"},
      {"c": "3"}
    ]
  }
  var clientJSON =
  {
    "foo":
    [
      {"a": "1"},
      {"b": "2"},
      {"b2": "2b"},
      {"c": "3"}
    ]
  }
  var serverJSON =
  {
    "foo":
    [
      {"a": "1"},
      {"a1": "1a"},
      {"b": "2"},
      {"c": "3"}
    ]
  }
  var syncdata = reconcile([detectUpdates(snap, clientJSON),
                            detectUpdates(snap, serverJSON)]);
  // Dump counts and the individual commands for manual inspection.
  output(syncdata.propagations[0].length + " " + syncdata.propagations[1].length)
  output(syncdata.conflicts[0].length + " " + syncdata.conflicts[1].length)
  forEach(syncdata.propagations[0], function(x) { output(x.action + " " + x.path + " = " + x.value)})
  output("-----------")
  forEach(syncdata.propagations[1], function(x) { output(x.action + " " + x.path + " = " + x.value)})
}
// NOTE(review): this test is incomplete — it builds fixtures (one conflicting
// edit at /foo/bar plus several non-conflicting creates/array inserts) but
// never calls reconcile() or asserts anything, and is not invoked from
// runTests(). Left as-is pending completion.
function test_complexConflictsMixedWithPropagations() {
  var snap =
  {
    "foo":
    {
      "bar": "baz",
      "bar2": "baz2"
    },
    "foo2":
    {
      "hmm1":
      {
        "hmm": "yeah",
        "hm1": "hmmm",
        "hm2": "hmmmmmm"
      },
      "foo3":
      {
        "bar3": ["hmm", "yeah", "ok"],
        "baz3":
        [
          {"a": "1"},
          {"b": "2"},
          {"c": "3"}
        ]
      }
    }
  }
  var clientJSON =
  {
    "foo":
    {
      "bar": "baz1", /* conflict */
      "bar2": "baz2",
      "fff": "ggg" /* no conflict create */
    },
    "foo2":
    {
      "hmm1":
      {
        "hmm": "yeah",
        "hm1": "hmmm",
        "hm2": "hmmmmmm"
      },
      "foo3":
      {
        "bar3": ["hmm", "yeah", "ok"],
        "baz3":
        [
          {"a": "1"},
          {"a2": "12"},
          {"b": "2"},
          {"c": "3"}
        ]
      }
    }
  }
  var serverJSON =
  {
    "foo":
    {
      "bar": "asdfasdf", /* conflict */
      "bar2": "baz2",
      "fff": "ggg", /* no conflict create */
      "fff2": "ggg2" /* no conflict create */
    },
    "foo2":
    {
      "hmm1":
      {
        "hmm": "yeah",
        "hm1": "hmmm",
        "hm2": "hmmmmmm"
      },
      "foo3":
      {
        "bar3": ["hmm", "yeah", "ok"],
        "baz3":
        [
          {"a": "1"},
          {"b": "2"},
          {"b2": "2b"},
          {"c": "3"}
        ]
      }
    }
  }
}
// Run every enabled test in order, then print the pass/fail summary.
function runTests() {
  output("\n\nTests Starting");
  output("--------------");
  test_isObjectOrArray();
  test_identifySuspects();
  test_created();
  test_removed();
  test_edited();
  test_detectUpdates();
  test_orderUpdates();
  test_Command();
  test_commandInList();
  test_doesConflict();
  test_applyCommand();
  test_reconcileWithNoConflicts();
  test_complexReconcileWithNoConflicts();
  test_repeatedSyncsWithNoConflicts();
  test_conflictsFromReplica();
  test_basicConflicts();
  test_arrayMerging();
  // Diagnostic dump only, no assertions — kept disabled.
  //test_arrayMergingWithIDs();
  complete();
}
// Entry point: in a browser, wait for page load; in a JS shell, run now.
if (this["document"]) {
  window.onload = runTests;
} else {
  runTests();
}
| krishnabangalore/Webinos-Platform | webinos/core/manager/synchronisation_manager/lib/json-sync-master/test.js | JavaScript | apache-2.0 | 24,453 |
//where
/** Generate code to create an array with given element type and number
 * of dimensions.
 *
 * @param pos   source position, used for error reporting and constant-pool refs
 * @param type  the array type to instantiate
 * @param ndims the number of dimensions allocated by this expression
 * @return a stack item of the given array type
 */
Item makeNewArray(DiagnosticPosition pos, Type type, int ndims) {
  try { // added for debug tracing (translated from the annotator's note)
    DEBUG.P(this,"makeNewArray(3)");
    DEBUG.P("type="+type);
    DEBUG.P("ndims="+ndims);
    Type elemtype = types.elemtype(type);
    // Reject arrays whose total dimension count exceeds the class-file limit.
    if (types.dimensions(elemtype) + ndims > ClassFile.MAX_DIMENSIONS) {
      log.error(pos, "limit.dimensions");
      nerrs++;
    }
    int elemcode = Code.arraycode(elemtype);
    DEBUG.P("elemcode="+elemcode);
    // Pick the bytecode by element kind: anewarray for reference elements,
    // multianewarray for multi-dimensional, newarray for primitives.
    if (elemcode == 0 || (elemcode == 1 && ndims == 1)) {
      code.emitAnewarray(makeRef(pos, elemtype), type);
    } else if (elemcode == 1) {
      code.emitMultianewarray(ndims, makeRef(pos, type), type);
    } else {
      code.emitNewarray(elemcode, type);
    }
    return items.makeStackItem(type);
  } finally { // added for debug tracing (translated from the annotator's note)
    DEBUG.P(0,this,"makeNewArray(3)");
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.calcite.util;
import org.apache.calcite.avatica.util.DateTimeUtils;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import java.util.Calendar;
import java.util.regex.Pattern;
import javax.annotation.Nonnull;
/**
* Time literal.
*
* <p>Immutable, internally represented as a string (in ISO format),
* and can support unlimited precision (milliseconds, nanoseconds).
*/
public class TimeString implements Comparable<TimeString> {
private static final Pattern PATTERN =
Pattern.compile("[0-9][0-9]:[0-9][0-9]:[0-9][0-9](\\.[0-9]*[1-9])?");
final String v;
/** Internal constructor, no validation. */
private TimeString(String v, @SuppressWarnings("unused") boolean ignore) {
this.v = v;
}
/** Creates a TimeString. */
public TimeString(String v) {
this(v, false);
Preconditions.checkArgument(PATTERN.matcher(v).matches(),
"Invalid time format:", v);
Preconditions.checkArgument(getHour() >= 0 && getHour() < 24,
"Hour out of range:", getHour());
Preconditions.checkArgument(getMinute() >= 0 && getMinute() < 60,
"Minute out of range:", getMinute());
Preconditions.checkArgument(getSecond() >= 0 && getSecond() < 60,
"Second out of range:", getSecond());
}
/** Creates a TimeString for hour, minute, second and millisecond values. */
public TimeString(int h, int m, int s) {
this(hms(h, m, s), false);
}
/** Validates an hour-minute-second value and converts to a string. */
private static String hms(int h, int m, int s) {
Preconditions.checkArgument(h >= 0 && h < 24, "Hour out of range:", h);
Preconditions.checkArgument(m >= 0 && m < 60, "Minute out of range:", m);
Preconditions.checkArgument(s >= 0 && s < 60, "Second out of range:", s);
final StringBuilder b = new StringBuilder();
DateTimeStringUtils.hms(b, h, m, s);
return b.toString();
}
/** Sets the fraction field of a {@code TimeString} to a given number
 * of milliseconds. Nukes the value set via {@link #withNanos}.
 *
 * <p>For example,
 * {@code new TimeString(2, 3, 4).withMillis(56)}
 * yields {@code TIME '02:03:04.056'}.
 * (Doc fix: the previous example used a six-argument constructor that
 * does not exist on this class.) */
public TimeString withMillis(int millis) {
  Preconditions.checkArgument(millis >= 0 && millis < 1000);
  return withFraction(DateTimeStringUtils.pad(3, millis));
}
/** Sets the fraction field of a {@code TimeString} to a given number
* of nanoseconds. Nukes the value set via {@link #withMillis(int)}.
*
* <p>For example,
* {@code new TimeString(1970, 1, 1, 2, 3, 4).withNanos(56789)}
* yields {@code TIME '1970-01-01 02:03:04.000056789'}. */
public TimeString withNanos(int nanos) {
Preconditions.checkArgument(nanos >= 0 && nanos < 1000000000);
return withFraction(DateTimeStringUtils.pad(9, nanos));
}
/** Sets the fraction field of a {@code TimeString}.
* The precision is determined by the number of leading zeros.
* Trailing zeros are stripped.
*
* <p>For example,
* {@code new TimeString(1970, 1, 1, 2, 3, 4).withFraction("00506000")}
* yields {@code TIME '1970-01-01 02:03:04.00506'}. */
public TimeString withFraction(String fraction) {
String v = this.v;
int i = v.indexOf('.');
if (i >= 0) {
v = v.substring(0, i);
}
while (fraction.endsWith("0")) {
fraction = fraction.substring(0, fraction.length() - 1);
}
if (fraction.length() > 0) {
v = v + "." + fraction;
}
return new TimeString(v);
}
@Override public String toString() {
return v;
}
@Override public boolean equals(Object o) {
// The value is in canonical form (no trailing zeros).
return o == this
|| o instanceof TimeString
&& ((TimeString) o).v.equals(v);
}
@Override public int hashCode() {
return v.hashCode();
}
@Override public int compareTo(@Nonnull TimeString o) {
return v.compareTo(o.v);
}
/** Creates a TimeString from a Calendar. */
public static TimeString fromCalendarFields(Calendar calendar) {
return new TimeString(
calendar.get(Calendar.HOUR_OF_DAY),
calendar.get(Calendar.MINUTE),
calendar.get(Calendar.SECOND))
.withMillis(calendar.get(Calendar.MILLISECOND));
}
public static TimeString fromMillisOfDay(int i) {
return new TimeString(DateTimeUtils.unixTimeToString(i))
.withMillis((int) DateTimeUtils.floorMod(i, 1000));
}
public TimeString round(int precision) {
Preconditions.checkArgument(precision >= 0);
int targetLength = 9 + precision;
if (v.length() <= targetLength) {
return this;
}
String v = this.v.substring(0, targetLength);
while (v.length() >= 9 && (v.endsWith("0") || v.endsWith("."))) {
v = v.substring(0, v.length() - 1);
}
return new TimeString(v);
}
public int getMillisOfDay() {
int h = Integer.valueOf(v.substring(0, 2));
int m = Integer.valueOf(v.substring(3, 5));
int s = Integer.valueOf(v.substring(6, 8));
int ms = getMillisInSecond();
return (int) (h * DateTimeUtils.MILLIS_PER_HOUR
+ m * DateTimeUtils.MILLIS_PER_MINUTE
+ s * DateTimeUtils.MILLIS_PER_SECOND
+ ms);
}
private int getMillisInSecond() {
switch (v.length()) {
case 8: // "12:34:56"
return 0;
case 10: // "12:34:56.7"
return Integer.valueOf(v.substring(9)) * 100;
case 11: // "12:34:56.78"
return Integer.valueOf(v.substring(9)) * 10;
case 12: // "12:34:56.789"
default: // "12:34:56.7890000012345"
return Integer.valueOf(v.substring(9, 12));
}
}
private int getHour() {
return Integer.parseInt(v.substring(0, 2));
}
private int getMinute() {
return Integer.parseInt(this.v.substring(3, 5));
}
private int getSecond() {
return Integer.parseInt(this.v.substring(6, 8));
}
public Calendar toCalendar() {
return Util.calendar(getMillisOfDay());
}
/** Converts this TimestampString to a string, truncated or padded with
* zeroes to a given precision. */
public String toString(int precision) {
Preconditions.checkArgument(precision >= 0);
final int p = precision();
if (precision < p) {
return round(precision).toString(precision);
}
if (precision > p) {
String s = v;
if (p == 0) {
s += ".";
}
return s + Strings.repeat("0", precision - p);
}
return v;
}
private int precision() {
return v.length() < 9 ? 0 : (v.length() - 9);
}
}
// End TimeString.java
| xhoong/incubator-calcite | core/src/main/java/org/apache/calcite/util/TimeString.java | Java | apache-2.0 | 7,356 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sling.models.it;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import javax.jcr.Node;
import javax.jcr.Session;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.sling.api.resource.Resource;
import org.apache.sling.api.resource.ResourceResolver;
import org.apache.sling.api.resource.ResourceResolverFactory;
import org.apache.sling.junit.annotations.SlingAnnotationsTestRunner;
import org.apache.sling.junit.annotations.TestReference;
import org.apache.sling.models.factory.ModelClassException;
import org.apache.sling.models.factory.ModelFactory;
import org.apache.sling.models.it.models.ConstructorInjectionTestModel;
import org.apache.sling.models.it.models.FieldInjectionTestModel;
import org.apache.sling.models.it.models.InterfaceInjectionTestModel;
import org.apache.sling.models.it.models.implextend.InvalidImplementsInterfacePropertyModel;
import org.apache.sling.models.it.models.implextend.SampleServiceInterface;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
/**
 * Server-side integration tests for basic {@link ModelFactory} operations:
 * model creation, model-class detection, and adaptable compatibility checks.
 *
 * <p>Each test runs against a freshly created JCR node carrying a random
 * {@code testProperty} value, created in {@link #setUp()} and removed in
 * {@link #tearDown()}.
 */
@RunWith(SlingAnnotationsTestRunner.class)
public class ModelFactorySimpleTest {

    @TestReference
    private ResourceResolverFactory rrFactory;

    @TestReference
    private ModelFactory modelFactory;

    /** Random value written to the test node's {@code testProperty}. */
    private String value;
    private ResourceResolver resolver;
    private Resource resource;
    private Node createdNode;

    @Before
    public void setUp() throws Exception {
        value = RandomStringUtils.randomAlphanumeric(10);
        resolver = rrFactory.getAdministrativeResourceResolver(null);
        Session session = resolver.adaptTo(Session.class);
        Node rootNode = session.getRootNode();
        createdNode = rootNode.addNode("test_" + RandomStringUtils.randomAlphanumeric(10));
        createdNode.setProperty("testProperty", value);
        session.save();
        resource = resolver.getResource(createdNode.getPath());
    }

    @After
    public void tearDown() throws Exception {
        if (createdNode != null) {
            Session session = createdNode.getSession();
            createdNode.remove();
            // Persist the removal; without save() the delete is discarded
            // when the (administrative) session is closed, leaking a
            // test_* node per run.
            session.save();
        }
        if (resolver != null) {
            resolver.close();
        }
    }

    @Test
    public void testCreateModel() {
        FieldInjectionTestModel model = modelFactory.createModel(resource, FieldInjectionTestModel.class);

        assertNotNull("Model is null", model);
        assertEquals("Test Property is not set correctly", value, model.getTestProperty());
        assertNotNull("Filters is null", model.getFilters());
        assertSame("Adaptable is not injected", resource, model.getResource());
    }

    /** Plain class that is deliberately not a Sling Model. */
    private static final class DummyClass {
    }

    @Test
    public void testIsModelClass() {
        assertTrue("Model is not detected as such", modelFactory.isModelClass(ConstructorInjectionTestModel.class));
        assertFalse("Dummy class incorrectly detected as model class", modelFactory.isModelClass(DummyClass.class));
        assertFalse("Model with invalid adaptable incorrectly detected as model class", modelFactory.isModelClass(InvalidImplementsInterfacePropertyModel.class));
        assertTrue("Model is not detected as such", modelFactory.isModelClass(SampleServiceInterface.class)); // being provided by two adapters
    }

    @Test
    public void testCanCreateFromAdaptable() {
        assertTrue("Model is not detected as such", modelFactory.canCreateFromAdaptable(resource, ConstructorInjectionTestModel.class));
        assertTrue("Model is not detected as such", modelFactory.canCreateFromAdaptable(resource, SampleServiceInterface.class));
        // A String is not a valid adaptable for this model.
        assertFalse("Invalid adaptable incorrectly reported as usable", modelFactory.canCreateFromAdaptable("", ConstructorInjectionTestModel.class));
    }

    @Test(expected = ModelClassException.class)
    public void testCanCreateFromAdaptableWithModelException() {
        modelFactory.canCreateFromAdaptable(resource, DummyClass.class); // no model class
    }
}
| nleite/sling | bundles/extensions/models/integration-tests/src/main/java/org/apache/sling/models/it/ModelFactorySimpleTest.java | Java | apache-2.0 | 4,933 |
import * as _ from 'lodash';
/**
 * Contract for services that aggregate the "application"-style API objects
 * (deployment configs, deployments, stateful sets, and unowned replication
 * controllers / replica sets) for a project context.
 */
interface IApplicationsService {
  // Resolves with the combined, sorted list of API objects for the context.
  getApplications(context: any): Promise<any>;
}
/** Backend service communications. */
export class ApplicationsService implements IApplicationsService {
  public static $inject = ['$filter', '$q', 'DataService'];

  private $filter: any;
  private $q: any;
  private DataService: any;

  constructor ($filter: any, $q: any, DataService: any) {
    this.$filter = $filter;
    this.$q = $q;
    this.DataService = DataService;
  }

  /**
   * Loads every "application"-like API object for the given project context:
   * deployment configs, deployments and stateful sets, plus any replication
   * controllers / replica sets that are not owned by a deployment config /
   * deployment (those are assumed to be shown under their owner instead).
   *
   * @param context project context forwarded to DataService.list
   * @returns promise resolving to the combined list sorted by metadata.name
   *          then kind; rejects with the underlying DataService error.
   */
  public getApplications(context: any): Promise<any> {
    // Load all the "application" types in parallel.
    const promises: any = [
      this.DataService.list('deploymentconfigs', context),
      this.DataService.list('replicationcontrollers', context),
      this.DataService.list({group: 'apps', resource: 'deployments'}, context),
      this.DataService.list({group: 'extensions', resource: 'replicasets'}, context),
      this.DataService.list({group: 'apps', resource: 'statefulsets'}, context)
    ];

    // Return the derived promise directly instead of wrapping it in a
    // $q.defer() (the deferred anti-pattern); resolution value is the same
    // and rejections propagate to the caller unchanged.
    return this.$q.all(promises).then(_.spread((deploymentConfigData: any,
                                                replicationControllerData: any,
                                                deploymentData: any,
                                                replicaSetData: any,
                                                statefulSetData: any) => {
      const deploymentConfigs: any = _.toArray(deploymentConfigData.by('metadata.name'));
      // RCs backed by a deployment config (and RSs backed by a deployment)
      // are filtered out so each application appears only once.
      const replicationControllers: any = _.reject(replicationControllerData.by('metadata.name'), this.$filter('hasDeploymentConfig'));
      const deployments: any = _.toArray(deploymentData.by('metadata.name'));
      const replicaSets: any = _.reject(replicaSetData.by('metadata.name'), this.$filter('hasDeployment'));
      const statefulSets: any = _.toArray(statefulSetData.by('metadata.name'));

      const apiObjects: any = deploymentConfigs.concat(deployments)
                                .concat(replicationControllers)
                                .concat(replicaSets)
                                .concat(statefulSets);
      return _.sortBy(apiObjects, ['metadata.name', 'kind']);
    }));
  }
}
| spadgett/origin-web-catalog | app/mockServices/mockApplications.service.ts | TypeScript | apache-2.0 | 2,086 |
Imports System.Collections.Immutable
Imports System.Reflection
Imports System.Runtime.InteropServices
Imports Microsoft.CodeAnalysis.Collections
Imports Microsoft.CodeAnalysis.VisualBasic.Symbols
Imports Roslyn.Utilities
Namespace Microsoft.CodeAnalysis.VisualBasic.ExpressionEvaluator

    ''' <summary>
    ''' Produces the bound body of an <see cref="EEMethodSymbol"/> on demand,
    ''' reporting problems through <paramref name="diagnostics"/>.
    ''' </summary>
    Friend Delegate Function GenerateMethodBody(method As EEMethodSymbol, diagnostics As DiagnosticBag) As BoundStatement

    ''' <summary>
    ''' Synthesized method symbol used by the expression evaluator to host a
    ''' compiled debugger expression. It mirrors a source method's type
    ''' parameters, parameters and locals (re-owned by this symbol), and its
    ''' body is generated lazily via <see cref="GenerateMethodBody"/>.
    ''' </summary>
    Friend NotInheritable Class EEMethodSymbol
        Inherits MethodSymbol

        ' Substitution from the source method's type parameters (and its
        ' containing type's) to this method's re-owned type parameters.
        Friend ReadOnly TypeMap As TypeSubstitution
        Friend ReadOnly SubstitutedSourceMethod As MethodSymbol
        ' All locals of the method, re-owned by this symbol.
        Friend ReadOnly Locals As ImmutableArray(Of LocalSymbol)
        ' The subset of locals that should be visible when binding the
        ' debugger expression.
        Friend ReadOnly LocalsForBinding As ImmutableArray(Of LocalSymbol)

        Private ReadOnly _compilation As VisualBasicCompilation
        Private ReadOnly _container As EENamedTypeSymbol
        Private ReadOnly _name As String
        Private ReadOnly _locations As ImmutableArray(Of Location)
        Private ReadOnly _typeParameters As ImmutableArray(Of TypeParameterSymbol)
        Private ReadOnly _parameters As ImmutableArray(Of ParameterSymbol)
        Private ReadOnly _meParameter As ParameterSymbol
        ' Variable name -> display-class field, with instances rebased onto
        ' this method's parameters/locals.
        Private ReadOnly _displayClassVariables As ImmutableDictionary(Of String, DisplayClassVariable)
        Private ReadOnly _voidType As NamedTypeSymbol

        ''' <summary>
        ''' Invoked at most once to generate the method body.
        ''' (If the compilation has no errors, it will be invoked
        ''' exactly once, otherwise it may be skipped.)
        ''' </summary>
        Private ReadOnly _generateMethodBody As GenerateMethodBody

        ' Computed from the generated body in GetBoundMethodBody; reading
        ' ReturnType before then throws.
        Private _lazyReturnType As TypeSymbol

        ' NOTE: This is only used for asserts, so it could be conditional on DEBUG.
        Private ReadOnly _allTypeParameters As ImmutableArray(Of TypeParameterSymbol)

        Friend Sub New(
            compilation As VisualBasicCompilation,
            container As EENamedTypeSymbol,
            name As String,
            location As Location,
            sourceMethod As MethodSymbol,
            sourceLocals As ImmutableArray(Of LocalSymbol),
            sourceLocalsForBinding As ImmutableArray(Of LocalSymbol),
            sourceDisplayClassVariables As ImmutableDictionary(Of String, DisplayClassVariable),
            voidType As NamedTypeSymbol,
            generateMethodBody As GenerateMethodBody)

            Debug.Assert(sourceMethod.IsDefinition)
            Debug.Assert(sourceMethod.ContainingSymbol = container.SubstitutedSourceType.OriginalDefinition)
            Debug.Assert(sourceLocals.All(Function(l) l.ContainingSymbol = sourceMethod))

            _compilation = compilation
            _container = container
            _name = name
            _locations = ImmutableArray.Create(location)
            _voidType = voidType

            ' What we want is to map all original type parameters to the corresponding new type parameters
            ' (since the old ones have the wrong owners). Unfortunately, we have a circular dependency:
            '   1) Each new type parameter requires the entire map in order to be able to construct its constraint list.
            '   2) The map cannot be constructed until all new type parameters exist.
            ' Our solution is to pass each new type parameter a lazy reference to the type map. We then
            ' initialize the map as soon as the new type parameters are available - and before they are
            ' handed out - so that there is never a period where they can require the type map and find
            ' it uninitialized.

            Dim sourceMethodTypeParameters = sourceMethod.TypeParameters
            Dim allSourceTypeParameters = container.SourceTypeParameters.Concat(sourceMethodTypeParameters)

            Dim getTypeMap As New Func(Of TypeSubstitution)(Function() TypeMap)
            _typeParameters = sourceMethodTypeParameters.SelectAsArray(
                Function(tp As TypeParameterSymbol, i As Integer, arg As Object) DirectCast(New EETypeParameterSymbol(Me, tp, i, getTypeMap), TypeParameterSymbol),
                DirectCast(Nothing, Object))
            _allTypeParameters = container.TypeParameters.Concat(_typeParameters)
            Me.TypeMap = TypeSubstitution.Create(sourceMethod, allSourceTypeParameters, ImmutableArrayExtensions.Cast(Of TypeParameterSymbol, TypeSymbol)(_allTypeParameters))

            EENamedTypeSymbol.VerifyTypeParameters(Me, _typeParameters)

            Dim substitutedSourceType = container.SubstitutedSourceType
            Me.SubstitutedSourceMethod = sourceMethod.AsMember(substitutedSourceType)
            If _typeParameters.Any() Then
                Me.SubstitutedSourceMethod = Me.SubstitutedSourceMethod.Construct(_typeParameters.As(Of TypeSymbol)())
            End If
            TypeParameterChecker.Check(Me.SubstitutedSourceMethod, _allTypeParameters)

            ' Create a map from original parameter to target parameter.
            Dim parameterBuilder = ArrayBuilder(Of ParameterSymbol).GetInstance()

            ' If the source method has a Me parameter, it becomes an explicit
            ' first parameter of this (shared) method.
            Dim substitutedSourceMeParameter = Me.SubstitutedSourceMethod.MeParameter
            Dim subsitutedSourceHasMeParameter = substitutedSourceMeParameter IsNot Nothing
            If subsitutedSourceHasMeParameter Then
                _meParameter = MakeParameterSymbol(0, GeneratedNames.MakeStateMachineCapturedMeName(), substitutedSourceMeParameter) ' NOTE: Name doesn't actually matter.
                Debug.Assert(_meParameter.Type = Me.SubstitutedSourceMethod.ContainingType)
                parameterBuilder.Add(_meParameter)
            End If

            Dim ordinalOffset = If(subsitutedSourceHasMeParameter, 1, 0)
            For Each substitutedSourceParameter In Me.SubstitutedSourceMethod.Parameters
                Dim ordinal = substitutedSourceParameter.Ordinal + ordinalOffset
                Debug.Assert(ordinal = parameterBuilder.Count)
                Dim parameter = MakeParameterSymbol(ordinal, substitutedSourceParameter.Name, substitutedSourceParameter)
                parameterBuilder.Add(parameter)
            Next
            _parameters = parameterBuilder.ToImmutableAndFree()

            ' Re-own every source local on this method, remembering the
            ' source-to-target mapping so LocalsForBinding can share symbols.
            Dim localsBuilder = ArrayBuilder(Of LocalSymbol).GetInstance()
            Dim localsMap = PooledDictionary(Of LocalSymbol, LocalSymbol).GetInstance()
            For Each sourceLocal In sourceLocals
                Dim local = sourceLocal.ToOtherMethod(Me, Me.TypeMap)
                localsMap.Add(sourceLocal, local)
                localsBuilder.Add(local)
            Next
            Me.Locals = localsBuilder.ToImmutableAndFree()
            localsBuilder = ArrayBuilder(Of LocalSymbol).GetInstance()
            For Each sourceLocal In sourceLocalsForBinding
                Dim local As LocalSymbol = Nothing
                If Not localsMap.TryGetValue(sourceLocal, local) Then
                    local = sourceLocal.ToOtherMethod(Me, Me.TypeMap)
                    localsMap.Add(sourceLocal, local)
                End If
                localsBuilder.Add(local)
            Next
            Me.LocalsForBinding = localsBuilder.ToImmutableAndFree()

            ' Create a map from variable name to display class field.
            Dim displayClassVariables = PooledDictionary(Of String, DisplayClassVariable).GetInstance()
            For Each pair In sourceDisplayClassVariables
                Dim variable = pair.Value
                Dim displayClassInstanceFromLocal = TryCast(variable.DisplayClassInstance, DisplayClassInstanceFromLocal)
                Dim displayClassInstance = If(displayClassInstanceFromLocal Is Nothing,
                                              DirectCast(New DisplayClassInstanceFromMe(Me.Parameters(0)), DisplayClassInstance),
                                              New DisplayClassInstanceFromLocal(DirectCast(localsMap(displayClassInstanceFromLocal.Local), EELocalSymbol)))
                variable = variable.SubstituteFields(displayClassInstance, Me.TypeMap)
                displayClassVariables.Add(pair.Key, variable)
            Next
            _displayClassVariables = displayClassVariables.ToImmutableDictionary()
            displayClassVariables.Free()
            localsMap.Free()

            _generateMethodBody = generateMethodBody
        End Sub

        ' Clones a (substituted) source parameter onto this method,
        ' preserving type, by-ref-ness and custom modifiers.
        Private Function MakeParameterSymbol(ordinal As Integer, name As String, sourceParameter As ParameterSymbol) As ParameterSymbol
            Return New SynthesizedParameterSymbolWithCustomModifiers(
                Me,
                sourceParameter.Type,
                ordinal,
                sourceParameter.IsByRef,
                name,
                sourceParameter.CustomModifiers,
                sourceParameter.HasByRefBeforeCustomModifiers)
        End Function

        Public Overrides ReadOnly Property MethodKind As MethodKind
            Get
                Return MethodKind.Ordinary
            End Get
        End Property

        Public Overrides ReadOnly Property Name As String
            Get
                Return _name
            End Get
        End Property

        Public Overrides ReadOnly Property Arity As Integer
            Get
                Return _typeParameters.Length
            End Get
        End Property

        Public Overrides ReadOnly Property IsExtensionMethod As Boolean
            Get
                Return False
            End Get
        End Property

        Friend Overrides ReadOnly Property HasSpecialName As Boolean
            Get
                Return True
            End Get
        End Property

        Friend Overrides ReadOnly Property ImplementationAttributes As MethodImplAttributes
            Get
                Return Nothing
            End Get
        End Property

        Friend Overrides ReadOnly Property HasDeclarativeSecurity As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides Function GetDllImportData() As DllImportData
            Return Nothing
        End Function

        Friend Overrides Function GetSecurityInformation() As IEnumerable(Of Microsoft.Cci.SecurityAttribute)
            Throw ExceptionUtilities.Unreachable
        End Function

        Friend Overrides ReadOnly Property ReturnTypeMarshallingInformation As MarshalPseudoCustomAttributeData
            Get
                Return Nothing
            End Get
        End Property

        ' This method is shared, so it never has an implicit Me parameter
        ' (any captured Me is passed as an explicit first parameter).
        Friend Overrides Function TryGetMeParameter(<Out> ByRef meParameter As ParameterSymbol) As Boolean
            meParameter = Nothing
            Return True
        End Function

        Public Overrides ReadOnly Property IsVararg As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property IsSub As Boolean
            Get
                Return ReturnType.SpecialType = SpecialType.System_Void
            End Get
        End Property

        Public Overrides ReadOnly Property IsAsync As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property ReturnType As TypeSymbol
            Get
                ' Only available after GetBoundMethodBody has run.
                If _lazyReturnType Is Nothing Then
                    Throw New InvalidOperationException()
                End If
                Return _lazyReturnType
            End Get
        End Property

        Public Overrides ReadOnly Property TypeArguments As ImmutableArray(Of TypeSymbol)
            Get
                Return ImmutableArrayExtensions.Cast(Of TypeParameterSymbol, TypeSymbol)(_typeParameters)
            End Get
        End Property

        Public Overrides ReadOnly Property TypeParameters As ImmutableArray(Of TypeParameterSymbol)
            Get
                Return _typeParameters
            End Get
        End Property

        Public Overrides ReadOnly Property Parameters As ImmutableArray(Of ParameterSymbol)
            Get
                Return _parameters
            End Get
        End Property

        Public Overrides ReadOnly Property ExplicitInterfaceImplementations As ImmutableArray(Of MethodSymbol)
            Get
                Return ImmutableArray(Of MethodSymbol).Empty
            End Get
        End Property

        Public Overrides ReadOnly Property ReturnTypeCustomModifiers As ImmutableArray(Of CustomModifier)
            Get
                Return ImmutableArray(Of CustomModifier).Empty
            End Get
        End Property

        Public Overrides ReadOnly Property AssociatedSymbol As Symbol
            Get
                Return Nothing
            End Get
        End Property

        Friend Overrides Function GetAppliedConditionalSymbols() As ImmutableArray(Of String)
            Throw ExceptionUtilities.Unreachable
        End Function

        Friend Overrides ReadOnly Property CallingConvention As Cci.CallingConvention
            Get
                Debug.Assert(Me.IsShared)
                Dim cc = Cci.CallingConvention.Default
                If Me.IsVararg Then
                    cc = cc Or Cci.CallingConvention.ExtraArguments
                End If
                If Me.IsGenericMethod Then
                    cc = cc Or Cci.CallingConvention.Generic
                End If
                Return cc
            End Get
        End Property

        Friend Overrides ReadOnly Property GenerateDebugInfoImpl As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property ContainingSymbol As Symbol
            Get
                Return _container
            End Get
        End Property

        Public Overrides ReadOnly Property Locations As ImmutableArray(Of Location)
            Get
                Return _locations
            End Get
        End Property

        Public Overrides ReadOnly Property DeclaringSyntaxReferences As ImmutableArray(Of SyntaxReference)
            Get
                Throw ExceptionUtilities.Unreachable
            End Get
        End Property

        Public Overrides ReadOnly Property DeclaredAccessibility As Accessibility
            Get
                Return Accessibility.Internal
            End Get
        End Property

        Public Overrides ReadOnly Property IsShared As Boolean
            Get
                Return True
            End Get
        End Property

        Public Overrides ReadOnly Property IsOverridable As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property IsOverrides As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property IsMustOverride As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property IsNotOverridable As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property IsExternalMethod As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property IsIterator As Boolean
            Get
                Return False
            End Get
        End Property

        Public Overrides ReadOnly Property IsOverloads As Boolean
            Get
                Return False
            End Get
        End Property

        Friend Overrides ReadOnly Property ObsoleteAttributeData As ObsoleteAttributeData
            Get
                Throw ExceptionUtilities.Unreachable
            End Get
        End Property

        Friend Overrides ReadOnly Property IsMethodKindBasedOnSyntax As Boolean
            Get
                Return False
            End Get
        End Property

        Friend Overrides ReadOnly Property Syntax As VisualBasicSyntaxNode
            Get
                Return Nothing
            End Get
        End Property

#Disable Warning RS0010
        ''' <remarks>
        ''' The corresponding C# method,
        ''' <see cref="M:Microsoft.CodeAnalysis.CSharp.ExpressionEvaluator.EEMethodSymbol.GenerateMethodBody(Microsoft.CodeAnalysis.CSharp.TypeCompilationState,Microsoft.CodeAnalysis.DiagnosticBag)"/>,
        ''' invokes the <see cref="LocalRewriter"/> and the <see cref="LambdaRewriter"/> explicitly.
        ''' In VB, the caller (of this method) does that.
        ''' </remarks>
#Enable Warning RS0010
        Friend Overrides Function GetBoundMethodBody(diagnostics As DiagnosticBag, <Out> ByRef Optional methodBodyBinder As Binder = Nothing) As BoundBlock
            ' Generate the raw body and derive the return type from it.
            Dim body = _generateMethodBody(Me, diagnostics)
            Debug.Assert(body IsNot Nothing)

            _lazyReturnType = CalculateReturnType(body)

            ' Can't do this until the return type has been computed.
            TypeParameterChecker.Check(Me, _allTypeParameters)

            Dim syntax As VisualBasicSyntaxNode = body.Syntax
            Dim statementsBuilder = ArrayBuilder(Of BoundStatement).GetInstance()
            statementsBuilder.Add(body)
            ' Insert an implicit return statement if necessary.
            If body.Kind <> BoundKind.ReturnStatement Then
                statementsBuilder.Add(New BoundReturnStatement(syntax, Nothing, Nothing, Nothing))
            End If

            ' Collect locals in a stable order: binding locals first, then
            ' any remaining method locals, without duplicates.
            Dim originalLocalsBuilder = ArrayBuilder(Of LocalSymbol).GetInstance()
            Dim originalLocalsSet = PooledHashSet(Of LocalSymbol).GetInstance()
            For Each local In LocalsForBinding
                Debug.Assert(Not originalLocalsSet.Contains(local))
                originalLocalsBuilder.Add(local)
                originalLocalsSet.Add(local)
            Next
            For Each local In Me.Locals
                If Not originalLocalsSet.Contains(local) Then
                    originalLocalsBuilder.Add(local)
                End If
            Next
            originalLocalsSet.Free()
            Dim originalLocals = originalLocalsBuilder.ToImmutableAndFree()

            Dim newBody = New BoundBlock(syntax, Nothing, originalLocals, statementsBuilder.ToImmutableAndFree())

            If diagnostics.HasAnyErrors() Then
                Return newBody
            End If

            DiagnosticsPass.IssueDiagnostics(newBody, diagnostics, Me)
            If diagnostics.HasAnyErrors() Then
                Return newBody
            End If

            ' Check for use-site errors (e.g. missing types in the signature).
            Dim useSiteInfo As DiagnosticInfo = Me.CalculateUseSiteErrorInfo()
            If useSiteInfo IsNot Nothing Then
                diagnostics.Add(useSiteInfo, _locations(0))
                Return newBody
            End If

            Debug.Assert(Not newBody.HasErrors)

            ' NOTE: In C#, EE rewriting happens AFTER local rewriting. However, that order would be difficult
            ' to accommodate in VB, so we reverse it.

            ' Rewrite local declaration statement.
            newBody = LocalDeclarationRewriter.Rewrite(_compilation, _container, newBody)

            ' Rewrite pseudo-variable references to helper method calls.
            newBody = DirectCast(PlaceholderLocalRewriter.Rewrite(_compilation, _container, newBody), BoundBlock)

            ' Create a map from original local to target local.
            Dim localMap = PooledDictionary(Of LocalSymbol, LocalSymbol).GetInstance()
            Dim targetLocals = newBody.Locals
            Debug.Assert(originalLocals.Length = targetLocals.Length)
            For i = 0 To originalLocals.Length - 1
                Dim originalLocal = originalLocals(i)
                Dim targetLocal = targetLocals(i)
                Debug.Assert(TypeOf originalLocal IsNot EELocalSymbol OrElse
                    DirectCast(originalLocal, EELocalSymbol).Ordinal = DirectCast(targetLocal, EELocalSymbol).Ordinal)
                localMap.Add(originalLocal, targetLocal)
            Next

            ' Variables may have been captured by lambdas in the original method
            ' or in the expression, and we need to preserve the existing values of
            ' those variables in the expression. This requires rewriting the variables
            ' in the expression based on the closure classes from both the original
            ' method and the expression, and generating a preamble that copies
            ' values into the expression closure classes.
            '
            ' Consider the original method:
            ' Shared Sub M()
            '     Dim x, y, z as Integer
            '     ...
            '     F(Function() x + y)
            ' End Sub
            ' and the expression in the EE: "F(Function() x + z)".
            '
            ' The expression is first rewritten using the closure class and local <1>
            ' from the original method: F(Function() <1>.x + z)
            ' Then lambda rewriting introduces a new closure class that includes
            ' the locals <1> and z, and a corresponding local <2>: F(Function() <2>.<1>.x + <2>.z)
            ' And a preamble is added to initialize the fields of <2>:
            '     <2> = New <>c__DisplayClass0()
            '     <2>.<1> = <1>
            '     <2>.z = z

            ' Create a map from variable name to display class field.
            Dim displayClassVariables = PooledDictionary(Of String, DisplayClassVariable).GetInstance()
            For Each pair In _displayClassVariables
                Dim variable = pair.Value
                Dim displayClassInstanceFromLocal = TryCast(variable.DisplayClassInstance, DisplayClassInstanceFromLocal)
                Dim displayClassInstance = If(displayClassInstanceFromLocal Is Nothing,
                                              DirectCast(New DisplayClassInstanceFromMe(Me.Parameters(0)), DisplayClassInstance),
                                              New DisplayClassInstanceFromLocal(DirectCast(localMap(displayClassInstanceFromLocal.Local), EELocalSymbol)))
                variable = New DisplayClassVariable(variable.Name, variable.Kind, displayClassInstance, variable.DisplayClassFields)
                displayClassVariables.Add(pair.Key, variable)
            Next

            ' Rewrite references to "Me" to refer to this method's "Me" parameter.
            ' Rewrite variables within body to reference existing display classes.
            newBody = DirectCast(CapturedVariableRewriter.Rewrite(
                If(Me.SubstitutedSourceMethod.IsShared, Nothing, Me.Parameters(0)),
                displayClassVariables.ToImmutableDictionary(),
                newBody,
                diagnostics), BoundBlock)

            displayClassVariables.Free()

            If diagnostics.HasAnyErrors() Then
                Return newBody
            End If

            ' Insert locals from the original method, followed by any new locals.
            Dim localBuilder = ArrayBuilder(Of LocalSymbol).GetInstance()
            For Each originalLocal In Me.Locals
                Dim targetLocal = localMap(originalLocal)
                Debug.Assert(TypeOf targetLocal IsNot EELocalSymbol OrElse DirectCast(targetLocal, EELocalSymbol).Ordinal = localBuilder.Count)
                localBuilder.Add(targetLocal)
            Next
            localMap.Free()

            newBody = newBody.Update(newBody.StatementListSyntax, localBuilder.ToImmutableAndFree(), newBody.Statements)
            TypeParameterChecker.Check(newBody, _allTypeParameters)

            Return newBody
        End Function

        ' The method's return type is the type of the generated expression,
        ' or Void for statement-style bodies.
        Private Function CalculateReturnType(body As BoundStatement) As TypeSymbol
            Select Case body.Kind
                Case BoundKind.ReturnStatement
                    Return DirectCast(body, BoundReturnStatement).ExpressionOpt.Type
                Case BoundKind.ExpressionStatement,
                     BoundKind.RedimStatement
                    Return _voidType
                Case Else
                    Throw ExceptionUtilities.UnexpectedValue(body.Kind)
            End Select
        End Function

        Friend Overrides Function CalculateLocalSyntaxOffset(localPosition As Integer, localTree As SyntaxTree) As Integer
            Throw ExceptionUtilities.Unreachable
        End Function

    End Class

End Namespace
-- Expected-SQL fixture for Liquibase's mergeColumns change on MSSQL:
-- adds full_name, populates it from first_name/last_name, then drops the
-- source columns.
-- NOTE(review): the literal 'null' in the concatenation below appears to be
-- an unset joinString parameter interpolated verbatim into the generated
-- SQL. Since this file is a recorded expected output compared against the
-- generator, confirm against the generator/changeset before changing it.
-- Database: mssql
-- Change Parameter: column1Name=first_name
-- Change Parameter: column2Name=last_name
-- Change Parameter: finalColumnName=full_name
-- Change Parameter: finalColumnType=varchar(255)
-- Change Parameter: tableName=person
ALTER TABLE person ADD full_name varchar(255);
UPDATE person SET full_name = first_name + 'null' + last_name;
ALTER TABLE person DROP COLUMN first_name;
ALTER TABLE person DROP COLUMN last_name;
| mbreslow/liquibase | liquibase-core/src/test/java/liquibase/verify/saved_state/compareGeneratedSqlWithExpectedSqlForMinimalChangesets/mergeColumns/mssql.sql | SQL | apache-2.0 | 436 |
#
# Cookbook Name:: graphite
# Library:: ChefGraphite
#
# Copyright 2014, Heavy Water Ops, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module ChefGraphite
  class << self
    # Renders graphite-style INI configuration text from an array of
    # section hashes (each with :type, :name and :config keys). Sections
    # are emitted in sorted order and the output always ends with a
    # trailing newline.
    def ini_file(hash)
      data = generate_conf_data(hash)
      lines = []
      data.each do |section, config|
        lines << "[#{section}]"
        config.each { |key, value| lines << "#{key} = #{value}" }
        lines << ""
      end
      lines.join("\n").concat("\n")
    end

    # Builds an ordered Hash of { section_name => normalized_config },
    # sorted by section name.
    def generate_conf_data(data)
      sort_tuples(section_tuples(data)).to_h
    end

    # Sorts [section_name, config] tuples by section name.
    def sort_tuples(tuples)
      tuples.sort { |a, b| a.first <=> b.first }
    end

    # Maps each section hash to a [section_name, normalized_config] tuple.
    def section_tuples(section_hashes)
      section_hashes.map do |hash|
        [
          section_name(hash[:type], hash[:name]),
          normalize(hash[:config])
        ]
      end
    end

    # Bare "name" when there is no type, bare "type" for the default
    # section, otherwise "type:name".
    def section_name(type, name)
      if type.nil?
        name
      elsif name == "default"
        type
      else
        "#{type}:#{name}"
      end
    end

    # Upcases every key and normalizes every value for INI output.
    def normalize(hash)
      hash.each_with_object({}) do |(key, value), result|
        result[key.to_s.upcase] = normalize_value(value)
      end
    end

    # Arrays become comma-separated lists; "true"/"false" render as
    # "True"/"False" (Python-style literals, as graphite expects);
    # everything else is stringified as-is.
    def normalize_value(obj)
      if obj.is_a? Array
        obj.map { |o| normalize_value(o) }.join(", ")
      else
        value = obj.to_s
        # Use the non-mutating capitalize here: String#to_s returns the
        # receiver itself, so the previous capitalize! mutated the
        # caller's string in place whenever a config value was the
        # literal "true"/"false".
        value = value.capitalize if %w{true false}.include?(value)
        value
      end
    end
  end
end
| mbabic/graphite | libraries/chef_graphite.rb | Ruby | apache-2.0 | 2,028 |
<html><body>
<style>
body, h1, h2, h3, div, span, p, pre, a {
margin: 0;
padding: 0;
border: 0;
font-weight: inherit;
font-style: inherit;
font-size: 100%;
font-family: inherit;
vertical-align: baseline;
}
body {
font-size: 13px;
padding: 1em;
}
h1 {
font-size: 26px;
margin-bottom: 1em;
}
h2 {
font-size: 24px;
margin-bottom: 1em;
}
h3 {
font-size: 20px;
margin-bottom: 1em;
margin-top: 1em;
}
pre, code {
line-height: 1.5;
font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}
pre {
margin-top: 0.5em;
}
h1, h2, h3, p {
  font-family: Arial, sans-serif;
}
h1, h2, h3 {
border-bottom: solid #CCC 1px;
}
.toc_element {
margin-top: 0.5em;
}
.firstline {
  margin-left: 2em;
}
.method {
margin-top: 1em;
border: solid 1px #CCC;
padding: 1em;
background: #EEE;
}
.details {
font-weight: bold;
font-size: 14px;
}
</style>
<h1><a href="videointelligence_v1.html">Cloud Video Intelligence API</a> . <a href="videointelligence_v1.operations.html">operations</a> . <a href="videointelligence_v1.operations.projects.html">projects</a> . <a href="videointelligence_v1.operations.projects.locations.html">locations</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
<code><a href="videointelligence_v1.operations.projects.locations.operations.html">operations()</a></code>
</p>
<p class="firstline">Returns the operations Resource.</p>
<p class="toc_element">
<code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<h3>Method Details</h3>
<div class="method">
<code class="details" id="close">close()</code>
<pre>Close httplib2 connections.</pre>
</div>
</body></html> | googleapis/google-api-python-client | docs/dyn/videointelligence_v1.operations.projects.locations.html | HTML | apache-2.0 | 1,740 |
/**
* <copyright>
* Copyright (c) 2008 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
* </copyright>
*/
package org.eclipse.bpel.ui.adapters;
import java.util.List;
import org.eclipse.bpel.model.BPELPackage;
import org.eclipse.bpel.model.MessageExchanges;
import org.eclipse.bpel.ui.BPELUIPlugin;
import org.eclipse.bpel.ui.IBPELUIConstants;
import org.eclipse.bpel.ui.adapters.delegates.ReferenceContainer;
import org.eclipse.bpel.ui.editparts.MessageExchangesEditPart;
import org.eclipse.bpel.ui.editparts.OutlineTreeEditPart;
import org.eclipse.bpel.ui.properties.PropertiesLabelProvider;
import org.eclipse.gef.EditPart;
import org.eclipse.gef.EditPartFactory;
import org.eclipse.swt.graphics.Image;
import org.eclipse.bpel.ui.Messages;
/**
 * Adapter that supplies UI behavior (edit parts, outline/tray parts, labels,
 * icons, and container semantics) for the BPEL <code>MessageExchanges</code>
 * model object.
 *
 * @author Miriam Grundig (MGrundig@de.ibm.com)
 */
public class MessageExchangesAdapter extends ContainerAdapter implements EditPartFactory,
        ILabeledElement, IOutlineEditPartFactory, ITrayEditPartFactory
{
    public MessageExchangesAdapter() {
        super();
    }

    /* IContainer delegate */
    // Child management is delegated to the MessageExchanges "children"
    // EMF reference.
    public IContainer createContainerDelegate() {
        return new ReferenceContainer(BPELPackage.eINSTANCE.getMessageExchanges_Children());
    }

    /* EditPartFactory */
    // Creates the graphical edit part for this model element; labels are
    // rendered through the shared properties label provider.
    public EditPart createEditPart(EditPart context, Object model) {
        MessageExchangesEditPart result = new MessageExchangesEditPart();
        result.setLabelProvider(PropertiesLabelProvider.getInstance());
        result.setModel(model);
        return result;
    }

    /* ITrayEditPartFactory */
    // The tray view reuses the same edit part as the main canvas.
    public EditPart createTrayEditPart(EditPart context, Object model) {
        return createEditPart(context, model);
    }

    /* ILabeledElement */
    public Image getSmallImage(Object object) {
        return BPELUIPlugin.INSTANCE.getImage(IBPELUIConstants.ICON_MESSAGEEXCHANGE_16);
    }

    public Image getLargeImage(Object object) {
        return BPELUIPlugin.INSTANCE.getImage(IBPELUIConstants.ICON_MESSAGEEXCHANGE_32);
    }

    public String getTypeLabel(Object object) {
        return Messages.MessageExchangesAdapter_TypeLabel;
    }

    public String getLabel(Object object) {
        return Messages.MessageExchangesAdapter_Label;
    }

    /* IOutlineEditPartFactory */
    // The outline part exposes the MessageExchanges children as tree nodes.
    public EditPart createOutlineEditPart(EditPart context, final Object model) {
        EditPart result = new OutlineTreeEditPart(){
            protected List getModelChildren() {
                MessageExchanges messageExchanges = (MessageExchanges) model;
                List list = messageExchanges.getChildren();
                return list;
            }
        };
        result.setModel(model);
        return result;
    }
}
| chanakaudaya/developer-studio | bps/org.eclipse.bpel.ui/src/org/eclipse/bpel/ui/adapters/MessageExchangesAdapter.java | Java | apache-2.0 | 2,792 |
/**
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jasig.portal.layout.dlm;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jasig.portal.PortalException;
import org.jasig.portal.layout.IUserLayoutStore;
import org.jasig.portal.security.IPerson;
import org.jasig.portal.spring.locator.UserLayoutStoreLocator;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
/**
 * Looks for, applies against the ilf, and updates accordingly the delete
 * set within a plf.
 *
 * <p>NOTE(review): in DLM terminology the PLF appears to be the user's
 * personal layout fragment and the ILF the incorporated (composite) layout
 * built from fragments -- confirm against the rest of the dlm package.
 * Delete directives record nodes the user removed from incorporated content;
 * this class replays them and prunes any that can no longer be applied.</p>
 *
 * @version $Revision$ $Date$
 * @since uPortal 2.5
 */
public class DeleteManager
{
    private static final Log LOG = LogFactory.getLog(DeleteManager.class);
    // Lazily-resolved handle to the layout store; see getDLS().
    private static IUserLayoutStore dls = null;

    /**
     * Hands back the single instance of RDBMDistributedLayoutStore. There is
     * already a method
     * for acquiring a single instance of the configured layout store so we
     * delegate over there so that all references refer to the same instance.
     * This method is solely for convenience so that we don't have to keep
     * calling UserLayoutStoreFactory and casting the resulting class.
     */
    private static IUserLayoutStore getDLS()
    {
        if ( dls == null )
        {
            dls = UserLayoutStoreLocator.getUserLayoutStore();
        }
        return dls;
    }

    /**
     * Get the delete set, if any, from the plf and process each delete
     * command, removing any that fail from the delete set so that the delete
     * set is self cleaning.
     *
     * @param plf the user's personal layout fragment document
     * @param ilf the incorporated layout document the deletes are applied to
     * @param result records whether the PLF and/or ILF were changed
     */
    static void applyAndUpdateDeleteSet( Document plf,
                 Document ilf,
                                         IntegrationResult result )
    {

        Element dSet = null;
        try
        {
            dSet = getDeleteSet( plf, null, false );
        }
        catch( Exception e )
        {
            LOG.error("Exception occurred while getting user's DLM delete-set.",
                    e);
        }

        if ( dSet == null )
            return;

        // Iterate backwards so failed directives can be removed in place.
        NodeList deletes = dSet.getChildNodes();

        for( int i=deletes.getLength()-1; i>=0; i-- )
        {
            if ( applyDelete( (Element) deletes.item(i), ilf ) == false )
            {
                dSet.removeChild( deletes.item(i) );
                result.setChangedPLF(true);
            }
            else
            {
                result.setChangedILF(true);
            }
        }

        // Drop the delete set entirely once it becomes empty.
        if ( dSet.getChildNodes().getLength() == 0 )
        {
            plf.getDocumentElement().removeChild( dSet );
            result.setChangedPLF(true);
        }
    }

    /**
     * Attempt to apply a single delete command and return true if it succeeds
     * or false otherwise. If the delete is disallowed or the target element
     * no longer exists in the document the delete command fails and returns
     * false.
     *
     * @param delete the delete directive naming the target node
     * @param ilf the incorporated layout document
     * @return true if the target node was removed from the ilf
     */
    private static boolean applyDelete( Element delete, Document ilf )
    {
        String nodeID = delete.getAttribute( Constants.ATT_NAME );

        Element e = ilf.getElementById( nodeID );

        if ( e == null )
            return false;

        String deleteAllowed = e.getAttribute( Constants.ATT_DELETE_ALLOWED );
        if ( deleteAllowed.equals( "false" ) )
            return false;

        Element p = (Element) e.getParentNode();
        // Un-register the ID before removal so getElementById stays coherent.
        e.setIdAttribute(Constants.ATT_ID, false);
        p.removeChild( e );
        return true;
    }

    /**
     * Get the delete set, if any, stored in the root of the document, or
     * create it if the passed-in create flag is true.
     *
     * @param plf the user's personal layout fragment document
     * @param person the owning user; only needed when create is true
     * @param create whether to create a missing delete set
     * @return the delete set element, or null if absent and create is false
     * @throws PortalException if a new directive id cannot be generated
     */
    private static Element getDeleteSet( Document plf,
                                         IPerson person,
                                         boolean create )
        throws PortalException
    {
        Node root = plf.getDocumentElement();
        Node child = root.getFirstChild();

        while( child != null )
        {
            if ( child.getNodeName().equals( Constants.ELM_DELETE_SET ) )
                return (Element) child;
            child = child.getNextSibling();
        }

        if ( create == false )
            return null;

        String ID = null;

        try
        {
            ID = getDLS().getNextStructDirectiveId( person );
        }
        catch (Exception e)
        {
            throw new PortalException( "Exception encountered while " +
                                       "generating new delete set node " +
                                       "Id for userId=" + person.getID(), e );
        }
        Element delSet = plf.createElement( Constants.ELM_DELETE_SET );
        delSet.setAttribute( Constants.ATT_TYPE,
                             Constants.ELM_DELETE_SET );
        delSet.setAttribute( Constants.ATT_ID, ID );
        root.appendChild( delSet );
        return delSet;
    }

    /**
     * Create and append a delete directive to delete the node identified by
     * the passed in element id. If this node contains any incorporated
     * elements then they must also have a delete directive added in here to
     * prevent incorporated channels originating in another column from
     * reappearing in that column because the position set entry that pulled
     * them into this column was now removed. (ie: the user moved an inc'd
     * channel to this column and then deleted the column means that the inc'd
     * channel should be deleted also.) This was designed to add a delete
     * directive for each nested element having an ID so as to work for the
     * future case of a tree view.
     *
     * @param compViewNode the composite-view node being deleted
     * @param elementID the id of the node to record a delete directive for
     * @param person the owning user
     * @throws PortalException if a new directive id cannot be generated
     */
    public static void addDeleteDirective( Element compViewNode,
                                           String elementID,
                                           IPerson person )
        throws PortalException
    {
        Document plf = (Document) person.getAttribute( Constants.PLF );
        Element delSet = getDeleteSet( plf, person, true );
        addDeleteDirective( compViewNode, elementID, person, plf, delSet );
    }

    /**
     * This method does the actual work of adding a delete directive and then
     * recursively calling itself for any incorporated children that need to
     * be deleted as well.
     */
    private static void addDeleteDirective( Element compViewNode,
                                            String elementID,
                                            IPerson person,
                                            Document plf,
                                            Element delSet )
        throws PortalException
    {
        String ID = null;

        try
        {
            ID = getDLS().getNextStructDirectiveId( person );
        }
        catch (Exception e)
        {
            throw new PortalException( "Exception encountered while " +
                                       "generating new delete node " +
                                       "Id for userId=" + person.getID(), e );
        }
        Element delete = plf.createElement( Constants.ELM_DELETE );
        delete.setAttribute( Constants.ATT_TYPE, Constants.ELM_DELETE );
        delete.setAttribute( Constants.ATT_ID, ID );
        delete.setAttributeNS( Constants.NS_URI,
                               Constants.ATT_NAME, elementID );
        delSet.appendChild( delete );

        // now pass through children and add delete directives for those with
        // IDs indicating that they were incorporated
        Element child = (Element) compViewNode.getFirstChild();

        while( child != null )
        {
            String childID = child.getAttribute( "ID" );
            if ( childID.startsWith( Constants.FRAGMENT_ID_USER_PREFIX ) )
                addDeleteDirective( child, childID, person, plf, delSet );
            child = (Element) child.getNextSibling();
        }
    }
}
| mgillian/uPortal | uportal-war/src/main/java/org/jasig/portal/layout/dlm/DeleteManager.java | Java | apache-2.0 | 8,567 |
package com.webtrends.harness.command.typed
import akka.actor.{Actor, ActorRef, Props}
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
trait TypedCommandHelper { this: Actor =>

  // Cached reference to the typed command manager actor, resolved lazily
  // on first use by getManager().
  var typedCommandManager: Option[ActorRef] = None

  implicit def ec: ExecutionContext = context.dispatcher

  /**
   * Register a typed command actor with the command manager.
   *
   * @param name the name to register the command under
   * @param actorClass the TypedCommand actor class to instantiate
   * @param checkHealth whether the command participates in health checks
   * @return a future holding the ActorRef of the registered command
   */
  def registerTypedCommand[T<:TypedCommand[_,_]](name: String, actorClass: Class[T], checkHealth: Boolean = false): Future[ActorRef] = {
    implicit val timeout = Timeout(2 seconds)
    for {
      manager <- getManager()
      registered <- (manager ? RegisterCommand(name, Props(actorClass), checkHealth)).mapTo[ActorRef]
    } yield registered
  }

  /**
   * Resolve (and memoize) the typed command manager actor.
   */
  protected def getManager(): Future[ActorRef] = {
    typedCommandManager match {
      case Some(manager) =>
        Future.successful(manager)
      case None =>
        val resolved = context.system.actorSelection(HarnessConstants.TypedCommandFullName).resolveOne()(2 seconds)
        resolved.map { ref =>
          typedCommandManager = Some(ref)
          ref
        }
    }
  }
}
| Crashfreak/wookiee | wookiee-core/src/main/scala/com/webtrends/harness/command/typed/TypedCommandHelper.scala | Scala | apache-2.0 | 1,064 |
/*
* Copyright (c) 2013-2016 Cinchapi Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cinchapi.concourse.server.model;
import java.nio.ByteBuffer;
import java.util.Objects;
import javax.annotation.Nullable;
import javax.annotation.concurrent.Immutable;
import com.cinchapi.concourse.server.io.Byteable;
import com.cinchapi.concourse.util.ByteBuffers;
import com.google.common.base.Preconditions;
/**
 * A Position pairs a {@link PrimaryKey} with a relative location and is used
 * in a {@link SearchRecord} to identify where a term occurs within a record.
 *
 * @author Jeff Nelson
 */
@Immutable
public final class Position implements Byteable, Comparable<Position> {

    /**
     * Decode the Position contained in {@code bytes}. The buffer must hold
     * exactly the data produced by {@link #getBytes()}; slice the parent
     * buffer first with {@link ByteBuffers#slice(ByteBuffer, int, int)} if
     * necessary.
     *
     * @param bytes
     * @return the decoded Position
     */
    public static Position fromByteBuffer(ByteBuffer bytes) {
        PrimaryKey record = PrimaryKey.fromByteBuffer(ByteBuffers.get(
                bytes, PrimaryKey.SIZE));
        int location = bytes.getInt();
        return new Position(record, location);
    }

    /**
     * Create a Position backed by {@code primaryKey} and {@code index}.
     *
     * @param primaryKey
     * @param index
     * @return the Position
     */
    public static Position wrap(PrimaryKey primaryKey, int index) {
        return new Position(primaryKey, index);
    }

    /**
     * The fixed number of bytes used to store any Position.
     */
    public static final int SIZE = PrimaryKey.SIZE + 4; // index

    /**
     * Lazily-populated cache for the binary form returned by
     * {@link #getBytes()}.
     */
    private transient ByteBuffer bytes;

    /**
     * The index that this Position represents.
     */
    private final int index;

    /**
     * The PrimaryKey of the record that this Position represents.
     */
    private final PrimaryKey primaryKey;

    /**
     * Construct a new instance with no cached binary form.
     *
     * @param primaryKey
     * @param index
     */
    private Position(PrimaryKey primaryKey, int index) {
        this(primaryKey, index, null);
    }

    /**
     * Construct a new instance.
     *
     * @param primaryKey
     * @param index
     * @param bytes
     */
    private Position(PrimaryKey primaryKey, int index,
            @Nullable ByteBuffer bytes) {
        Preconditions
                .checkArgument(index >= 0, "Cannot have an negative index");
        this.primaryKey = primaryKey;
        this.index = index;
        this.bytes = bytes;
    }

    @Override
    public int compareTo(Position other) {
        int order = primaryKey.compareTo(other.primaryKey);
        if(order != 0) {
            return order;
        }
        return Integer.compare(index, other.index);
    }

    @Override
    public boolean equals(Object obj) {
        if(!(obj instanceof Position)) {
            return false;
        }
        Position that = (Position) obj;
        return primaryKey.equals(that.primaryKey) && index == that.index;
    }

    /**
     * Return a byte buffer that represents this Value with the following
     * order:
     * <ol>
     * <li><strong>primaryKey</strong> - position 0</li>
     * <li><strong>index</strong> - position 8</li>
     * </ol>
     *
     * @return the ByteBuffer representation
     */
    @Override
    public ByteBuffer getBytes() {
        if(bytes == null) {
            // Build and cache the binary form on first request.
            bytes = ByteBuffer.allocate(size());
            copyTo(bytes);
            bytes.rewind();
        }
        return ByteBuffers.asReadOnlyBuffer(bytes);
    }

    /**
     * Return the associated {@code index}.
     *
     * @return the index
     */
    public int getIndex() {
        return index;
    }

    /**
     * Return the associated {@code primaryKey}.
     *
     * @return the primaryKey
     */
    public PrimaryKey getPrimaryKey() {
        return primaryKey;
    }

    @Override
    public int hashCode() {
        return Objects.hash(primaryKey, index);
    }

    @Override
    public int size() {
        return SIZE;
    }

    @Override
    public String toString() {
        return "Position " + index + " in Record " + primaryKey;
    }

    @Override
    public void copyTo(ByteBuffer buffer) {
        // NOTE: Storing the index as a fixed-width int (instead of a
        // size-aware variable-length encoding) may look like overkill since
        // most indexes fit in a byte or short, but a variable-length index
        // would force the overall Position size to be written before each
        // Position for deserialization. A constant size makes the length
        // prefix unnecessary, which is actually more space efficient.
        primaryKey.copyTo(buffer);
        buffer.putInt(index);
    }
}
| remiemalik/concourse | concourse-server/src/main/java/com/cinchapi/concourse/server/model/Position.java | Java | apache-2.0 | 5,815 |
<?php
/**
* This example adds text ads to an ad group that uses upgraded URLs.
*
* To get ad groups, run GetAdGroups.php.
*
* Restriction: adwords-only
*
* Copyright 2014, Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @package GoogleApiAdsAdWords
* @subpackage v201506
* @category WebServices
* @copyright 2014, Google Inc. All Rights Reserved.
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache License,
* Version 2.0
*/
// Include the initialization file
require_once dirname(dirname(__FILE__)) . '/init.php';
require_once UTIL_PATH . '/MediaUtils.php';
// Enter parameters required by the code example.
$adGroupId = 'INSERT_AD_GROUP_ID_HERE';
/**
 * Runs the example.
 * @param AdWordsUser $user the user to run the example with
 * @param string $adGroupId the id of the ad group to add the ads to
 */
function AddTextAdWithUpgradedUrlsExample(AdWordsUser $user, $adGroupId) {
  // Load the AdGroupAdService (this also pulls in the required classes).
  $adGroupAdService = $user->GetService('AdGroupAdService', ADWORDS_VERSION);

  $numAds = 5;
  $operations = array();
  foreach (range(1, $numAds) as $unused) {
    // Build the text ad creative.
    $textAd = new TextAd();
    $textAd->headline = 'Cruise #' . uniqid();
    $textAd->description1 = 'Visit the Red Planet in style.';
    $textAd->description2 = 'Low-gravity fun for everyone!';
    $textAd->displayUrl = 'www.example.com';

    // Tracking url for a 3rd party tracking provider. It may be specified
    // at the customer, campaign, ad group, ad, criterion or feed item level.
    $textAd->trackingUrlTemplate =
        'http://tracker.example.com/?season={_season}&promocode={_promocode}' .
        '&u={lpurl}';

    // The tracking url references two custom parameters, so supply their
    // values as well. These can be provided at the campaign, ad group, ad,
    // criterion or feed item level.
    $customParameters = array();
    foreach (array('season' => 'christmas', 'promocode' => 'NYC123')
        as $paramKey => $paramValue) {
      $parameter = new CustomParameter();
      $parameter->key = $paramKey;
      $parameter->value = $paramValue;
      $customParameters[] = $parameter;
    }
    $textAd->urlCustomParameters = new CustomParameters();
    $textAd->urlCustomParameters->parameters = $customParameters;

    // Final urls cannot be combined with the legacy url field. They may be
    // specified at the ad, criterion and feed item levels.
    $textAd->finalUrls = array('http://www.example.com/cruise/space/',
        'http://www.example.com/locations/mars/');

    // Final mobile urls require finalUrls to be set and likewise cannot be
    // combined with the legacy url field.
    $textAd->finalMobileUrls = array('http://mobile.example.com/cruise/space/',
        'http://mobile.example.com/locations/mars/');

    // Wrap the creative in an ad group ad, created PAUSED so it does not
    // serve while experimenting.
    $adGroupAd = new AdGroupAd();
    $adGroupAd->adGroupId = $adGroupId;
    $adGroupAd->ad = $textAd;
    $adGroupAd->status = 'PAUSED';

    // Queue an ADD operation for this ad.
    $operation = new AdGroupAdOperation();
    $operation->operand = $adGroupAd;
    $operation->operator = 'ADD';
    $operations[] = $operation;
  }

  // Make the mutate request.
  $result = $adGroupAdService->mutate($operations);

  // Display results.
  foreach ($result->value as $adGroupAd) {
    $ad = $adGroupAd->ad;
    printf("Text ad with headline '%s' and ID '%d' was added.\n",
        $ad->headline, $ad->id);
    printf(" displayUrl is '%s'\n", $ad->displayUrl);
    print("Upgraded URL properties:\n");
    printf(" Final URLs: %s\n", implode(', ', $ad->finalUrls));
    printf(" Final Mobile URLs: %s\n", implode(', ', $ad->finalMobileUrls));
    printf(" Tracking URL template: %s\n", $ad->trackingUrlTemplate);
    $parameterStrings = array();
    foreach ($ad->urlCustomParameters->parameters as $parameter) {
      $parameterStrings[] = sprintf('%s=%s', $parameter->key, $parameter->value);
    }
    printf(" Custom parameters: %s\n", implode(', ', $parameterStrings));
  }
}
// Don't run the example if the file is being included.
if (__FILE__ != realpath($_SERVER['PHP_SELF'])) {
  return;
}

// Entry point when executed directly from the command line.
try {
  // Get AdWordsUser from credentials in "../auth.ini"
  // relative to the AdWordsUser.php file's directory.
  $user = new AdWordsUser();

  // Log every SOAP XML request and response.
  $user->LogAll();

  // Run the example.
  AddTextAdWithUpgradedUrlsExample($user, $adGroupId);
} catch (Exception $e) {
  // Surface any API or validation failure to the console.
  printf("An error has occurred: %s\n", $e->getMessage());
}
| a1pro/adwordsAPI | examples/AdWords/v201506/AdvancedOperations/AddTextAdWithUpgradedUrls.php | PHP | apache-2.0 | 5,203 |
from collections import OrderedDict
from app.master.atom_grouper import AtomGrouper
class TimeBasedAtomGrouper(object):
    """
    Group atoms into subjobs using historic per-atom timing data.

    Let N be the number of concurrent executors allocated for this job and T the aggregate serial
    time to execute all atoms on a single executor. The ideal grouping would be exactly N subjobs
    of T/N work each, all ending at the same time, but atom times vary, new atoms have no history,
    and machines differ. To balance framework overhead (sending/retrieving subjobs) against
    stragglers (one executor stuck with too much work), the algorithm works in two stages:

    - 'big chunk' stage: create exactly N large subjobs covering the bulk (BIG_CHUNK_FRACTION) of
      the estimated total runtime.
    - 'small chunk' stage: split the remainder into ~2N short subjobs that fill in the gaps so all
      executors finish at roughly the same time.

    Atoms with no historic timing are conservatively assigned the largest known atom time so that
    unknown atoms are never underestimated.
    """

    BIG_CHUNK_FRACTION = 0.8

    def __init__(self, atoms, max_executors, atom_time_map, project_directory):
        """
        :param atoms: the list of atoms for this build
        :type atoms: list[app.master.atom.Atom]
        :param max_executors: the maximum number of executors for this build
        :type max_executors: int
        :param atom_time_map: a dictionary containing the historic times for atoms for this particular job
        :type atom_time_map: dict[str, float]
        :type project_directory: str
        """
        self._atoms = atoms
        self._max_executors = max_executors
        self._atom_time_map = atom_time_map
        self._project_directory = project_directory

    def groupings(self):
        """
        Group the atoms into subjobs using historic timing data.

        :return: a list of lists of atoms
        :rtype: list[list[app.master.atom.Atom]]
        """
        # 1). Coalesce the atoms with historic atom times, and also get total estimated runtime
        try:
            total_estimated_runtime = self._set_expected_atom_times(
                self._atoms, self._atom_time_map, self._project_directory)
        except _AtomTimingDataError:
            # No usable timing data at all--fall back to the naive grouper.
            grouper = AtomGrouper(self._atoms, self._max_executors)
            return grouper.groupings()

        # 2). Sort them by decreasing time, and add them to an OrderedDict
        atoms_by_decreasing_time = sorted(self._atoms, key=lambda atom: atom.expected_time, reverse=True)
        sorted_atom_times_left = OrderedDict([(atom, atom.expected_time) for atom in atoms_by_decreasing_time])

        # 3). Group them!
        # Target time for each executor's initial 'big' subjob, and for the short
        # gap-filling 'small' subjobs.
        big_subjob_time = (total_estimated_runtime * self.BIG_CHUNK_FRACTION) / self._max_executors
        small_subjob_time = (total_estimated_runtime * (1.0 - self.BIG_CHUNK_FRACTION)) / (2 * self._max_executors)

        # _group_atoms_into_sized_buckets() will remove elements from sorted_atom_times_left.
        subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, big_subjob_time, self._max_executors)
        small_subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, small_subjob_time, None)
        subjobs.extend(small_subjobs)
        return subjobs

    def _set_expected_atom_times(self, new_atoms, old_atoms_with_times, project_directory):
        """
        Set the expected runtime (new_atom.expected_time) of each atom in new_atoms using historic
        timing data. Also return the total estimated serial runtime for this build; it is computed
        here rather than in a separate pass because there can be thousands of atoms.

        :param new_atoms: the list of atoms that will be run in this build
        :type new_atoms: list[app.master.atom.Atom]
        :param old_atoms_with_times: a dictionary containing the historic times for atoms for this particular job
        :type old_atoms_with_times: dict[str, float]
        :type project_directory: str
        :return: the total estimated runtime in seconds
        :rtype: float
        :raises _AtomTimingDataError: if no atom has historic timing data
        """
        atoms_without_timing_data = []
        total_time = 0
        max_atom_time = 0

        # Assign expected times for atoms that have historic timing data.
        for new_atom in new_atoms:
            if new_atom.command_string not in old_atoms_with_times:
                atoms_without_timing_data.append(new_atom)
                continue

            new_atom.expected_time = old_atoms_with_times[new_atom.command_string]

            # Track the largest single atom time; it is used as a conservative
            # estimate for atoms with unknown times so they are never underestimated.
            if max_atom_time < new_atom.expected_time:
                max_atom_time = new_atom.expected_time

            total_time += new_atom.expected_time

        # For the atoms without historic timing data, assign them the largest atom time we have.
        for new_atom in atoms_without_timing_data:
            new_atom.expected_time = max_atom_time

        if len(new_atoms) == len(atoms_without_timing_data):
            raise _AtomTimingDataError

        total_time += (max_atom_time * len(atoms_without_timing_data))
        return total_time

    def _group_atoms_into_sized_buckets(self, sorted_atom_time_dict, target_group_time, max_groups_to_create):
        """
        Given an ordered mapping of [atom, time] pairs (longest first) in sorted_atom_time_dict,
        return a list of lists of atoms, each estimated to take target_group_time seconds. At most
        max_groups_to_create groupings are generated; this method returns once that limit is
        reached or sorted_atom_time_dict is empty.

        Note: this method removes the grouped elements from sorted_atom_time_dict (often from the
        middle of the collection).

        :param sorted_atom_time_dict: the sorted (longest first) mapping of [atom, time] pairs.
            Elements are removed from this OrderedDict by this method.
        :type sorted_atom_time_dict: OrderedDict[app.master.atom.Atom, float]
        :param target_group_time: how long each subjob should approximately take
        :type target_group_time: float
        :param max_groups_to_create: the maximum number of subjobs to create, or None for no limit
        :type max_groups_to_create: int|None
        :return: the groups of grouped atoms, with each group taking an estimated target_group_time
        :rtype: list[list[app.master.atom.Atom]]
        """
        subjobs = []
        subjob_time_so_far = 0
        subjob_atoms = []

        while (max_groups_to_create is None or len(subjobs) < max_groups_to_create) and len(sorted_atom_time_dict) > 0:
            # Iterate over a snapshot of the items: the previous implementation iterated
            # the OrderedDict directly while popping from it, which raises
            # "dictionary changed size during iteration" (RuntimeError) on Python 3.
            for atom, atom_time in list(sorted_atom_time_dict.items()):
                if len(subjob_atoms) == 0 or (atom_time + subjob_time_so_far) <= target_group_time:
                    subjob_time_so_far += atom_time
                    subjob_atoms.append(atom)
                    sorted_atom_time_dict.pop(atom)

                    # If (number of subjobs created so far + atoms left) is less than or equal to
                    # the total number of subjobs we need to create, then have each remaining atom
                    # be its own subjob and return. The "+ 1" accounts for the in-progress subjob
                    # that hasn't been appended to subjobs yet.
                    if (max_groups_to_create is not None
                            and (len(subjobs) + len(sorted_atom_time_dict) + 1) <= max_groups_to_create):
                        subjobs.append(subjob_atoms)
                        while len(sorted_atom_time_dict) > 0:
                            # popitem(last=False) pops from the front, preserving order.
                            atom_left, _ = sorted_atom_time_dict.popitem(last=False)
                            subjobs.append([atom_left])
                        return subjobs

            subjobs.append(subjob_atoms)
            subjob_atoms = []
            subjob_time_so_far = 0

        return subjobs
class _AtomTimingDataError(Exception):
"""
An exception to represent the case where the atom timing data is either not present or incorrect.
"""
| nickzuber/ClusterRunner | app/master/time_based_atom_grouper.py | Python | apache-2.0 | 11,090 |
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.api.ads.common.lib.soap;
/**
* Used to package a SOAP call's return. Contains the return value, the request
* and response info, and the originating {@link SoapCall}.
*
* @author Adam Rogal
*/
/**
 * Packages the outcome of a SOAP invocation: the value the call produced (if
 * any), diagnostic information about the request and response, and the
 * exception raised during the call, if one occurred.
 *
 * @author Adam Rogal
 */
public class SoapCallReturn {

  private Object returnValue;
  private RequestInfo requestInfo;
  private ResponseInfo responseInfo;
  private Throwable exception;

  /** Creates an empty result with blank request and response info. */
  public SoapCallReturn() {
    this.requestInfo = new RequestInfo();
    this.responseInfo = new ResponseInfo();
  }

  /**
   * Returns the value produced by the SOAP call, or {@code null} when the call
   * terminated with an exception.
   */
  public Object getReturnValue() {
    return this.returnValue;
  }

  /** Returns the request diagnostics captured for the SOAP call. */
  public RequestInfo getRequestInfo() {
    return this.requestInfo;
  }

  /** Returns the response diagnostics captured for the SOAP call. */
  public ResponseInfo getResponseInfo() {
    return this.responseInfo;
  }

  /**
   * Returns the exception thrown by the SOAP call, or {@code null} when the
   * call completed normally.
   */
  public Throwable getException() {
    return this.exception;
  }

  /**
   * Fluent builder for {@link SoapCallReturn} instances.
   *
   * @author Adam Rogal
   */
  public static class Builder {

    private final SoapCallReturn result;

    /** Starts building a fresh {@link SoapCallReturn}. */
    public Builder() {
      this.result = new SoapCallReturn();
    }

    /**
     * Records the value produced by the SOAP call.
     *
     * @param returnValue the value the call returned
     * @return this builder
     */
    public Builder withReturnValue(Object returnValue) {
      this.result.returnValue = returnValue;
      return this;
    }

    /**
     * Records the response diagnostics of the SOAP call.
     *
     * @param responseInfo the response info to record
     * @return this builder
     */
    public Builder withResponseInfo(ResponseInfo responseInfo) {
      this.result.responseInfo = responseInfo;
      return this;
    }

    /**
     * Records the request diagnostics of the SOAP call.
     *
     * @param requestInfo the request info to record
     * @return this builder
     */
    public Builder withRequestInfo(RequestInfo requestInfo) {
      this.result.requestInfo = requestInfo;
      return this;
    }

    /**
     * Records the exception thrown by the SOAP call.
     *
     * @param exception the exception to record
     * @return this builder
     */
    public Builder withException(Throwable exception) {
      this.result.exception = exception;
      return this;
    }

    /**
     * Finishes construction and returns the accumulated result.
     *
     * @return the built {@link SoapCallReturn} object
     */
    public SoapCallReturn build() {
      return this.result;
    }
  }
}
| andyj24/googleads-java-lib | modules/ads_lib/src/main/java/com/google/api/ads/common/lib/soap/SoapCallReturn.java | Java | apache-2.0 | 3,736 |
/**
* Copyright 2020 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Services} from '../../../src/services';
import {getMode} from '../../../src/mode';
import {includes} from '../../../src/string';
import {map} from '../../../src/utils/object';
import {parseExtensionUrl} from '../../../src/service/extension-script';
import {preloadFriendlyIframeEmbedExtensions} from '../../../src/friendly-iframe-embed';
import {removeElement, rootNodeFor} from '../../../src/dom';
import {urls} from '../../../src/config';
/**
 * Result of validating/sanitizing a creative's head: the (mutated) head
 * element plus the AMP extension scripts that must be installed for it.
 * @typedef {{
 *    extensions: !Array<{extensionId: (string|undefined), extensionVersion: (string|undefined)}>,
 *    head: !Element
 * }}
 */
export let ValidatedHeadDef;

// Allowlist of font-provider stylesheet URLs.
// Taken from validator/validator-main.protoascii.
const ALLOWED_FONT_REGEX = new RegExp(
  'https://cdn\\.materialdesignicons\\.com/' +
    '([0-9]+\\.?)+/css/materialdesignicons\\.min\\.css|' +
    'https://cloud\\.typography\\.com/' +
    '[0-9]*/[0-9]*/css/fonts\\.css|' +
    'https://fast\\.fonts\\.net/.*|' +
    'https://fonts\\.googleapis\\.com/css2?\\?.*|' +
    'https://fonts\\.googleapis\\.com/icon\\?.*|' +
    'https://fonts\\.googleapis\\.com/earlyaccess/.*\\.css|' +
    'https://maxcdn\\.bootstrapcdn\\.com/font-awesome/' +
    '([0-9]+\\.?)+/css/font-awesome\\.min\\.css(\\?.*)?|' +
    'https://(use|pro)\\.fontawesome\\.com/releases/v([0-9]+\\.?)+' +
    '/css/[0-9a-zA-Z-]+\\.css|' +
    'https://(use|pro)\\.fontawesome\\.com/[0-9a-zA-Z-]+\\.css|' +
    'https://use\\.typekit\\.net/[\\w\\p{L}\\p{N}_]+\\.css'
);

// Extensions an AMPHTML ad is allowed to load.
// If editing please also change:
// extensions/amp-a4a/amp-a4a-format.md#allowed-amp-extensions-and-builtins
const EXTENSION_ALLOWLIST = map({
  'amp-accordion': true,
  'amp-ad-exit': true,
  'amp-analytics': true,
  'amp-anim': true,
  'amp-animation': true,
  'amp-audio': true,
  'amp-bind': true,
  'amp-carousel': true,
  'amp-fit-text': true,
  'amp-font': true,
  'amp-form': true,
  'amp-img': true,
  'amp-layout': true,
  'amp-lightbox': true,
  'amp-mraid': true,
  'amp-mustache': true,
  'amp-pixel': true,
  'amp-position-observer': true,
  'amp-selector': true,
  'amp-social-share': true,
  'amp-video': true,
});

// Matches script URLs served from the AMP CDN's /v0/ extension path.
const EXTENSION_URL_PREFIX = new RegExp(
  urls.cdn.replace(/\./g, '\\.') + '/v0/'
);
/**
 * Sanitizes an AMPHTML Ad's head element in place and extracts the AMP
 * extensions that need to be installed for the creative.
 * @param {!Window} win
 * @param {!Element} adElement
 * @param {?Element} head detached head of the creative document; disallowed
 *     children are removed from it by this function.
 * @return {?ValidatedHeadDef} null when the head is empty or the document is
 *     not marked as amp4ads.
 */
export function processHead(win, adElement, head) {
  if (!head || !head.firstChild) {
    return null;
  }
  // The document must carry one of the amp4ads marker attributes on <html>.
  const root = rootNodeFor(head);
  const htmlTag = root.documentElement;
  if (
    !htmlTag ||
    (!htmlTag.hasAttribute('amp4ads') &&
      !htmlTag.hasAttribute('⚡️4ads') &&
      !htmlTag.hasAttribute('⚡4ads')) // Unicode weirdness.
  ) {
    return null;
  }
  const urlService = Services.urlForDoc(adElement);
  /** @type {!Array<{extensionId: string, extensionVersion: string}>} */
  const extensions = [];
  const fonts = [];
  const images = [];
  let element = head.firstElementChild;
  while (element) {
    // Store next element here as the following code will remove
    // certain elements from the detached DOM.
    const nextElement = element.nextElementSibling;
    switch (element.tagName.toUpperCase()) {
      case 'SCRIPT':
        handleScript(extensions, element);
        break;
      case 'STYLE':
        handleStyle(element);
        break;
      case 'LINK':
        handleLink(fonts, images, element);
        break;
      // Allow these without validation.
      case 'META':
      case 'TITLE':
        break;
      default:
        // Anything else is not valid in an amp4ads head.
        removeElement(element);
        break;
    }
    element = nextElement;
  }
  // Load any extensions; do not wait on their promises as this
  // is just to prefetch.
  preloadFriendlyIframeEmbedExtensions(win, extensions);
  // Preload any fonts.
  fonts.forEach((fontUrl) =>
    Services.preconnectFor(win).preload(adElement.getAmpDoc(), fontUrl)
  );
  // Preload any AMP images; only secure (https) URLs are preloaded.
  images.forEach(
    (imageUrl) =>
      urlService.isSecure(imageUrl) &&
      Services.preconnectFor(win).preload(adElement.getAmpDoc(), imageUrl)
  );
  return {
    extensions,
    head,
  };
}
/**
 * Keeps JSON payload scripts untouched, records allowlisted AMP extension
 * scripts for prefetching, and strips every other <script> from the head.
 * @param {!Array<{extensionId: string, extensionVersion: string}>} extensions
 *     accumulator for extensions that must be installed.
 * @param {!Element} script
 */
function handleScript(extensions, script) {
  // JSON configuration blobs (e.g. amp-analytics configs) stay in the head.
  if (script.type === 'application/json') {
    return;
  }
  const src = script.src;
  const testingMode = getMode().test || getMode().localDev;
  const looksLikeExtension =
    EXTENSION_URL_PREFIX.test(src) ||
    // Integration tests point to local files.
    (testingMode && includes(src, '/dist/'));
  if (looksLikeExtension) {
    const info = parseExtensionUrl(src);
    if (info && EXTENSION_ALLOWLIST[info.extensionId]) {
      extensions.push(info);
    }
  }
  // No executable <script> survives into the sanitized head.
  removeElement(script);
}
/**
 * Collects <link> hrefs that are image preloads or allowlisted font
 * stylesheets; every other <link> element is removed from the head.
 * @param {!Array<string>} fonts accumulator for font stylesheet URLs.
 * @param {!Array<string>} images accumulator for preloaded image URLs.
 * @param {!Element} link
 */
function handleLink(fonts, images, link) {
  const {as, href, rel} = link;
  switch (rel) {
    case 'preload':
      if (as === 'image') {
        images.push(href);
        return;
      }
      break;
    case 'stylesheet':
      if (ALLOWED_FONT_REGEX.test(href)) {
        fonts.push(href);
        return;
      }
      break;
  }
  // Neither an image preload nor an allowed font stylesheet: drop it.
  removeElement(link);
}
/**
 * Removes every <style> element except the ones amp4ads permits:
 * `amp-custom`, `amp-keyframes` and the `amp4ads-boilerplate` style.
 * @param {!Element} style
 */
function handleStyle(style) {
  const allowedMarkers = ['amp-custom', 'amp-keyframes', 'amp4ads-boilerplate'];
  const isAllowed = allowedMarkers.some((name) => style.hasAttribute(name));
  if (!isAllowed) {
    removeElement(style);
  }
}
| lannka/amphtml | extensions/amp-a4a/0.1/head-validation.js | JavaScript | apache-2.0 | 6,308 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.vxquery.compiler.rewriter.rules;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.mutable.Mutable;
import org.apache.vxquery.functions.BuiltinFunctions;
import org.apache.vxquery.functions.BuiltinOperators;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
import org.apache.hyracks.algebricks.common.utils.Pair;
import org.apache.hyracks.algebricks.core.algebra.base.ILogicalExpression;
import org.apache.hyracks.algebricks.core.algebra.base.ILogicalOperator;
import org.apache.hyracks.algebricks.core.algebra.base.IOptimizationContext;
import org.apache.hyracks.algebricks.core.algebra.base.LogicalExpressionTag;
import org.apache.hyracks.algebricks.core.algebra.base.LogicalOperatorTag;
import org.apache.hyracks.algebricks.core.algebra.expressions.AbstractFunctionCallExpression;
import org.apache.hyracks.algebricks.core.algebra.expressions.AggregateFunctionCallExpression;
import org.apache.hyracks.algebricks.core.algebra.functions.FunctionIdentifier;
import org.apache.hyracks.algebricks.core.algebra.functions.IFunctionInfo;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AggregateOperator;
import org.apache.hyracks.algebricks.core.rewriter.base.IAlgebraicRewriteRule;
/**
* The rule searches for aggregate operators with an aggregate function
* expression that has not been initialized for two step aggregation.
*
* <pre>
* Before
*
* plan__parent
* AGGREGATE( $v : af1( $v1 ) )
* plan__child
*
* Where af1 is a VXquery aggregate function expression configured for single
* step processing and $v1 is defined in plan__child.
*
* After
*
* if (af1 == count) aggregate operating settings:
* Step 1: count
* Step 2: sum
* if (af1 == avg) aggregate operating settings:
* Step 1: avg-local
* Step 2: avg-global
* if (af1 in (max, min, sum)) aggregate operating settings:
* Step 1: af1
* Step 2: af1
* </pre>
*
* @author prestonc
*/
public class IntroduceTwoStepAggregateRule implements IAlgebraicRewriteRule {
    /**
     * Maps each supported single-step aggregate function to the pair of
     * functions used for two-step aggregation: {@code first} runs locally
     * (step one), {@code second} combines the partial results (step two).
     */
    final Map<FunctionIdentifier, Pair<IFunctionInfo, IFunctionInfo>> AGGREGATE_MAP = new HashMap<FunctionIdentifier, Pair<IFunctionInfo, IFunctionInfo>>();

    public IntroduceTwoStepAggregateRule() {
        AGGREGATE_MAP.put(BuiltinFunctions.FN_AVG_1.getFunctionIdentifier(),
                new Pair<IFunctionInfo, IFunctionInfo>(BuiltinOperators.AVG_LOCAL, BuiltinOperators.AVG_GLOBAL));
        AGGREGATE_MAP.put(BuiltinFunctions.FN_COUNT_1.getFunctionIdentifier(),
                new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_COUNT_1, BuiltinFunctions.FN_SUM_1));
        AGGREGATE_MAP.put(BuiltinFunctions.FN_MAX_1.getFunctionIdentifier(),
                new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_MAX_1, BuiltinFunctions.FN_MAX_1));
        AGGREGATE_MAP.put(BuiltinFunctions.FN_MIN_1.getFunctionIdentifier(),
                new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_MIN_1, BuiltinFunctions.FN_MIN_1));
        AGGREGATE_MAP.put(BuiltinFunctions.FN_SUM_1.getFunctionIdentifier(),
                new Pair<IFunctionInfo, IFunctionInfo>(BuiltinFunctions.FN_SUM_1, BuiltinFunctions.FN_SUM_1));
    }

    /**
     * Rewrites an aggregate operator whose first expression is a supported
     * single-step aggregate function into its two-step form.
     *
     * @param opRef the operator to inspect
     * @param context the optimization context (unused)
     * @return true iff the operator was switched to two-step aggregation
     */
    @Override
    public boolean rewritePre(Mutable<ILogicalOperator> opRef, IOptimizationContext context)
            throws AlgebricksException {
        // Only aggregate operators that carry at least one expression qualify.
        AbstractLogicalOperator op = (AbstractLogicalOperator) opRef.getValue();
        if (op.getOperatorTag() != LogicalOperatorTag.AGGREGATE) {
            return false;
        }
        AggregateOperator aggregate = (AggregateOperator) op;
        if (aggregate.getExpressions().size() == 0) {
            return false;
        }
        Mutable<ILogicalExpression> mutableLogicalExpression = aggregate.getExpressions().get(0);
        ILogicalExpression logicalExpression = mutableLogicalExpression.getValue();
        if (logicalExpression.getExpressionTag() != LogicalExpressionTag.FUNCTION_CALL) {
            return false;
        }
        AbstractFunctionCallExpression functionCall = (AbstractFunctionCallExpression) logicalExpression;
        // Single lookup instead of containsKey() followed by two get() calls.
        Pair<IFunctionInfo, IFunctionInfo> twoStepFunctions =
                AGGREGATE_MAP.get(functionCall.getFunctionIdentifier());
        if (twoStepFunctions == null) {
            return false;
        }
        AggregateFunctionCallExpression aggregateFunctionCall = (AggregateFunctionCallExpression) functionCall;
        if (aggregateFunctionCall.isTwoStep()) {
            // Already configured for two-step aggregation; nothing to do.
            return false;
        }
        aggregateFunctionCall.setTwoStep(true);
        aggregateFunctionCall.setStepOneAggregate(twoStepFunctions.first);
        aggregateFunctionCall.setStepTwoAggregate(twoStepFunctions.second);
        return true;
    }

    @Override
    public boolean rewritePost(Mutable<ILogicalOperator> opRef, IOptimizationContext context) {
        // This rule only rewrites on the way down.
        return false;
    }
}
| prestoncarman/vxquery | vxquery-core/src/main/java/org/apache/vxquery/compiler/rewriter/rules/IntroduceTwoStepAggregateRule.java | Java | apache-2.0 | 5,872 |
/*
* Copyright 2000-2012 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.roots.ui.configuration;
import com.intellij.ide.DataManager;
import com.intellij.openapi.actionSystem.*;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.project.ProjectBundle;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.projectRoots.SdkType;
import com.intellij.openapi.projectRoots.SdkTypeId;
import com.intellij.openapi.roots.ui.OrderEntryAppearanceService;
import com.intellij.openapi.roots.ui.configuration.projectRoot.JdkListConfigurable;
import com.intellij.openapi.roots.ui.configuration.projectRoot.ProjectSdksModel;
import com.intellij.openapi.ui.ComboBoxWithWidePopup;
import com.intellij.openapi.ui.popup.JBPopupFactory;
import com.intellij.openapi.util.Computable;
import com.intellij.openapi.util.Condition;
import com.intellij.openapi.util.Conditions;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.ui.ColoredListCellRenderer;
import com.intellij.ui.ScreenUtil;
import com.intellij.ui.SimpleTextAttributes;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.ui.EmptyIcon;
import com.intellij.util.ui.JBUI;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.util.Arrays;
import java.util.Collection;
/**
* @author Eugene Zhuravlev
* @since May 18, 2005
*/
/**
 * Combo box for selecting an SDK (JDK). Items are populated from a
 * {@link ProjectSdksModel} and can include the inherited project SDK,
 * registered SDKs, detected-but-unregistered ("suggested") SDKs and a marker
 * for an invalid/missing SDK.
 *
 * @author Eugene Zhuravlev
 * @since May 18, 2005
 */
public class JdkComboBox extends ComboBoxWithWidePopup<JdkComboBox.JdkComboBoxItem> {

  // 1x16 placeholder icon keeps row height constant even when no SDK icon is set.
  private static final Icon EMPTY_ICON = JBUI.scale(EmptyIcon.create(1, 16));

  // Filters which SDK instances appear in the drop-down (null == show all).
  @Nullable
  private final Condition<Sdk> myFilter;
  // Filters which SDK types may be created via the "set up" button (null == all).
  @Nullable
  private final Condition<SdkTypeId> myCreationFilter;
  private JButton mySetUpButton;
  // Filters SDK types when reloading the model / adding suggested items.
  private final Condition<SdkTypeId> mySdkTypeFilter;

  public JdkComboBox(@NotNull final ProjectSdksModel jdkModel) {
    this(jdkModel, null);
  }

  public JdkComboBox(@NotNull final ProjectSdksModel jdkModel,
                     @Nullable Condition<SdkTypeId> filter) {
    // The same type filter doubles as SDK filter and creation filter.
    this(jdkModel, filter, getSdkFilter(filter), filter, false);
  }

  public JdkComboBox(@NotNull final ProjectSdksModel jdkModel,
                     @Nullable Condition<SdkTypeId> sdkTypeFilter,
                     @Nullable Condition<Sdk> filter,
                     @Nullable Condition<SdkTypeId> creationFilter,
                     boolean addSuggestedItems) {
    super(new JdkComboBoxModel(jdkModel, sdkTypeFilter, filter, addSuggestedItems));
    myFilter = filter;
    mySdkTypeFilter = sdkTypeFilter;
    myCreationFilter = creationFilter;
    setRenderer(new ColoredListCellRenderer<JdkComboBoxItem>() {
      @Override
      protected void customizeCellRenderer(@NotNull JList<? extends JdkComboBoxItem> list,
                                           JdkComboBoxItem value,
                                           int index,
                                           boolean selected,
                                           boolean hasFocus) {
        if (JdkComboBox.this.isEnabled()) {
          setIcon(EMPTY_ICON); // to fix vertical size
          if (value instanceof InvalidJdkComboBoxItem) {
            final String str = value.toString();
            append(str, SimpleTextAttributes.ERROR_ATTRIBUTES);
          }
          else if (value instanceof ProjectJdkComboBoxItem) {
            final Sdk jdk = jdkModel.getProjectSdk();
            if (jdk != null) {
              setIcon(((SdkType)jdk.getSdkType()).getIcon());
              append(ProjectBundle.message("project.roots.project.jdk.inherited"), SimpleTextAttributes.REGULAR_ATTRIBUTES);
              append(" (" + jdk.getName() + ")", SimpleTextAttributes.GRAYED_ATTRIBUTES);
            }
            else {
              // No project SDK configured: render the inherited item as an error.
              final String str = value.toString();
              append(str, SimpleTextAttributes.ERROR_ATTRIBUTES);
            }
          }
          else if (value instanceof SuggestedJdkItem) {
            // Detected-but-unregistered SDK: show its version (or type name) and home path.
            SdkType type = ((SuggestedJdkItem)value).getSdkType();
            String home = ((SuggestedJdkItem)value).getPath();
            setIcon(type.getIconForAddAction());
            String version = type.getVersionString(home);
            append(version == null ? type.getPresentableName() : version);
            append(" (" + home + ")", SimpleTextAttributes.GRAYED_ATTRIBUTES);
          }
          else if (value != null) {
            OrderEntryAppearanceService.getInstance().forJdk(value.getJdk(), false, selected, true).customize(this);
          }
          else {
            // Null selection is rendered like the explicit <None> item.
            customizeCellRenderer(list, new NoneJdkComboBoxItem(), index, selected, hasFocus);
          }
        }
      }
    });
  }

  @Override
  public Dimension getPreferredSize() {
    // Cap the preferred width to a quarter of the screen to keep the combo compact.
    final Rectangle rec = ScreenUtil.getScreenRectangle(0, 0);
    final Dimension size = super.getPreferredSize();
    final int maxWidth = rec.width / 4;
    if (size.width > maxWidth) {
      size.width = maxWidth;
    }
    return size;
  }

  @Override
  public Dimension getMinimumSize() {
    // The minimum width must not exceed the (possibly capped) preferred width.
    final Dimension minSize = super.getMinimumSize();
    final Dimension prefSize = getPreferredSize();
    if (minSize.width > prefSize.width) {
      minSize.width = prefSize.width;
    }
    return minSize;
  }

  public void setSetupButton(final JButton setUpButton,
                             @Nullable final Project project,
                             final ProjectSdksModel jdksModel,
                             final JdkComboBoxItem firstItem,
                             @Nullable final Condition<Sdk> additionalSetup,
                             final boolean moduleJdkSetup) {
    setSetupButton(setUpButton, project, jdksModel, firstItem, additionalSetup,
                   ProjectBundle.message("project.roots.set.up.jdk.title", moduleJdkSetup ? 1 : 2));
  }

  public void setSetupButton(final JButton setUpButton,
                             @Nullable final Project project,
                             final ProjectSdksModel jdksModel,
                             final JdkComboBoxItem firstItem,
                             @Nullable final Condition<Sdk> additionalSetup,
                             final String actionGroupTitle) {
    // Wires the "set up" button to the model's add-SDK actions; a newly added
    // SDK is registered (when a project is given), the model is reloaded and
    // the new SDK selected.
    mySetUpButton = setUpButton;
    mySetUpButton.addActionListener(e -> {
      DefaultActionGroup group = new DefaultActionGroup();
      jdksModel.createAddActions(group, this, getSelectedJdk(), jdk -> {
        if (project != null) {
          final JdkListConfigurable configurable = JdkListConfigurable.getInstance(project);
          configurable.addJdkNode(jdk, false);
        }
        reloadModel(new ActualJdkComboBoxItem(jdk), project);
        setSelectedJdk(jdk); //restore selection
        if (additionalSetup != null) {
          if (additionalSetup.value(jdk)) { //leave old selection
            setSelectedJdk(firstItem.getJdk());
          }
        }
      }, myCreationFilter);
      final DataContext dataContext = DataManager.getInstance().getDataContext(this);
      if (group.getChildrenCount() > 1) {
        // Several SDK kinds can be added: let the user pick from a popup.
        JBPopupFactory.getInstance()
          .createActionGroupPopup(actionGroupTitle, group, dataContext, JBPopupFactory.ActionSelectionAid.MNEMONICS, false)
          .showUnderneathOf(setUpButton);
      }
      else {
        // Only one add-action exists: invoke it directly, no popup needed.
        final AnActionEvent event =
          new AnActionEvent(null, dataContext, ActionPlaces.UNKNOWN, new Presentation(""), ActionManager.getInstance(), 0);
        group.getChildren(event)[0].actionPerformed(event);
      }
    });
  }

  public void setEditButton(final JButton editButton, final Project project, final Computable<Sdk> retrieveJDK){
    editButton.addActionListener(e -> {
      final Sdk projectJdk = retrieveJDK.compute();
      if (projectJdk != null) {
        ProjectStructureConfigurable.getInstance(project).select(projectJdk, true);
      }
    });
    addActionListener(e -> {
      // Enable "Edit" only when the current selection resolves to a real SDK.
      final JdkComboBoxItem selectedItem = getSelectedItem();
      if (selectedItem instanceof ProjectJdkComboBoxItem) {
        editButton.setEnabled(ProjectStructureConfigurable.getInstance(project).getProjectJdksModel().getProjectSdk() != null);
      }
      else {
        editButton.setEnabled(!(selectedItem instanceof InvalidJdkComboBoxItem) && selectedItem != null && selectedItem.getJdk() != null);
      }
    });
  }

  public JButton getSetUpButton() {
    return mySetUpButton;
  }

  @Override
  public JdkComboBoxItem getSelectedItem() {
    return (JdkComboBoxItem)super.getSelectedItem();
  }

  /** Returns the SDK of the selected item, or null when nothing concrete is selected. */
  @Nullable
  public Sdk getSelectedJdk() {
    final JdkComboBoxItem selectedItem = getSelectedItem();
    return selectedItem != null? selectedItem.getJdk() : null;
  }

  public void setSelectedJdk(Sdk jdk) {
    // A null jdk selects the <None>/project item when one is present (see indexOf).
    final int index = indexOf(jdk);
    if (index >= 0) {
      setSelectedIndex(index);
    }
  }

  public void setInvalidJdk(String name) {
    // At most one invalid entry is kept; it is appended last and selected.
    removeInvalidElement();
    addItem(new InvalidJdkComboBoxItem(name));
    setSelectedIndex(getModel().getSize() - 1);
  }

  // Finds the model index for the given SDK (matched by name), or the
  // <None>/project item when jdk is null. Returns -1 when not found.
  private int indexOf(Sdk jdk) {
    final JdkComboBoxModel model = (JdkComboBoxModel)getModel();
    final int count = model.getSize();
    for (int idx = 0; idx < count; idx++) {
      final JdkComboBoxItem elementAt = model.getElementAt(idx);
      if (jdk == null) {
        if (elementAt instanceof NoneJdkComboBoxItem || elementAt instanceof ProjectJdkComboBoxItem) {
          return idx;
        }
      }
      else {
        Sdk elementAtJdk = elementAt.getJdk();
        if (elementAtJdk != null && jdk.getName().equals(elementAtJdk.getName())) {
          return idx;
        }
      }
    }
    return -1;
  }

  // Removes the (single) invalid-SDK marker item from the model, if present.
  private void removeInvalidElement() {
    final JdkComboBoxModel model = (JdkComboBoxModel)getModel();
    final int count = model.getSize();
    for (int idx = 0; idx < count; idx++) {
      final JdkComboBoxItem elementAt = model.getElementAt(idx);
      if (elementAt instanceof InvalidJdkComboBoxItem) {
        removeItemAt(idx);
        break;
      }
    }
  }

  public void reloadModel(JdkComboBoxItem firstItem, @Nullable Project project) {
    final JdkComboBoxModel model = (JdkComboBoxModel)getModel();
    if (project == null) {
      // Without a project there is no SDK model to reload from; just append.
      model.addElement(firstItem);
      return;
    }
    model.reload(firstItem, ProjectStructureConfigurable.getInstance(project).getProjectJdksModel(), mySdkTypeFilter, myFilter, false);
  }

  // Combo box model that lists SDKs from a ProjectSdksModel, optionally
  // prefixed by a fixed first item and followed by suggested SDK homes.
  private static class JdkComboBoxModel extends DefaultComboBoxModel<JdkComboBoxItem> {
    JdkComboBoxModel(@NotNull final ProjectSdksModel jdksModel, @Nullable Condition<SdkTypeId> sdkTypeFilter,
                     @Nullable Condition<Sdk> sdkFilter, boolean addSuggested) {
      reload(null, jdksModel, sdkTypeFilter, sdkFilter, addSuggested);
    }

    void reload(@Nullable final JdkComboBoxItem firstItem,
                @NotNull final ProjectSdksModel jdksModel,
                @Nullable Condition<SdkTypeId> sdkTypeFilter,
                @Nullable Condition<Sdk> sdkFilter,
                boolean addSuggested) {
      removeAllElements();
      if (firstItem != null) addElement(firstItem);

      Sdk[] jdks = sortSdks(jdksModel.getSdks());
      for (Sdk jdk : jdks) {
        if (sdkFilter == null || sdkFilter.value(jdk)) {
          addElement(new ActualJdkComboBoxItem(jdk));
        }
      }
      if (addSuggested) {
        addSuggestedItems(sdkTypeFilter, jdks);
      }
    }

    // Sorts by SDK type (presentable name) first, then by each type's own comparator.
    @NotNull
    private static Sdk[] sortSdks(@NotNull final Sdk[] sdks) {
      Sdk[] clone = sdks.clone();
      Arrays.sort(clone, (sdk1, sdk2) -> {
        SdkType sdkType1 = (SdkType)sdk1.getSdkType();
        SdkType sdkType2 = (SdkType)sdk2.getSdkType();
        if (!sdkType1.getComparator().equals(sdkType2.getComparator())) return StringUtil.compare(sdkType1.getPresentableName(), sdkType2.getPresentableName(), true);
        return sdkType1.getComparator().compare(sdk1, sdk2);
      });
      return clone;
    }

    // Offers detected SDK home paths for types that have no registered SDK yet.
    void addSuggestedItems(@Nullable Condition<SdkTypeId> sdkTypeFilter, Sdk[] jdks) {
      SdkType[] types = SdkType.getAllTypes();
      for (SdkType type : types) {
        if (sdkTypeFilter == null || sdkTypeFilter.value(type) && ContainerUtil.find(jdks, sdk -> sdk.getSdkType() == type) == null) {
          Collection<String> paths = type.suggestHomePaths();
          for (String path : paths) {
            if (path != null && type.isValidSdkHome(path)) {
              addElement(new SuggestedJdkItem(type, path));
            }
          }
        }
      }
    }
  }

  // Adapts a type filter into an SDK filter; a null filter accepts everything.
  public static Condition<Sdk> getSdkFilter(@Nullable final Condition<SdkTypeId> filter) {
    return filter == null ? Conditions.alwaysTrue() : sdk -> filter.value(sdk.getSdkType());
  }

  /** Base class for all drop-down entries; by default carries no SDK. */
  public abstract static class JdkComboBoxItem {
    @Nullable
    public Sdk getJdk() {
      return null;
    }

    @Nullable
    public String getSdkName() {
      return null;
    }
  }

  /** An entry backed by a real, registered SDK. */
  public static class ActualJdkComboBoxItem extends JdkComboBoxItem {
    private final Sdk myJdk;

    public ActualJdkComboBoxItem(@NotNull Sdk jdk) {
      myJdk = jdk;
    }

    @Override
    public String toString() {
      return myJdk.getName();
    }

    @Nullable
    @Override
    public Sdk getJdk() {
      return myJdk;
    }

    @Nullable
    @Override
    public String getSdkName() {
      return myJdk.getName();
    }
  }

  /** The "inherit project SDK" entry. */
  public static class ProjectJdkComboBoxItem extends JdkComboBoxItem {
    public String toString() {
      return ProjectBundle.message("jdk.combo.box.project.item");
    }
  }

  /** The explicit "no SDK" entry. */
  public static class NoneJdkComboBoxItem extends JdkComboBoxItem {
    public String toString() {
      return ProjectBundle.message("jdk.combo.box.none.item");
    }
  }

  /** Marker entry for an SDK name that no longer resolves to a registered SDK. */
  private static class InvalidJdkComboBoxItem extends JdkComboBoxItem {
    private final String mySdkName;

    InvalidJdkComboBoxItem(String name) {
      mySdkName = name;
    }

    @Override
    public String getSdkName() {
      return mySdkName;
    }

    public String toString() {
      return ProjectBundle.message("jdk.combo.box.invalid.item", mySdkName);
    }
  }

  /** A detected SDK home path that has not been registered as an SDK yet. */
  public static class SuggestedJdkItem extends JdkComboBoxItem {
    private final SdkType mySdkType;
    private final String myPath;

    SuggestedJdkItem(@NotNull SdkType sdkType, @NotNull String path) {
      mySdkType = sdkType;
      myPath = path;
    }

    @NotNull
    public SdkType getSdkType() {
      return mySdkType;
    }

    @NotNull
    public String getPath() {
      return myPath;
    }

    @Override
    public String toString() {
      return myPath;
    }
  }
}
| ThiagoGarciaAlves/intellij-community | java/idea-ui/src/com/intellij/openapi/roots/ui/configuration/JdkComboBox.java | Java | apache-2.0 | 15,058 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.util.resource.Resources;
@Private
@Unstable
public class FSLeafQueue extends FSQueue {
private static final Log LOG = LogFactory.getLog(
FSLeafQueue.class.getName());
private final List<AppSchedulable> runnableAppScheds = // apps that are runnable
new ArrayList<AppSchedulable>();
private final List<AppSchedulable> nonRunnableAppScheds =
new ArrayList<AppSchedulable>();
private Resource demand = Resources.createResource(0);
// Variables used for preemption
private long lastTimeAtMinShare;
private long lastTimeAtHalfFairShare;
// Track the AM resource usage for this queue
private Resource amResourceUsage;
private final ActiveUsersManager activeUsersManager;
public FSLeafQueue(String name, FairScheduler scheduler,
FSParentQueue parent) {
super(name, scheduler, parent);
this.lastTimeAtMinShare = scheduler.getClock().getTime();
this.lastTimeAtHalfFairShare = scheduler.getClock().getTime();
activeUsersManager = new ActiveUsersManager(getMetrics());
amResourceUsage = Resource.newInstance(0, 0);
}
public void addApp(FSSchedulerApp app, boolean runnable) {
AppSchedulable appSchedulable = new AppSchedulable(scheduler, app, this);
app.setAppSchedulable(appSchedulable);
if (runnable) {
runnableAppScheds.add(appSchedulable);
} else {
nonRunnableAppScheds.add(appSchedulable);
}
}
// for testing
void addAppSchedulable(AppSchedulable appSched) {
runnableAppScheds.add(appSched);
}
/**
* Removes the given app from this queue.
* @return whether or not the app was runnable
*/
public boolean removeApp(FSSchedulerApp app) {
if (runnableAppScheds.remove(app.getAppSchedulable())) {
// Update AM resource usage
if (app.isAmRunning() && app.getAMResource() != null) {
Resources.subtractFrom(amResourceUsage, app.getAMResource());
}
return true;
} else if (nonRunnableAppScheds.remove(app.getAppSchedulable())) {
return false;
} else {
throw new IllegalStateException("Given app to remove " + app +
" does not exist in queue " + this);
}
}
public Collection<AppSchedulable> getRunnableAppSchedulables() {
return runnableAppScheds;
}
public List<AppSchedulable> getNonRunnableAppSchedulables() {
return nonRunnableAppScheds;
}
@Override
public void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps) {
for (AppSchedulable appSched : runnableAppScheds) {
apps.add(appSched.getApp().getApplicationAttemptId());
}
for (AppSchedulable appSched : nonRunnableAppScheds) {
apps.add(appSched.getApp().getApplicationAttemptId());
}
}
  /**
   * Sets the scheduling policy for this leaf queue after verifying that the
   * policy is applicable at leaf depth.
   *
   * @throws AllocationConfigurationException if the policy does not apply
   */
  @Override
  public void setPolicy(SchedulingPolicy policy)
      throws AllocationConfigurationException {
    if (!SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_LEAF)) {
      // Presumably always throws (the name suggests so) — if it ever
      // returned normally the policy would still be assigned below.
      throwPolicyDoesnotApplyException(policy);
    }
    super.policy = policy;
  }
  @Override
  public void recomputeShares() {
    // Delegate fair-share computation across runnable apps to the policy.
    policy.computeShares(getRunnableAppSchedulables(), getFairShare());
  }

  /** Returns the demand cached by the most recent updateDemand() call. */
  @Override
  public Resource getDemand() {
    return demand;
  }
@Override
public Resource getResourceUsage() {
Resource usage = Resources.createResource(0);
for (AppSchedulable app : runnableAppScheds) {
Resources.addTo(usage, app.getResourceUsage());
}
for (AppSchedulable app : nonRunnableAppScheds) {
Resources.addTo(usage, app.getResourceUsage());
}
return usage;
}
  /** Returns the total resources tracked for this queue's running AMs. */
  public Resource getAmResourceUsage() {
    return amResourceUsage;
  }
@Override
public void updateDemand() {
// Compute demand by iterating through apps in the queue
// Limit demand to maxResources
Resource maxRes = scheduler.getAllocationConfiguration()
.getMaxResources(getName());
demand = Resources.createResource(0);
for (AppSchedulable sched : runnableAppScheds) {
if (Resources.equals(demand, maxRes)) {
break;
}
updateDemandForApp(sched, maxRes);
}
for (AppSchedulable sched : nonRunnableAppScheds) {
if (Resources.equals(demand, maxRes)) {
break;
}
updateDemandForApp(sched, maxRes);
}
if (LOG.isDebugEnabled()) {
LOG.debug("The updated demand for " + getName() + " is " + demand
+ "; the max is " + maxRes);
}
}
private void updateDemandForApp(AppSchedulable sched, Resource maxRes) {
sched.updateDemand();
Resource toAdd = sched.getDemand();
if (LOG.isDebugEnabled()) {
LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
+ "; Total resource consumption for " + getName() + " now "
+ demand);
}
demand = Resources.add(demand, toAdd);
demand = Resources.componentwiseMin(demand, maxRes);
}
  /**
   * Offers the given node's capacity to this queue's runnable apps, in the
   * order imposed by the scheduling policy's comparator, and returns the
   * resources assigned (Resources.none() if nothing could be assigned).
   */
  @Override
  public Resource assignContainer(FSSchedulerNode node) {
    Resource assigned = Resources.none();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Node " + node.getNodeName() + " offered to queue: " + getName());
    }

    // Bail out early if queue-level checks reject this node.
    if (!assignContainerPreCheck(node)) {
      return assigned;
    }

    // Sort so the app most favored by the policy is tried first.
    // NOTE(review): this sorts the shared internal list in place with no
    // visible locking here — confirm callers serialize access to the queue.
    Comparator<Schedulable> comparator = policy.getComparator();
    Collections.sort(runnableAppScheds, comparator);
    for (AppSchedulable sched : runnableAppScheds) {
      // Skip apps that have blacklisted this node.
      if (SchedulerAppUtils.isBlacklisted(sched.getApp(), node, LOG)) {
        continue;
      }

      assigned = sched.assignContainer(node);
      // Stop at the first successful assignment.
      if (!assigned.equals(Resources.none())) {
        break;
      }
    }
    return assigned;
  }
@Override
public RMContainer preemptContainer() {
RMContainer toBePreempted = null;
if (LOG.isDebugEnabled()) {
LOG.debug("Queue " + getName() + " is going to preempt a container " +
"from its applications.");
}
// If this queue is not over its fair share, reject
if (!preemptContainerPreCheck()) {
return toBePreempted;
}
// Choose the app that is most over fair share
Comparator<Schedulable> comparator = policy.getComparator();
AppSchedulable candidateSched = null;
for (AppSchedulable sched : runnableAppScheds) {
if (candidateSched == null ||
comparator.compare(sched, candidateSched) > 0) {
candidateSched = sched;
}
}
// Preempt from the selected app
if (candidateSched != null) {
toBePreempted = candidateSched.preemptContainer();
}
return toBePreempted;
}
  /**
   * Leaf queues have no children; returns a new empty (mutable) list on
   * every call.
   */
  @Override
  public List<FSQueue> getChildQueues() {
    return new ArrayList<FSQueue>(1);
  }
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user) {
QueueUserACLInfo userAclInfo =
recordFactory.newRecordInstance(QueueUserACLInfo.class);
List<QueueACL> operations = new ArrayList<QueueACL>();
for (QueueACL operation : QueueACL.values()) {
if (hasAccess(operation, user)) {
operations.add(operation);
}
}
userAclInfo.setQueueName(getQueueName());
userAclInfo.setUserAcls(operations);
return Collections.singletonList(userAclInfo);
}
  /** Scheduler-clock timestamp used by the preemption logic (min share). */
  public long getLastTimeAtMinShare() {
    return lastTimeAtMinShare;
  }

  public void setLastTimeAtMinShare(long lastTimeAtMinShare) {
    this.lastTimeAtMinShare = lastTimeAtMinShare;
  }

  /** Scheduler-clock timestamp used by the preemption logic (half fair share). */
  public long getLastTimeAtHalfFairShare() {
    return lastTimeAtHalfFairShare;
  }

  public void setLastTimeAtHalfFairShare(long lastTimeAtHalfFairShare) {
    this.lastTimeAtHalfFairShare = lastTimeAtHalfFairShare;
  }
  /** Number of apps currently on the runnable list. */
  @Override
  public int getNumRunnableApps() {
    return runnableAppScheds.size();
  }

  @Override
  public ActiveUsersManager getActiveUsersManager() {
    return activeUsersManager;
  }
  /**
   * Check whether this queue can run this application master under the
   * maxAMShare limit
   *
   * @param amResource the resources the AM would consume if started
   * @return true if this queue can run
   */
  public boolean canRunAppAM(Resource amResource) {
    float maxAMShare =
        scheduler.getAllocationConfiguration().getQueueMaxAMShare(getName());
    // A configured share of -1.0f means no AM limit; use a tolerance since
    // the value is a float.
    if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
      return true;
    }
    // The AM budget is a fraction (maxAMShare) of this queue's fair share;
    // admit the AM only if current AM usage plus this AM stays within it.
    Resource maxAMResource = Resources.multiply(getFairShare(), maxAMShare);
    Resource ifRunAMResource = Resources.add(amResourceUsage, amResource);
    return !policy
        .checkIfAMResourceUsageOverLimit(ifRunAMResource, maxAMResource);
  }
public void addAMResourceUsage(Resource amResource) {
if (amResource != null) {
Resources.addTo(amResourceUsage, amResource);
}
}
  /**
   * Currently a no-op (see TODO): container recovery is not implemented
   * for this queue type.
   */
  @Override
  public void recoverContainer(Resource clusterResource,
      SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer) {
    // TODO Auto-generated method stub
  }
}
| tseen/Federated-HDFS | tseenliu/FedHDFS-hadoop-src/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java | Java | apache-2.0 | 10,600 |
/*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.governator.lifecycle.warmup;
import com.google.inject.Singleton;
import com.netflix.governator.annotations.WarmUp;
public class Flat
{
    /*
        Root classes without dependencies
    */

    // Simple singleton with a @WarmUp method; records "A" when warmed up so
    // tests can assert warm-up execution.
    @Singleton
    public static class A
    {
        // NOTE(review): never assigned in this file — presumably injected or
        // set by the test harness before warm-up runs; confirm against callers.
        public volatile Recorder recorder;

        @WarmUp
        public void warmUp() throws InterruptedException
        {
            recorder.record("A");
        }
    }

    // Same shape as A; records "B" instead.
    @Singleton
    public static class B
    {
        // NOTE(review): assigned externally, as with A.recorder.
        public volatile Recorder recorder;

        @WarmUp
        public void warmUp() throws InterruptedException
        {
            recorder.record("B");
        }
    }
}
| skinzer/governator | governator-legacy/src/test/java/com/netflix/governator/lifecycle/warmup/Flat.java | Java | apache-2.0 | 1,291 |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.idm.engine.impl.cmd;
import java.io.Serializable;
import org.flowable.engine.common.api.FlowableIllegalArgumentException;
import org.flowable.engine.common.api.FlowableObjectNotFoundException;
import org.flowable.engine.common.impl.interceptor.Command;
import org.flowable.engine.common.impl.interceptor.CommandContext;
import org.flowable.idm.api.Picture;
import org.flowable.idm.api.User;
import org.flowable.idm.engine.impl.util.CommandContextUtil;
/**
* @author Tom Baeyens
*/
public class SetUserPictureCmd implements Command<Object>, Serializable {

    private static final long serialVersionUID = 1L;

    protected String userId;
    protected Picture picture;

    public SetUserPictureCmd(String userId, Picture picture) {
        this.userId = userId;
        this.picture = picture;
    }

    /**
     * Validates the user id, loads the user, and stores the picture via the
     * user entity manager. Always returns null.
     */
    @Override
    public Object execute(CommandContext commandContext) {
        if (userId == null) {
            throw new FlowableIllegalArgumentException("userId is null");
        }
        User user = findExistingUser();
        CommandContextUtil.getUserEntityManager(commandContext).setUserPicture(user, picture);
        return null;
    }

    // Loads the user for userId, failing fast if it does not exist.
    private User findExistingUser() {
        User user = CommandContextUtil.getIdmEngineConfiguration().getIdmIdentityService()
                .createUserQuery().userId(userId)
                .singleResult();
        if (user == null) {
            throw new FlowableObjectNotFoundException("user " + userId + " doesn't exist", User.class);
        }
        return user;
    }
}
| zwets/flowable-engine | modules/flowable-idm-engine/src/main/java/org/flowable/idm/engine/impl/cmd/SetUserPictureCmd.java | Java | apache-2.0 | 2,022 |
---
title: "Como incluir os anúncios do Google AdSense em seu site"
description: "Siga as etapas para saber como incluir anúncios em seu site. Crie uma conta do Google AdSense, crie blocos de anúncios, veicule os blocos em seu site, configure as definições de pagamento e receba seus pagamentos."
updated_on: 2014-07-31
key-takeaways:
tldr:
- "Para criar uma conta do Google AdSense, é preciso ter 18 anos, ter uma Conta do Google e fornecer seu endereço."
- "Seu site deve estar ativo antes do envio da inscrição e o conteúdo do site deve estar em conformidade com as políticas do Google AdSense."
- "Crie blocos de anúncios responsivos para garantir que o anúncio esteja adequado para qualquer dispositivo utilizado pelo usuário."
- "Verifique as configurações de pagamento e espere o dinheiro começar a entrar."
notes:
crawler:
- "Certifique-se de que o rastreador do Google AdSense consegue acessar seu site (consulte <a href='https://support.google.com/adsense/answer/10532'>este tópico de ajuda</a>)."
body:
- "Cole todo o código do anúncio na tag de corpo, caso contrário, os anúncios não serão exibidos."
smarttag:
- "<code>data-ad-client</code> e <code>data-ad-slot</code> serão exclusivas para cada anúncio gerado."
- "A tag <code>data-ad-format=auto</code> no código de anúncio gerado ativa o comportamento de dimensão inteligente para o bloco de anúncios responsivo."
---
<p class="intro">
Siga as etapas para saber como incluir anúncios em seu site. Crie uma conta do Google AdSense, crie blocos de anúncios, veicule os blocos em seu site, configure as definições de pagamento e receba seus pagamentos.
</p>
{% include shared/toc.liquid %}
{% include shared/takeaway.liquid list=page.key-takeaways.tldr %}
## Como criar página de exemplo com os anúncios
Neste passo a passo, você criará uma página simples com anúncios responsivos por meio do Google AdSense e do Web Starter Kit:
<img src="images/ad-ss-600.png" sizes="100vw"
srcset="images/ad-ss-1200.png 1200w,
images/ad-ss-900.png 900w,
images/ad-ss-600.png 600w,
images/ad-ss-300.png 300w"
alt="Site de exemplo com anúncios para computador e celular">
Se você ainda não conhece o Web Starter Kit, consulte a documentação [Configurar o Web Starter Kit]({{site.fundamentals}}/tools/setup/setup_kit.html), em inglês.
Para incluir anúncios em seu site e receber pagamentos, você precisa seguir estas etapas simples:
1. Criar uma conta do Google AdSense.
2. Criar blocos de anúncios.
3. Veicular blocos de anúncios em uma página.
4. Configurar definições de pagamento.
## Criar uma conta do Google AdSense
Para veicular anúncios em seu site, você precisará de uma conta ativa do Google AdSense. Se você ainda não tem uma conta do Google AdSense, será preciso [criá-la](https://www.google.com/adsense/) e concordar com os termos de serviço do Google AdSense. Ao criar a conta, você precisa verificar:
* Que tem pelo menos 18 anos e possui uma Conta do Google verificada.
* Que possui um site ativo ou outro conteúdo on-line em conformidade com
[políticas do programa Google AdSense](https://support.google.com/adsense/answer/48182). É nesse site que os anúncios serão hospedados.
* Você tem um endereço postal e um endereço de e-mail associado à sua conta bancária, para poder receber os pagamentos.
## Criar blocos de anúncios
Um bloco de anúncios é um conjunto de anúncios exibidos em sua página em razão do JavaScript que você adiciona à sua página. Você tem três opções de dimensionamento para os blocos de anúncios:
* **[Responsivo (Recomendado)](https://support.google.com/adsense/answer/3213689)**.
* [Predefinido](https://support.google.com/adsense/answer/6002621).
* [Dimensão personalizada](https://support.google.com/adsense/answer/3289364).
Como você está criando um site responsivo, use blocos de anúncios responsivos.
Os anúncios responsivos se redimensionam automaticamente com base no tamanho do dispositivo e na largura do contêiner pai.
Os anúncios responsivos atuam in-line com seu layout responsivo, garantindo que o site ficará bonito em qualquer dispositivo.
Se você não usar blocos de anúncios responsivos, será preciso escrever muito mais códigos para controlar como os anúncios são exibidos com base no dispositivo de um usuário. Mesmo se você especificar o tamanho exato de seus blocos de anúncios, utilize os blocos responsivos no [modo avançado]({{site.fundamentals}}/monetization/ads/customize-ads.html#what-if-responsive-sizing-isnt-enough).
Para simplificar o código e poupar tempo e esforço, o código do anúncio responsivo adapta automaticamente o tamanho do bloco de anúncios ao layout de sua página.
O código calcula o tamanho necessário dinamicamente, com base na largura do contêiner pai do bloco de anúncios, e depois escolhe o tamanho de anúncio com o melhor desempenho que se encaixa no contêiner.
Por exemplo, um site otimizado para dispositivos móveis com largura de 360 px pode exibir um bloco de 320 x 50.
Rastreie os tamanhos de anúncios com o melhor desempenho no [Guia de tamanhos de anúncios do Google AdSense](https://support.google.com/adsense/answer/6002621#top).
### Para criar um bloco de anúncios responsivo
1. Acesse a [guia `Meus anúncios`](https://www.google.com/adsense/app#myads-springboard).
2. Clique em <strong>+Novo bloco de anúncios</strong>.
3. Forneça a seu bloco de anúncios um nome exclusivo. Esse nome é exibido no código de anúncio que é colado em seu site, então faça uma descrição.
4. Selecione <strong>Responsivo</strong> no menu suspenso `Tamanho do anúncio`.
5. Selecione <strong>Inserir anúncios gráficos e de texto</strong> no menu suspenso `Tipo de anúncio`.
6. Clique em <strong>Salvar e gerar código</strong>.
7. Na caixa <strong>Código de anúncio</strong> exibida, selecione a opção <strong>Dimensionamento inteligente (recomendado)</strong> no menu suspenso `Modo`.
Esse é o modo recomendado e que não exige alterações em seu código de anúncio.
Depois de criar o bloco de anúncios, o Google AdSense fornece um snippet de código a ser incluído em seu site, semelhante ao código abaixo:
{% highlight html %}
<script async src="//pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
<!-- Top ad in web starter kit sample -->
<ins class="adsbygoogle"
style="display:block"
data-ad-client="XX-XXX-XXXXXXXXXXXXXXXX"
data-ad-slot="XXXXXXXXXX"
data-ad-format="auto"></ins>
<script>
(adsbygoogle = window.adsbygoogle || []).push({});
</script>
{% endhighlight %}
{% include shared/remember.liquid title="Note" list=page.notes.smarttag %}
## Incluir blocos de anúncios em seu site
Para incluir o anúncio na página, precisamos colar o snippet fornecido pelo Google AdSense em sua marcação. Se você deseja incluir vários anúncios, pode reutilizar o mesmo bloco de anúncios ou criar vários blocos de anúncios.
1. Abra o `index.html` na pasta `app`.
2. Cole o snippet fornecido na tag `main`.
3. Salve o arquivo e tente visualizá-lo no navegador, depois tente abri-lo em um dispositivo móvel ou pelo emulador do Google Chrome.
{% include shared/remember.liquid title="Remember" list=page.notes.body %}
<div>
<a href="/web/fundamentals/resources/samples/monetization/ads/">
<img src="images/ad-ss-600.png" sizes="100vw"
srcset="images/ad-ss-1200.png 1200w,
images/ad-ss-900.png 900w,
images/ad-ss-600.png 600w,
images/ad-ss-300.png 300w"
alt="Site de exemplo com anúncios para computador e celular">
<br>
Tente
</a>
</div>
## Configurar definições de pagamento
Está imaginando quando seu pagamento do Google AdSense será feito? Tentando descobrir se você será pago neste mês ou no próximo? Não deixe de concluir as etapas abaixo:
1. Verifique se você forneceu quaisquer informações fiscais necessárias no [perfil de recebedor](https://www.google.com/adsense/app#payments3/h=BILLING_PROFILE).
2. Confirme se o nome e o endereço do recebedor estão corretos.
3. Selecione a forma de pagamento na [página `Configurações de pagamento`](https://www.google.com/adsense/app#payments3/h=ACCOUNT_SETTINGS).
4. Insira seu [número pessoal de identificação (PIN, na sigla em inglês)](https://support.google.com/adsense/answer/157667). Esse PIN confirma a exatidão de suas informações de conta.
5. Verifique se seu saldo atinge o [limite de pagamento](https://support.google.com/adsense/answer/1709871).
Consulte a [Introdução aos pagamentos do Google AdSense](https://support.google.com/adsense/answer/1709858) caso tenha dúvidas adicionais.
| beaufortfrancois/WebFundamentals | src/content/pt-br/fundamentals/discovery-and-distribution/monetization/ads/include-ads.markdown | Markdown | apache-2.0 | 8,808 |
/*
Copyright 2016 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package com.gs.fw.common.mithra.test.tax;
import com.gs.fw.finder.Operation;
import java.util.*;
public class FormRoleList extends FormRoleListAbstract
{
	/** Creates an empty list with the default initial capacity. */
	public FormRoleList()
	{
		super();
	}

	/** Creates an empty list pre-sized for the given number of elements. */
	public FormRoleList(int initialSize)
	{
		super(initialSize);
	}

	/** Creates a list containing the elements of the given collection. */
	public FormRoleList(Collection c)
	{
		super(c);
	}

	/** Creates a list whose contents are defined by the given finder operation. */
	public FormRoleList(Operation operation)
	{
		super(operation);
	}
}
| goldmansachs/reladomo | reladomo/src/test/java/com/gs/fw/common/mithra/test/tax/FormRoleList.java | Java | apache-2.0 | 1,001 |
/**
* Purpose of package - find largest number from 2 and 3 numbers.
* @since 1.0
* @author skuznetsov
* @version 2.0
*/
package ru.skuznetsov;
| kuznetsovsergeyymailcom/homework | chapter_001/maximumFromTwoNumbers/src/test/java/ru/skuznetsov/package-info.java | Java | apache-2.0 | 145 |
////////////////////////////////////////////////////////////////////////////////
/// @brief associative array implementation
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2014 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Martin Schoenert
/// @author Copyright 2014, ArangoDB GmbH, Cologne, Germany
/// @author Copyright 2006-2013, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_BASICS_C_ASSOCIATIVE_H
#define ARANGODB_BASICS_C_ASSOCIATIVE_H 1
#include <functional>
#include "Basics/Common.h"
#include "Basics/locks.h"
// -----------------------------------------------------------------------------
// --SECTION-- ASSOCIATIVE ARRAY
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// --SECTION-- public types
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief associative array
////////////////////////////////////////////////////////////////////////////////
typedef struct TRI_associative_array_s {
  // user-supplied callbacks for hashing, clearing, and equality testing
  uint64_t (*hashKey) (struct TRI_associative_array_s*, void*);
  uint64_t (*hashElement) (struct TRI_associative_array_s*, void*);
  void (*clearElement) (struct TRI_associative_array_s*, void*);
  bool (*isEmptyElement) (struct TRI_associative_array_s*, void*);
  bool (*isEqualKeyElement) (struct TRI_associative_array_s*, void*, void*);
  bool (*isEqualElementElement) (struct TRI_associative_array_s*, void*, void*);

  uint32_t _elementSize;  // size in bytes of one stored element
  uint32_t _nrAlloc; // the size of the table
  uint32_t _nrUsed; // the number of used entries

  char* _table; // the table itself

#ifdef TRI_INTERNAL_STATS
  uint64_t _nrFinds; // statistics: number of lookup calls
  uint64_t _nrAdds; // statistics: number of insert calls
  uint64_t _nrRems; // statistics: number of remove calls
  uint64_t _nrResizes; // statistics: number of resizes

  uint64_t _nrProbesF; // statistics: number of misses while looking up
  uint64_t _nrProbesA; // statistics: number of misses while inserting
  uint64_t _nrProbesD; // statistics: number of misses while removing
  uint64_t _nrProbesR; // statistics: number of misses while adding
#endif

  // memory zone used for all allocations of this array
  TRI_memory_zone_t* _memoryZone;
}
TRI_associative_array_t;
// -----------------------------------------------------------------------------
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief initialises an array
////////////////////////////////////////////////////////////////////////////////
int TRI_InitAssociativeArray (TRI_associative_array_t*,
TRI_memory_zone_t*,
size_t elementSize,
uint64_t (*hashKey) (TRI_associative_array_t*, void*),
uint64_t (*hashElement) (TRI_associative_array_t*, void*),
void (*clearElement) (TRI_associative_array_t*, void*),
bool (*isEmptyElement) (TRI_associative_array_t*, void*),
bool (*isEqualKeyElement) (TRI_associative_array_t*, void*, void*),
bool (*isEqualElementElement) (TRI_associative_array_t*, void*, void*));
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an array, but does not free the pointer
////////////////////////////////////////////////////////////////////////////////
void TRI_DestroyAssociativeArray (TRI_associative_array_t*);
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an array and frees the pointer
////////////////////////////////////////////////////////////////////////////////
void TRI_FreeAssociativeArray (TRI_memory_zone_t*, TRI_associative_array_t*);
// -----------------------------------------------------------------------------
// --SECTION-- public functions
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief lookups an element given a key
////////////////////////////////////////////////////////////////////////////////
void* TRI_LookupByKeyAssociativeArray (TRI_associative_array_t*, void* key);
////////////////////////////////////////////////////////////////////////////////
/// @brief finds an element given a key, returns NULL if not found
////////////////////////////////////////////////////////////////////////////////
void* TRI_FindByKeyAssociativeArray (TRI_associative_array_t*, void* key);
////////////////////////////////////////////////////////////////////////////////
/// @brief lookups an element given an element
////////////////////////////////////////////////////////////////////////////////
void* TRI_LookupByElementAssociativeArray (TRI_associative_array_t*, void* element);
////////////////////////////////////////////////////////////////////////////////
/// @brief finds an element given an element, returns NULL if not found
////////////////////////////////////////////////////////////////////////////////
void* TRI_FindByElementAssociativeArray (TRI_associative_array_t*, void* element);
////////////////////////////////////////////////////////////////////////////////
/// @brief adds an element to the array
////////////////////////////////////////////////////////////////////////////////
bool TRI_InsertElementAssociativeArray (TRI_associative_array_t*, void* element, bool overwrite);
////////////////////////////////////////////////////////////////////////////////
/// @brief adds a key/element to the array
////////////////////////////////////////////////////////////////////////////////
bool TRI_InsertKeyAssociativeArray (TRI_associative_array_t*, void* key, void* element, bool overwrite);
////////////////////////////////////////////////////////////////////////////////
/// @brief removes an element from the array
////////////////////////////////////////////////////////////////////////////////
bool TRI_RemoveElementAssociativeArray (TRI_associative_array_t*, void* element, void* old);
////////////////////////////////////////////////////////////////////////////////
/// @brief removes a key/element from the array
////////////////////////////////////////////////////////////////////////////////
bool TRI_RemoveKeyAssociativeArray (TRI_associative_array_t*, void* key, void* old);
////////////////////////////////////////////////////////////////////////////////
/// @brief get the number of elements from the array
////////////////////////////////////////////////////////////////////////////////
size_t TRI_GetLengthAssociativeArray (const TRI_associative_array_t* const);
// -----------------------------------------------------------------------------
// --SECTION-- ASSOCIATIVE POINTERS
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// --SECTION-- public types
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief associative array of pointers
////////////////////////////////////////////////////////////////////////////////
typedef struct TRI_associative_pointer_s {
  // user-supplied callbacks for hashing and equality testing;
  // unlike TRI_associative_array_t, elements are stored as void* pointers
  uint64_t (*hashKey) (struct TRI_associative_pointer_s*, void const*);
  uint64_t (*hashElement) (struct TRI_associative_pointer_s*, void const*);
  bool (*isEqualKeyElement) (struct TRI_associative_pointer_s*, void const*, void const*);
  bool (*isEqualElementElement) (struct TRI_associative_pointer_s*, void const*, void const*);

  uint32_t _nrAlloc; // the size of the table
  uint32_t _nrUsed; // the number of used entries

  void** _table; // the table itself

#ifdef TRI_INTERNAL_STATS
  uint64_t _nrFinds; // statistics: number of lookup calls
  uint64_t _nrAdds; // statistics: number of insert calls
  uint64_t _nrRems; // statistics: number of remove calls
  uint64_t _nrResizes; // statistics: number of resizes

  uint64_t _nrProbesF; // statistics: number of misses while looking up
  uint64_t _nrProbesA; // statistics: number of misses while inserting
  uint64_t _nrProbesD; // statistics: number of misses while removing
  uint64_t _nrProbesR; // statistics: number of misses while adding
#endif

  // memory zone used for all allocations of this array
  TRI_memory_zone_t* _memoryZone;
}
TRI_associative_pointer_t;
// -----------------------------------------------------------------------------
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief initialises an array
////////////////////////////////////////////////////////////////////////////////
int TRI_InitAssociativePointer (TRI_associative_pointer_t* array,
TRI_memory_zone_t*,
uint64_t (*hashKey) (TRI_associative_pointer_t*, void const*),
uint64_t (*hashElement) (TRI_associative_pointer_t*, void const*),
bool (*isEqualKeyElement) (TRI_associative_pointer_t*, void const*, void const*),
bool (*isEqualElementElement) (TRI_associative_pointer_t*, void const*, void const*));
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an array, but does not free the pointer
////////////////////////////////////////////////////////////////////////////////
void TRI_DestroyAssociativePointer (TRI_associative_pointer_t*);
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an array and frees the pointer
////////////////////////////////////////////////////////////////////////////////
void TRI_FreeAssociativePointer (TRI_memory_zone_t*, TRI_associative_pointer_t*);
// -----------------------------------------------------------------------------
// --SECTION-- public functions
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief General hash function that can be used to hash a pointer
////////////////////////////////////////////////////////////////////////////////
uint64_t TRI_HashPointerKeyAssociativePointer (TRI_associative_pointer_t*,
void const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief General hash function that can be used to hash a string key
////////////////////////////////////////////////////////////////////////////////
uint64_t TRI_HashStringKeyAssociativePointer (TRI_associative_pointer_t*,
void const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief general function to determine equality of two string values
////////////////////////////////////////////////////////////////////////////////
bool TRI_EqualStringKeyAssociativePointer (TRI_associative_pointer_t*,
void const*,
void const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief reserves space in the array for the given number of extra elements
////////////////////////////////////////////////////////////////////////////////
bool TRI_ReserveAssociativePointer (TRI_associative_pointer_t*,
int32_t);
////////////////////////////////////////////////////////////////////////////////
/// @brief looks up an element given a key
////////////////////////////////////////////////////////////////////////////////
void* TRI_LookupByKeyAssociativePointer (TRI_associative_pointer_t*,
void const* key);
////////////////////////////////////////////////////////////////////////////////
/// @brief looks up an element given an element
////////////////////////////////////////////////////////////////////////////////
void* TRI_LookupByElementAssociativePointer (TRI_associative_pointer_t*,
void const* element);
////////////////////////////////////////////////////////////////////////////////
/// @brief adds an element to the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_InsertElementAssociativePointer (TRI_associative_pointer_t*,
void* element,
bool overwrite);
////////////////////////////////////////////////////////////////////////////////
/// @brief adds a key/element pair to the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_InsertKeyAssociativePointer (TRI_associative_pointer_t*,
void const* key,
void* element,
bool overwrite);
////////////////////////////////////////////////////////////////////////////////
/// @brief adds a key/element pair to the array;
/// returns a status code, and *found will contain a found element (if any)
////////////////////////////////////////////////////////////////////////////////
int TRI_InsertKeyAssociativePointer2 (TRI_associative_pointer_t*,
void const*,
void*,
void const**);
////////////////////////////////////////////////////////////////////////////////
/// @brief removes an element from the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_RemoveElementAssociativePointer (TRI_associative_pointer_t*,
void const* element);
////////////////////////////////////////////////////////////////////////////////
/// @brief removes a key/element pair from the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_RemoveKeyAssociativePointer (TRI_associative_pointer_t*,
void const* key);
////////////////////////////////////////////////////////////////////////////////
/// @brief gets the number of elements in the array
////////////////////////////////////////////////////////////////////////////////
size_t TRI_GetLengthAssociativePointer (const TRI_associative_pointer_t* const);
// -----------------------------------------------------------------------------
// --SECTION-- ASSOCIATIVE SYNCED
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// --SECTION-- public types
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief associative array of synced pointers
///
/// Note that lookup, insert, and remove are protected using a read-write lock.
////////////////////////////////////////////////////////////////////////////////
typedef struct TRI_associative_synced_s {
uint64_t (*hashKey) (struct TRI_associative_synced_s*, void const*); // hashes a lookup key
uint64_t (*hashElement) (struct TRI_associative_synced_s*, void const*); // hashes a stored element
bool (*isEqualKeyElement) (struct TRI_associative_synced_s*, void const*, void const*); // compares a key with an element
bool (*isEqualElementElement) (struct TRI_associative_synced_s*, void const*, void const*); // compares two elements
uint32_t _nrAlloc; // the size of the table
uint32_t _nrUsed; // the number of used entries
void** _table; // the table itself
TRI_read_write_lock_t _lock; // guards lookup/insert/remove (see note above)
TRI_memory_zone_t* _memoryZone; // memory zone used for the table -- NOTE(review): confirm against the .cpp
}
TRI_associative_synced_t;
// -----------------------------------------------------------------------------
// --SECTION-- constructors and destructors
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief initialises an array with the given hash and equality callbacks
////////////////////////////////////////////////////////////////////////////////
int TRI_InitAssociativeSynced (TRI_associative_synced_t* array,
TRI_memory_zone_t*,
uint64_t (*hashKey) (TRI_associative_synced_t*, void const*),
uint64_t (*hashElement) (TRI_associative_synced_t*, void const*),
bool (*isEqualKeyElement) (TRI_associative_synced_t*, void const*, void const*),
bool (*isEqualElementElement) (TRI_associative_synced_t*, void const*, void const*));
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an array, but does not free the pointer
////////////////////////////////////////////////////////////////////////////////
void TRI_DestroyAssociativeSynced (TRI_associative_synced_t*);
////////////////////////////////////////////////////////////////////////////////
/// @brief destroys an array and frees the pointer
////////////////////////////////////////////////////////////////////////////////
void TRI_FreeAssociativeSynced (TRI_memory_zone_t*, TRI_associative_synced_t*);
// -----------------------------------------------------------------------------
// --SECTION--                                                  public functions
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief looks up an element given a key
////////////////////////////////////////////////////////////////////////////////
void const* TRI_LookupByKeyAssociativeSynced (TRI_associative_synced_t*, void const* key);
////////////////////////////////////////////////////////////////////////////////
/// @brief looks up an element given a key and calls the callback function while
/// the read-lock is held; returns the callback's result
///
/// NOTE(review): when no matching element exists, the callback receives the
/// slot where probing stopped -- a nullptr, or (if the probed region has no
/// free slot) a non-matching element. Callers must tolerate both; confirm
/// against the TRI_LookupByKeyAssociativeSynced implementation.
////////////////////////////////////////////////////////////////////////////////
template<typename T>
T TRI_ProcessByKeyAssociativeSynced (TRI_associative_synced_t* array,
void const* key,
std::function<T(void const*)> callback) {
// compute the hash of the key to select the initial probe slot
uint64_t const hash = array->hashKey(array, key);
// search the table under the read-lock using linear probing
TRI_ReadLockReadWriteLock(&array->_lock);
uint64_t const n = array->_nrAlloc;
uint64_t i, k;
i = k = hash % n;
// probe forward from the initial slot to the end of the table
for (; i < n && array->_table[i] != nullptr && ! array->isEqualKeyElement(array, key, array->_table[i]); ++i);
if (i == n) {
// wrap around and continue probing from the start up to the initial slot
for (i = 0; i < k && array->_table[i] != nullptr && ! array->isEqualKeyElement(array, key, array->_table[i]); ++i);
}
T result = callback(array->_table[i]);
TRI_ReadUnlockReadWriteLock(&array->_lock);
// return whatever we found
return result;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief looks up an element given an element
////////////////////////////////////////////////////////////////////////////////
void const* TRI_LookupByElementAssociativeSynced (TRI_associative_synced_t*,
void const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief adds an element to the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_InsertElementAssociativeSynced (TRI_associative_synced_t*,
void*,
bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief adds a key/element pair to the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_InsertKeyAssociativeSynced (TRI_associative_synced_t*,
void const*,
void*,
bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief removes an element from the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_RemoveElementAssociativeSynced (TRI_associative_synced_t*,
void const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief removes a key/element pair from the array
////////////////////////////////////////////////////////////////////////////////
void* TRI_RemoveKeyAssociativeSynced (TRI_associative_synced_t*,
void const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief gets the number of elements in the array
////////////////////////////////////////////////////////////////////////////////
size_t TRI_GetLengthAssociativeSynced (TRI_associative_synced_t* const);
#endif
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// Local Variables:
// mode: outline-minor
// outline-regexp: "/// @brief\\|/// {@inheritDoc}\\|/// @page\\|// --SECTION--\\|/// @\\}"
// End:
| kkdd/arangodb | lib/Basics/associative.h | C | apache-2.0 | 23,212 |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
. "github.com/onsi/gomega"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/generated"
)
const (
// Poll interval for StatefulSet tests
StatefulSetPoll = 10 * time.Second
// Timeout interval for StatefulSet operations
StatefulSetTimeout = 10 * time.Minute
// Timeout for stateful pods to change state
StatefulPodTimeout = 5 * time.Minute
)
// CreateStatefulSetService creates a Headless Service with Name name and Selector set to match labels.
func CreateStatefulSetService(name string, labels map[string]string) *v1.Service {
    // Build the complete headless Service in a single literal: one HTTP port,
    // and ClusterIP "None", which is what makes the Service headless.
    return &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Spec: v1.ServiceSpec{
            Selector:  labels,
            ClusterIP: "None",
            Ports: []v1.ServicePort{
                {Port: 80, Name: "http", Protocol: "TCP"},
            },
        },
    }
}
// StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
// The manifest is read from the embedded test data; any parsing failure aborts the test via Gomega.
func StatefulSetFromManifest(fileName, ns string) *apps.StatefulSet {
    var ss apps.StatefulSet
    Logf("Parsing statefulset from %v", fileName)
    data := generated.ReadOrDie(fileName)
    // Manifests are YAML; convert to JSON so the universal decoder can handle them.
    json, err := utilyaml.ToJSON(data)
    Expect(err).NotTo(HaveOccurred())
    Expect(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ss)).NotTo(HaveOccurred())
    ss.Namespace = ns
    // Default the selector to the template labels when the manifest omits it.
    if ss.Spec.Selector == nil {
        ss.Spec.Selector = &metav1.LabelSelector{
            MatchLabels: ss.Spec.Template.Labels,
        }
    }
    return &ss
}
// StatefulSetTester is a struct that contains utility methods for testing StatefulSet related functionality. It uses a
// clientset.Interface to communicate with the API server.
type StatefulSetTester struct {
    c clientset.Interface // client used for all API server interactions
}
// NewStatefulSetTester creates a StatefulSetTester that uses c to interact with the API server.
func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
    tester := &StatefulSetTester{c: c}
    return tester
}
// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
// It first creates the governing service from service.yaml, then the StatefulSet itself, and finally waits for all
// replicas to become Running and Ready.
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.StatefulSet {
    mkpath := func(file string) string {
        return filepath.Join(manifestPath, file)
    }
    ss := StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
    svcYaml := generated.ReadOrDie(mkpath("service.yaml"))
    ssYaml := generated.ReadOrDie(mkpath("statefulset.yaml"))
    // Logf is printf-style; wrapping a pre-formatted fmt.Sprintf string in it
    // is a double format that `go vet` flags. Pass format args directly.
    Logf("creating %v service", ss.Name)
    RunKubectlOrDieInput(string(svcYaml), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
    Logf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)
    RunKubectlOrDieInput(string(ssYaml), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
    s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
    return ss
}
// CheckMount checks that the mount at mountPath is valid for all Pods in ss:
// the path must be listable, walkable, and writable inside every stateful Pod.
func (s *StatefulSetTester) CheckMount(ss *apps.StatefulSet, mountPath string) error {
    for _, cmd := range []string{
        // Print inode, size etc
        fmt.Sprintf("ls -idlh %v", mountPath),
        // Print subdirs
        fmt.Sprintf("find %v", mountPath),
        // Try writing; UnixNano keeps the probe filename unique across runs
        fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
    } {
        if err := s.ExecInStatefulPods(ss, cmd); err != nil {
            return fmt.Errorf("failed to execute %v, error: %v", cmd, err)
        }
    }
    return nil
}
// ExecInStatefulPods executes cmd in all Pods in ss. If an error occurs it is returned and cmd is not executed in any
// subsequent Pods.
func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string) error {
    podList := s.GetPodList(ss)
    for _, statefulPod := range podList.Items {
        stdout, err := RunHostCmd(statefulPod.Namespace, statefulPod.Name, cmd)
        // Log stdout even on failure, to aid debugging.
        Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
        if err != nil {
            return err
        }
    }
    return nil
}
// CheckHostname verifies that all Pods in ss have the correct Hostname (which must equal the Pod name).
// If the returned error is not nil then verification failed.
func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
    // printf avoids the trailing newline that plain `hostname` would emit.
    cmd := "printf $(hostname)"
    podList := s.GetPodList(ss)
    for _, statefulPod := range podList.Items {
        hostname, err := RunHostCmd(statefulPod.Namespace, statefulPod.Name, cmd)
        if err != nil {
            return err
        }
        if hostname != statefulPod.Name {
            return fmt.Errorf("unexpected hostname (%s) and stateful pod name (%s) not equal", hostname, statefulPod.Name)
        }
    }
    return nil
}
// Saturate waits for all Pods in ss to become Running and Ready, marking each Pod healthy in turn
// so that the next ordinal can be created.
func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
    var i int32
    for i = 0; i < *(ss.Spec.Replicas); i++ {
        // Logf is printf-style; pass the ordinal as a format argument rather
        // than concatenating a pre-formatted string (original mixed both).
        Logf("Waiting for stateful pod at index %v to enter Running", i+1)
        s.WaitForRunningAndReady(i+1, ss)
        Logf("Marking stateful pod at index %v healthy", i)
        s.SetHealthy(ss)
    }
}
// DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss.
// The Pod is deleted with a zero grace period, i.e. immediately.
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.StatefulSet) {
    name := getStatefulSetPodNameAtIndex(index, ss)
    noGrace := int64(0)
    if err := s.c.Core().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
        Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
    }
}
// VerifyStatefulPodFunc is a func that examines a StatefulSetPod.
type VerifyStatefulPodFunc func(*v1.Pod)

// VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *apps.StatefulSet, verify VerifyStatefulPodFunc) {
    name := getStatefulSetPodNameAtIndex(index, ss)
    pod, err := s.c.Core().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
    verify(pod)
}
// getStatefulSetPodNameAtIndex returns the name of the Pod with ordinal index in ss.
func getStatefulSetPodNameAtIndex(index int, ss *apps.StatefulSet) string {
    // TODO: we won't use "-index" as the name strategy forever,
    // pull the name out from an identity mapper.
    return ss.Name + "-" + strconv.Itoa(index)
}
// Scale scales ss to count replicas and waits (up to StatefulSetTimeout) for the number of Pods to match.
// On timeout the returned error lists the Pods that are still not healthy.
func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) error {
    name := ss.Name
    ns := ss.Namespace
    s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
    var statefulPodList *v1.PodList
    pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
        statefulPodList = s.GetPodList(ss)
        if int32(len(statefulPodList.Items)) == count {
            return true, nil
        }
        return false, nil
    })
    if pollErr != nil {
        // Summarize every pod that is deleted, not running, or not ready.
        unhealthy := []string{}
        for _, statefulPod := range statefulPodList.Items {
            delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, podutil.IsPodReady(&statefulPod)
            if delTs != nil || phase != v1.PodRunning || !readiness {
                unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness))
            }
        }
        return fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, StatefulSetTimeout, unhealthy)
    }
    return nil
}
// UpdateReplicas updates the replicas of ss to count. Unlike Scale, it does not wait for the Pods to appear.
func (s *StatefulSetTester) UpdateReplicas(ss *apps.StatefulSet, count int32) {
    s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { ss.Spec.Replicas = &count })
}
// Restart scales ss to 0 and then back to its previous number of replicas.
// Scale(0) waits for the Pods to be removed; the scale-up is applied without waiting.
func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
    oldReplicas := *(ss.Spec.Replicas)
    ExpectNoError(s.Scale(ss, 0))
    s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
}
// update fetches the named StatefulSet, applies the mutation in update, and writes it back,
// retrying up to 3 times on update conflicts or server timeouts. Any other error is fatal.
func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) {
    for i := 0; i < 3; i++ {
        ss, err := s.c.Apps().StatefulSets(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            Failf("failed to get statefulset %q: %v", name, err)
        }
        update(ss)
        ss, err = s.c.Apps().StatefulSets(ns).Update(ss)
        if err == nil {
            return
        }
        // Only conflicts and server timeouts are retriable; anything else fails the test.
        if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
            Failf("failed to update statefulset %q: %v", name, err)
        }
    }
    Failf("too many retries draining statefulset %q", name)
}
// GetPodList gets the current Pods in ss, selected via the StatefulSet's label selector.
func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList {
    selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
    ExpectNoError(err)
    podList, err := s.c.Core().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
    ExpectNoError(err)
    return podList
}
// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count, polling once a second
// until timeout elapses. If hard is true, any deviation fails the test immediately; otherwise deviations
// are only logged and polling continues.
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.StatefulSet, timeout time.Duration, hard bool) {
    start := time.Now()
    deadline := start.Add(timeout)
    for t := time.Now(); t.Before(deadline); t = time.Now() {
        podList := s.GetPodList(ss)
        statefulPodCount := len(podList.Items)
        if statefulPodCount != count {
            logPodStates(podList.Items)
            if hard {
                // Message fixed: the original repeated the word "scaled"
                // ("scaled unexpectedly scaled").
                Failf("StatefulSet %v unexpectedly scaled to %d -> %d replicas", ss.Name, count, statefulPodCount)
            } else {
                Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
            }
            time.Sleep(1 * time.Second)
            continue
        }
        Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
        time.Sleep(1 * time.Second)
    }
}
// waitForRunning waits for numStatefulPods in ss to be Running and to have readiness equal to shouldBeReady.
// It fails the test if more than numStatefulPods Pods are scheduled or if the timeout elapses.
func (s *StatefulSetTester) waitForRunning(numStatefulPods int32, ss *apps.StatefulSet, shouldBeReady bool) {
    pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
        func() (bool, error) {
            podList := s.GetPodList(ss)
            if int32(len(podList.Items)) < numStatefulPods {
                Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numStatefulPods)
                return false, nil
            }
            if int32(len(podList.Items)) > numStatefulPods {
                // Too many Pods is a hard error, not something to wait out.
                return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numStatefulPods, len(podList.Items))
            }
            for _, p := range podList.Items {
                isReady := podutil.IsPodReady(&p)
                // desiredReadiness is true when the Pod's readiness matches what the caller asked for.
                desiredReadiness := shouldBeReady == isReady
                Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
                if p.Status.Phase != v1.PodRunning || !desiredReadiness {
                    return false, nil
                }
            }
            return true, nil
        })
    if pollErr != nil {
        Failf("Failed waiting for pods to enter running: %v", pollErr)
    }
}
// WaitForState periodically polls for the ss and its pods until the until function returns either true or an error.
// The until function receives a freshly fetched StatefulSet and its current Pod list on every poll.
func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.StatefulSet, *v1.PodList) (bool, error)) {
    pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
        func() (bool, error) {
            ssGet, err := s.c.Apps().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            podList := s.GetPodList(ssGet)
            return until(ssGet, podList)
        })
    if pollErr != nil {
        Failf("Failed waiting for pods to enter running: %v", pollErr)
    }
}
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *apps.StatefulSet) {
    s.waitForRunning(numStatefulPods, ss, true)
}
// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
// (Comment fixed: it previously named WaitForRunningAndReady.)
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *apps.StatefulSet) {
    s.waitForRunning(numStatefulPods, ss, false)
}
// BreakProbe breaks the readiness probe for Nginx StatefulSet containers by moving the probed
// file out of the web root in every Pod of ss. probe must be an HTTPGet probe with a non-empty Path.
func (s *StatefulSetTester) BreakProbe(ss *apps.StatefulSet, probe *v1.Probe) error {
    path := probe.HTTPGet.Path
    if path == "" {
        return fmt.Errorf("Path expected to be not empty: %v", path)
    }
    cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/", path)
    return s.ExecInStatefulPods(ss, cmd)
}
// RestoreProbe restores the readiness probe for Nginx StatefulSet containers by moving the probed
// file back into the web root (the inverse of BreakProbe).
func (s *StatefulSetTester) RestoreProbe(ss *apps.StatefulSet, probe *v1.Probe) error {
    path := probe.HTTPGet.Path
    if path == "" {
        return fmt.Errorf("Path expected to be not empty: %v", path)
    }
    cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/", path)
    return s.ExecInStatefulPods(ss, cmd)
}
// SetHealthy updates the StatefulSet InitAnnotation to true in order to set a StatefulSet Pod to be Running and Ready.
// It expects at most one uninitialized Pod at a time; all Pods must already be Running.
func (s *StatefulSetTester) SetHealthy(ss *apps.StatefulSet) {
    podList := s.GetPodList(ss)
    markedHealthyPod := ""
    for _, pod := range podList.Items {
        if pod.Status.Phase != v1.PodRunning {
            Failf("Found pod in %v cannot set health", pod.Status.Phase)
        }
        // Already-initialized Pods are left untouched.
        if IsStatefulSetPodInitialized(pod) {
            continue
        }
        // Marking more than one Pod per call would indicate the StatefulSet
        // created Pods out of order; treat that as a failure.
        if markedHealthyPod != "" {
            Failf("Found multiple non-healthy stateful pods: %v and %v", pod.Name, markedHealthyPod)
        }
        p, err := UpdatePodWithRetries(s.c, pod.Namespace, pod.Name, func(update *v1.Pod) {
            update.Annotations[apps.StatefulSetInitAnnotation] = "true"
        })
        ExpectNoError(err)
        Logf("Set annotation %v to %v on pod %v", apps.StatefulSetInitAnnotation, p.Annotations[apps.StatefulSetInitAnnotation], pod.Name)
        markedHealthyPod = pod.Name
    }
}
// WaitForStatusReadyReplicas waits for ss.Status.ReadyReplicas to be equal to expectedReplicas,
// ignoring status reported for an older generation of the spec.
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
    // Messages fixed: this function compares ReadyReplicas, but the original
    // logged "status.replicas" and printed ssGet.Status.Replicas, which is a
    // different field and made the log misleading.
    Logf("Waiting for statefulset status.readyReplicas updated to %d", expectedReplicas)
    ns, name := ss.Namespace, ss.Name
    pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
        func() (bool, error) {
            ssGet, err := s.c.Apps().StatefulSets(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            // Skip stale status from a previous spec generation.
            if *ssGet.Status.ObservedGeneration < ss.Generation {
                return false, nil
            }
            if ssGet.Status.ReadyReplicas != expectedReplicas {
                Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
                return false, nil
            }
            return true, nil
        })
    if pollErr != nil {
        Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
    }
}
// WaitForStatusReplicas waits for ss.Status.Replicas to be equal to expectedReplicas,
// ignoring status reported for an older generation of the spec.
func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
    Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
    ns, name := ss.Namespace, ss.Name
    pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
        func() (bool, error) {
            ssGet, err := s.c.Apps().StatefulSets(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            // Skip stale status from a previous spec generation.
            if *ssGet.Status.ObservedGeneration < ss.Generation {
                return false, nil
            }
            if ssGet.Status.Replicas != expectedReplicas {
                Logf("Waiting for stateful set status to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
                return false, nil
            }
            return true, nil
        })
    if pollErr != nil {
        Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
    }
}
// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
// Receiver renamed from "p" to "s" for consistency with every other StatefulSetTester method.
func (s *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error {
    Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)
    if expectedServiceName != ss.Spec.ServiceName {
        return fmt.Errorf("Wrong service name governing statefulset. Expected %s got %s",
            expectedServiceName, ss.Spec.ServiceName)
    }
    return nil
}
// DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns,
// then deletes their PVCs and waits for the bound PVs to disappear.
// All failures are accumulated and reported together at the end.
func DeleteAllStatefulSets(c clientset.Interface, ns string) {
    sst := &StatefulSetTester{c: c}
    ssList, err := c.Apps().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
    ExpectNoError(err)
    // Scale down each statefulset, then delete it completely.
    // Deleting a pvc without doing this will leak volumes, #25101.
    errList := []string{}
    for _, ss := range ssList.Items {
        Logf("Scaling statefulset %v to 0", ss.Name)
        if err := sst.Scale(&ss, 0); err != nil {
            errList = append(errList, fmt.Sprintf("%v", err))
        }
        sst.WaitForStatusReplicas(&ss, 0)
        Logf("Deleting statefulset %v", ss.Name)
        // Use OrphanDependents=false so it's deleted synchronously.
        // We already made sure the Pods are gone inside Scale().
        if err := c.Apps().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
            errList = append(errList, fmt.Sprintf("%v", err))
        }
    }
    // pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
    pvNames := sets.NewString()
    // TODO: Don't assume all pvcs in the ns belong to a statefulset
    pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
        pvcList, err := c.Core().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
        if err != nil {
            Logf("WARNING: Failed to list pvcs, retrying %v", err)
            return false, nil
        }
        for _, pvc := range pvcList.Items {
            // Remember the bound PV so we can wait for its deletion below.
            pvNames.Insert(pvc.Spec.VolumeName)
            // TODO: Double check that there are no pods referencing the pvc
            Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
            if err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
                return false, nil
            }
        }
        return true, nil
    })
    if pvcPollErr != nil {
        errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
    }
    pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
        pvList, err := c.Core().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
        if err != nil {
            Logf("WARNING: Failed to list pvs, retrying %v", err)
            return false, nil
        }
        waitingFor := []string{}
        for _, pv := range pvList.Items {
            if pvNames.Has(pv.Name) {
                waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
            }
        }
        if len(waitingFor) == 0 {
            return true, nil
        }
        Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
        return false, nil
    })
    if pollErr != nil {
        errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
    }
    if len(errList) != 0 {
        ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
    }
}
// IsStatefulSetPodInitialized returns true if pod's StatefulSetInitAnnotation exists and is set to true.
// A missing annotation yields false; an unparsable value fails the test.
func IsStatefulSetPodInitialized(pod v1.Pod) bool {
    initialized, ok := pod.Annotations[apps.StatefulSetInitAnnotation]
    if !ok {
        return false
    }
    inited, err := strconv.ParseBool(initialized)
    if err != nil {
        Failf("Couldn't parse statefulset init annotations %v", initialized)
    }
    return inited
}
// NewStatefulSetPVC returns a PersistentVolumeClaim named name, for testing StatefulSets.
func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
    // One byte of ReadWriteOnce storage; the alpha storage-class annotation
    // lets any available provisioner satisfy the claim.
    storageRequest := *resource.NewQuantity(1, resource.BinarySI)
    pvc := v1.PersistentVolumeClaim{}
    pvc.ObjectMeta = metav1.ObjectMeta{
        Name: name,
        Annotations: map[string]string{
            "volume.alpha.kubernetes.io/storage-class": "anything",
        },
    }
    pvc.Spec = v1.PersistentVolumeClaimSpec{
        AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
        Resources: v1.ResourceRequirements{
            Requests: v1.ResourceList{v1.ResourceStorage: storageRequest},
        },
    }
    return pvc
}
// NewStatefulSet creates a new NGINX StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
// statefulPodsMounts are the mounts that will be backed by PVs. podsMounts are the mounts that are mounted directly
// to the Pod. labels are the labels that will be used for the StatefulSet selector.
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *apps.StatefulSet {
    mounts := append(statefulPodMounts, podMounts...)
    // Each PV-backed mount gets its own PVC template.
    claims := []v1.PersistentVolumeClaim{}
    for _, m := range statefulPodMounts {
        claims = append(claims, NewStatefulSetPVC(m.Name))
    }
    // Direct mounts are backed by host paths under /tmp.
    vols := []v1.Volume{}
    for _, m := range podMounts {
        vols = append(vols, v1.Volume{
            Name: m.Name,
            VolumeSource: v1.VolumeSource{
                HostPath: &v1.HostPathVolumeSource{
                    Path: fmt.Sprintf("/tmp/%v", m.Name),
                },
            },
        })
    }
    return &apps.StatefulSet{
        TypeMeta: metav1.TypeMeta{
            Kind:       "StatefulSet",
            APIVersion: "apps/v1beta1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: ns,
        },
        Spec: apps.StatefulSetSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: labels,
            },
            Replicas: func(i int32) *int32 { return &i }(replicas),
            Template: v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels:      labels,
                    Annotations: map[string]string{},
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{
                        {
                            Name:         "nginx",
                            Image:        "gcr.io/google_containers/nginx-slim:0.7",
                            VolumeMounts: mounts,
                        },
                    },
                    Volumes: vols,
                },
            },
            UpdateStrategy:       apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
            VolumeClaimTemplates: claims,
            ServiceName:          governingSvcName,
        },
    }
}
// SetStatefulSetInitializedAnnotation sets the StatefulSetInitAnnotation of ss's Pod template to value.
func SetStatefulSetInitializedAnnotation(ss *apps.StatefulSet, value string) {
    ss.Spec.Template.ObjectMeta.Annotations["pod.alpha.kubernetes.io/initialized"] = value
}
| caesarxuchao/kubernetes | test/e2e/framework/statefulset_utils.go | GO | apache-2.0 | 23,045 |
/*
* Copyright 2014 Guidewire Software, Inc.
*/
package gw.internal.gosu.parser;
import gw.lang.reflect.IMethodInfo;
/**
 * Reduced form of a {@link DelegateFunctionSymbol}: retains only the
 * {@link IMethodInfo} of the delegation target, captured from the original
 * symbol at construction time, so it can still be queried after reduction.
 */
public class ReducedDelegateFunctionSymbol extends ReducedDynamicFunctionSymbol implements IReducedDelegateFunctionSymbol {
  // Method info of the delegated-to method, taken from dfs.getMi().
  private IMethodInfo _targetMethodInfo;

  ReducedDelegateFunctionSymbol(DelegateFunctionSymbol dfs) {
    super( dfs );
    _targetMethodInfo = dfs.getMi();
  }

  /** @return the method info of the delegation target */
  @Override
  public IMethodInfo getTargetMethodInfo() {
    return _targetMethodInfo;
  }
}
| dumitru-petrusca/gosu-lang | gosu-core/src/main/java/gw/internal/gosu/parser/ReducedDelegateFunctionSymbol.java | Java | apache-2.0 | 511 |
"""
Tests for the integration test suite itself.
"""
import logging
import os
import subprocess
from collections import defaultdict
from pathlib import Path
from typing import DefaultDict, List, Set

import yaml

from get_test_group import patterns_from_group
__maintainer__ = 'adam'
__contact__ = 'tools-infra-team@mesosphere.io'
log = logging.getLogger(__file__)
def _tests_from_pattern(ci_pattern: str) -> Set[str]:
    """
    From a CI pattern, get all tests ``pytest`` would collect.

    Args:
        ci_pattern: A pytest collection pattern (e.g. a path or a
            ``path::test`` node id). An empty string collects everything.

    Returns:
        The set of collected test identifiers.

    Raises:
        Exception: If pytest reports a collection error for the pattern.
    """
    # Idiom fix: ``set()`` instead of ``set([])``.
    tests = set()  # type: Set[str]
    args = [
        'pytest',
        '--disable-pytest-warnings',
        '--collect-only',
        ci_pattern,
        '-q',
    ]
    # Test names will not be in ``stderr`` so we ignore that.
    result = subprocess.run(
        args=args,
        stdout=subprocess.PIPE,
        env={**os.environ, **{'PYTHONIOENCODING': 'UTF-8'}},
    )
    output = result.stdout
    for line in output.splitlines():
        if b'error in' in line:
            message = (
                'Error collecting tests for pattern "{ci_pattern}". '
                'Full output:\n'
                '{output}'
            ).format(
                ci_pattern=ci_pattern,
                output=output,
            )
            raise Exception(message)
        # Whitespace is important to avoid confusing pytest warning messages
        # with test names. For example, the pytest output may contain '3 tests
        # deselected' which would conflict with a test file called
        # test_agent_deselected.py if we ignored whitespace.
        if (
            line and
            # Some tests show warnings on collection.
            b' warnings' not in line and
            # Some tests are skipped on collection.
            b'skipped in' not in line and
            # Some tests are deselected by the ``pytest.ini`` configuration.
            b' deselected' not in line and
            not line.startswith(b'no tests ran in')
        ):
            tests.add(line.decode())
    return tests
def test_test_groups() -> None:
    """
    The test suite is split into various "groups".
    This test confirms that the groups together contain all tests, and each
    test is collected only once.

    :raises Exception: If any test is collected by more than one group
        pattern.
    """
    test_group_file = Path('test_groups.yaml')
    test_group_file_contents = test_group_file.read_text()
    # ``safe_load`` is sufficient for this plain-mapping config file and
    # avoids the arbitrary-object construction that bare ``yaml.load``
    # permits (bare ``yaml.load`` without a Loader is also deprecated).
    test_groups = yaml.safe_load(test_group_file_contents)['groups']
    test_patterns = []
    for group in test_groups:
        test_patterns += patterns_from_group(group_name=group)
    # Map each collected test to every pattern that collects it, so we can
    # canonically state whether every test was collected _exactly_ once :-)
    tests_to_patterns = defaultdict(list)  # type: Mapping[str, List]
    for pattern in test_patterns:
        tests = _tests_from_pattern(ci_pattern=pattern)
        for test in tests:
            tests_to_patterns[test].append(pattern)
    # Collect all violations before failing so that the log shows every
    # multiply-collected test, not just the first one.  (The original code
    # contained ``assert len(patterns) != 1`` inside this branch, which was a
    # tautology and could never fire; the error list below is the real check.)
    errs = []
    for test_name, patterns in tests_to_patterns.items():
        if len(patterns) != 1:
            message = (
                'Test "{test_name}" will be run once for each pattern in '
                '{patterns}. '
                'Each test should be run only once.'
            ).format(
                test_name=test_name,
                patterns=patterns,
            )
            errs.append(message)
    if errs:
        for message in errs:
            log.error(message)
        raise Exception("Some tests are not collected exactly once, see errors.")
    # The union of all group patterns must equal the full collected test set.
    all_tests = _tests_from_pattern(ci_pattern='')
    assert tests_to_patterns.keys() - all_tests == set()
    assert all_tests - tests_to_patterns.keys() == set()
| GoelDeepak/dcos | packages/dcos-integration-test/extra/test_meta.py | Python | apache-2.0 | 3,623 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/cpu/layout_assignment.h"
#include <numeric>
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/cpu/dot_op_emitter.h"
#include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h"
#include "tensorflow/core/lib/core/errors.h"
namespace xla {
namespace cpu {

// Adds CPU-backend layout constraints for every instruction in
// `constraints->computation()`:
//   - convolutions implementable with Eigen get row-major input/filter/output;
//   - dots whose constant RHS profits from it get a column-major RHS;
//   - dots implementable with Eigen get row-major operands and output;
//   - all remaining array-shaped operands default to row-major.
// Returns OK unless one of the SetOperandLayout/SetInstructionLayout calls
// fails.
Status CpuLayoutAssignment::AddBackendConstraints(
    LayoutConstraints* constraints) {
  // Copy of `old_shape` with a row-major layout (minor-to-major order is
  // reversed dimension order).
  auto row_major_shape = [](const Shape& old_shape) {
    Shape new_shape(old_shape);
    std::vector<int64> dimension_order(new_shape.dimensions_size());
    std::iota(dimension_order.rbegin(), dimension_order.rend(), 0);
    *new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
    return new_shape;
  };
  // Copy of `old_shape` with a column-major layout (minor-to-major order is
  // the identity dimension order).
  auto col_major_shape = [](const Shape& old_shape) {
    Shape new_shape(old_shape);
    std::vector<int64> dimension_order(new_shape.dimensions_size());
    std::iota(dimension_order.begin(), dimension_order.end(), 0);
    *new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
    return new_shape;
  };
  // We want to change the layout of constant arrays to be column major when all
  // of their users are dot operations that can be made faster with the flipped
  // layout. To avoid going quadratic over the # of instructions, we cache
  // this property in should_make_rhs_col_major -- it maps a constant to true if
  // all of the users of said constant are dot operations that can be sped up.
  // This cache is populated lazily as we encounter dot operations traversing
  // the instruction stream.
  tensorflow::gtl::FlatMap<const HloInstruction*, bool>
      should_make_rhs_col_major_cache;
  auto should_make_rhs_col_major = [&](const HloInstruction& instruction) {
    if (ProfitableToImplementDotInUntiledLlvmIr(instruction) !=
        DotInLlvmIrProfitable::kWithColumnMajorRhs) {
      return false;
    }
    const auto* rhs = instruction.operand(1);
    if (rhs->opcode() != HloOpcode::kConstant) {
      return false;
    }
    auto it = should_make_rhs_col_major_cache.find(rhs);
    if (it != should_make_rhs_col_major_cache.end()) {
      return it->second;
    }
    // The constant may only be flipped when *every* user is such a dot and
    // the constant feeds it on the RHS (operand 1), never the LHS.
    bool result = std::all_of(
        rhs->users().begin(), rhs->users().end(), [&](HloInstruction* user) {
          return ProfitableToImplementDotInUntiledLlvmIr(*user) ==
                     DotInLlvmIrProfitable::kWithColumnMajorRhs &&
                 user->operand(0) != rhs;
        });
    InsertOrDie(&should_make_rhs_col_major_cache, rhs, result);
    return result;
  };
  const HloComputation* computation = constraints->computation();
  for (auto* instruction : computation->instructions()) {
    if (instruction->opcode() == HloOpcode::kConvolution &&
        PotentiallyImplementedAsEigenConvolution(*instruction)) {
      const HloInstruction* convolution = instruction;
      const HloInstruction* lhs_instruction = convolution->operand(0);
      const HloInstruction* rhs_instruction = convolution->operand(1);
      // In order to implement `convolution` with Eigen convolution, the layouts
      // of the input, filter, and output need to be row-major.
      //
      // These constraints are not hard constraints. Ideally, we should decide
      // which layouts to choose according to some cost model.
      Shape output_shape(row_major_shape(convolution->shape()));
      Shape input_shape(row_major_shape(lhs_instruction->shape()));
      Shape filter_shape(row_major_shape(rhs_instruction->shape()));
      // Set layouts of the instructions' shapes.
      TF_RETURN_IF_ERROR(
          constraints->SetOperandLayout(input_shape, convolution, 0));
      TF_RETURN_IF_ERROR(
          constraints->SetOperandLayout(filter_shape, convolution, 1));
      TF_RETURN_IF_ERROR(
          constraints->SetInstructionLayout(output_shape, convolution));
    } else if (should_make_rhs_col_major(*instruction)) {
      auto* dot = instruction;
      const auto& rhs_shape = dot->operand(1)->shape();
      TF_RETURN_IF_ERROR(
          constraints->SetOperandLayout(col_major_shape(rhs_shape), dot, 1));
    } else if (PotentiallyImplementedAsEigenDot(*instruction)) {
      const HloInstruction* dot = instruction;
      // In order to implement `dot` with Eigen dot, the layouts of the lhs,
      // rhs, and output need to be row-major.
      //
      // These constraints are not hard constraints. Ideally, we should decide
      // which layouts to choose according to some cost model.
      Shape output_shape(row_major_shape(dot->shape()));
      const HloInstruction* lhs_instruction = dot->operand(0);
      Shape lhs_shape(row_major_shape(lhs_instruction->shape()));
      TF_RETURN_IF_ERROR(constraints->SetOperandLayout(lhs_shape, dot, 0));
      // dot is a kDot or a kTransposeDot fusion node. In the latter case, if
      // it represents X @ X, it may have just one operand.
      if (dot->operand_count() > 1) {
        const HloInstruction* rhs_instruction = dot->operand(1);
        Shape rhs_shape(row_major_shape(rhs_instruction->shape()));
        TF_RETURN_IF_ERROR(constraints->SetOperandLayout(rhs_shape, dot, 1));
      }
      // Set layouts of the instructions' shapes.
      TF_RETURN_IF_ERROR(constraints->SetInstructionLayout(output_shape, dot));
    } else {
      // Default case: constrain every unforwarded, unconstrained operand to
      // row major.
      for (int64 operand_no = 0; operand_no < instruction->operand_count();
           ++operand_no) {
        // Skip operands which already have a constraint.
        if (constraints->OperandLayout(instruction, operand_no) != nullptr) {
          continue;
        }
        // Skip over forwarded operands.
        if (constraints->OperandBufferForwarded(instruction, operand_no)) {
          continue;
        }
        Shape operand_shape(
            row_major_shape(instruction->operand(operand_no)->shape()));
        TF_RETURN_IF_ERROR(constraints->SetOperandLayout(
            operand_shape, instruction, operand_no));
      }
      // Skip over the root instruction for the top-level computation.
      if (computation->parent()->entry_computation() == computation &&
          computation->root_instruction() == instruction) {
        continue;
      }
      // Skip instructions which don't produce array shapes (tuples, opaque,
      // etc.).
      if (!ShapeUtil::IsArray(instruction->shape())) {
        continue;
      }
    }
  }
  return tensorflow::Status::OK();
}
}  // namespace cpu
}  // namespace xla
| horance-liu/tensorflow | tensorflow/compiler/xla/service/cpu/layout_assignment.cc | C++ | apache-2.0 | 7,084 |
/* xlsx.js (C) 2013-present SheetJS -- http://sheetjs.com */
/* eslint-env node */
/* vim: set ts=2 ft=javascript: */
/// <reference types="../node_modules/@types/node/" />
// CLI tool name, used as the prefix for error messages.
const n = "xlsx";
import X = require("xlsx");
import 'exit-on-epipe';
import * as fs from 'fs';
import program = require('commander');
// Declare every command-line flag the converter accepts.
program
  .version(X.version)
  .usage('[options] <file> [sheetname]')
  .option('-f, --file <file>', 'use specified workbook')
  .option('-s, --sheet <sheet>', 'print specified sheet (default first sheet)')
  .option('-N, --sheet-index <idx>', 'use specified sheet index (0-based)')
  .option('-p, --password <pw>', 'if file is encrypted, try with specified pw')
  .option('-l, --list-sheets', 'list sheet names and exit')
  .option('-o, --output <file>', 'output to specified file')
  .option('-B, --xlsb', 'emit XLSB to <sheetname> or <file>.xlsb')
  .option('-M, --xlsm', 'emit XLSM to <sheetname> or <file>.xlsm')
  .option('-X, --xlsx', 'emit XLSX to <sheetname> or <file>.xlsx')
  .option('-I, --xlam', 'emit XLAM to <sheetname> or <file>.xlam')
  .option('-Y, --ods', 'emit ODS to <sheetname> or <file>.ods')
  .option('-8, --xls', 'emit XLS to <sheetname> or <file>.xls (BIFF8)')
  .option('-5, --biff5','emit XLS to <sheetname> or <file>.xls (BIFF5)')
  .option('-2, --biff2','emit XLS to <sheetname> or <file>.xls (BIFF2)')
  .option('-i, --xla', 'emit XLA to <sheetname> or <file>.xla')
  .option('-6, --xlml', 'emit SSML to <sheetname> or <file>.xls (2003 XML)')
  .option('-T, --fods', 'emit FODS to <sheetname> or <file>.fods (Flat ODS)')
  .option('-S, --formulae', 'emit list of values and formulae')
  .option('-j, --json', 'emit formatted JSON (all fields text)')
  .option('-J, --raw-js', 'emit raw JS object (raw numbers)')
  .option('-A, --arrays', 'emit rows as JS objects (raw numbers)')
  .option('-H, --html', 'emit HTML to <sheetname> or <file>.html')
  .option('-D, --dif', 'emit DIF to <sheetname> or <file>.dif (Lotus DIF)')
  .option('-U, --dbf', 'emit DBF to <sheetname> or <file>.dbf (MSVFP DBF)')
  .option('-K, --sylk', 'emit SYLK to <sheetname> or <file>.slk (Excel SYLK)')
  .option('-P, --prn', 'emit PRN to <sheetname> or <file>.prn (Lotus PRN)')
  .option('-E, --eth', 'emit ETH to <sheetname> or <file>.eth (Ethercalc)')
  .option('-t, --txt', 'emit TXT to <sheetname> or <file>.txt (UTF-8 TSV)')
  .option('-r, --rtf', 'emit RTF to <sheetname> or <file>.txt (Table RTF)')
  .option('-z, --dump', 'dump internal representation as JSON')
  .option('--props', 'dump workbook properties as CSV')
  .option('-F, --field-sep <sep>', 'CSV field separator', ",")
  .option('-R, --row-sep <sep>', 'CSV row separator', "\n")
  .option('-n, --sheet-rows <num>', 'Number of rows to process (0=all rows)')
  .option('--codepage <cp>', 'default to specified codepage when ambiguous')
  .option('--req <module>', 'require module before processing')
  .option('--sst', 'generate shared string table for XLS* formats')
  .option('--compress', 'use compression when writing XLSX/M/B and ODS')
  .option('--read', 'read but do not generate output')
  .option('--book', 'for single-sheet formats, emit a file per worksheet')
  .option('--all', 'parse everything; write as much as possible')
  .option('--dev', 'development mode')
  .option('--sparse', 'sparse mode')
  .option('-q, --quiet', 'quiet mode');
program.on('--help', function() {
  console.log('  Default output format is CSV');
  console.log('  Support email: dev@sheetjs.com');
  console.log('  Web Demo: http://oss.sheetjs.com/js-'+n+'/');
});
/* flag, bookType, default ext */
// Workbook-level (multi-sheet) output formats: [cli flag, bookType, ext].
const workbook_formats = [
  ['xlsx', 'xlsx', 'xlsx'],
  ['xlsm', 'xlsm', 'xlsm'],
  ['xlam', 'xlam', 'xlam'],
  ['xlsb', 'xlsb', 'xlsb'],
  ['xls', 'xls', 'xls'],
  ['xla', 'xla', 'xla'],
  ['biff5', 'biff5', 'xls'],
  ['ods', 'ods', 'ods'],
  ['fods', 'fods', 'fods']
];
// SpreadsheetML 2003 gets its own table: same shape, handled separately below.
const wb_formats_2 = [
  ['xlml', 'xlml', 'xls']
];
program.parse(process.argv);
// Resolve input workbook and sheet name: positional args first, then the
// explicit -f/-s flags override them.
let filename = '', sheetname = '';
if(program.args[0]) {
  filename = program.args[0];
  if(program.args[1]) sheetname = program.args[1];
}
if(program.sheet) sheetname = program.sheet;
if(program.file) filename = program.file;
if(!filename) {
  console.error(n + ": must specify a filename");
  process.exit(1);
}
if(!fs.existsSync(filename)) {
  console.error(n + ": " + filename + ": No such file or directory");
  process.exit(2);
}
// Translate CLI flags into SheetJS parsing options.
const opts: X.ParsingOptions = {};
let wb: X.WorkBook;
if(program.listSheets) opts.bookSheets = true;
if(program.sheetRows) opts.sheetRows = program.sheetRows;
if(program.password) opts.password = program.password;
// Set to true by wb_fmt() when any workbook-level output format is requested.
let seen = false;
/**
 * Record that a workbook-level output format was requested: remember that one
 * was seen, keep formulae and number formats during parsing, and let `-o`
 * double as the output path.
 */
function wb_fmt() {
  seen = true;
  Object.assign(opts, { cellFormula: true, cellNF: true });
  if (program.output) {
    sheetname = program.output;
  }
}
/**
 * True when the requested output filename (`-o`) ends with the extension `m`.
 * A leading "." is prepended to `m` when missing.
 */
function isfmt(m: string): boolean {
  const out = program.output;
  if (!out) return false;
  const ext = m.charAt(0) === "." ? m : "." + m;
  return out.endsWith(ext);
}
// A workbook-level format flag (or a matching -o extension) forces
// formula/NF parsing via wb_fmt().
workbook_formats.forEach(function(m) { if(program[m[0]] || isfmt(m[0])) { wb_fmt(); } });
wb_formats_2.forEach(function(m) { if(program[m[0]] || isfmt(m[0])) { wb_fmt(); } });
if(seen) {
} else if(program.formulae) opts.cellFormula = true;
else opts.cellFormula = false;
// Writing options mirror the parsing flags where applicable.
const wopts: X.WritingOptions = ({WTF:opts.WTF, bookSST:program.sst}/*:any*/);
if(program.compress) wopts.compression = true;
if(program.all) {
  opts.cellFormula = true;
  opts.bookVBA = true;
  opts.cellNF = true;
  opts.cellHTML = true;
  opts.cellStyles = true;
  opts.sheetStubs = true;
  opts.cellDates = true;
  wopts.cellStyles = true;
  wopts.bookVBA = true;
}
if(program.sparse) opts.dense = false; else opts.dense = true;
if(program.codepage) opts.codepage = +program.codepage;
// In dev mode parse errors propagate (with WTF enabled); otherwise they are
// reported and mapped to exit code 3.
if(program.dev) {
  opts.WTF = true;
  wb = X.readFile(filename, opts);
} else try {
  wb = X.readFile(filename, opts);
} catch(e) {
  let msg = (program.quiet) ? "" : n + ": error parsing ";
  msg += filename + ": " + e;
  console.error(msg);
  process.exit(3);
}
if(program.read) process.exit(0);
if(!wb) { console.error(n + ": error parsing " + filename + ": empty workbook"); process.exit(0); }
/*:: if(!wb) throw new Error("unreachable"); */
if(program.listSheets) {
  console.log((wb.SheetNames||[]).join("\n"));
  process.exit(0);
}
if(program.dump) {
  console.log(JSON.stringify(wb));
  process.exit(0);
}
if(program.props) {
  dump_props(wb);
  process.exit(0);
}
/* full workbook formats */
workbook_formats.forEach(function(m) { if(program[m[0]] || isfmt(m[0])) {
  wopts.bookType = <X.BookType>(m[1]);
  X.writeFile(wb, program.output || sheetname || ((filename || "") + "." + m[2]), wopts);
  process.exit(0);
} });
wb_formats_2.forEach(function(m) { if(program[m[0]] || isfmt(m[0])) {
  wopts.bookType = <X.BookType>(m[1]);
  X.writeFile(wb, program.output || sheetname || ((filename || "") + "." + m[2]), wopts);
  process.exit(0);
} });
// Resolve the worksheet to operate on: explicit name, numeric index, or the
// first sheet.  NOTE(review): program.sheetIndex arrives as a string from
// commander, so the `<` comparison relies on implicit coercion — confirm
// behavior for multi-digit indices.
let target_sheet = sheetname || '';
if(target_sheet === '') {
  if(program.sheetIndex < (wb.SheetNames||[]).length) target_sheet = wb.SheetNames[program.sheetIndex];
  else target_sheet = (wb.SheetNames||[""])[0];
}
let ws: X.WorkSheet;
try {
  ws = wb.Sheets[target_sheet];
  if(!ws) {
    console.error("Sheet " + target_sheet + " cannot be found");
    process.exit(3);
  }
} catch(e) {
  console.error(n + ": error parsing "+filename+" "+target_sheet+": " + e);
  process.exit(4);
}
if(!program.quiet && !program.book) console.error(target_sheet);
/* single worksheet file formats */
[
  ['biff2', '.xls'],
  ['biff3', '.xls'],
  ['biff4', '.xls'],
  ['sylk', '.slk'],
  ['html', '.html'],
  ['prn', '.prn'],
  ['eth', '.eth'],
  ['rtf', '.rtf'],
  ['txt', '.txt'],
  ['dbf', '.dbf'],
  ['dif', '.dif']
].forEach(function(m) { if(program[m[0]] || isfmt(m[1])) {
  wopts.bookType = <X.BookType>(m[0]);
  X.writeFile(wb, program.output || sheetname || ((filename || "") + m[1]), wopts);
  process.exit(0);
} });
// Text-style outputs: build the string `oo`, or stream CSV directly (strm).
let oo = "", strm = false;
if(!program.quiet) console.error(target_sheet);
if(program.formulae) oo = X.utils.sheet_to_formulae(ws).join("\n");
else if(program.json) oo = JSON.stringify(X.utils.sheet_to_json(ws));
else if(program.rawJs) oo = JSON.stringify(X.utils.sheet_to_json(ws,{raw:true}));
else if(program.arrays) oo = JSON.stringify(X.utils.sheet_to_json(ws,{raw:true, header:1}));
else {
  // Default output format is CSV, streamed to cope with large sheets.
  strm = true;
  const stream: NodeJS.ReadableStream = X.stream.to_csv(ws, {FS:program.fieldSep, RS:program.rowSep});
  if(program.output) stream.pipe(fs.createWriteStream(program.output));
  else stream.pipe(process.stdout);
}
if(!strm) {
  if(program.output) fs.writeFileSync(program.output, oo);
  else console.log(oo);
}
/*:: } */
/*:: } */
/**
 * Print the workbook's standard and custom properties as CSV by converting
 * the [key, value] pairs into a single-column-pair worksheet.
 */
function dump_props(wb: X.WorkBook) {
  const pairs: any[][] = (<any>Object).entries({...wb.Props, ...wb.Custprops});
  console.log(X.utils.sheet_to_csv(X.utils.aoa_to_sheet(pairs)));
}
| SheetJS/js-xlsx | types/bin_xlsx.ts | TypeScript | apache-2.0 | 8,728 |
//------------------------------------------------------------------------------
// <auto-generated>
//     This code was generated by a tool.
//
//     Changes to this file may cause incorrect behavior and will be lost
//     if the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
namespace DTcms.Web.admin.manager {
    public partial class role_edit {
        /// <summary>
        /// form1 control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the
        /// code-behind file.
        /// </remarks>
        protected global::System.Web.UI.HtmlControls.HtmlForm form1;
        /// <summary>
        /// ddlRoleType control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the
        /// code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.DropDownList ddlRoleType;
        /// <summary>
        /// txtRoleName control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the
        /// code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.TextBox txtRoleName;
        /// <summary>
        /// rptList control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the
        /// code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.Repeater rptList;
        /// <summary>
        /// btnSubmit control.
        /// </summary>
        /// <remarks>
        /// Auto-generated field.
        /// To modify, move the field declaration from the designer file to the
        /// code-behind file.
        /// </remarks>
        protected global::System.Web.UI.WebControls.Button btnSubmit;
    }
}
| LutherW/MTMS | Source/DTcms.Web/admin/manager/role_edit.aspx.designer.cs | C# | apache-2.0 | 2,149 |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc as wexc
from neutron.api.v2 import base
from neutron.common import constants as n_const
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import config as ml2_config
from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config
from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc
from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver
from neutron.plugins.ml2.drivers import type_vlan as vlan_config
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)

# Core plugin under test.
ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
# Physical network and VLAN range used for the ML2 VLAN type driver config.
PHYS_NET = 'physnet1'
COMP_HOST_NAME = 'testhost'
COMP_HOST_NAME_2 = 'testhost_2'
VLAN_START = 1000
VLAN_END = 1100
# Fictitious Nexus switch address and interfaces used by the mocked driver.
NEXUS_IP_ADDR = '1.1.1.1'
NETWORK_NAME = 'test_network'
NETWORK_NAME_2 = 'test_network_2'
NEXUS_INTERFACE = '1/1'
NEXUS_INTERFACE_2 = '1/2'
CIDR_1 = '10.0.0.0/24'
CIDR_2 = '10.0.1.0/24'
DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111'
DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222'
DEVICE_OWNER = 'compute:None'
class CiscoML2MechanismTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    # Base fixture for all Cisco Nexus ML2 mechanism driver tests: wires the
    # ML2 plugin to the cisco_nexus driver, mocks the NETCONF client, and
    # provides helpers for inspecting the config sent to the mock switch.

    def setUp(self):
        """Configure for end-to-end neutron testing using a mock ncclient.
        This setup includes:
        - Configure the ML2 plugin to use VLANs in the range of 1000-1100.
        - Configure the Cisco mechanism driver to use an imaginary switch
          at NEXUS_IP_ADDR.
        - Create a mock NETCONF client (ncclient) for the Cisco mechanism
          driver
        """
        self.addCleanup(mock.patch.stopall)
        # Configure the ML2 mechanism drivers and network types
        ml2_opts = {
            'mechanism_drivers': ['cisco_nexus'],
            'tenant_network_types': ['vlan'],
        }
        for opt, val in ml2_opts.items():
            ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
        self.addCleanup(ml2_config.cfg.CONF.reset)
        # Configure the ML2 VLAN parameters
        phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)])
        vlan_config.cfg.CONF.set_override('network_vlan_ranges',
                                          [phys_vrange],
                                          'ml2_type_vlan')
        self.addCleanup(vlan_config.cfg.CONF.reset)
        # Configure the Cisco Nexus mechanism driver
        nexus_config = {
            (NEXUS_IP_ADDR, 'username'): 'admin',
            (NEXUS_IP_ADDR, 'password'): 'mySecretPassword',
            (NEXUS_IP_ADDR, 'ssh_port'): 22,
            (NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE,
            (NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2}
        nexus_patch = mock.patch.dict(
            cisco_config.ML2MechCiscoConfig.nexus_dict,
            nexus_config)
        nexus_patch.start()
        self.addCleanup(nexus_patch.stop)
        # The NETCONF client module is not included in the DevStack
        # distribution, so mock this module for unit testing.
        self.mock_ncclient = mock.Mock()
        mock.patch.object(nexus_network_driver.CiscoNexusDriver,
                          '_import_ncclient',
                          return_value=self.mock_ncclient).start()
        # Mock port values for 'status' and 'binding:segmentation_id'
        mock_status = mock.patch.object(
            mech_cisco_nexus.CiscoNexusMechanismDriver,
            '_is_status_active').start()
        mock_status.return_value = n_const.PORT_STATUS_ACTIVE

        # Deterministic VLAN allocation: NETWORK_NAME gets VLAN_START, every
        # other network gets VLAN_START + 1.
        def _mock_get_vlanid(context):
            network = context.network.current
            if network['name'] == NETWORK_NAME:
                return VLAN_START
            else:
                return VLAN_START + 1
        mock_vlanid = mock.patch.object(
            mech_cisco_nexus.CiscoNexusMechanismDriver,
            '_get_vlanid').start()
        mock_vlanid.side_effect = _mock_get_vlanid

        super(CiscoML2MechanismTestCase, self).setUp(ML2_PLUGIN)
        self.port_create_status = 'DOWN'

    @contextlib.contextmanager
    def _patch_ncclient(self, attr, value):
        """Configure an attribute on the mock ncclient module.
        This method can be used to inject errors by setting a side effect
        or a return value for an ncclient method.
        :param attr: ncclient attribute (typically method) to be configured.
        :param value: Value to be configured on the attribute.
        """
        # Configure attribute.
        config = {attr: value}
        self.mock_ncclient.configure_mock(**config)
        # Continue testing
        yield
        # Unconfigure attribute
        config = {attr: None}
        self.mock_ncclient.configure_mock(**config)

    def _is_in_nexus_cfg(self, words):
        """Check if any config sent to Nexus contains all words in a list."""
        for call in (self.mock_ncclient.connect.return_value.
                     edit_config.mock_calls):
            configlet = call[2]['config']
            if all(word in configlet for word in words):
                return True
        return False

    def _is_in_last_nexus_cfg(self, words):
        """Confirm last config sent to Nexus contains specified keywords."""
        last_cfg = (self.mock_ncclient.connect.return_value.
                    edit_config.mock_calls[-1][2]['config'])
        return all(word in last_cfg for word in words)

    def _is_vlan_configured(self, vlan_creation_expected=True,
                            add_keyword_expected=False):
        # A VLAN is "configured" when the last config trunks it on the port
        # and the VLAN-create and 'add'-keyword observations match what the
        # caller expects.
        vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name'])
        add_appears = self._is_in_last_nexus_cfg(['add'])
        return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and
                vlan_created == vlan_creation_expected and
                add_appears == add_keyword_expected)

    def _is_vlan_unconfigured(self, vlan_deletion_expected=True):
        # A VLAN is "unconfigured" when it was untrunked from the port, and
        # the last config deleted the VLAN iff the caller expected deletion.
        vlan_deleted = self._is_in_last_nexus_cfg(
            ['no', 'vlan', 'vlan-id-create-delete'])
        return (self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) and
                vlan_deleted == vlan_deletion_expected)
class TestCiscoBasicGet(CiscoML2MechanismTestCase,
                        test_db_plugin.TestBasicGet):
    """Run the generic DB-plugin basic GET tests against the Cisco ML2 setup."""
    pass
class TestCiscoV2HTTPResponse(CiscoML2MechanismTestCase,
                              test_db_plugin.TestV2HTTPResponse):
    """Run the generic v2 HTTP-response tests against the Cisco ML2 setup."""
    pass
class TestCiscoPortsV2(CiscoML2MechanismTestCase,
test_db_plugin.TestPortsV2):
@contextlib.contextmanager
def _create_resources(self, name=NETWORK_NAME, cidr=CIDR_1,
device_id=DEVICE_ID_1,
host_id=COMP_HOST_NAME):
"""Create network, subnet, and port resources for test cases.
Create a network, subnet, port and then update the port, yield the
result, then delete the port, subnet and network.
:param name: Name of network to be created.
:param cidr: cidr address of subnetwork to be created.
:param device_id: Device ID to use for port to be created/updated.
:param host_id: Host ID to use for port create/update.
"""
with self.network(name=name) as network:
with self.subnet(network=network, cidr=cidr) as subnet:
with self.port(subnet=subnet, cidr=cidr) as port:
data = {'port': {portbindings.HOST_ID: host_id,
'device_id': device_id,
'device_owner': 'compute:none',
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
yield res.status_int
def _assertExpectedHTTP(self, status, exc):
"""Confirm that an HTTP status corresponds to an expected exception.
Confirm that an HTTP status which has been returned for an
neutron API request matches the HTTP status corresponding
to an expected exception.
:param status: HTTP status
:param exc: Expected exception
"""
if exc in base.FAULT_MAP:
expected_http = base.FAULT_MAP[exc].code
else:
expected_http = wexc.HTTPInternalServerError.code
self.assertEqual(status, expected_http)
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# Expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_emulated(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_nexus_enable_vlan_cmd(self):
"""Verify the syntax of the command to enable a vlan on an intf.
Confirm that for the first VLAN configured on a Nexus interface,
the command string sent to the switch does not contain the
keyword 'add'.
Confirm that for the second VLAN configured on a Nexus interface,
the command string sent to the switch contains the keyword 'add'.
"""
# First vlan should be configured without 'add' keyword
with self._create_resources():
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
# Second vlan should be configured with 'add' keyword
with self._create_resources(name=NETWORK_NAME_2,
device_id=DEVICE_ID_2,
cidr=CIDR_2):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=True))
def test_nexus_connect_fail(self):
"""Test failure to connect to a Nexus switch.
While creating a network, subnet, and port, simulate a connection
failure to a nexus switch. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient('connect.side_effect',
AttributeError):
with self._create_resources() as result_status:
self._assertExpectedHTTP(result_status,
c_exc.NexusConnectFailed)
def test_nexus_vlan_config_two_hosts(self):
"""Verify config/unconfig of vlan on two compute hosts."""
@contextlib.contextmanager
def _create_port_check_vlan(comp_host_name, device_id,
vlan_creation_expected=True):
with self.port(subnet=subnet, fmt=self.fmt) as port:
data = {'port': {portbindings.HOST_ID: comp_host_name,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
req.get_response(self.api)
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=vlan_creation_expected,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
yield
# Create network and subnet
with self.network(name=NETWORK_NAME) as network:
with self.subnet(network=network, cidr=CIDR_1) as subnet:
# Create an instance on first compute host
with _create_port_check_vlan(COMP_HOST_NAME, DEVICE_ID_1,
vlan_creation_expected=True):
# Create an instance on second compute host
with _create_port_check_vlan(COMP_HOST_NAME_2, DEVICE_ID_2,
vlan_creation_expected=False):
pass
# Instance on second host is now terminated.
# Vlan should be untrunked from port, but vlan should
# still exist on the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=False))
self.mock_ncclient.reset_mock()
# Instance on first host is now terminated.
# Vlan should be untrunked from port and vlan should have
# been deleted from the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=True))
def test_nexus_config_fail(self):
"""Test a Nexus switch configuration failure.
While creating a network, subnet, and port, simulate a nexus
switch configuration error. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
AttributeError):
with self._create_resources() as result_status:
self._assertExpectedHTTP(result_status,
c_exc.NexusConfigFailed)
def test_nexus_extended_vlan_range_failure(self):
"""Test that extended VLAN range config errors are ignored.
Some versions of Nexus switch do not allow state changes for
the extended VLAN range (1006-4094), but these errors can be
ignored (default values are appropriate). Test that such errors
are ignored by the Nexus plugin.
"""
def mock_edit_config_a(target, config):
if all(word in config for word in ['state', 'active']):
raise Exception("Can't modify state for extended")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_a):
with self._create_resources() as result_status:
self.assertEqual(result_status, wexc.HTTPOk.code)
def mock_edit_config_b(target, config):
if all(word in config for word in ['no', 'shutdown']):
raise Exception("Command is only allowed on VLAN")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_b):
with self._create_resources() as result_status:
self.assertEqual(result_status, wexc.HTTPOk.code)
def test_nexus_vlan_config_rollback(self):
"""Test rollback following Nexus VLAN state config failure.
Test that the Cisco Nexus plugin correctly deletes the VLAN
on the Nexus switch when the 'state active' command fails (for
a reason other than state configuration change is rejected
for the extended VLAN range).
"""
def mock_edit_config(target, config):
if all(word in config for word in ['state', 'active']):
raise ValueError
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config):
with self._create_resources() as result_status:
# Confirm that the last configuration sent to the Nexus
# switch was deletion of the VLAN.
self.assertTrue(self._is_in_last_nexus_cfg(['<no>', '<vlan>']))
self._assertExpectedHTTP(result_status,
c_exc.NexusConfigFailed)
    def test_nexus_host_not_configured(self):
        """Test handling of a NexusComputeHostNotConfigured exception.
        Test the Cisco NexusComputeHostNotConfigured exception by using
        a fictitious host name during port creation.
        """
        # 'fake_host' is not present in the configured switch/host mapping,
        # so port creation is expected to fail with the mapped HTTP error.
        with self._create_resources(host_id='fake_host') as result_status:
            self._assertExpectedHTTP(result_status,
                                     c_exc.NexusComputeHostNotConfigured)
    def test_nexus_missing_fields(self):
        """Test handling of a NexusMissingRequiredFields exception.
        Test the Cisco NexusMissingRequiredFields exception by using
        empty host_id and device_id values during port creation.
        """
        # Both identifying fields are empty, which the driver must reject.
        with self._create_resources(device_id='', host_id='') as result_status:
            self._assertExpectedHTTP(result_status,
                                     c_exc.NexusMissingRequiredFields)
class TestCiscoNetworksV2(CiscoML2MechanismTestCase,
                          test_db_plugin.TestNetworksV2):
    """Bulk network-creation failure tests run against the Cisco ML2 driver."""

    def test_create_networks_bulk_emulated_plugin_failure(self):
        real_has_attr = hasattr

        # Wrapper that hides native bulk support from the API layer.
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        plugin_obj = NeutronManager.get_plugin()
        orig = plugin_obj.create_network
        # Ensures the API chooses the emulated (non-native) bulk code path.
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with mock.patch.object(plugin_obj,
                                   'create_network') as patched_plugin:

                def side_effect(*args, **kwargs):
                    # Delegate to the real plugin through the fault-injecting
                    # test helper.
                    return self._do_side_effect(patched_plugin, orig,
                                                *args, **kwargs)

                patched_plugin.side_effect = side_effect
                res = self._create_network_bulk(self.fmt, 2, 'test', True)
                LOG.debug("response is %s" % res)
                # We expect an internal server error as we injected a fault
                self._validate_behavior_on_bulk_failure(
                    res,
                    'networks',
                    wexc.HTTPInternalServerError.code)

    def test_create_networks_bulk_native_plugin_failure(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk network create")
        plugin_obj = NeutronManager.get_plugin()
        orig = plugin_obj.create_network
        with mock.patch.object(plugin_obj,
                               'create_network') as patched_plugin:

            def side_effect(*args, **kwargs):
                return self._do_side_effect(patched_plugin, orig,
                                            *args, **kwargs)

            patched_plugin.side_effect = side_effect
            res = self._create_network_bulk(self.fmt, 2, 'test', True)
            # We expect an internal server error as we injected a fault
            self._validate_behavior_on_bulk_failure(
                res,
                'networks',
                wexc.HTTPInternalServerError.code)
class TestCiscoSubnetsV2(CiscoML2MechanismTestCase,
                         test_db_plugin.TestSubnetsV2):
    """Bulk subnet-creation failure tests run against the Cisco ML2 driver."""

    def test_create_subnets_bulk_emulated_plugin_failure(self):
        real_has_attr = hasattr

        # Ensures the API chooses the emulated (non-native) bulk code path.
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            plugin_obj = NeutronManager.get_plugin()
            orig = plugin_obj.create_subnet
            with mock.patch.object(plugin_obj,
                                   'create_subnet') as patched_plugin:

                def side_effect(*args, **kwargs):
                    # Return the delegated result so non-faulted calls behave
                    # normally; matches the native-bulk variant below and the
                    # networks test class.
                    return self._do_side_effect(patched_plugin, orig,
                                                *args, **kwargs)

                patched_plugin.side_effect = side_effect
                with self.network() as net:
                    res = self._create_subnet_bulk(self.fmt, 2,
                                                   net['network']['id'],
                                                   'test')
                    # We expect an internal server error as we injected a fault
                    self._validate_behavior_on_bulk_failure(
                        res,
                        'subnets',
                        wexc.HTTPInternalServerError.code)

    def test_create_subnets_bulk_native_plugin_failure(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk subnet create")
        plugin_obj = NeutronManager.get_plugin()
        orig = plugin_obj.create_subnet
        with mock.patch.object(plugin_obj,
                               'create_subnet') as patched_plugin:

            def side_effect(*args, **kwargs):
                return self._do_side_effect(patched_plugin, orig,
                                            *args, **kwargs)

            patched_plugin.side_effect = side_effect
            with self.network() as net:
                res = self._create_subnet_bulk(self.fmt, 2,
                                               net['network']['id'],
                                               'test')
                # We expect an internal server error as we injected a fault
                self._validate_behavior_on_bulk_failure(
                    res,
                    'subnets',
                    wexc.HTTPInternalServerError.code)
class TestCiscoPortsV2XML(TestCiscoPortsV2):
    # Re-run the port tests with XML (rather than JSON) request bodies.
    fmt = 'xml'
class TestCiscoNetworksV2XML(TestCiscoNetworksV2):
    # Re-run the network tests with XML request bodies.
    fmt = 'xml'
class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2):
    # Re-run the subnet tests with XML request bodies.
    fmt = 'xml'
| Juniper/neutron | neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_mech.py | Python | apache-2.0 | 24,807 |
#!/bin/sh
# Print system load (uptime) once per second until interrupted.
# 'while true' replaces 'while [ "1" == "1" ]': the '==' operator is a
# bashism that strict POSIX sh implementations (e.g. dash) reject.
while true
do
    uptime
    sleep 1
done | tgou/RocketMQ | rocketmq-store/sbin/showload.sh | Shell | apache-2.0 | 60
package org.apereo.cas.ticket.registry;
import org.apereo.cas.ticket.Ticket;
import org.infinispan.Cache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
/**
* This is {@link InfinispanTicketRegistry}. Infinispan is a distributed in-memory
* key/value data store with optional schema.
* It offers advanced functionality such as transactions, events, querying and distributed processing.
* See <a href="http://infinispan.org/features/">http://infinispan.org/features/</a> for more info.
*
* @author Misagh Moayyed
* @since 4.2.0
*/
public class InfinispanTicketRegistry extends AbstractTicketRegistry {
    private static final Logger LOGGER = LoggerFactory.getLogger(InfinispanTicketRegistry.class);

    /**
     * Backing Infinispan cache. Entries are keyed by the (possibly encoded)
     * ticket id produced by {@code encodeTicket}/{@code encodeTicketId}.
     */
    private Cache<String, Ticket> cache;

    /**
     * Instantiates a new Infinispan ticket registry.
     *
     * @param cache the cache backing this registry
     */
    public InfinispanTicketRegistry(final Cache<String, Ticket> cache) {
        this.cache = cache;
        LOGGER.info("Setting up Infinispan Ticket Registry...");
    }

    @Override
    public Ticket updateTicket(final Ticket ticket) {
        // Encode before storing so the update lands on the same key used by
        // addTicket(); storing the raw ticket desynchronizes update/get when
        // ticket encoding is enabled.
        final Ticket encodedTicket = encodeTicket(ticket);
        this.cache.put(encodedTicket.getId(), encodedTicket);
        return ticket;
    }

    @Override
    public void addTicket(final Ticket ticketToAdd) {
        final Ticket ticket = encodeTicket(ticketToAdd);
        // Fall back to the time-to-live when no idle timeout is configured.
        final long idleTime = ticket.getExpirationPolicy().getTimeToIdle() <= 0
                ? ticket.getExpirationPolicy().getTimeToLive()
                : ticket.getExpirationPolicy().getTimeToIdle();
        LOGGER.debug("Adding ticket [{}] to cache store to live [{}] seconds and stay idle for [{}]",
                ticket.getId(), ticket.getExpirationPolicy().getTimeToLive(), idleTime);
        this.cache.put(ticket.getId(), ticket,
                ticket.getExpirationPolicy().getTimeToLive(), TimeUnit.SECONDS,
                idleTime, TimeUnit.SECONDS);
    }

    @Override
    public Ticket getTicket(final String ticketId) {
        // Guard before encoding to avoid handing null to encodeTicketId.
        if (ticketId == null) {
            return null;
        }
        final String encTicketId = encodeTicketId(ticketId);
        final Ticket ticket = this.cache.get(encTicketId);
        if (ticket == null) {
            return null;
        }
        // Decode so callers receive the original ticket, mirroring getTickets().
        return decodeTicket(ticket);
    }

    @Override
    public boolean deleteSingleTicket(final String ticketId) {
        // Remove under the encoded key, matching how addTicket() stores tickets.
        this.cache.remove(encodeTicketId(ticketId));
        return getTicket(ticketId) == null;
    }

    @Override
    public long deleteAll() {
        // Capture the size first: clear() empties the cache in place.
        final int size = this.cache.size();
        this.cache.clear();
        return size;
    }

    /**
     * Retrieve all tickets from the registry.
     * <p>
     * Note! Usage of this method can be computational and I/O intensive and should not be used for other than
     * debugging.
     *
     * @return collection of tickets currently stored in the registry. Tickets
     * might or might not be valid i.e. expired.
     */
    @Override
    public Collection<Ticket> getTickets() {
        return decodeTickets(this.cache.values());
    }
}
| petracvv/cas | support/cas-server-support-infinispan-ticket-registry/src/main/java/org/apereo/cas/ticket/registry/InfinispanTicketRegistry.java | Java | apache-2.0 | 2,994 |
//===--- TaskStatusRecord.h - Structures to track task status --*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// Swift ABI describing "status records", the mechanism by which
// tasks track dynamic information about their child tasks, custom
// cancellation hooks, and other information which may need to be exposed
// asynchronously outside of the task.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_ABI_TASKSTATUS_H
#define SWIFT_ABI_TASKSTATUS_H
#include "swift/ABI/MetadataValues.h"
#include "swift/ABI/Task.h"
namespace swift {
/// The abstract base class for all status records.
///
/// TaskStatusRecords are typically allocated on the stack (possibly
/// in the task context), partially initialized, and then atomically
/// added to the task with `swift_task_addTaskStatusRecord`. While
/// registered with the task, a status record should only be
/// modified in ways that respect the possibility of asynchronous
/// access by a cancelling thread. In particular, the chain of
/// status records must not be disturbed. When the task leaves
/// the scope that requires the status record, the record can
/// be unregistered from the task with `removeStatusRecord`,
/// at which point the memory can be returned to the system.
class TaskStatusRecord {
public:
  /// Identifies the concrete kind of this record.
  TaskStatusRecordFlags Flags;
  /// Intrusive link to the next record in the task's status-record chain;
  /// null when this record is the last one.
  TaskStatusRecord *Parent;
  TaskStatusRecord(TaskStatusRecordKind kind,
                   TaskStatusRecord *parent = nullptr)
      : Flags(kind) {
    resetParent(parent);
  }
  // Records are registered with a task by address, so they must not be
  // copied or assigned.
  TaskStatusRecord(const TaskStatusRecord &) = delete;
  TaskStatusRecord &operator=(const TaskStatusRecord &) = delete;
  TaskStatusRecordKind getKind() const { return Flags.getKind(); }
  TaskStatusRecord *getParent() const { return Parent; }
  /// Change the parent of this unregistered status record to the
  /// given record.
  ///
  /// This should be used when the record has been previously initialized
  /// without knowing what the true parent is. If we decide to cache
  /// important information (e.g. the earliest timeout) in the innermost
  /// status record, this is the method that should fill that in
  /// from the parent.
  void resetParent(TaskStatusRecord *newParent) {
    Parent = newParent;
    // TODO: cache
  }
  /// Splice a record out of the status-record chain.
  ///
  /// Unlike resetParent, this assumes that it's just removing one or
  /// more records from the chain and that there's no need to do any
  /// extra cache manipulation.
  void spliceParent(TaskStatusRecord *newParent) { Parent = newParent; }
};
/// A deadline for the task. If this is reached, the task will be
/// automatically cancelled. The deadline can also be queried and used
/// in other ways.
struct TaskDeadline {
  // FIXME: I don't really know what this should look like right now.
  // It's probably target-specific.
  uint64_t Value;
  // Deadlines are compared by their raw value only.
  bool operator==(const TaskDeadline &other) const {
    return Value == other.Value;
  }
  bool operator<(const TaskDeadline &other) const {
    return Value < other.Value;
  }
};
/// A status record which states that there's an active deadline
/// within the task.
class DeadlineStatusRecord : public TaskStatusRecord {
  /// The deadline after which the task is automatically cancelled
  /// (see TaskDeadline).
  TaskDeadline Deadline;
public:
  DeadlineStatusRecord(TaskDeadline deadline)
      : TaskStatusRecord(TaskStatusRecordKind::Deadline), Deadline(deadline) {}
  TaskDeadline getDeadline() const { return Deadline; }
  /// Support for LLVM-style isa/dyn_cast via the record kind.
  static bool classof(const TaskStatusRecord *record) {
    return record->getKind() == TaskStatusRecordKind::Deadline;
  }
};
/// A status record which states that a task has one or
/// more active child tasks.
class ChildTaskStatusRecord : public TaskStatusRecord {
  /// Head of the intrusive list of children; successors are reached
  /// through each child's child fragment (see getNextChildTask).
  AsyncTask *FirstChild;
public:
  ChildTaskStatusRecord(AsyncTask *child)
      : TaskStatusRecord(TaskStatusRecordKind::ChildTask), FirstChild(child) {}
  ChildTaskStatusRecord(AsyncTask *child, TaskStatusRecordKind kind)
      : TaskStatusRecord(kind), FirstChild(child) {
    assert(kind == TaskStatusRecordKind::ChildTask);
    assert(!child->hasGroupChildFragment() &&
           "Group child tasks must be tracked in their respective "
           "TaskGroupTaskStatusRecord, and not as independent "
           "ChildTaskStatusRecord "
           "records.");
  }
  /// Return the first child linked by this record. This may be null;
  /// if not, it (and all of its successors) are guaranteed to satisfy
  /// `isChildTask()`.
  AsyncTask *getFirstChild() const { return FirstChild; }
  /// Follow the intrusive link stored in the task's child fragment.
  static AsyncTask *getNextChildTask(AsyncTask *task) {
    return task->childFragment()->getNextChild();
  }
  using child_iterator = LinkedListIterator<AsyncTask, getNextChildTask>;
  /// Iterate all children reachable from FirstChild.
  llvm::iterator_range<child_iterator> children() const {
    return child_iterator::rangeBeginning(getFirstChild());
  }
  static bool classof(const TaskStatusRecord *record) {
    return record->getKind() == TaskStatusRecordKind::ChildTask;
  }
};
/// A status record which states that a task has a task group.
///
/// A record always is a specific `TaskGroupImpl`.
///
/// The child tasks are stored as an invasive single-linked list, starting
/// from `FirstChild` and continuing through the `NextChild` pointers of all
/// the linked children.
///
/// All children of the specific `Group` are stored "by" this record,
/// so that they may be cancelled when this task becomes cancelled.
///
/// When the group exits, it may simply remove this single record from the task
/// running it. As it has guaranteed that the tasks have already completed.
///
/// Group child tasks DO NOT have their own `ChildTaskStatusRecord` entries,
/// and are only tracked by their respective `TaskGroupTaskStatusRecord`.
class TaskGroupTaskStatusRecord : public TaskStatusRecord {
AsyncTask *FirstChild;
AsyncTask *LastChild;
public:
TaskGroupTaskStatusRecord()
: TaskStatusRecord(TaskStatusRecordKind::TaskGroup),
FirstChild(nullptr),
LastChild(nullptr) {
}
TaskGroupTaskStatusRecord(AsyncTask *child)
: TaskStatusRecord(TaskStatusRecordKind::TaskGroup),
FirstChild(child),
LastChild(child) {
assert(!LastChild || !LastChild->childFragment()->getNextChild());
}
TaskGroup *getGroup() { return reinterpret_cast<TaskGroup *>(this); }
/// Return the first child linked by this record. This may be null;
/// if not, it (and all of its successors) are guaranteed to satisfy
/// `isChildTask()`.
AsyncTask *getFirstChild() const { return FirstChild; }
/// Attach the passed in `child` task to this group.
void attachChild(AsyncTask *child) {
assert(child->hasGroupChildFragment());
assert(child->groupChildFragment()->getGroup() == getGroup());
auto oldLastChild = LastChild;
LastChild = child;
if (!FirstChild) {
// This is the first child we ever attach, so store it as FirstChild.
FirstChild = child;
return;
}
oldLastChild->childFragment()->setNextChild(child);
}
void detachChild(AsyncTask *child) {
assert(child && "cannot remove a null child from group");
if (FirstChild == child) {
FirstChild = getNextChildTask(child);
if (FirstChild == nullptr) {
LastChild = nullptr;
}
return;
}
AsyncTask *prev = FirstChild;
// Remove the child from the linked list, i.e.:
// prev -> afterPrev -> afterChild
// ==
// child -> afterChild
// Becomes:
// prev --------------> afterChild
while (prev) {
auto afterPrev = getNextChildTask(prev);
if (afterPrev == child) {
auto afterChild = getNextChildTask(child);
prev->childFragment()->setNextChild(afterChild);
if (child == LastChild) {
LastChild = prev;
}
return;
}
prev = afterPrev;
}
}
static AsyncTask *getNextChildTask(AsyncTask *task) {
return task->childFragment()->getNextChild();
}
using child_iterator = LinkedListIterator<AsyncTask, getNextChildTask>;
llvm::iterator_range<child_iterator> children() const {
return child_iterator::rangeBeginning(getFirstChild());
}
static bool classof(const TaskStatusRecord *record) {
return record->getKind() == TaskStatusRecordKind::TaskGroup;
}
};
/// A cancellation record which states that a task has an arbitrary
/// function that needs to be called if the task is cancelled.
///
/// The end of any call to the function will be ordered before the
/// end of a call to unregister this record from the task. That is,
/// code may call `removeStatusRecord` and freely
/// assume after it returns that this function will not be
/// subsequently used.
class CancellationNotificationStatusRecord : public TaskStatusRecord {
public:
  using FunctionType = SWIFT_CC(swift) void(SWIFT_CONTEXT void *);
private:
  /// The callback to invoke on cancellation (pointer-authenticated).
  FunctionType *__ptrauth_swift_cancellation_notification_function Function;
  /// Opaque context passed verbatim to the callback.
  void *Argument;
public:
  CancellationNotificationStatusRecord(FunctionType *fn, void *arg)
      : TaskStatusRecord(TaskStatusRecordKind::CancellationNotification),
        Function(fn), Argument(arg) {}
  /// Invoke the callback with its saved context.
  void run() { Function(Argument); }
  static bool classof(const TaskStatusRecord *record) {
    return record->getKind() == TaskStatusRecordKind::CancellationNotification;
  }
};
/// A status record which says that a task has an arbitrary
/// function that needs to be called if the task's priority is escalated.
///
/// The end of any call to the function will be ordered before the
/// end of a call to unregister this record from the task. That is,
/// code may call `removeStatusRecord` and freely
/// assume after it returns that this function will not be
/// subsequently used.
class EscalationNotificationStatusRecord : public TaskStatusRecord {
public:
  using FunctionType = void(void *, JobPriority);
private:
  /// The callback to invoke on escalation (pointer-authenticated).
  FunctionType *__ptrauth_swift_escalation_notification_function Function;
  /// Opaque context passed verbatim as the callback's first argument.
  void *Argument;
public:
  EscalationNotificationStatusRecord(FunctionType *fn, void *arg)
      : TaskStatusRecord(TaskStatusRecordKind::EscalationNotification),
        Function(fn), Argument(arg) {}
  /// Invoke the callback with the saved context and the new priority.
  void run(JobPriority newPriority) { Function(Argument, newPriority); }
  static bool classof(const TaskStatusRecord *record) {
    return record->getKind() == TaskStatusRecordKind::EscalationNotification;
  }
};
} // end namespace swift
#endif
| glessard/swift | include/swift/ABI/TaskStatus.h | C | apache-2.0 | 10,787 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''Unit tests for the Dataset.py module'''
import unittest
from ocw.dataset import Dataset, Bounds
import numpy as np
import datetime as dt
class TestDatasetAttributes(unittest.TestCase):
    """Verify that Dataset stores its constructor arguments unchanged."""

    def setUp(self):
        # 5x5 spatial grid with one timestep per month of the year 2000.
        self.lat = np.array([10, 12, 14, 16, 18])
        self.lon = np.array([100, 102, 104, 106, 108])
        self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.name = 'foo'
        self.origin = {'path': '/a/fake/file/path'}
        self.test_dataset = Dataset(self.lat,
                                    self.lon,
                                    self.time,
                                    self.value,
                                    variable=self.variable,
                                    name=self.name,
                                    origin=self.origin)

    # NOTE(review): assertItemsEqual is Python 2 only (assertCountEqual in
    # Python 3) -- confirm the project's supported interpreter versions.
    def test_lats(self):
        self.assertItemsEqual(self.test_dataset.lats, self.lat)

    def test_lons(self):
        self.assertItemsEqual(self.test_dataset.lons, self.lon)

    def test_times(self):
        self.assertItemsEqual(self.test_dataset.times, self.time)

    def test_values(self):
        # Compare element-wise. The previous form,
        # `assertEqual(values.all(), value.all())`, compared two booleans and
        # passed for almost any pair of arrays.
        np.testing.assert_array_equal(self.test_dataset.values, self.value)

    def test_variable(self):
        self.assertEqual(self.test_dataset.variable, self.variable)

    def test_name(self):
        self.assertEqual(self.test_dataset.name, self.name)

    def test_origin(self):
        self.assertEqual(self.test_dataset.origin, self.origin)
class TestInvalidDatasetInit(unittest.TestCase):
    """Verify that Dataset rejects malformed constructor arguments."""

    def setUp(self):
        self.lat = np.array([10, 12, 14, 16, 18])
        self.lon = np.array([100, 102, 104, 106, 108])
        self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.value = flat_array.reshape(12, 5, 5)
        # Shaped (lat, lon, time) instead of the required (time, lat, lon).
        self.values_in_wrong_order = flat_array.reshape(5, 5, 12)

    def test_bad_lat_shape(self):
        # Latitudes must be one-dimensional.
        self.lat = np.array([[1, 2], [3, 4]])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_bad_lon_shape(self):
        # Longitudes must be one-dimensional.
        self.lon = np.array([[1, 2], [3, 4]])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_bad_times_shape(self):
        # Times must be one-dimensional.
        self.time = np.array([[1, 2], [3, 4]])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_bad_values_shape(self):
        # Values must be three-dimensional.
        self.value = np.array([1, 2, 3, 4, 5])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_values_shape_mismatch(self):
        # If we change lats to this the shape of value will not match
        # up with the length of the lats array.
        self.lat = self.lat[:-2]
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_values_given_in_wrong_order(self):
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.values_in_wrong_order)

    def test_lons_values_incorrectly_gridded(self):
        # Longitudes 0..359 are expected to be remapped to -180..179.
        times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        lats = np.arange(-30, 30)
        bad_lons = np.arange(360)
        flat_array = np.arange(len(times) * len(lats) * len(bad_lons))
        values = flat_array.reshape(len(times), len(lats), len(bad_lons))
        ds = Dataset(lats, bad_lons, times, values)
        np.testing.assert_array_equal(ds.lons, np.arange(-180, 180))

    def test_reversed_lats(self):
        # Descending latitudes are expected to be flipped to ascending order.
        ds = Dataset(self.lat[::-1], self.lon, self.time, self.value)
        np.testing.assert_array_equal(ds.lats, self.lat)
class TestDatasetFunctions(unittest.TestCase):
    """Exercise the query helpers exposed by Dataset."""

    def setUp(self):
        # 12 monthly timesteps over a 5x5 grid with 2-degree spacing.
        self.lat = np.arange(10, 20, 2)
        self.lon = np.arange(100, 110, 2)
        self.time = np.array([dt.datetime(2000, month, 1)
                              for month in range(1, 13)])
        self.value = np.arange(300).reshape(12, 5, 5)
        self.variable = 'prec'
        self.test_dataset = Dataset(self.lat, self.lon, self.time,
                                    self.value, self.variable)

    def test_spatial_boundaries(self):
        expected = (min(self.lat), max(self.lat),
                    min(self.lon), max(self.lon))
        self.assertEqual(self.test_dataset.spatial_boundaries(), expected)

    def test_time_range(self):
        expected = (dt.datetime(2000, 1, 1), dt.datetime(2000, 12, 1))
        self.assertEqual(self.test_dataset.time_range(), expected)

    def test_spatial_resolution(self):
        self.assertEqual(self.test_dataset.spatial_resolution(), (2, 2))

    def test_temporal_resolution(self):
        self.assertEqual(self.test_dataset.temporal_resolution(), 'monthly')
class TestBounds(unittest.TestCase):
    """Verify the range validation performed by the Bounds setters."""

    def setUp(self):
        self.bounds = Bounds(-80, 80,  # Lats
                            -160, 160,  # Lons
                            dt.datetime(2000, 1, 1),  # Start time
                            dt.datetime(2002, 1, 1))  # End time

    # Latitude tests
    def test_inverted_min_max_lat(self):
        # Setting a min above the current max (or vice versa) must fail.
        with self.assertRaises(ValueError):
            self.bounds.lat_min = 81
        with self.assertRaises(ValueError):
            self.bounds.lat_max = -81

    # Lat Min: must lie within [-90, 90]
    def test_out_of_bounds_lat_min(self):
        with self.assertRaises(ValueError):
            self.bounds.lat_min = -91
        with self.assertRaises(ValueError):
            self.bounds.lat_min = 91

    # Lat Max: must lie within [-90, 90]
    def test_out_of_bounds_lat_max(self):
        with self.assertRaises(ValueError):
            self.bounds.lat_max = -91
        with self.assertRaises(ValueError):
            self.bounds.lat_max = 91

    # Longitude tests
    def test_inverted_max_max_lon(self):
        # Setting a min above the current max (or vice versa) must fail.
        with self.assertRaises(ValueError):
            self.bounds.lon_min = 161
        with self.assertRaises(ValueError):
            self.bounds.lon_max = -161

    # Lon Min: must lie within [-180, 180]
    def test_out_of_bounds_lon_min(self):
        with self.assertRaises(ValueError):
            self.bounds.lon_min = -181
        with self.assertRaises(ValueError):
            self.bounds.lon_min = 181

    # Lon Max: must lie within [-180, 180]
    def test_out_of_bounds_lon_max(self):
        with self.assertRaises(ValueError):
            self.bounds.lon_max = -181
        with self.assertRaises(ValueError):
            self.bounds.lon_max = 181

    # Temporal tests
    def test_inverted_start_end_times(self):
        # A start after the current end (or end before start) must fail.
        with self.assertRaises(ValueError):
            self.bounds.start = dt.datetime(2003, 1, 1)
        with self.assertRaises(ValueError):
            self.bounds.end = dt.datetime(1999, 1, 1)

    # Start tests
    def test_invalid_start(self):
        with self.assertRaises(ValueError):
            self.bounds.start = "This is not a date time object"

    # End tests
    def test_invalid_end(self):
        with self.assertRaises(ValueError):
            self.bounds.end = "This is not a date time object"
# Allow the test module to be run directly: python test_dataset.py
if __name__ == '__main__':
    unittest.main()
| MJJoyce/climate | ocw/tests/test_dataset.py | Python | apache-2.0 | 8,091 |
'use strict';
module.exports = function (math) {
var util = require('../../util/index'),
BigNumber = math.type.BigNumber,
collection = require('../../type/collection'),
isNumber = util.number.isNumber,
isBoolean = util['boolean'].isBoolean,
isInteger = util.number.isInteger,
isCollection = collection.isCollection;
/**
* Compute the factorial of a value
*
* Factorial only supports an integer value as argument.
* For matrices, the function is evaluated element wise.
*
* Syntax:
*
* math.factorial(n)
*
* Examples:
*
* math.factorial(5); // returns 120
* math.factorial(3); // returns 6
*
* See also:
*
* combinations, permutations
*
* @param {Number | BigNumber | Array | Matrix} n An integer number
* @return {Number | BigNumber | Array | Matrix} The factorial of `n`
*/
math.factorial = function factorial (n) {
var value, res;
if (arguments.length != 1) {
throw new math.error.ArgumentsError('factorial', arguments.length, 1);
}
if (isNumber(n)) {
if (!isInteger(n) || n < 0) {
throw new TypeError('Positive integer value expected in function factorial');
}
value = n - 1;
res = n;
while (value > 1) {
res *= value;
value--;
}
if (res == 0) {
res = 1; // 0! is per definition 1
}
return res;
}
if (n instanceof BigNumber) {
if (!(isPositiveInteger(n))) {
throw new TypeError('Positive integer value expected in function factorial');
}
var one = new BigNumber(1);
value = n.minus(one);
res = n;
while (value.gt(one)) {
res = res.times(value);
value = value.minus(one);
}
if (res.equals(0)) {
res = one; // 0! is per definition 1
}
return res;
}
if (isBoolean(n)) {
return 1; // factorial(1) = 1, factorial(0) = 1
}
if (isCollection(n)) {
return collection.deepMap(n, factorial);
}
throw new math.error.UnsupportedTypeError('factorial', math['typeof'](n));
};
/**
* Test whether BigNumber n is a positive integer
* @param {BigNumber} n
* @returns {boolean} isPositiveInteger
*/
var isPositiveInteger = function(n) {
return n.isInteger() && n.gte(0);
};
};
| wyom/mathjs | lib/function/probability/factorial.js | JavaScript | apache-2.0 | 2,388 |
/* This file is automatically generated. DO NOT EDIT! */
/* Generated from: NetBSD: mknative-gcc,v 1.59 2011/07/03 06:47:12 mrg Exp */
/* Generated from: NetBSD: mknative.common,v 1.9 2007/02/05 18:26:01 apb Exp */
/* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define if the compiler has a thread header that is non single. */
#define HAVE_GTHR_DEFAULT 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the <sched.h> header file. */
#define HAVE_SCHED_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
#define LT_OBJDIR ".libs/"
/* Define to 1 if your C compiler doesn't accept -c and -o together. */
/* #undef NO_MINUS_C_MINUS_O */
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME "package-unused"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "package-unused version-unused"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "libobjc"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "version-unused"
/* Define if the compiler is configured for setjmp/longjmp exceptions. */
/* #undef SJLJ_EXCEPTIONS */
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
| ayyucedemirbas/Minix-Source-Code | minix-master/external/gpl3/gcc/lib/libobjc/arch/mipsel/config.h | C | apache-2.0 | 2,329 |
/*
* Copyright 2010-2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.ec2.model.transform;
import java.util.Map;
import java.util.Map.Entry;
import javax.xml.stream.events.XMLEvent;
import com.amazonaws.services.ec2.model.*;
import com.amazonaws.transform.Unmarshaller;
import com.amazonaws.transform.MapEntry;
import com.amazonaws.transform.StaxUnmarshallerContext;
import com.amazonaws.transform.SimpleTypeStaxUnmarshallers.*;
/**
 * Describe Tags Result StAX Unmarshaller
 */
public class DescribeTagsResultStaxUnmarshaller implements Unmarshaller<DescribeTagsResult, StaxUnmarshallerContext> {

    /**
     * Unmarshalls a DescribeTags response from the StAX event stream.
     *
     * @param context StAX context positioned at the result element
     * @return the populated {@link DescribeTagsResult}
     * @throws Exception if the underlying XML stream cannot be read
     */
    public DescribeTagsResult unmarshall(StaxUnmarshallerContext context) throws Exception {
        DescribeTagsResult describeTagsResult = new DescribeTagsResult();
        int originalDepth = context.getCurrentDepth();
        // Result members live one level below the current element (two
        // levels when starting at the document root).
        int targetDepth = originalDepth + 1;
        if (context.isStartOfDocument()) targetDepth += 1;
        while (true) {
            XMLEvent xmlEvent = context.nextEvent();
            if (xmlEvent.isEndDocument()) return describeTagsResult;
            if (xmlEvent.isAttribute() || xmlEvent.isStartElement()) {
                // Each tagSet/item element yields one TagDescription entry.
                if (context.testExpression("tagSet/item", targetDepth)) {
                    describeTagsResult.getTags().add(TagDescriptionStaxUnmarshaller.getInstance().unmarshall(context));
                    continue;
                }
            } else if (xmlEvent.isEndElement()) {
                // Left the result element: unmarshalling is complete.
                if (context.getCurrentDepth() < originalDepth) {
                    return describeTagsResult;
                }
            }
        }
    }

    private static DescribeTagsResultStaxUnmarshaller instance;

    // NOTE(review): lazy initialization is unsynchronized; a race can create
    // extra instances. Benign here since the class holds no state, but worth
    // confirming against the SDK's threading expectations.
    public static DescribeTagsResultStaxUnmarshaller getInstance() {
        if (instance == null) instance = new DescribeTagsResultStaxUnmarshaller();
        return instance;
    }
}
| XidongHuang/aws-sdk-for-java | src/main/java/com/amazonaws/services/ec2/model/transform/DescribeTagsResultStaxUnmarshaller.java | Java | apache-2.0 | 2,423 |
/* Legacy WebKit (pre-spec) flexbox helper classes. */

/* Shared setup: enable box layout with border-box sizing. */
.enyo-hflexbox, .enyo-vflexbox {
	display: -webkit-box;
	-webkit-box-sizing: border-box;
}

/* Lay out children horizontally. */
.enyo-hflexbox {
	-webkit-box-orient: horizontal;
}

/* Lay out children vertically. */
.enyo-vflexbox {
	-webkit-box-orient: vertical;
}
| enyojs/enyo-1.0 | framework/source/base/themes/default-theme/css/FlexLayout.css | CSS | apache-2.0 | 195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.core.metamodel.facets.properties.property.disabled;
import org.apache.isis.applib.annotation.Editing;
import org.apache.isis.applib.annotation.Property;
import org.apache.isis.applib.annotation.When;
import org.apache.isis.applib.annotation.Where;
import org.apache.isis.core.metamodel.facetapi.FacetHolder;
import org.apache.isis.core.metamodel.facets.members.disabled.DisabledFacet;
import org.apache.isis.core.metamodel.facets.members.disabled.DisabledFacetAbstractImpl;
public class DisabledFacetForPropertyAnnotation extends DisabledFacetAbstractImpl {

    /**
     * Factory: derives a {@link DisabledFacet} from the {@link Property#editing()}
     * attribute of the supplied annotation, or returns {@code null} when no facet
     * should be installed for this property.
     */
    public static DisabledFacet create(final Property property, final FacetHolder holder) {
        if (property == null) {
            return null;
        }
        final String disabledReason = property.editingDisabledReason();
        switch (property.editing()) {
            case DISABLED:
                return new DisabledFacetForPropertyAnnotation(disabledReason, holder);
            case AS_CONFIGURED:
                // nothing needs to be done here; the DomainObjectFactory (processing @DomainObject annotation)
                // will install an ImmutableFacetForDomainObjectAnnotation on the domain object and then a
                // DisabledFacetOnPropertyDerivedFromImmutable facet will be installed.
                // (fall through - no facet is contributed in this case)
            case ENABLED:
            default:
                return null;
        }
    }

    private DisabledFacetForPropertyAnnotation(final String reason, final FacetHolder holder) {
        super(When.ALWAYS, Where.EVERYWHERE, reason, holder);
    }
}
| howepeng/isis | core/metamodel/src/main/java/org/apache/isis/core/metamodel/facets/properties/property/disabled/DisabledFacetForPropertyAnnotation.java | Java | apache-2.0 | 2,470 |
module ActionMailer
  # Returns the version of the currently loaded ActionMailer as a <tt>Gem::Version</tt>
  def self.gem_version
    Gem::Version.new VERSION::STRING
  end

  # Individual version components, assembled into VERSION::STRING
  # ("4.1.6" here). PRE is a prerelease tag (e.g. "rc1") and is only
  # included when non-nil, thanks to +compact+.
  module VERSION
    MAJOR = 4
    MINOR = 1
    TINY = 6
    PRE = nil

    STRING = [MAJOR, MINOR, TINY, PRE].compact.join(".")
  end
end
| WilliamFdosS/os-management | vendor/bundle/ruby/2.1.0/gems/actionmailer-4.1.6/lib/action_mailer/gem_version.rb | Ruby | apache-2.0 | 318 |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.infer;
import com.facebook.buck.core.config.BuckConfig;
import com.facebook.buck.core.config.ConfigView;
import com.facebook.buck.core.model.TargetConfiguration;
import com.facebook.buck.core.model.UnconfiguredBuildTarget;
import com.facebook.buck.core.sourcepath.SourcePath;
import com.facebook.buck.core.toolchain.toolprovider.ToolProvider;
import com.facebook.buck.core.toolchain.toolprovider.impl.ConstantToolProvider;
import com.facebook.buck.core.util.immutables.BuckStyleValue;
import com.facebook.buck.rules.tool.config.ToolConfig;
import com.google.common.collect.ImmutableList;
import java.nio.file.Paths;
import java.util.Optional;
import org.immutables.value.Value;
/**
 * Infer specific buck config. Reads the {@code [infer_java]} section of
 * {@code .buckconfig}. Accessors are memoized via {@code @Value.Lazy}
 * (Immutables-generated subclass).
 */
@BuckStyleValue
public abstract class InferConfig implements ConfigView<BuckConfig> {
  // TODO(arr): change to just "infer" when cxx and java configs are consolidated
  private static final String SECTION = "infer_java";

  // Config field naming the infer distribution (either a path or a build target).
  private static final String DIST_FIELD = "dist";
  // Name of the binary inside the dist used when "dist_binary" is not configured.
  private static final String DEFAULT_DIST_BINARY = "infer";

  @Override
  public abstract BuckConfig getDelegate();

  /** Creates a view over the given {@link BuckConfig}. */
  public static InferConfig of(BuckConfig delegate) {
    return ImmutableInferConfig.of(delegate);
  }

  /** Tool configured under {@code [infer_java] binary}, if any. */
  @Value.Lazy
  public Optional<ToolProvider> getBinary() {
    return getDelegate().getView(ToolConfig.class).getToolProvider(SECTION, "binary");
  }

  /**
   * Depending on the type of dist (plain path vs target) either return a {@link
   * ConstantToolProvider} or {@link InferDistFromTargetProvider} with properly set up parse time
   * deps.
   */
  @Value.Lazy
  public Optional<ToolProvider> getDist() {
    Optional<String> valueOpt = getDelegate().getValue(SECTION, DIST_FIELD);
    if (!valueOpt.isPresent()) {
      return Optional.empty();
    }
    String value = valueOpt.get();

    // If the raw value parses as a build target, build the dist from that target;
    // otherwise treat it as a filesystem path.
    Optional<UnconfiguredBuildTarget> targetOpt =
        getDelegate().getMaybeUnconfiguredBuildTarget(SECTION, DIST_FIELD);
    ToolProvider toolProvider =
        targetOpt
            .map(this::mkDistProviderFromTarget)
            .orElseGet(() -> this.mkDistProviderFromPath(value));

    return Optional.of(toolProvider);
  }

  /** Binary name within the dist; {@code [infer_java] dist_binary}, default "infer". */
  @Value.Lazy
  public String getDistBinary() {
    return getDelegate().getValue(SECTION, "dist_binary").orElse(DEFAULT_DIST_BINARY);
  }

  /** Value of {@code [infer_java] version}, if configured. */
  @Value.Lazy
  public Optional<String> getVersion() {
    return getDelegate().getValue(SECTION, "version");
  }

  /** Source path configured under {@code [infer_java] config_file}, if any. */
  @Value.Lazy
  public Optional<SourcePath> getConfigFile(TargetConfiguration targetConfiguration) {
    return getDelegate().getSourcePath(SECTION, "config_file", targetConfiguration);
  }

  /** Extra command-line args for nullsafe ({@code [infer_java] nullsafe_args}). */
  @Value.Lazy
  public ImmutableList<String> getNullsafeArgs() {
    return getDelegate().getListWithoutComments(SECTION, "nullsafe_args");
  }

  /** Directory with third party signatures for nullsafe. */
  @Value.Lazy
  public Optional<SourcePath> getNullsafeThirdPartySignatures(
      TargetConfiguration targetConfiguration) {
    return getDelegate()
        .getSourcePath(SECTION, "nullsafe_third_party_signatures", targetConfiguration);
  }

  /** Whether output should be pretty-printed; defaults to false. */
  @Value.Lazy
  public Boolean getPrettyPrint() {
    return getDelegate().getBooleanValue(SECTION, "pretty_print", false);
  }

  /** Whether to execute remotely; defaults to false. */
  @Value.Lazy
  public Boolean executeRemotely() {
    return getDelegate().getBooleanValue(SECTION, "execute_remotely", false);
  }

  /** Wraps a build-target-based dist in a provider that declares parse-time deps. */
  private ToolProvider mkDistProviderFromTarget(UnconfiguredBuildTarget target) {
    String source = String.format("[%s] %s", SECTION, DIST_FIELD);
    return new InferDistFromTargetProvider(target, getDistBinary(), source);
  }

  /** Wraps a plain filesystem path dist in a constant provider. */
  private ToolProvider mkDistProviderFromPath(String path) {
    String errorMessage = String.format("%s:%s path not found", SECTION, DIST_FIELD);
    return new ConstantToolProvider(
        new InferDistTool(
            () -> getDelegate().getPathSourcePath(Paths.get(path), errorMessage), getDistBinary()));
  }
}
| facebook/buck | src/com/facebook/buck/infer/InferConfig.java | Java | apache-2.0 | 4,497 |
/*
* Copyright 2016 The OpenYOLO Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openyolo.spi.assetlinks.data;
import static junit.framework.Assert.assertNotNull;
import static junit.framework.Assert.assertTrue;
import java.util.List;
import org.json.JSONObject;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.RobolectricTestRunner;
import org.robolectric.annotation.Config;
import org.valid4j.errors.RequireViolation;
/**
* Tests for {@link WebAssetStatementDeserializer}.
*/
@RunWith(RobolectricTestRunner.class)
@Config(manifest = Config.NONE)
public class WebAssetStatementDeserializerTest {

    /** Passing a null JSON object must be rejected by valid4j's require check. */
    @Test(expected = RequireViolation.class)
    public void testNullJson() {
        new WebAssetStatementDeserializer().deserialize(null);
    }

    /** A JSON object with no "target" entry yields an empty, non-null list. */
    @Test
    public void testNoTarget() {
        final List<WebSiteAssetStatement> result =
                new WebAssetStatementDeserializer().deserialize(new JSONObject());

        assertNotNull(result);
        assertTrue(result.isEmpty());
    }
}
| iainmcgin/OpenYOLO-Android | spi/javatests/java/org/openyolo/spi/assetlinks/data/WebAssetStatementDeserializerTest.java | Java | apache-2.0 | 1,656 |
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef __itkSquaredEdgeLengthDecimationQuadEdgeMeshFilter_h
#define __itkSquaredEdgeLengthDecimationQuadEdgeMeshFilter_h

#include "itkEdgeDecimationQuadEdgeMeshFilter.h"

namespace itk
{
/**
 * \class SquaredEdgeLengthDecimationQuadEdgeMeshFilter
 * \brief Edge-decimation filter whose per-edge measure is the squared
 *        Euclidean distance between the edge endpoints (see MeasureEdge).
 * \ingroup ITKQuadEdgeMeshFiltering
 */
template< class TInput, class TOutput, class TCriterion >
class ITK_EXPORT SquaredEdgeLengthDecimationQuadEdgeMeshFilter:
  public EdgeDecimationQuadEdgeMeshFilter< TInput, TOutput, TCriterion >
{
public:
  /** Standard class typedefs. */
  typedef SquaredEdgeLengthDecimationQuadEdgeMeshFilter Self;
  typedef SmartPointer< Self > Pointer;
  typedef SmartPointer< const Self > ConstPointer;
  typedef EdgeDecimationQuadEdgeMeshFilter<
    TInput, TOutput, TCriterion > Superclass;

  /** Run-time type information (and related methods). */
  itkTypeMacro(SquaredEdgeLengthDecimationQuadEdgeMeshFilter, EdgeDecimationQuadEdgeMeshFilter);

  /** New macro for creation of through a Smart Pointer */
  itkNewMacro(Self);

  /** Input/output mesh typedefs. */
  typedef TInput InputMeshType;
  typedef typename InputMeshType::Pointer InputMeshPointer;

  typedef TOutput OutputMeshType;
  typedef typename OutputMeshType::Pointer OutputMeshPointer;
  typedef typename OutputMeshType::PointIdentifier OutputPointIdentifier;
  typedef typename OutputMeshType::PointType OutputPointType;
  typedef typename OutputMeshType::QEType OutputQEType;
  typedef typename OutputMeshType::EdgeCellType OutputEdgeCellType;
  typedef typename OutputMeshType::CellsContainerIterator OutputCellsContainerIterator;

  /** Decimation criterion and the measure it operates on. */
  typedef TCriterion CriterionType;
  typedef typename CriterionType::MeasureType MeasureType;

  /** Priority-queue machinery inherited from the superclass. */
  typedef typename Superclass::PriorityType PriorityType;
  typedef typename Superclass::PriorityQueueItemType PriorityQueueItemType;
  typedef typename Superclass::PriorityQueueType PriorityQueueType;
  typedef typename Superclass::PriorityQueuePointer PriorityQueuePointer;

  typedef typename Superclass::QueueMapType QueueMapType;
  typedef typename Superclass::QueueMapIterator QueueMapIterator;

  typedef typename Superclass::OperatorType OperatorType;
  typedef typename Superclass::OperatorPointer OperatorPointer;

protected:
  SquaredEdgeLengthDecimationQuadEdgeMeshFilter();
  virtual ~SquaredEdgeLengthDecimationQuadEdgeMeshFilter();

  /**
   * \brief Compute the measure value for iEdge
   * \param[in] iEdge
   * \return measure value, here the squared edge length
   */
  inline MeasureType MeasureEdge(OutputQEType *iEdge)
  {
    OutputMeshPointer output = this->GetOutput();

    OutputPointIdentifier id_org = iEdge->GetOrigin();
    OutputPointIdentifier id_dest = iEdge->GetDestination();

    OutputPointType org = output->GetPoint(id_org);
    OutputPointType dest = output->GetPoint(id_dest);

    return static_cast< MeasureType >( org.SquaredEuclideanDistanceTo(dest) );
  }

  /**
   * \param[in] iEdge
   * \return the optimal point location
   */
  OutputPointType Relocate(OutputQEType *iEdge);

private:
  // Copy construction and assignment intentionally declared but not
  // implemented: instances are non-copyable (ITK idiom).
  SquaredEdgeLengthDecimationQuadEdgeMeshFilter(const Self &);
  void operator=(const Self &);
};
}

#include "itkSquaredEdgeLengthDecimationQuadEdgeMeshFilter.hxx"
#endif
| paulnovo/ITK | Modules/Filtering/QuadEdgeMeshFiltering/include/itkSquaredEdgeLengthDecimationQuadEdgeMeshFilter.h | C | apache-2.0 | 4,147 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc.support.mapper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.xpack.core.security.ScrollHelper;
import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction;
import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest;
import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest;
import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest;
import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping;
import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel;
import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames;
import org.elasticsearch.xpack.security.authc.support.CachingRealm;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.elasticsearch.xpack.security.support.SecurityIndexManager;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import static org.elasticsearch.action.DocWriteResponse.Result.CREATED;
import static org.elasticsearch.action.DocWriteResponse.Result.DELETED;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME;
import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING;
import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS;
import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isIndexDeleted;
import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isMoveFromRedToNonRed;
/**
* This store reads + writes {@link ExpressionRoleMapping role mappings} in an Elasticsearch
* {@link RestrictedIndicesNames#SECURITY_MAIN_ALIAS index}.
* <br>
* The store is responsible for all read and write operations as well as
* {@link #resolveRoles(UserData, ActionListener) resolving roles}.
* <p>
* No caching is done by this class, it is handled at a higher level and no polling for changes
* is done by this class. Modification operations make a best effort attempt to clear the cache
* on all nodes for the user that was modified.
*/
public class NativeRoleMappingStore implements UserRoleMapper {

    private static final Logger logger = LogManager.getLogger(NativeRoleMappingStore.class);

    // Field/value used to discriminate role-mapping documents within the
    // shared security index.
    static final String DOC_TYPE_FIELD = "doc_type";
    static final String DOC_TYPE_ROLE_MAPPING = "role-mapping";

    // Document ids are "role-mapping_<name>"; see getIdForName/getNameFromId.
    private static final String ID_PREFIX = DOC_TYPE_ROLE_MAPPING + "_";

    // Listener that deliberately ignores both outcomes; used for cache
    // refreshes triggered by index-state changes where nobody is waiting.
    private static final ActionListener<Object> NO_OP_ACTION_LISTENER = new ActionListener<Object>() {
        @Override
        public void onResponse(Object o) {
            // nothing
        }

        @Override
        public void onFailure(Exception e) {
            // nothing
        }
    };

    private final Settings settings;
    private final Client client;
    private final SecurityIndexManager securityIndex;
    private final ScriptService scriptService;
    // Names of realms whose caches must be cleared when mappings change.
    private final List<String> realmsToRefresh = new CopyOnWriteArrayList<>();

    public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex, ScriptService scriptService) {
        this.settings = settings;
        this.client = client;
        this.securityIndex = securityIndex;
        this.scriptService = scriptService;
    }

    /** Strips the {@link #ID_PREFIX} from a document id, yielding the mapping name. */
    private String getNameFromId(String id) {
        assert id.startsWith(ID_PREFIX);
        return id.substring(ID_PREFIX.length());
    }

    /** Builds the document id for a mapping name (inverse of {@link #getNameFromId}). */
    private String getIdForName(String name) {
        return ID_PREFIX + name;
    }

    /**
     * Loads all mappings from the index.
     * <em>package private</em> for unit testing
     */
    protected void loadMappings(ActionListener<List<ExpressionRoleMapping>> listener) {
        if (securityIndex.isIndexUpToDate() == false) {
            listener.onFailure(new IllegalStateException(
                    "Security index is not on the current version - the native realm will not be operational until " +
                    "the upgrade API is run on the security index"));
            return;
        }
        final QueryBuilder query = QueryBuilders.termQuery(DOC_TYPE_FIELD, DOC_TYPE_ROLE_MAPPING);
        // Run the scroll search under the security origin, restoring the
        // caller's thread context before the listener is notified.
        final Supplier<ThreadContext.StoredContext> supplier = client.threadPool().getThreadContext().newRestorableContext(false);
        try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(SECURITY_ORIGIN)) {
            SearchRequest request = client.prepareSearch(SECURITY_MAIN_ALIAS)
                    .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings))
                    .setQuery(query)
                    .setSize(1000)
                    .setFetchSource(true)
                    .request();
            request.indicesOptions().ignoreUnavailable();
            ScrollHelper.fetchAllByEntity(client, request,
                new ContextPreservingActionListener<>(supplier, ActionListener.wrap((Collection<ExpressionRoleMapping> mappings) ->
                        // buildMapping may yield nulls for unparseable docs; drop them here.
                        listener.onResponse(mappings.stream().filter(Objects::nonNull).collect(Collectors.toList())),
                    ex -> {
                        logger.error(new ParameterizedMessage("failed to load role mappings from index [{}] skipping all mappings.",
                                SECURITY_MAIN_ALIAS), ex);
                        listener.onResponse(Collections.emptyList());
                    })),
                doc -> buildMapping(getNameFromId(doc.getId()), doc.getSourceRef()));
        }
    }

    /**
     * Parses a stored document into an {@link ExpressionRoleMapping}, or returns
     * {@code null} (with a warning) if the source cannot be parsed.
     */
    protected ExpressionRoleMapping buildMapping(String id, BytesReference source) {
        try (InputStream stream = source.streamInput();
             XContentParser parser = XContentType.JSON.xContent()
                     .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
            return ExpressionRoleMapping.parse(id, parser);
        } catch (Exception e) {
            logger.warn(new ParameterizedMessage("Role mapping [{}] cannot be parsed and will be skipped", id), e);
            return null;
        }
    }

    /**
     * Stores (create or update) a single mapping in the index
     */
    public void putRoleMapping(PutRoleMappingRequest request, ActionListener<Boolean> listener) {
        modifyMapping(request.getName(), this::innerPutMapping, request, listener);
    }

    /**
     * Deletes a named mapping from the index
     */
    public void deleteRoleMapping(DeleteRoleMappingRequest request, ActionListener<Boolean> listener) {
        modifyMapping(request.getName(), this::innerDeleteMapping, request, listener);
    }

    /**
     * Common wrapper for write operations: verifies the index version, delegates
     * to {@code inner}, and on success triggers a realm-cache refresh before
     * notifying the listener.
     */
    private <Request, Result> void modifyMapping(String name, CheckedBiConsumer<Request, ActionListener<Result>, Exception> inner,
                                                 Request request, ActionListener<Result> listener) {
        if (securityIndex.isIndexUpToDate() == false) {
            listener.onFailure(new IllegalStateException(
                    "Security index is not on the current version - the native realm will not be operational until " +
                    "the upgrade API is run on the security index"));
        } else {
            try {
                inner.accept(request, ActionListener.wrap(r -> refreshRealms(listener, r), listener::onFailure));
            } catch (Exception e) {
                logger.error(new ParameterizedMessage("failed to modify role-mapping [{}]", name), e);
                listener.onFailure(e);
            }
        }
    }

    /** Indexes a single mapping document; responds {@code true} when newly created. */
    private void innerPutMapping(PutRoleMappingRequest request, ActionListener<Boolean> listener) {
        final ExpressionRoleMapping mapping = request.getMapping();
        securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
            final XContentBuilder xContentBuilder;
            try {
                xContentBuilder = mapping.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true);
            } catch (IOException e) {
                listener.onFailure(e);
                return;
            }
            executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
                    client.prepareIndex(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, getIdForName(mapping.getName()))
                            .setSource(xContentBuilder)
                            .setRefreshPolicy(request.getRefreshPolicy())
                            .request(),
                    new ActionListener<IndexResponse>() {
                        @Override
                        public void onResponse(IndexResponse indexResponse) {
                            boolean created = indexResponse.getResult() == CREATED;
                            listener.onResponse(created);
                        }

                        @Override
                        public void onFailure(Exception e) {
                            logger.error(new ParameterizedMessage("failed to put role-mapping [{}]", mapping.getName()), e);
                            listener.onFailure(e);
                        }
                    }, client::index);
        });
    }

    /** Deletes a single mapping document; responds {@code true} when it existed. */
    private void innerDeleteMapping(DeleteRoleMappingRequest request, ActionListener<Boolean> listener) {
        final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze();
        if (frozenSecurityIndex.indexExists() == false) {
            // No index means nothing to delete - not an error.
            listener.onResponse(false);
        } else if (securityIndex.isAvailable() == false) {
            listener.onFailure(frozenSecurityIndex.getUnavailableReason());
        } else {
            securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> {
                executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
                        client.prepareDelete(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, getIdForName(request.getName()))
                                .setRefreshPolicy(request.getRefreshPolicy())
                                .request(),
                        new ActionListener<DeleteResponse>() {

                            @Override
                            public void onResponse(DeleteResponse deleteResponse) {
                                boolean deleted = deleteResponse.getResult() == DELETED;
                                listener.onResponse(deleted);
                            }

                            @Override
                            public void onFailure(Exception e) {
                                logger.error(new ParameterizedMessage("failed to delete role-mapping [{}]", request.getName()), e);
                                listener.onFailure(e);

                            }
                        }, client::delete);
            });
        }
    }

    /**
     * Retrieves one or more mappings from the index.
     * If <code>names</code> is <code>null</code> or {@link Set#isEmpty empty}, then this retrieves all mappings.
     * Otherwise it retrieves the specified mappings by name.
     */
    public void getRoleMappings(Set<String> names, ActionListener<List<ExpressionRoleMapping>> listener) {
        if (names == null || names.isEmpty()) {
            getMappings(listener);
        } else {
            // Load everything, then filter client-side by name.
            getMappings(new ActionListener<List<ExpressionRoleMapping>>() {
                @Override
                public void onResponse(List<ExpressionRoleMapping> mappings) {
                    final List<ExpressionRoleMapping> filtered = mappings.stream()
                            .filter(m -> names.contains(m.getName()))
                            .collect(Collectors.toList());
                    listener.onResponse(filtered);
                }

                @Override
                public void onFailure(Exception e) {
                    listener.onFailure(e);
                }
            });
        }
    }

    /**
     * Loads all mappings when the security index is available; otherwise
     * responds with an empty list (the index may simply not exist yet).
     */
    private void getMappings(ActionListener<List<ExpressionRoleMapping>> listener) {
        if (securityIndex.isAvailable()) {
            loadMappings(listener);
        } else {
            logger.info("The security index is not yet available - no role mappings can be loaded");
            if (logger.isDebugEnabled()) {
                logger.debug("Security Index [{}] [exists: {}] [available: {}] [mapping up to date: {}]",
                        SECURITY_MAIN_ALIAS,
                        securityIndex.indexExists(),
                        securityIndex.isAvailable(),
                        securityIndex.isMappingUpToDate()
                );
            }
            listener.onResponse(Collections.emptyList());
        }
    }

    /**
     * Provides usage statistics for this store.
     * The resulting map contains the keys
     * <ul>
     * <li><code>size</code> - The total number of mappings stored in the index</li>
     * <li><code>enabled</code> - The number of mappings that are
     * {@link ExpressionRoleMapping#isEnabled() enabled}</li>
     * </ul>
     */
    public void usageStats(ActionListener<Map<String, Object>> listener) {
        if (securityIndex.isAvailable() == false) {
            reportStats(listener, Collections.emptyList());
        } else {
            getMappings(ActionListener.wrap(mappings -> reportStats(listener, mappings), listener::onFailure));
        }
    }

    /** Builds the usage-stats map ("size"/"enabled") from the given mappings. */
    private void reportStats(ActionListener<Map<String, Object>> listener, List<ExpressionRoleMapping> mappings) {
        Map<String, Object> usageStats = new HashMap<>();
        usageStats.put("size", mappings.size());
        usageStats.put("enabled", mappings.stream().filter(ExpressionRoleMapping::isEnabled).count());
        listener.onResponse(usageStats);
    }

    /**
     * Clears realm caches when the security index recovers from red, is deleted,
     * or changes its up-to-date status, since stored mappings may have changed.
     */
    public void onSecurityIndexStateChange(SecurityIndexManager.State previousState, SecurityIndexManager.State currentState) {
        if (isMoveFromRedToNonRed(previousState, currentState) || isIndexDeleted(previousState, currentState) ||
            previousState.isIndexUpToDate != currentState.isIndexUpToDate) {
            refreshRealms(NO_OP_ACTION_LISTENER, null);
        }
    }

    /**
     * Best-effort cache clear on all registered realms, then forwards
     * {@code result} to {@code listener} (or the failure if the clear fails).
     */
    private <Result> void refreshRealms(ActionListener<Result> listener, Result result) {
        if (realmsToRefresh.isEmpty()) {
            listener.onResponse(result);
            return;
        }
        final String[] realmNames = this.realmsToRefresh.toArray(Strings.EMPTY_ARRAY);
        executeAsyncWithOrigin(client, SECURITY_ORIGIN, ClearRealmCacheAction.INSTANCE, new ClearRealmCacheRequest().realms(realmNames),
                ActionListener.wrap(
                        response -> {
                            logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
                                    "Cleared cached in realms [{}] due to role mapping change", Arrays.toString(realmNames)));
                            listener.onResponse(result);
                        },
                        ex -> {
                            logger.warn(new ParameterizedMessage("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)), ex);
                            listener.onFailure(ex);
                        }));
    }

    /**
     * Resolves the role names for a user by evaluating every enabled mapping's
     * expression against the user model and collecting the matching role names.
     */
    @Override
    public void resolveRoles(UserData user, ActionListener<Set<String>> listener) {
        getRoleMappings(null, ActionListener.wrap(
                mappings -> {
                    final ExpressionModel model = user.asModel();
                    final Set<String> roles = mappings.stream()
                            .filter(ExpressionRoleMapping::isEnabled)
                            .filter(m -> m.getExpression().match(model))
                            .flatMap(m -> {
                                final Set<String> roleNames = m.getRoleNames(scriptService, model);
                                logger.trace("Applying role-mapping [{}] to user-model [{}] produced role-names [{}]",
                                        m.getName(), model, roleNames);
                                return roleNames.stream();
                            })
                            .collect(Collectors.toSet());
                    logger.debug("Mapping user [{}] to roles [{}]", user, roles);
                    listener.onResponse(roles);
                }, listener::onFailure
        ));
    }

    /**
     * Indicates that the provided realm should have its cache cleared if this store is updated
     * (that is, {@link #putRoleMapping(PutRoleMappingRequest, ActionListener)} or
     * {@link #deleteRoleMapping(DeleteRoleMappingRequest, ActionListener)} are called).
     * @see ClearRealmCacheAction
     */
    @Override
    public void refreshRealmOnChange(CachingRealm realm) {
        realmsToRefresh.add(realm.name());
    }
}
| coding0011/elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java | Java | apache-2.0 | 18,616 |
# This is the Ruby 2.0-specific kernel file.

# Currently, all 1.9 features are in 2.0. We will need to
# differentiate when there are features from 1.9 removed
# in 2.0.

# These are loads so they don't pollute LOADED_FEATURES
# (Kernel#load, unlike #require, does not record the file in $LOADED_FEATURES).
load 'jruby/kernel19.rb'
load 'jruby/kernel20/enumerable.rb'
load 'jruby/kernel20/range.rb'
load 'jruby/kernel20/load_error.rb' | evandor/skysail-framework | skysail.server.text.asciidoc/resources/jruby/kernel20.rb | Ruby | apache-2.0 | 355 |
/*
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
/*
* Copyright 2004,2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sun.org.apache.xerces.internal.impl.dv.xs;
import java.math.BigInteger;
import javax.xml.datatype.DatatypeConstants;
import javax.xml.datatype.Duration;
import com.sun.org.apache.xerces.internal.impl.dv.InvalidDatatypeValueException;
import com.sun.org.apache.xerces.internal.impl.dv.ValidationContext;
/**
* Used to validate the <yearMonthDuration> type
*
* @xerces.internal
*
* @author Ankit Pasricha, IBM
*
* @version $Id: YearMonthDurationDV.java,v 1.6 2010-11-01 04:39:47 joehw Exp $
*/
class YearMonthDurationDV extends DurationDV {

    /**
     * Parses {@code content} as an xs:yearMonthDuration.
     *
     * @throws InvalidDatatypeValueException if the lexical form is not a
     *         valid yearMonthDuration
     */
    public Object getActualValue(String content, ValidationContext context)
        throws InvalidDatatypeValueException {
        try {
            return parse(content, DurationDV.YEARMONTHDURATION_TYPE);
        }
        catch (Exception ex) {
            throw new InvalidDatatypeValueException("cvc-datatype-valid.1.2.1", new Object[]{content, "yearMonthDuration"});
        }
    }

    /**
     * Builds a {@link Duration} carrying only the year and month fields of
     * the given date/time data; undefined fields are passed as {@code null}.
     */
    protected Duration getDuration(DateTimeData date) {
        // A negative year or month marks the whole duration as negative.
        final int sign = (date.year < 0 || date.month < 0) ? -1 : 1;
        final BigInteger years = (date.year != DatatypeConstants.FIELD_UNDEFINED)
            ? BigInteger.valueOf(sign * date.year) : null;
        final BigInteger months = (date.month != DatatypeConstants.FIELD_UNDEFINED)
            ? BigInteger.valueOf(sign * date.month) : null;
        return datatypeFactory.newDuration(sign == 1, years, months, null, null, null, null);
    }
}
| shun634501730/java_source_cn | src_en/com/sun/org/apache/xerces/internal/impl/dv/xs/YearMonthDurationDV.java | Java | apache-2.0 | 2,247 |
package depends
import (
"fmt"
"sync"
logutil "github.com/docker/infrakit/pkg/log"
"github.com/docker/infrakit/pkg/types"
)
// Package-level logger for this module.
var log = logutil.New("module", "run/depends")

// ParseDependsFunc returns a list of dependencies of this spec.
type ParseDependsFunc func(types.Spec) (Runnables, error)

var (
	// parsers maps a kind key (e.g. "group") to the dependency parser
	// registered for each interface spec (e.g. Group/1.0).
	parsers = map[string]map[types.InterfaceSpec]ParseDependsFunc{}
	// lock guards all access to parsers.
	lock = sync.RWMutex{}
)
// Register registers a helper for parsing for dependencies based on a key (e.g. 'group')
// and interface spec (Group/1.0). It panics if a parser has already been
// registered for the same key / interface spec pair.
func Register(key string, interfaceSpec types.InterfaceSpec, f ParseDependsFunc) {
	lock.Lock()
	defer lock.Unlock()

	if _, has := parsers[key]; !has {
		parsers[key] = map[types.InterfaceSpec]ParseDependsFunc{}
	}
	// Fixed typo in the panic message ("depdency" -> "dependency").
	if _, has := parsers[key][interfaceSpec]; has {
		panic(fmt.Errorf("duplicate dependency parser for %v / %v", key, interfaceSpec))
	}
	parsers[key][interfaceSpec] = f
}
// Resolve returns the dependencies listed in the spec as well as inside the properties.
// InterfaceSpec is optional. If nil, the first match by key (kind) is used. If nothing is registered, returns nil
// and no error. Error is returned for exceptions (eg. parsing, etc.)
func Resolve(spec types.Spec, key string, interfaceSpec *types.InterfaceSpec) (Runnables, error) {
	lock.RLock()
	defer lock.RUnlock()

	m, has := parsers[key]
	if !has {
		return nil, nil
	}
	if interfaceSpec == nil {
		for _, parse := range m {
			// First match
			// NOTE(review): Go map iteration order is unspecified, so with more
			// than one parser registered under this key the "first match" is
			// nondeterministic — confirm callers register at most one parser
			// per key when interfaceSpec is nil.
			return parse(spec)
		}
	}
	// NOTE(review): if interfaceSpec is nil and m is empty this would
	// dereference nil; Register never leaves m empty, so presumably
	// unreachable — verify if entries are ever removed.
	parse, has := m[*interfaceSpec]
	if !has {
		return nil, nil
	}
	return parse(spec)
}
| kaufers/infrakit | pkg/run/depends/depends.go | GO | apache-2.0 | 1,571 |
/*
* Copyright 2014 Space Dynamics Laboratory - Utah State University Research Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.usu.sdl.openstorefront.web.test.system;
import edu.usu.sdl.openstorefront.core.api.LookupService;
import edu.usu.sdl.openstorefront.core.api.model.AsyncTaskCallback;
import edu.usu.sdl.openstorefront.core.api.model.TaskFuture;
import edu.usu.sdl.openstorefront.core.api.model.TaskRequest;
import edu.usu.sdl.openstorefront.core.entity.ErrorTypeCode;
import edu.usu.sdl.openstorefront.service.manager.AsyncTaskManager;
import edu.usu.sdl.openstorefront.web.test.BaseTestCase;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
/**
*
* @author dshurtleff
*/
/**
 * Exercises the async service proxy two ways: via a callback registered on the
 * task request, and by looking the task future up by name afterwards. Both
 * paths render the resulting error-type lookup codes into the test results.
 */
public class AsyncProxyTest
        extends BaseTestCase
{
    private List<ErrorTypeCode> errorTypeCodes = new ArrayList<>();

    public AsyncProxyTest()
    {
        this.description = "Async Proxy Test";
    }

    @Override
    protected void runInternalTest()
    {
        results.append("Call back style: <br>");

        TaskRequest request = new TaskRequest();
        request.setAllowMultiple(true);
        request.setName(UUID.randomUUID().toString());
        request.setCallback(new AsyncTaskCallback()
        {
            @Override
            public void beforeExecute(TaskFuture taskFuture)
            {
                // Nothing to prepare before the task runs.
            }

            @Override
            public void afterExecute(TaskFuture taskFuture)
            {
                results.append("Runnning in callback: <br>");
                appendCodes(waitForCodes(taskFuture));
            }
        });

        LookupService asyncLookup = service.getAsyncProxy(service.getLookupService(), request);
        asyncLookup.findLookup(ErrorTypeCode.class);

        results.append("Lookup style: <br>");
        TaskFuture pendingTask = AsyncTaskManager.getTaskByName(request.getName());
        errorTypeCodes = waitForCodes(pendingTask);
        appendCodes(errorTypeCodes);
        try {
            Thread.sleep(100);
        } catch (InterruptedException ex) {
            throw new RuntimeException(ex);
        }
    }

    /**
     * Blocks on the task future and returns its result, wrapping checked
     * exceptions the same way both original call sites did.
     */
    @SuppressWarnings("unchecked")
    private List<ErrorTypeCode> waitForCodes(TaskFuture taskFuture)
    {
        try {
            return (List<ErrorTypeCode>) taskFuture.getFuture().get();
        } catch (InterruptedException | ExecutionException ex) {
            throw new RuntimeException(ex);
        }
    }

    /** Renders each code as "CODE - description" into the results buffer. */
    private void appendCodes(List<ErrorTypeCode> codes)
    {
        codes.forEach(code -> {
            results.append(code.getCode()).append(" - ").append(code.getDescription()).append("<br>");
        });
    }
}
| tyler-travis/openstorefront | server/openstorefront/openstorefront-web/src/main/java/edu/usu/sdl/openstorefront/web/test/system/AsyncProxyTest.java | Java | apache-2.0 | 2,955 |
/**
 * Timeline.css
 *
 * Base styles for the portfolio timeline: global resets, the icon font,
 * the timeline list container and the per-entry "card" (.cbp_tmlabel).
 */
/* Global border-box sizing so padding/borders don't grow element widths. */
*, *:after, *:before { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; }
body, html {
font-size: 100%;
padding: 0;
margin: 0;
}
.main a{
color:#0085a1;
}
.main a:hover {
/*color: #000;*/
/*text-decoration: none;*/
}
/* Centered content column shared by the main area and page header. */
.main, .container > header {
width: 90%;
max-width: 69em;
margin: 0 auto;
padding: 0 1.375em;
}
/* Custom icon font used by the .cbp_tmicon-* glyph classes below. */
@font-face {
font-family: 'huxico';
src:url('../fonts/huxico.eot');
src:url('../fonts/huxico.eot?#iefix') format('embedded-opentype'),
url('../fonts/huxico.woff') format('woff'),
url('../fonts/huxico.ttf') format('truetype'),
url('../fonts/huxico.svg#huxico') format('svg');
font-weight: normal;
font-style: normal;
} /* Made with http://icomoon.io/ */
/* Timeline list container; negative top margin pulls it up over the header. */
.cbp_tmtimeline {
padding: 0;
list-style: none;
position: relative;
max-width: 600px;
margin: 0 auto;
margin-top: -120px;
}
.cbp_tmtimeline > li {
position: relative;
box-sizing: border-box;
display: inline-block;
vertical-align: top;
width: 100%;
}
/* The date/time */
.cbp_tmlabel time {
font-size: 1.0em;
position: absolute;
top: 0px;
line-height: 64px;
right: 5px;
margin-right: 1em;
color: #a5a7aa;
}
/* The Duoshuo */
.cbp_tmlabel .comment{
padding: 0 1.5em 0.5em;
}
/* The card */
.cbp_tmtimeline > li .cbp_tmlabel {
background: white;
color: rgb(78, 78, 78);
/* line-height: 1.4; */
position: relative;
border-radius: 3px;
border: 1px solid #edeeee;
margin: 0 0 30px 0;
font-weight: 400;
font-size: 95%;
}
/* Card title bar. */
.cbp_tmtimeline > li .cbp_tmlabel h2 {
font-weight: bold;
font-size: 15px;
padding: 14px 20px;
margin: 0;
height: 64px;
line-height: 36px;
}
/* Arrow pseudo-element on odd cards. The original rule also contained
   `display: no;`, which is not a valid CSS value (browsers silently drop the
   declaration), so it has been removed — the rendered result is unchanged.
   If the intent was to hide the arrow, the correct value would be
   `display: none;`. */
.cbp_tmtimeline > li:nth-child(odd) .cbp_tmlabel:after {
border-right-color: #FFFFFF;
}
/* The icons */
/* Circular icon bubble positioned on the timeline spine, rendered with the
   'huxico' icon font declared above. */
.cbp_tmtimeline > li .cbp_tmicon {
width: 40px;
height: 40px;
font-family: 'huxico';
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
font-size: 1.4em;
line-height: 40px;
-webkit-font-smoothing: antialiased;
position: absolute;
color: #fff;
background: #46a4da;
border-radius: 50%;
box-shadow: 0 0 0 8px #afdcf8;
text-align: center;
left: 20%;
top: 0;
margin: 0 0 0 -25px;
}
/* Individual glyphs from the huxico font (codepoints e001-e005). */
.cbp_tmicon-phone:before {
content: "\e004";
font-family: 'huxico';
}
.cbp_tmicon-screen:before {
content: "\e005";
font-family: 'huxico';
}
.cbp_tmicon-html:before {
content: "\e001";
font-family: 'huxico';
}
.cbp_tmicon-video:before {
content: "\e002";
font-family: 'huxico';
}
.cbp_tmicon-font:before {
content: "\e003";
font-family: 'huxico';
}
/** Venders Logo **/
/* Framework/library logos rendered as 40x40 background images. */
.i-vue:before {
content: "";
display: block;
width: 40px;
height: 40px;
background: url("../images/logo-vue.png") 50% 50% no-repeat;
background-size: 20px;
}
.i-kissy:before {
content: "";
display: block;
width: 40px;
height: 40px;
background: url("../images/logo-kissy.png") 50% 50% no-repeat;
background-size: 20px;
}
.i-jquery:before {
content: "";
display: block;
width: 40px;
height: 40px;
background: url("../images/logo-jquery.png") 50% 50% no-repeat;
background-size: 20px;
}
.i-react:before {
content: "";
display: block;
width: 40px;
height: 40px;
background: url("../images/logo-react.png") 50% 50% no-repeat;
background-size: 22px;
}
/******* Hux Branch *****/
/* Theme overrides: page chrome, header hero, card content (lists, images,
   skill rows) and a small-screen media query. */
html{
font-size: 90%;
}
body{
background: #fafafa;
}
/* Full-width blue hero header behind the timeline. */
header{
background: #6ca8fb;
text-align: center;
height: 620px;
min-width: 100%;
padding: 5em 0px !important;
position: relative;
border-bottom: 1px solid #edeeee;
}
header h1 {
font-size: 70px;
color: white;
font-weight: 100;
margin-top: 26px;
margin-bottom: 10px;
line-height: 1;
letter-spacing: 0.1em;
}
header p{
text-align: center;
color: white;
font-size: 16px;
line-height: 1.6em;
letter-spacing: 1px;
font-weight: 300;
margin: 1.7em;
}
header img{
width: 190px;
}
/* Reset list chrome inside cards. */
.cbp_tmlabel .hux_ul{
border:none;
padding:0;
margin:0;
}
.cbp_tmlabel .hux_ul li{
padding:0;
margin: 0;
list-style: none;
}
/* Full-bleed images inside cards. */
.cbp_tmlabel img{
width: 100%;
margin-bottom: 1em;
border-top: 1px solid #eeefef;
border-bottom: 1px solid #eeefef;
}
.cbp_tmlabel p{
padding: 1em 0;
/*text-indent: 2em;*/
margin: 0 1.5em;
border-top: 1px solid rgba(255,255,255,0.4);
}
.cbp_tmlabel ul{
padding: 0em 0;
margin: 0 1.5em;
list-style: none;
}
.cbp_tmlabel ul li{
padding: 0.5em 0;
}
/* "Skill" rows: a float-left run of icons plus a float-right link button. */
.cbp_tmlabel ul li.skill{
overflow: auto;
margin-top: 9px;
border-top: 1px solid #eeefef;
}
.cbp_tmlabel ul li.skill>span, .cbp_tmlabel ul li.skill>a{
display: block;
float: left;
font-size: 1.3rem;
width: 40px;
height: 40px;
text-align: center;
line-height: 40px;
margin-right: 8px;
color: #a5a7aa;
transition: all 0.3s ease;
}
.cbp_tmlabel ul li a:hover{
background: rgba(0,0,0,0.05);
border-radius: 50%;
}
.cbp_tmlabel ul li.skill>span:nth-child(1){
margin-left: -7px;
}
.cbp_tmlabel ul li.skill span:before{
line-height: 40px;
}
/* Pill-shaped link button at the right edge of a skill row. */
.cbp_tmlabel ul li.skill span.link{
float: right;
font-size: 0.8em;
line-height: 30px;
width: auto;
padding: 0 11px;
min-width: 70px;
box-sizing: border-box;
height: 32px;
border-radius: 3px;
background: white;
border: 1px solid #D5D6D8;
margin: 4px 0px;
}
.cbp_tmlabel ul li.skill span.link:active{
background: #eee;
}
.cbp_tmlabel ul li.skill span.link a {
color: #a5a7aa;
display: block;
width: 100%;
height: 100%;
font-size: 14px;
font-weight: bold;
}
/* Narrow-screen adjustments (phones). */
@media screen and (max-width: 400px) {
.main{
width: 100%;
padding-bottom: 0px;
}
html{
font-size: 90%;
}
header p{
font-size: 1.05em;
letter-spacing: 0px;
margin: 1.5em;
}
.cbp_tmlabel ul li.skill>span, .cbp_tmlabel ul li.skill>a{
margin-right: 2px;
}
}
| hyb628/huxpro | portfolio/css/timeline.css | CSS | apache-2.0 | 5,737 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.transactions;
import java.util.Collection;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCheckedException;
import org.junit.Ignore;
/**
 * Variant of the atomic partition counter consistency test that runs with
 * persistence disabled (pure in-memory, "volatile" mode) and a larger
 * partition count. Tests that only make sense with persistence are ignored.
 */
public class AtomicVolatilePartitionCounterStateConsistencyTest extends AtomicPartitionCounterStateConsistencyTest {
/** {@inheritDoc} */
@Override protected boolean persistenceEnabled() {
// Volatile mode: native persistence is turned off.
return false;
}
/** {@inheritDoc} */
@Override protected int partitions() {
return 1024;
}
/** {@inheritDoc} */
@Ignore
@Override public void testSingleThreadedUpdateOrder() throws Exception {
// Not applicable for volatile mode.
}
/** {@inheritDoc} */
@Ignore
@Override public void testPartitionConsistencyCancelledRebalanceCoordinatorIsDemander() throws Exception {
// Not applicable for volatile mode.
}
/** {@inheritDoc} */
@Ignore
@Override public void testLateAffinityChangeDuringExchange() throws Exception {
// Not applicable for volatile mode.
}
/** {@inheritDoc} */
@Override protected void forceCheckpoint(Collection<Ignite> nodes) throws IgniteCheckedException {
// No-op: checkpoints exist only when persistence is enabled.
}
}
| samaitra/ignite | modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AtomicVolatilePartitionCounterStateConsistencyTest.java | Java | apache-2.0 | 2,030 |
/*
* Copyright (c) 2010-2012 Grid Dynamics Consulting Services, Inc, All Rights Reserved
* http://www.griddynamics.com
*
* This library is free software; you can redistribute it and/or modify it under the terms of
* the Apache License; either
* version 2.0 of the License, or any later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.griddynamics.jagger.dbapi.parameter;
import com.google.common.base.Objects;
/**
 * Value object identifying a metrics group by a pair of display names: one
 * shown in the upper position and one in the left position. Instances are
 * compared and hashed by both names.
 */
public class GroupKey {

    private String upperName;
    private String leftName;

    /** Uses the same name for both the upper and left positions. */
    public GroupKey(String upperName) {
        this(upperName, upperName);
    }

    public GroupKey(String upperName, String leftName) {
        this.upperName = upperName;
        this.leftName = leftName;
    }

    public String getUpperName() {
        return upperName;
    }

    public String getLeftName() {
        return leftName;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        GroupKey that = (GroupKey) o;
        // Null-safe comparison of both name components.
        return Objects.equal(upperName, that.upperName)
                && Objects.equal(leftName, that.leftName);
    }

    @Override
    public int hashCode() {
        // Same 31-based formula as before so hash values stay stable.
        int upperHash = upperName != null ? upperName.hashCode() : 0;
        int leftHash = leftName != null ? leftName.hashCode() : 0;
        return 31 * upperHash + leftHash;
    }

    @Override
    public String toString() {
        return Objects.toStringHelper(this)
                .add("upperName", upperName)
                .add("leftName", leftName)
                .toString();
    }
}
| Nmishin/jagger | dbapi/src/main/java/com/griddynamics/jagger/dbapi/parameter/GroupKey.java | Java | apache-2.0 | 2,536 |
using Newtonsoft.Json.Linq;
using System;
namespace Kudu.Core.Functions
{
/// <summary>
/// Strategy for reading and generating function-key JSON, parameterized by the
/// key object type <typeparamref name="T"/>. Implementations are expected to
/// differ per runtime version while sharing this contract.
/// </summary>
public interface IKeyJsonOps<T>
{
/// <summary>Number of keys present in the default key-file format.</summary>
int NumberOfKeysInDefaultFormat
{
get;
}
// key generation is based on run time
/// <summary>
/// Builds the key-file JSON for the given key pairs and runtime version;
/// also returns the unencrypted key value via <paramref name="unencryptedKey"/>.
/// </summary>
string GenerateKeyJson(Tuple<string,string>[] keyPairs, string functionRt, out string unencryptedKey);
// read existing key file based on the content format, not the run time version
/// <summary>
/// Extracts the key value from existing key-file JSON, reporting via
/// <paramref name="isEncrypted"/> whether the stored value is encrypted.
/// </summary>
string GetKeyValueFromJson(string json, out bool isEncrypted);
/// <summary>Creates the typed key object for the given key value and function name.</summary>
T GenerateKeyObject(string functionKey, string functionName);
}
}
| EricSten-MSFT/kudu | Kudu.Contracts/Functions/IKeyJsonOps.cs | C# | apache-2.0 | 611 |
@echo off
:: Visual Studio install root. Hardcoded to VS 2015 (14.0) -- adjust if the
:: toolchain is installed elsewhere.
set VISUAL_STUDIO_INSTALL_PATH=C:\Program Files (x86)\Microsoft Visual Studio 14.0
:: Set up an x86-hosted, x64-targeting MSVC build environment in this shell.
call "%VISUAL_STUDIO_INSTALL_PATH%\VC\vcvarsall.bat" x86_amd64
:: Delegate the actual packaging work to the PowerShell script.
powershell -ExecutionPolicy RemoteSigned -File run_package_dot_bat_instead_64.ps1
| robertgrimm/ds3_c_sdk | win32/package_64.bat | Batchfile | apache-2.0 | 242 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Sphinx configuration for the Apache Flume documentation build.
import sys
import os
from datetime import date
# eventlet/gevent should not monkey patch anything.
os.environ["GEVENT_NOPATCH"] = "yes"
os.environ["EVENTLET_NOPATCH"] = "yes"
#os.environ["CELERY_LOADER"] = "default"
# Directory containing this conf.py; used to locate local extensions.
this = os.path.dirname(os.path.abspath(__file__))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.join(os.pardir, "tests"))
sys.path.append(os.path.join(this, "_ext"))
#import celery
# General configuration
# ---------------------
# NOTE(review): sphinx.ext.pngmath was deprecated and later removed from
# Sphinx (replaced by sphinx.ext.imgmath) -- confirm the Sphinx version this
# build is pinned to before upgrading.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.intersphinx',
]
# Hide the "Created using Sphinx" footer.
html_show_sphinx = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Apache Flume'
copyright = '2009-%s The Apache Software Foundation' % date.today().year
keep_warnings = True
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ".".join(map(str, celery.VERSION[0:2]))
# The full version, including alpha/beta/rc tags.
#release = celery.__version__
exclude_trees = ['.build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
#intersphinx_mapping = {
# "http://docs.python.org/dev": None,
# "http://kombu.readthedocs.org/en/latest/": None,
# "http://django-celery.readthedocs.org/en/latest": None,
#}
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
highlight_language = 'none'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['../resources/images']
html_logo = 'images/flume-logo.png'
html_use_smartypants = True
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
#html_theme = 'default'
# Sidebar layout applied to every page.
html_sidebars = {
'**': ['localtoc.html', 'relations.html', 'sourcelink.html'],
}
| tmgstevens/flume | flume-ng-doc/sphinx/conf.py | Python | apache-2.0 | 3,274 |
require 'chef/knife/base_sync'
require 'chef/knife/core/object_loader'
class Chef
class Knife
class PackSync < Chef::Knife
include ::BaseSync
# Alternate-namespace tag used to carry a pack version's org visibility.
VISIBILITY_ALT_NS_TAG = 'enableForOrg'
banner "Loads packs into OneOps.\nUsage:\n circuit pack [OPTIONS] [PACKS...]"
# Pack selection and source-register options.
option :all,
:short => "-a",
:long => "--all",
:description => "Sync all packs"
option :register,
:short => "-r REGISTER",
:long => "--register REGISTER",
:description => "Specify the source register name to use during sync"
option :version,
:short => "-v VERSION",
:long => "--version VERSION",
:description => "Specify the source register version to use during sync"
option :pack_path,
:short => "-o PATH:PATH",
:long => "--pack-path PATH:PATH",
:description => "A colon-separated path to look for packs in",
:proc => lambda {|o| o.split(":")}
# Sync behavior options (non-semver only for --reload/--clean).
option :reload,
:long => "--reload",
:description => "Force pack sync even if digest signatue has not changed (not applicable for packs with semantic versioning)"
option :clean,
:long => "--clean",
:description => "Remove the current pack (and corresponding namespace) and then sync - 'fresh start' (not applicable for packs with semantic versioning)"
option :semver,
:long => "--semver",
:description => "Creates new patch version for each change"
# Knife command entry point. Resolves configuration (CLI flags fall back to
# Chef::Config values and, for :semver, the SEMVER env var), verifies the
# register namespace exists, collects the pack files to load (--all scans
# every pack_path directory; otherwise the named packs are used) and syncs
# each one, printing a summary at the end.
def run
t1 = Time.now
ENV['CMS_TRACE'] = 'true' if config[:cms_trace]
# CLI options take precedence; fall back to knife/Chef configuration.
config[:pack_path] ||= Chef::Config[:pack_path]
config[:register] ||= Chef::Config[:register]
config[:version] ||= Chef::Config[:version]
config[:semver] ||= ENV['SEMVER'].present?
Chef::Pack.config = config
@packs_loader ||= Knife::Core::ObjectLoader.new(Chef::Pack, ui)
validate_packs # safety measure: make sure no packs conflict in scope
circuit_ns_path = get_packs_ns
# The register must have been created beforehand (register command).
unless Cms::Namespace.first(:params => {:nsPath => circuit_ns_path})
ui.error("Can't find namespace #{circuit_ns_path}. Please register your source first with the register command.")
exit 1
end
if config[:all]
files = config[:pack_path].inject([]) {|a, dir| a + Dir.glob("#{dir}/*.rb").sort}
else
files = @name_args.inject([]) {|a, pack| a << "#{pack}.rb"}
end
if files.blank?
ui.error 'You must specify pack name(s) or use the --all option to sync all.'
exit(1)
end
# Audit trail recorded on every CI touched by this run.
comments = "#{ENV['USER']}:#{$0} #{config[:msg]}"
loaded_files = files.inject([]) {|a, f| a << f if sync_pack(f, comments); a}
t2 = Time.now
ui.info("\nProcessed #{files.size} files, loaded #{loaded_files.size} packs.\nDone at #{t2} in #{(t2 - t1).round(1)}sec")
end
# Scans every pack file on the pack path and aborts the run if two files
# declare the same source/name/version combination (version falls back to
# the major component of the configured register version).
def validate_packs
seen = {}
config[:pack_path].each do |dir|
Dir.glob("#{dir}/*.rb").each do |file|
pack = @packs_loader.load_from(config[:pack_path], file)
version = pack.version.presence || config[:version].split('.').first
key = "#{get_source}**#{pack.name.downcase}**#{version}"
if seen.has_key?(key)
ui.error("Conflict of pack source-name-version: #{key} is defined in #{file} and #{seen[key]}")
exit 1
else
seen[key] = file
end
end
end
end
private
# Source register name for this sync (from --register / Chef config).
def get_source
config[:register]
end
# Root namespace that holds every pack for this source register.
def get_packs_ns
"#{Chef::Config[:nspath]}/#{get_source}/packs"
end
# Namespace for one specific pack version.
def get_pack_ns(pack)
"#{get_packs_ns}/#{pack.name}/#{pack.version}"
end
# Loads one pack file and syncs it using either the semver or the legacy
# (non-semver) flow. Returns the pack's signature when it was loaded, nil
# when loading was skipped (signature unchanged), or false when the pack is
# marked ignore. Docs are synced in every non-ignored case.
def sync_pack(file, comments)
# Cache of existing mgmt.Pack CIs, keyed by lower-cased name.
@existing_pack_ci_map ||= Cms::Ci.all(:params => {:nsPath => get_packs_ns,
:ciClassName => 'mgmt.Pack'}).
inject({}) {|h, p| h[p.ciName.downcase] = p; h}
pack = @packs_loader.load_from(config[:pack_path], file)
pack_ci = @existing_pack_ci_map[pack.name.downcase]
pack.name(pack_ci ? pack_ci.ciName : pack.name.downcase) # Kludge for a legacy problem: some existing packs were loaded before names were converted to lower case, so reuse the stored CI name when one exists.
if pack.ignore
ui.info("Ignoring pack #{pack.name} version #{pack.version.presence || config[:version]}")
return false
elsif config[:semver] || pack.semver?
signature = sync_pack_semver(pack, comments)
else
signature = sync_pack_no_semver(pack, comments)
end
sync_docs(pack)
ui.info("Successfully synched pack #{pack.name} version #{pack.version} #{"[signature: #{signature}]" if signature}".green)
return signature
end
# Semver flow: versions are immutable. Determines whether a new patch
# version is needed (returns nil and skips loading when the signature is
# unchanged), creates the version CI, then uploads the design and
# per-environment manifest templates. On failure the just-created version
# CI is destroyed before re-raising.
def sync_pack_semver(pack, comments)
ui.info("\n--------------------------------------------------")
ui.info(" #{pack.name} #{pack.version} ".blue(true))
ui.info('--------------------------------------------------')
if config[:reload]
ui.warn('Reload option is not available in semver mode, all pack versions are '\
'immutable. If you need to force a new patch version, make a change in '\
'the pack (i.e. pack description) or specify patch version explicitly.')
end
# Resolves the concrete patch version to load; nil means "skip".
signature = check_pack_version_ver_update(pack)
return false unless signature # If pack signature matches nothing to do.
Log.debug(pack.to_yaml) if Log.debug?
version_ci = setup_pack_version(pack, comments, signature)
begin
ns = get_pack_ns(pack)
# Upload design template
sync_env(ns, 'mgmt.catalog', pack, '_default', pack.design_resources, comments)
# Upload manifest templates
pack.environments.each do |env, _|
setup_mode(pack, env, comments)
sync_env("#{ns}/#{env}", 'mgmt.manifest', pack, env, pack.environment_resources(env), comments)
end
rescue Exception => e
ui.error(e.message)
ui.info('Attempting to clean up...')
# Best-effort rollback of the new version CI so a broken load does not
# leave a half-populated immutable version behind.
begin
version_ci.destroy
rescue Exception
ui.warn("Failed to clean up pack #{pack.name} version #{pack.version}!")
end
raise e
end
return signature
end
# Legacy (non-semver) flow: a single mutable version per major. Skips the
# load when the stored signature matches (unless --reload), optionally
# wipes the pack first (--clean), reconciles stale CMS CIs/relations, then
# uploads design and manifest templates and stamps the new signature.
def sync_pack_no_semver(pack, comments)
signature = Digest::MD5.hexdigest(pack.signature)
pack.version((pack.version.presence || config[:version]).split('.').first) # default to the global knife version if not specified
ui.info("\n--------------------------------------------------")
ui.info(" #{pack.name} ver.#{pack.version} ".blue(true))
ui.info('--------------------------------------------------')
pack_ci = @existing_pack_ci_map[pack.name.downcase]
# --clean: destroy the existing pack CI (and its namespace) first.
if pack_ci && config[:clean]
@existing_pack_ci_map.delete(pack.name.downcase)
pack_ci.destroy
end
# If pack signature matches but reload option is not set - bail
return false if !config[:reload] && check_pack_version_no_ver_update(pack, signature)
Log.debug(pack.to_yaml) if Log.debug?
# First, check to see if anything from CMS need to flip to pending_deletion
fix_delta_cms(pack)
version_ci = setup_pack_version(pack, comments, '')
ns = get_pack_ns(pack)
# Upload design template
sync_env(ns, 'mgmt.catalog', pack, '_default', pack.design_resources, comments)
# Upload manifest templates
pack.environments.each do |env, _|
setup_mode(pack, env, comments)
sync_env("#{ns}/#{env}", 'mgmt.manifest', pack, env, pack.environment_resources(env), comments)
end
# Record the signature only after a fully successful upload.
version_ci.ciAttributes.commit = signature
unless save(version_ci)
ui.warn("Failed to update signature for pack #{pack.name} version #{pack.version}")
end
return signature
end
# Reconciles existing CMS state with the current pack definition for the
# design scope ('_default') plus every environment mode found in CMS:
# relations first (which also yields the still-referenced target CI names),
# then the CIs themselves.
def fix_delta_cms(pack)
ns_path = get_pack_ns(pack)
mode_names = Cms::Ci.all(:params => {:nsPath => ns_path, :ciClassName => 'mgmt.Mode'}).map(&:ciName)
envs = ['_default'] + mode_names
envs.each do |env|
preserved_targets = fix_rels_from_cms(pack, env)
fix_ci_from_cms(pack, env, preserved_targets, envs)
end
end
# Walks every relation in the pack's CMS namespace for the given env scope
# and flips its relationState: back to 'default' when the relation is still
# defined by the pack, to 'pending_deletion' when it no longer is. A
# relation counts as "in the pack" either via an explicit relation key
# (from::relation_name::to) or implicitly via the resource definitions
# (payloads, monitors, requires, entrypoints). Returns the list of target
# CI names that are still referenced, for use by fix_ci_from_cms.
def fix_rels_from_cms(pack, env = '_default')
pack_rels = pack.relations
target_rels = []
# Design scope has no sub-namespace; environments are nested under /env.
scope = (env == '_default') ? '' : "/#{env}"
Cms::Relation.all(:params => {:nsPath => "#{get_pack_ns(pack)}#{scope}",
:includeToCi => true,
:includeFromCi => true}).each do |r|
new_state = nil
fromCiName = r.fromCi.ciName
toCiName = r.toCi.ciName
relationShort = r.relationName.split('.').last
# e.g. "WatchedBy" -> "watched_by" to match the pack relation key format.
key = "#{fromCiName}::#{relationShort.scan(/[A-Z][a-z]+/).join('_').downcase}::#{toCiName}"
exists_in_pack = pack_rels.include?(key)
# Search through resource to determine if relation exists or not
unless exists_in_pack
case relationShort
when 'Payload'
exists_in_pack = pack.resources[fromCiName] && pack.resources[fromCiName].include?('payloads') &&
pack.resources[fromCiName]['payloads'].include?(toCiName)
when 'WatchedBy'
exists_in_pack = pack.resources[fromCiName] && pack.resources[fromCiName].include?('monitors') &&
pack.resources[fromCiName]['monitors'].include?(toCiName)
when 'Requires'
exists_in_pack = pack.resources[fromCiName] && pack.resources[toCiName]
when 'Entrypoint'
exists_in_pack = pack.entrypoints.include?(toCiName)
end
end
target_rels.push(toCiName) if exists_in_pack && !target_rels.include?(toCiName)
# Only update state when it actually needs to change direction.
if exists_in_pack && r.relationState == 'pending_deletion'
new_state = 'default'
elsif !exists_in_pack && r.relationState != 'pending_deletion'
new_state = 'pending_deletion'
end
if new_state
r.relationState = new_state
if save(r)
ui.debug("Successfuly updated ciRelationState to #{new_state} #{r.relationName} #{r.fromCi.ciName} <-> #{r.toCi.ciName} for #{env}")
else
ui.error("Failed to update ciRelationState to #{new_state} #{r.relationName} #{r.fromCi.ciName} <-> #{r.toCi.ciName} for #{env}")
end
end
end
target_rels
end
# Companion to fix_rels_from_cms: flips each CI's ciState in the env scope.
# A CI survives (state 'default') when it is a pack resource, a relation
# target still referenced by the pack, or an environment mode CI; anything
# else is marked 'pending_deletion'.
def fix_ci_from_cms(pack, env, relations, environments)
scope = (env == '_default') ? '' : "/#{env}"
pack_resources = pack.resources
Cms::Ci.all(:params => {:nsPath => "#{get_pack_ns(pack)}#{scope}"}).each do |resource|
new_state = nil
exists_in_pack = pack_resources.include?(resource.ciName) || relations.include?(resource.ciName) || environments.include?(resource.ciName)
if exists_in_pack && resource.ciState == 'pending_deletion'
new_state = 'default'
elsif !exists_in_pack && resource.ciState != 'pending_deletion'
new_state = 'pending_deletion'
end
if new_state
resource.ciState = new_state
if save(resource)
ui.debug("Successfuly updated ciState to #{new_state} for #{resource.ciName} for #{env}")
else
ui.error("Failed to update ciState to #{new_state} for #{resource.ciName} for #{env}")
end
end
end
end
# Semver decision logic. Determines which major.minor.patch version this
# load should target and whether a load is needed at all. Side effects:
# sets pack.version to the resolved version and carries over 'enabled' and
# visibility from the latest existing version. Returns the pack signature
# to load, or nil to skip (explicit version already exists, or the latest
# patch already matches the signature).
def check_pack_version_ver_update(pack)
all_versions = Cms::Ci.all(:params => {:nsPath => "#{get_packs_ns}/#{pack.name}",
:ciClassName => 'mgmt.Version',
:includeAltNs => VISIBILITY_ALT_NS_TAG})
major, minor, patch = (pack.version.blank? ? config[:version] : pack.version).split('.')
minor = '0' if minor.blank?
# Need to filter version for the same major and find latest patch version for the same minor.
latest_patch = nil
latest_patch_number = -1
versions = all_versions.select do |ci_v|
split = ci_v.ciName.split('.')
if major == split[0] && minor == split[1] && split[2].to_i > latest_patch_number
latest_patch = ci_v
latest_patch_number = split[2].to_i
end
major == split[0]
end
if versions.size > 0
version_ci = latest_patch || versions.sort_by(&:ciName).last
# Carry over 'enable' and 'visibility' from the latest patch or latest version overall.
pack.enabled(version_ci.ciAttributes.attributes['enabled'] != 'false')
pack.visibility(version_ci.altNs.attributes[VISIBILITY_ALT_NS_TAG])
end
if patch.present?
# Check to make sure version does not already exist.
version = "#{major}.#{minor}.#{patch}"
if versions.find {|ci_v| ci_v.ciName == version}
ui.warn("Pack #{pack.name} version #{pack.version} explicitly specified but it already exists, ignore it - will SKIP pack loading, but will try to update docs.")
return nil
else
pack.version(version)
ui.info("Pack #{pack.name} version #{pack.version} explicitly specified and it does not exist yet, will load.")
return pack.signature
end
else
ui.info("Pack #{pack.name} version #{pack.version} - patch version is not explicitly specified, continue with checking for latest patch version for it.")
end
# No explicit patch: compare against the latest existing patch and either
# skip (same signature), bump the patch number, or start at patch 0.
if latest_patch
pack.version(latest_patch.ciName)
signature = pack.signature
if latest_patch.ciAttributes.attributes['commit'] == signature
ui.info("Pack #{pack.name} latest patch version #{latest_patch.ciName} matches signature (#{signature}), will skip pack loading, but will try to update docs.")
return nil
else
ui.info("Pack #{pack.name} latest patch version #{latest_patch.ciName} signature is different from new pack signature #{signature}, will increment patch version and load.")
pack.version("#{major}.#{minor}.#{latest_patch.ciName.split('.')[2].to_i + 1}")
return pack.signature
end
else
ui.info("No patches found for #{pack.name} version #{major}.#{minor}, start at patch 0 and load.")
pack.version("#{major}.#{minor}.0")
return pack.signature
end
end
# Non-semver skip check: true when the stored version CI already carries
# this signature (load can be skipped), false when the version is missing
# or its signature differs.
def check_pack_version_no_ver_update(pack, signature)
pack_version = Cms::Ci.first(:params => {:nsPath => "#{get_packs_ns}/#{pack.name}", :ciClassName => 'mgmt.Version', :ciName => pack.version})
# Guard clause: no stored version at all means a load is required.
if pack_version.nil?
ui.info("Pack #{pack.name} version #{pack.version} not found")
return false
end
if pack_version.ciAttributes.attributes.key?('commit') && pack_version.ciAttributes.commit == signature
ui.info("Pack #{pack.name} version #{pack.version} matches signature #{signature}, use --reload to force load.")
true
else
ui.warn("Pack #{pack.name} version #{pack.version} signature is different from file signature #{signature}")
false
end
end
def setup_pack_version(pack, comments, signature)
pack_ci = @existing_pack_ci_map[pack.name.downcase]
packs_ns = get_packs_ns
if pack_ci
ui.debug("Updating pack #{pack.name}")
else
ui.info("Creating pack CI #{pack.name}")
pack_ci = build('Cms::Ci',
:nsPath => packs_ns,
:ciClassName => 'mgmt.Pack',
:ciName => pack.name)
end
pack_ci.comments = comments
pack_ci.ciAttributes.pack_type = pack.type
pack_ci.ciAttributes.description = pack.description
pack_ci.ciAttributes.category = pack.category
pack_ci.ciAttributes.owner = pack.owner
if save(pack_ci)
ui.debug("Successfuly saved pack CI #{pack.name}")
@existing_pack_ci_map[pack.name.downcase] = pack_ci
pack_version = Cms::Ci.first(:params => {:nsPath => "#{packs_ns}/#{pack.name}",
:ciClassName => 'mgmt.Version',
:ciName => pack.version})
if pack_version
ui.debug("Updating pack CI #{pack.name} version #{pack.version}")
else
ui.info("Creating pack CI #{pack.name} version #{pack.version}")
pack_version = build('Cms::Ci',
:nsPath => "#{packs_ns}/#{pack.name}",
:ciClassName => 'mgmt.Version',
:ciName => pack.version,
:ciAttributes => {:enabled => pack.enabled},
:altNs => {VISIBILITY_ALT_NS_TAG => pack.visibility})
end
pack_version.comments = comments
pack_version.ciAttributes.description = pack.description
pack_version.ciAttributes.commit = signature
if save(pack_version)
ui.debug("Successfuly saved pack version CI for: #{pack.name} #{pack.version}")
return pack_version
else
ui.error("Could not save pack version CI for: #{pack.name} #{pack.version}")
end
else
ui.error("Could not save pack CI #{pack.name}")
end
message = "Unable to setup namespace for pack #{pack.name} version #{pack.version}"
raise Exception.new(message)
end
# Creates or updates the mgmt.Mode CI for one environment of the pack
# version. Returns the saved CI; raises when the save fails.
def setup_mode(pack, env, comments)
ns = get_pack_ns(pack)
mode = Cms::Ci.first(:params => {:nsPath => ns, :ciClassName => 'mgmt.Mode', :ciName => env})
if mode
ui.debug("Updating pack #{pack.name} version #{pack.version} environment mode #{env}")
else
ui.info("Creating pack #{pack.name} version #{pack.version} environment mode #{env}")
mode = build('Cms::Ci',
:nsPath => ns,
:ciClassName => 'mgmt.Mode',
:ciName => env)
end
mode.comments = comments
mode.ciAttributes.description = pack.description
if save(mode)
ui.debug("Successfuly saved pack mode CI #{env}")
return mode
else
message = "Unable to setup environment namespace for pack #{pack.name} version #{pack.version} environment mode #{env}"
ui.error(message)
raise Exception.new(message)
end
end
# Uploads one environment scope ('_default' = design, otherwise a manifest
# env) of the pack: platform CI, component resources, structural relations,
# entrypoints, procedures, variables, policies and monitors; payloads only
# apply to the manifest package. Does nothing further if the platform CI
# could not be saved.
def sync_env(ns_path, package, pack, env, resources, comments)
ui.info("======> #{env == '_default' ? 'design' : env}")
Log.debug([pack.name, pack.version, package, ns_path, resources, comments].to_yaml) if Log.debug?
platform = sync_platform(ns_path, package, pack, comments)
if platform
components = sync_components(package, ns_path, platform, resources, comments)
%w(DependsOn ManagedVia SecuredBy).each do |relation_name|
sync_relations(relation_name, package, ns_path, pack.env_relations(env, relation_name), components)
end
upload_template_entrypoint(ns_path, pack, resources, components, platform, env)
upload_template_procedures(ns_path, pack, platform, env)
upload_template_variables(ns_path, pack, package, platform, env)
upload_template_policies(ns_path, pack, package, env)
sync_monitors(package, ns_path, resources, components)
sync_payloads(ns_path, resources, components) if package == 'mgmt.manifest'
end
end
# Creates or updates the platform CI for the pack under the given namespace,
# copying pack-level platform attribute overrides onto the CI and stamping
# source/pack/version metadata.
#
# Returns the saved platform Cms::Ci, or false if it could not be saved.
def sync_platform(nspath, package, pack, comments)
  ci_class_name = "#{package}.#{pack.type.capitalize}"
  platform = Cms::Ci.first(:params => {:nsPath      => nspath,
                                       :ciClassName => ci_class_name,
                                       :ciName      => pack.name})
  if platform
    ui.debug("Updating #{ci_class_name}")
  else
    ui.info("Creating #{ci_class_name}")
    platform = build('Cms::Ci',
                     :nsPath      => nspath,
                     :ciClassName => ci_class_name,
                     :ciName      => pack.name)
  end
  # Copy only attributes that both exist on the CI class and are defined in
  # the pack's platform section.
  plat_attrs = pack.platform && pack.platform[:attributes]
  if plat_attrs
    attrs = platform.ciAttributes.attributes
    attrs.each {|name, _| attrs[name] = plat_attrs[name] if plat_attrs.has_key?(name)}
  end
  platform.comments = comments
  platform.ciAttributes.description = pack.description
  platform.ciAttributes.source = get_source
  platform.ciAttributes.pack = pack.name
  platform.ciAttributes.version = pack.version
  if save(platform)
    ui.debug("Successfully saved #{ci_class_name}")   # typo fix: was 'Successfuly'
    return platform
  else
    ui.error("Could not save #{ci_class_name}, skipping pack")
    return false
  end
end
# Creates or updates the 'mgmt.Requires' relations (and their target component
# CIs) for every resource of the pack, then bulk-saves them in one call.
#
# Returns a map of component ciName => ciId for use by the relation sync.
# Raises when the bulk save fails.
def sync_components(package, ns_path, platform, resources, comments)
relations = []
existing = Cms::Relation.all(:params => {:ciId => platform.ciId,
:direction => 'from',
:relationShortName => 'Requires',
:includeToCi => true})
resources.each do |resource_name, resource|
# Build the component CI class name from package, optional source and the
# capitalized last cookbook segment.
class_name_parts = resource[:cookbook].split('.')
class_name_parts[-1] = class_name_parts[-1].capitalize
class_name_parts = class_name_parts.unshift(resource[:source]) if resource[:source]
class_name_parts = class_name_parts.unshift(package)
ci_class_name = class_name_parts.join('.')
relation = existing.find {|r| r.toCi.ciName == resource_name && r.toCi.ciClassName == ci_class_name}
if relation
ui.debug("Updating resource #{resource_name}")
else
ui.info("Creating resource #{resource_name}")
relation = build('Cms::Relation',
:relationName => 'mgmt.Requires',
:nsPath => ns_path,
:fromCiId => platform.ciId,
:toCiId => 0,
:toCi => build('Cms::Ci',
:nsPath => ns_path,
:ciClassName => ci_class_name,
:ciName => resource_name))
end
relation.comments = comments
relation.toCi.comments = comments
relation.relationAttributes.template = resource_name # default value for template attribute is the resource name
requires_attrs = resource[:requires]
if requires_attrs
attrs = relation.relationAttributes.attributes
# NOTE(review): truthiness check silently skips explicit false/nil values,
# unlike the has_key? check used for component attributes below — confirm
# this asymmetry is intended.
attrs.each {|name, _| attrs[name] = requires_attrs[name] if requires_attrs[name]}
end
component_attrs = resource[:attributes]
if component_attrs
attrs = relation.toCi.ciAttributes.attributes
attrs.each {|name, _| attrs[name] = component_attrs[name] if component_attrs.has_key?(name)}
end
relations << relation
end
# Bulk save; on failure the first element of the result is falsy.
relations, error = Cms::Relation.bulk(relations)
unless relations
ui.error("Could not save components: #{error}")
raise(error)
end
ui.info("synced #{relations.size} components")
return relations.inject({}) {|h, r| h[r.toCi.ciName] = r.toCiId; h}
end
# Creates or updates relations of the given type (DependsOn, ManagedVia,
# SecuredBy, ...) between previously synced components, then bulk-saves them.
#
# short_name - relation short name; expanded to "#{package}.#{short_name}"
# pack_rels  - pack relation definitions with :from_resource/:to_resource/:attributes
# components - map of component ciName => ciId produced by sync_components
#
# Relations referencing unknown components are skipped with a warning.
# Raises when the bulk save fails.
def sync_relations(short_name, package, ns_path, pack_rels, components)
  relation_name = "#{package}.#{short_name}"
  existing_rels = Cms::Relation.all(:params => {:nsPath       => ns_path,
                                                :relationName => relation_name})
  relations = pack_rels.inject([]) do |rels_to_save, pack_rel|
    from    = pack_rel[:from_resource]
    to      = pack_rel[:to_resource]
    from_id = components[from]
    to_id   = components[to]
    problems = []
    problems << "component #{from} not found" unless from_id
    problems << "component #{to} not found" unless to_id
    if problems.present?
      ui.warn("Can't process #{short_name} from #{from} to #{to}: #{problems.join('; ')}")
      next rels_to_save
    end
    # The same from/to pair may appear more than once in the pack definition;
    # in that case merge attributes into the already-pending relation.
    relation = rels_to_save.find {|d| d.fromCiId == from_id && d.toCiId == to_id}
    if relation
      ui.debug("Updating again #{short_name} from #{from} to #{to}")
    else
      relation = existing_rels.find {|d| d.fromCiId == from_id && d.toCiId == to_id}
      if relation
        ui.debug("Updating #{short_name} from #{from} to #{to}")
      else
        ui.info("Creating #{short_name} between #{from} to #{to}")
        relation = build('Cms::Relation',
                         :relationName => relation_name,
                         :nsPath       => ns_path,
                         :fromCiId     => from_id,
                         :toCiId       => to_id)
      end
      rels_to_save << relation
    end
    relation.merge_attributes(pack_rel[:attributes])
    rels_to_save
  end
  if relations.present?
    relations, error = Cms::Relation.bulk(relations)
    unless relations
      ui.error("Could not save #{short_name} relations: #{error}")
      raise(error)
    end
    # Spelling normalized to 'synced' for consistency with sync_components.
    ui.info("synced #{relations.size} #{short_name} relations")
  end
end
# Creates or updates 'mgmt.Entrypoint' relations from the platform to each
# resource declared as an entrypoint for the given environment.
def upload_template_entrypoint(nspath, pack, resources, components, platform, env)
  relation_name = 'mgmt.Entrypoint'
  # NOTE(review): this query does not pass :includeToCi, yet r.toCi is read
  # below — verify toCi is populated by the API in this code path.
  relations = Cms::Relation.all(:params => {:ciId         => platform.ciId,
                                            :nsPath       => nspath,
                                            :direction    => 'from',
                                            :relationName => relation_name})
  resources.each do |resource_name, _|
    next unless pack.environment_entrypoints(env)[resource_name]
    entrypoint = relations.find {|r| r.toCi.ciId == components[resource_name]}
    if entrypoint
      ui.debug("Updating entrypoint between platform and #{resource_name}")
    else
      ui.info("Creating entrypoint between platform and #{resource_name}")
      entrypoint = build('Cms::Relation',
                         :relationName => relation_name,
                         :nsPath       => nspath,
                         :fromCiId     => platform.ciId,
                         :toCiId       => components[resource_name])
    end
    entrypoint_attrs = pack.entrypoints[resource_name]['attributes']
    attrs = entrypoint.relationAttributes.attributes
    attrs.each {|name, __| attrs[name] = entrypoint_attrs[name] if entrypoint_attrs[name]}
    if save(entrypoint)
      ui.debug("Successfully saved entrypoint between platform and #{resource_name}")   # typo fix: was 'Successfuly'
    else
      ui.error("Could not save entrypoint between platform and #{resource_name}, skipping it")
    end
  end
end
# Creates or updates WatchedBy relations and their Monitor CIs for every
# resource that declares monitors. Hash-valued monitor attributes are
# serialized to JSON before being stored.
def sync_monitors(package, ns_path, resources, components)
  relation_name = "#{package}.WatchedBy"
  ci_class_name = "#{package}.Monitor"
  relations = Cms::Relation.all(:params => {:nsPath       => ns_path,
                                            :relationName => relation_name,
                                            :includeToCi  => true}).to_a
  resources.each do |resource_name, resource|
    next unless resource[:monitors]
    resource[:monitors].each do |monitor_name, monitor|
      relation = relations.find {|r| r.fromCiId == components[resource_name] && r.toCi.ciName == monitor_name}
      if relation
        ui.debug("Updating monitor #{monitor_name} for #{resource_name} in #{package}")
      else
        ui.info("Creating monitor #{monitor_name} for #{resource_name}")
        relation = build('Cms::Relation',
                         :relationName => relation_name,
                         :nsPath       => ns_path,
                         :fromCiId     => components[resource_name])
        # For legacy reasons, we might have monitors with the same name, so
        # several components link (via relation) to the same CI in the pack
        # template. Therefore, the monitor CI may already exist.
        duplicate_ci_name_rel = relations.find {|r| r.toCi.ciName == monitor_name}
        if duplicate_ci_name_rel
          ui.warn("Monitor #{monitor_name} for component #{resource_name} is not uniquely named, will re-use existing monitor CI with the same name")
          relation.toCiId = duplicate_ci_name_rel.toCiId
          if save(relation)
            relation.toCi = duplicate_ci_name_rel.toCi
          else
            ui.error("Could not create WatchedBy relation #{monitor_name} for #{resource_name}, skipping it")
            next
          end
        else
          relation.toCiId = 0
          relation.toCi = build('Cms::Ci',
                                :nsPath      => ns_path,
                                :ciClassName => ci_class_name,
                                :ciName      => monitor_name)
        end
        relations << relation
      end
      # Copy declared monitor attributes, serializing Hash values to JSON.
      attrs = relation.toCi.ciAttributes.attributes
      attrs.each do |name, _|
        if monitor[name]
          monitor[name] = monitor[name].to_json if monitor[name].is_a?(Hash)
          attrs[name] = monitor[name]
        end
      end
      if save(relation)
        ui.debug("Successfully saved monitor #{monitor_name} for #{resource_name} in #{package}")   # typo fix: was 'Successfuly'
      else
        ui.error("Could not save monitor #{monitor_name} for #{resource_name}, skipping it")
      end
    end
  end
end
# Creates or updates Payload relations and their Qpath CIs for every resource
# that declares payloads. Only invoked for 'mgmt.manifest' packages.
def sync_payloads(ns_path, resources, components)
  relation_name = 'mgmt.manifest.Payload'
  ci_class_name = 'mgmt.manifest.Qpath'
  relations = Cms::Relation.all(:params => {:nsPath          => ns_path,
                                            :relationName    => relation_name,
                                            :targetClassName => ci_class_name,
                                            :includeToCi     => true})
  # Index existing payload CIs by down-cased name to detect legacy duplicates.
  existing_rels = relations.inject({}) {|h, r| h[r.toCi.ciName.downcase] = r; h}
  resources.each do |resource_name, resource|
    next unless resource[:payloads]
    resource[:payloads].each do |payload_name, payload|
      relation = relations.find {|r| r.toCi.ciName == payload_name && r.fromCiId == components[resource_name]}
      # For legacy reasons, we might have payloads with the same name, so
      # several components link (via relation) to the same payload CI in the
      # pack template. Therefore, the payload CI may already exist.
      duplicate_ci_name_rel = existing_rels[payload_name.downcase]
      if duplicate_ci_name_rel && (!relation || relation.fromCiId != duplicate_ci_name_rel.fromCiId)
        ui.warn("Payload #{payload_name} for component #{resource_name} is not uniquely named, will re-use existing payload CI with the same name")
      end
      if relation
        ui.debug("Updating payload #{payload_name} for #{resource_name}")
      else
        ui.info("Creating payload #{payload_name} for #{resource_name}")
        relation = build('Cms::Relation',
                         :relationName => relation_name,
                         :nsPath       => ns_path,
                         :fromCiId     => components[resource_name])
        if duplicate_ci_name_rel
          relation.toCiId = duplicate_ci_name_rel.toCiId
          unless save(relation)
            ui.error("Could not create Payload relation #{payload_name} for #{resource_name}, skipping it")
            next
          end
          relation.toCi = duplicate_ci_name_rel.toCi
        else
          relation.toCiId = 0
          relation.toCi = build('Cms::Ci',
                                :nsPath      => ns_path,
                                :ciClassName => ci_class_name,
                                :ciName      => payload_name)
        end
      end
      attrs = relation.toCi.ciAttributes.attributes
      attrs.each {|name, _| attrs[name] = payload[name] if payload[name]}
      if save(relation)
        # Register newly created payload CIs so later duplicates re-use them.
        existing_rels[payload_name.downcase] = relation unless duplicate_ci_name_rel
        ui.debug("Successfully saved payload #{payload_name} for #{resource_name}")   # typo fix: was 'Successfuly'
      else
        ui.error("Could not save payload #{payload_name} for #{resource_name}, skipping it")
      end
    end
  end
end
# Creates or updates ControlledBy relations and Procedure CIs for the given
# environment. Hash-valued 'arguments' are serialized to JSON.
def upload_template_procedures(nspath, pack, platform, env)
  relation_name = 'mgmt.manifest.ControlledBy'
  ci_class_name = 'mgmt.manifest.Procedure'
  relations = Cms::Relation.all(:params => {:ciId            => platform.ciId,
                                            :nsPath          => nspath,
                                            :direction       => 'from',
                                            :relationName    => relation_name,
                                            :targetClassName => ci_class_name,
                                            :includeToCi     => true})
  pack.environment_procedures(env).each do |procedure_name, procedure_attributes|
    relation = relations.find {|r| r.toCi.ciName == procedure_name}
    if relation
      ui.debug("Updating procedure #{procedure_name} for environment #{env}")
    else
      ui.info("Creating procedure #{procedure_name} for environment #{env}")
      relation = build('Cms::Relation',
                       :relationName => relation_name,
                       :nsPath       => nspath,
                       :fromCiId     => platform.ciId,
                       :toCiId       => 0,
                       :toCi         => build('Cms::Ci',
                                              :nsPath      => nspath,
                                              :ciClassName => ci_class_name,
                                              :ciName      => procedure_name))
    end
    attrs = relation.toCi.ciAttributes.attributes
    attrs.each do |name, _|
      value = procedure_attributes[name]
      next unless value
      # Serialize 'arguments' hashes to JSON. Done on a local copy so the
      # caller's pack data is no longer mutated in place (previous behavior).
      value = value.to_json if name == 'arguments' && value.is_a?(Hash)
      attrs[name] = value
    end
    if save(relation)
      ui.debug("Successfully saved procedure #{procedure_name} for environment #{env}")   # typo fix: was 'Successfuly'
    else
      ui.error("Could not save procedure #{procedure_name} for environment #{env}, skipping it")
    end
  end
end
# Creates or updates ValueFor relations and Localvar CIs for the given
# environment. Note the relation direction: the variable CI points TO the
# platform (direction => 'to', :includeFromCi).
def upload_template_variables(nspath, pack, package, platform, env)
  relation_name = "#{package}.ValueFor"
  ci_class_name = "#{package}.Localvar"
  relations = Cms::Relation.all(:params => {:ciId            => platform.ciId,
                                            :direction       => 'to',
                                            :relationName    => relation_name,
                                            :targetClassName => ci_class_name,
                                            :includeFromCi   => true})
  pack.environment_variables(env).each do |variable_name, var_attrs|
    relation = relations.find {|r| r.fromCi.ciName == variable_name}
    if relation
      ui.debug("Updating variable #{variable_name} for environment #{env}")
    else
      ui.info("Creating variable #{variable_name} for environment #{env}")
      relation = build('Cms::Relation',
                       :relationName => relation_name,
                       :nsPath       => nspath,
                       :toCiId       => platform.ciId,
                       :fromCiId     => 0,
                       :fromCi       => build('Cms::Ci',
                                              :nsPath      => nspath,
                                              :ciClassName => ci_class_name,
                                              :ciName      => variable_name))
    end
    attrs = relation.fromCi.ciAttributes.attributes
    attrs.each {|name, _| attrs[name] = var_attrs[name] if var_attrs[name]}
    if save(relation)
      ui.debug("Successfully saved variable #{variable_name} for environment #{env}")   # typo fix: was 'Successfuly'
    else
      ui.error("Could not save variable #{variable_name} for environment #{env}, skipping it")
    end
  end
end
# Creates or updates Policy CIs for the given environment.
def upload_template_policies(nspath, pack, package, env)
  ci_class_name = "#{package}.Policy"
  policies = Cms::Ci.all(:params => {:nsPath      => nspath,
                                     :ciClassName => ci_class_name})
  pack.environment_policies(env).each do |policy_name, policy_attrs|
    policy = policies.find {|p| p.ciName == policy_name}
    unless policy
      policy = build('Cms::Ci',
                     :nsPath      => nspath,
                     :ciClassName => ci_class_name,
                     :ciName      => policy_name)
    end
    attrs = policy.ciAttributes.attributes
    attrs.each {|name, _| attrs[name] = policy_attrs[name] if policy_attrs[name]}
    if save(policy)
      # Message fixes: 'Successfuly' typo, and interpolate pack.name instead of
      # the pack object itself (which rendered as the object's to_s).
      ui.debug("Successfully saved policy #{policy_name} attributes for environment #{env} and #{pack.name}")
    else
      ui.error("Could not save policy #{policy_name} attributes for environment #{env} and #{pack.name}, skipping it")
    end
  end
end
# Uploads any documentation/image assets shipped next to the pack file
# (files named "<pack.name>.*" in the sibling 'doc' directory).
def sync_docs(pack)
  return unless sync_docs?
  source_dir = File.expand_path('doc', File.dirname(pack.filename))
  doc_files = Dir.glob("#{source_dir}/#{pack.name}.*")
  return if doc_files.empty?
  ui.info('docs and images:')
  target_prefix = "#{get_source}/packs/#{pack.name}/#{pack.version}"
  doc_files.each do |doc_file|
    sync_doc_file(doc_file, doc_file.gsub(source_dir, target_prefix))
  end
end
end
end
end
| oneops/OneOps | oneops-admin/lib/chef/knife/pack_sync.rb | Ruby | apache-2.0 | 40,395 |
// Copyright 2014 Docker authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the DOCKER-LICENSE file.
package archive
import (
"archive/tar"
"bufio"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
const (
	// Uncompressed represents an uncompressed archive stream.
	Uncompressed Compression = iota
	// Bzip2 is the bzip2 compression algorithm.
	Bzip2
	// Gzip is the gzip compression algorithm.
	Gzip
	// Xz is the xz compression algorithm.
	Xz
)

// Pre-Go1.9 archive/tar file type bits; see fillGo18FileTypeBits.
const (
	modeISDIR  = 040000  // Directory
	modeISFIFO = 010000  // FIFO
	modeISREG  = 0100000 // Regular file
	modeISLNK  = 0120000 // Symbolic link
	modeISBLK  = 060000  // Block special file
	modeISCHR  = 020000  // Character special file
	modeISSOCK = 0140000 // Socket
)

// Compression identifies which compression algorithm (if any) is applied to
// an archive stream.
type Compression int

// Extension returns the canonical file extension ("tar", "tar.gz", ...) for
// an archive using the specified compression algorithm; it returns "" for
// unknown values.
func (compression *Compression) Extension() string {
	extensions := map[Compression]string{
		Uncompressed: "tar",
		Bzip2:        "tar.bz2",
		Gzip:         "tar.gz",
		Xz:           "tar.xz",
	}
	if ext, ok := extensions[*compression]; ok {
		return ext
	}
	return ""
}
// WhiteoutFormat identifies the on-disk representation used for whiteout
// files; see TarOptions.WhiteoutFormat for how it applies on pack and unpack.
type WhiteoutFormat int
// TarOptions wraps the tar options.
type TarOptions struct {
	// IncludeFiles limits the archive to these paths (relative to the source
	// directory) when non-empty; empty/nil means "everything".
	IncludeFiles []string
	// ExcludePatterns lists .dockerignore-style patterns for paths to omit.
	ExcludePatterns []string
	// Compression selects the compression applied to the archive stream.
	Compression Compression
	NoLchown    bool
	// UIDMaps/GIDMaps describe the identity mapping applied to entry
	// ownership while packing (see tarAppender.IdentityMapping).
	UIDMaps []idtools.IDMap
	GIDMaps []idtools.IDMap
	// ChownOpts, when non-nil, forces this identity onto every entry.
	ChownOpts *idtools.Identity
	// IncludeSourceDir keeps the source directory itself as an entry
	// (relative paths are prefixed with "./").
	IncludeSourceDir bool
	// WhiteoutFormat is the expected on disk format for whiteout files.
	// This format will be converted to the standard format on pack
	// and from the standard format on unpack.
	WhiteoutFormat WhiteoutFormat
	// When unpacking, specifies whether overwriting a directory with a
	// non-directory is allowed and vice versa.
	NoOverwriteDirNonDir bool
	// For each include when creating an archive, the included name will be
	// replaced with the matching name from this map.
	RebaseNames map[string]string
	InUserNS    bool
}
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
//
// The returned ReadCloser streams the archive. Tarring runs in a background
// goroutine writing into a pipe, so errors hit while walking the tree are
// logged (and the stream ends early) rather than returned to the caller.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := newTarAppender(
			idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
			compressWriter,
			options.ChownOpts,
		)
		ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Errorf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Errorf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Errorf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		// Tracks paths already written so overlapping includes don't emit
		// duplicate entries.
		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					// NOTE(review): logs srcPath rather than the failing
					// filePath — confirm this is intended.
					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = pm.Matches(relFilePath)
					if err != nil {
						logrus.Errorf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// If we want to skip this file and its a directory
					// then we should first check to see if there's an
					// excludes pattern (e.g. !dir/file) that starts with this
					// dir. If so then we can't skip this dir.

					// Its not a dir then so we can just return/skip.
					if !f.IsDir() {
						return nil
					}

					// No exceptions (!...) in patterns so just skip dir
					if !pm.Exclusions() {
						return filepath.SkipDir
					}

					dirSlash := relFilePath + string(filepath.Separator)

					for _, pat := range pm.Patterns() {
						if !pat.Exclusion() {
							continue
						}
						if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
							// found a match - so can't skip this dir
							return nil
						}
					}

					// No matching exclusion dir so just skip dir
					return filepath.SkipDir
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
					// if pipe is broken, stop writing tar stream to it
					if err == io.ErrClosedPipe {
						return err
					}
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
// CompressStream returns a WriteCloser that applies the requested compression
// to everything written, forwarding to dest. Only Uncompressed and Gzip are
// supported for writing.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
	buf := pools.BufioWriter32KPool.Get(dest)
	switch compression {
	case Uncompressed:
		return pools.BufioWriter32KPool.NewWriteCloserWrapper(buf, buf), nil
	case Gzip:
		return pools.BufioWriter32KPool.NewWriteCloserWrapper(buf, gzip.NewWriter(dest)), nil
	default:
		// archive/bzip2 does not support writing and there is no stdlib xz
		// support at all. This is not a problem in practice: docker only ever
		// generates gzipped tars. Bzip2/Xz fall through to this same error.
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
// tarWhiteoutConverter converts whiteout files between a non-standard on-disk
// format and the standard (AUFS-style) tar representation; see
// tarAppender.WhiteoutConverter for how ConvertWrite is used while packing.
type tarWhiteoutConverter interface {
	// ConvertWrite may rewrite hdr and/or return an additional whiteout
	// header to be written after the original (see addTarFile).
	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
	// ConvertRead is the unpack-direction counterpart; the semantics of its
	// bool result are defined by platform-specific implementations not
	// visible in this file.
	ConvertRead(*tar.Header, string) (bool, error)
}
// tarAppender streams filesystem entries into a tar.Writer, carrying the
// state (hardlinks seen, identity mapping, forced ownership) that addTarFile
// needs across calls.
type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles       map[uint64]string
	IdentityMapping *idtools.IdentityMapping
	// ChownOpts, when non-nil, overrides the uid/gid of every written entry.
	ChownOpts *idtools.Identity

	// For packing and unpacking whiteout files in the
	// non standard format. The whiteout files defined
	// by the AUFS standard are used as the tar whiteout
	// standard.
	WhiteoutConverter tarWhiteoutConverter
}
// newTarAppender builds a tarAppender that writes tar entries to writer,
// applying the given identity mapping and optional forced ownership.
func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
	appender := &tarAppender{
		TarWriter:       tar.NewWriter(writer),
		Buffer:          pools.BufioWriter32KPool.Get(nil),
		SeenFiles:       make(map[uint64]string),
		IdentityMapping: idMapping,
		ChownOpts:       chownOpts,
	}
	return appender
}
// addTarFile adds to the tar archive a file from `path` as `name`.
// It handles symlink targets, hardlink deduplication (via SeenFiles),
// security xattrs, identity re-mapping and whiteout conversion before
// writing the header (and, for regular files, the contents).
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	// Resolve the symlink target so it can be recorded in the header.
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}

	hdr, err := FileInfoHeader(name, fi, link)
	if err != nil {
		return err
	}
	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return err
	}

	// if it's not a directory and has more than 1 link,
	// it's hard linked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		inode, err := getInodeFromStat(fi.Sys())
		if err != nil {
			return err
		}

		// a link should have a name that it links too
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	//check whether the file is overlayfs whiteout
	//if yes, skip re-mapping container ID mappings.
	isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0

	//handle re-mapping container ID mappings back to host ID mappings before
	//writing tar headers/files. We skip whiteout files because they were written
	//by the kernel and already have proper ownership relative to the host
	if !isOverlayWhiteout &&
		!strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) &&
		!ta.IdentityMapping.Empty() {
		fileIdentity, err := getFileIdentity(fi.Sys())
		if err != nil {
			return err
		}
		hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIdentity)
		if err != nil {
			return err
		}
	}

	// explicitly override with ChownOpts
	if ta.ChownOpts != nil {
		hdr.Uid = ta.ChownOpts.UID
		hdr.Gid = ta.ChownOpts.GID
	}

	if ta.WhiteoutConverter != nil {
		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		if err != nil {
			return err
		}

		// If a new whiteout file exists, write original hdr, then
		// replace hdr with wo to be written after. Whiteouts should
		// always be written after the original. Note the original
		// hdr may have been updated to be a whiteout with returning
		// a whiteout header
		if wo != nil {
			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
				return err
			}
			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
			}
			hdr = wo
		}
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	// Only regular files with content carry a data payload.
	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		// We use system.OpenSequential to ensure we use sequential file
		// access on Windows to avoid depleting the standby list.
		// On Linux, this equates to a regular os.Open.
		file, err := system.OpenSequential(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}
	return nil
}
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header.
// NOTE(review): the error from Lgetxattr is deliberately discarded, so a
// filesystem without xattr support is treated as "no capability" — confirm
// this best-effort behavior is intended before changing it.
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}
	return nil
}
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return nil, err
	}
	// Re-add the pre-Go1.9 type bits after applying chmodTarEntry (a
	// platform-specific mode adjustment defined elsewhere in this package).
	hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name
	// setHeaderForSpecialDevice (platform-specific) fills device numbers etc.
	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
		return nil, err
	}
	return hdr, nil
}
// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
// https://github.com/golang/go/commit/66b5a2f
func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
	fm := fi.Mode()
	// The mode categories below are mutually exclusive, so this if/else
	// chain is equivalent to a first-match switch.
	if fm.IsRegular() {
		mode |= modeISREG
	} else if fi.IsDir() {
		mode |= modeISDIR
	} else if fm&os.ModeSymlink != 0 {
		mode |= modeISLNK
	} else if fm&os.ModeDevice != 0 {
		if fm&os.ModeCharDevice != 0 {
			mode |= modeISCHR
		} else {
			mode |= modeISBLK
		}
	} else if fm&os.ModeNamedPipe != 0 {
		mode |= modeISFIFO
	} else if fm&os.ModeSocket != 0 {
		mode |= modeISSOCK
	}
	return mode
}
// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
	canonical, err := CanonicalTarNameForPath(name)
	if err != nil {
		return "", err
	}
	if !isDir {
		return canonical, nil
	}
	// Directories are always archived with a trailing slash.
	if strings.HasSuffix(canonical, "/") {
		return canonical, nil
	}
	return canonical + "/", nil
}
| rhatdan/cri-o | vendor/github.com/fsouza/go-dockerclient/internal/archive/archive.go | GO | apache-2.0 | 14,629 |
/**
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.openwire.v4;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.activemq.openwire.*;
import org.apache.activemq.command.*;
/**
* Test case for the OpenWire marshalling for RemoveSubscriptionInfo
*
*
* NOTE!: This file is auto generated - do not modify!
* if you need to make a change, please modify the groovy scripts
* under src/gram/script and then use maven openwire:generate to regenerate
* this file.
*
*
*/
public class RemoveSubscriptionInfoTest extends BaseCommandTestSupport {

    // Shared instance used by the generated OpenWire marshalling test suite.
    public static RemoveSubscriptionInfoTest SINGLETON = new RemoveSubscriptionInfoTest();

    // Builds a fully populated RemoveSubscriptionInfo for round-trip marshalling tests.
    public Object createObject() throws Exception {
        RemoveSubscriptionInfo info = new RemoveSubscriptionInfo();
        populateObject(info);
        return info;
    }

    // Fills every field with deterministic sample data. Note this file is
    // auto generated (see header) — values intentionally left untouched.
    protected void populateObject(Object object) throws Exception {
        super.populateObject(object);
        RemoveSubscriptionInfo info = (RemoveSubscriptionInfo) object;

        info.setConnectionId(createConnectionId("ConnectionId:1"));
        info.setSubscriptionName("SubcriptionName:2");
        info.setClientId("ClientId:3");
    }
}
| ryanemerson/activemq-artemis | tests/activemq5-unit-tests/src/test/java/org/apache/activemq/openwire/v4/RemoveSubscriptionInfoTest.java | Java | apache-2.0 | 2,049 |
/*
* Copyright 2002-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * {@code @CacheConfig} provides a mechanism for sharing common cache-related
 * settings at the class level.
 *
 * <p>When this annotation is present on a given class, it provides a set
 * of default settings for any cache operation defined in that class.
 *
 * <p>Settings declared directly on an individual cache operation always take
 * precedence over the class-level defaults declared here.
 *
 * @author Stephane Nicoll
 * @author Sam Brannen
 * @since 4.1
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface CacheConfig {

    /**
     * Names of the default caches to consider for caching operations defined
     * in the annotated class.
     * <p>If none is set at the operation level, these are used instead of the default.
     * <p>May be used to determine the target cache (or caches), matching the
     * qualifier value or the bean names of a specific bean definition.
     */
    String[] cacheNames() default {};

    /**
     * The bean name of the default {@link org.springframework.cache.interceptor.KeyGenerator} to
     * use for the class.
     * <p>If none is set at the operation level, this one is used instead of the default.
     * <p>The key generator is mutually exclusive with the use of a custom key. When such key is
     * defined for the operation, the value of this key generator is ignored.
     */
    String keyGenerator() default "";

    /**
     * The bean name of the custom {@link org.springframework.cache.CacheManager} to use to
     * create a default {@link org.springframework.cache.interceptor.CacheResolver} if none
     * is set already.
     * <p>If no resolver and no cache manager are set at the operation level, and no cache
     * resolver is set via {@link #cacheResolver}, this one is used instead of the default.
     * @see org.springframework.cache.interceptor.SimpleCacheResolver
     */
    String cacheManager() default "";

    /**
     * The bean name of the custom {@link org.springframework.cache.interceptor.CacheResolver} to use.
     * <p>If no resolver and no cache manager are set at the operation level, this one is used
     * instead of the default.
     */
    String cacheResolver() default "";

}
| shivpun/spring-framework | spring-context/src/main/java/org/springframework/cache/annotation/CacheConfig.java | Java | apache-2.0 | 2,865 |
/*
* Copyright 2014 Avanza Bank AB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.avanza.astrix.beans.ft;
import com.avanza.astrix.beans.core.AstrixBeanKey;
import com.avanza.astrix.beans.core.BeanProxy;
import com.avanza.astrix.beans.service.ServiceBeanProxyFactory;
/**
 * {@link ServiceBeanProxyFactory} implementation that wraps service beans in a
 * fault-tolerance {@link BeanProxy} obtained from the configured
 * {@link BeanFaultToleranceFactory}.
 *
 * @author Elias Lindholm
 *
 */
final class FaultToleranceServiceBeanProxyFactory implements ServiceBeanProxyFactory {

	private final BeanFaultToleranceFactory faultToleranceFactory;

	public FaultToleranceServiceBeanProxyFactory(BeanFaultToleranceFactory ftFactory) {
		this.faultToleranceFactory = ftFactory;
	}

	@Override
	public BeanProxy create(AstrixBeanKey<?> beanKey) {
		// Delegates proxy creation for the given bean key to the fault tolerance factory.
		return faultToleranceFactory.createFaultToleranceProxy(beanKey);
	}

	@Override
	public int order() {
		// Fixed ordering of this factory relative to other ServiceBeanProxyFactory instances.
		return 1;
	}

}
| jensim/astrix | astrix-context/src/main/java/com/avanza/astrix/beans/ft/FaultToleranceServiceBeanProxyFactory.java | Java | apache-2.0 | 1,277 |
+++
date = "2016-03-06T21:28:07-06:00"
title = "Gareth Rushgrove"
type = "talk"
+++
## Rate of Change, (Un)opinionated Platforms and Devops Coevolution
There has been a recent explosion in new technologies for running software systems.
Many architectures are now based on ever smaller (micro) services.
The devops movement has crossed the chasm and become mainstream with operators and developers working more closely together than ever before.
These three threads are the result of coevolution - where a change in one practice has a direct effect on others.
Join us to gain insights on how the adoption of practices such as devops and microservices, and of tools such as platform as a service, are interconnected. You'll also develop a better understanding of how devops emerged in widely different organisations. Last, but by no means least, we'll explore the power of coevolution, which makes cause and effect bi-directional and makes it possible to leverage what you do and how you do it to improve your organisation as a whole.
<script async class="speakerdeck-embed" data-id="6b6c0f54693147829eb767f53ce5d299" data-ratio="1.77777777777778" src="//speakerdeck.com/assets/embed.js"></script>
<iframe src="https://player.vimeo.com/video/165641911" width="640" height="360" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>
<p><a href="https://vimeo.com/165641911">Rate of Change, (Un)opinionated Platforms and Devops Coevolution by Gareth Rushgrove</a> from <a href="https://vimeo.com/devopsdaysldn16">DevOpsDays London 2016</a> on <a href="https://vimeo.com">Vimeo</a>.</p>
| joelaha/devopsdays-web | content/events/2016-london/program/gareth-rushgrove.md | Markdown | apache-2.0 | 1,615 |
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package f32_test
import (
"testing"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/math/f32"
)
// TestV4DSqrMagnitude checks the squared length of axis-aligned and mixed vectors.
func TestV4DSqrMagnitude(t *testing.T) {
	check := assert.To(t)
	cases := []struct {
		in   f32.Vec4
		want float32
	}{
		{f32.Vec4{0, 0, 0, 0}, 0},
		{f32.Vec4{1, 0, 0, 0}, 1},
		{f32.Vec4{0, 2, 0, 0}, 4},
		{f32.Vec4{0, 0, -3, 0}, 9},
		{f32.Vec4{0, 0, 0, -4}, 16},
		{f32.Vec4{1, 1, 1, 1}, 4},
	}
	for _, tc := range cases {
		check.For("%v.SqrMagnitude", tc.in).That(tc.in.SqrMagnitude()).Equals(tc.want)
	}
}
// TestV4DMagnitude checks the Euclidean length of axis-aligned and mixed vectors.
func TestV4DMagnitude(t *testing.T) {
	check := assert.To(t)
	cases := []struct {
		in   f32.Vec4
		want float32
	}{
		{f32.Vec4{0, 0, 0, 0}, 0},
		{f32.Vec4{1, 0, 0, 0}, 1},
		{f32.Vec4{0, 2, 0, 0}, 2},
		{f32.Vec4{0, 0, -3, 0}, 3},
		{f32.Vec4{0, 0, 0, -4}, 4},
		{f32.Vec4{1, 1, 1, 1}, 2},
	}
	for _, tc := range cases {
		check.For("%v.Magnitude", tc.in).That(tc.in.Magnitude()).Equals(tc.want)
	}
}
// TestV4DScale checks component-wise scaling, including negative and zero factors.
func TestV4DScale(t *testing.T) {
	check := assert.To(t)
	cases := []struct {
		in     f32.Vec4
		factor float32
		want   f32.Vec4
	}{
		{f32.Vec4{1, 0, 0, 0}, -1, f32.Vec4{-1, 0, 0, 0}},
		{f32.Vec4{0, 2, 0, 0}, -2, f32.Vec4{0, -4, 0, 0}},
		{f32.Vec4{0, 0, 3, 0}, -3, f32.Vec4{0, 0, -9, 0}},
		{f32.Vec4{0, 0, 0, 4}, -4, f32.Vec4{0, 0, 0, -16}},
		{f32.Vec4{1, 1, 1, 1}, 0, f32.Vec4{0, 0, 0, 0}},
	}
	for _, tc := range cases {
		check.For("%v.Scale", tc.in).That(tc.in.Scale(tc.factor)).Equals(tc.want)
	}
}
// TestV4DNormalize checks that vectors are scaled to unit length, preserving direction.
func TestV4DNormalize(t *testing.T) {
	check := assert.To(t)
	cases := []struct {
		in   f32.Vec4
		want f32.Vec4
	}{
		{f32.Vec4{1, 0, 0, 0}, f32.Vec4{1, 0, 0, 0}},
		{f32.Vec4{0, -2, 0, 0}, f32.Vec4{0, -1, 0, 0}},
		{f32.Vec4{0, 0, 3, 0}, f32.Vec4{0, 0, 1, 0}},
		{f32.Vec4{0, 0, 0, -4}, f32.Vec4{0, 0, 0, -1}},
		{f32.Vec4{1, 2, -2, 4}, f32.Vec4{1. / 5, 2. / 5, -2. / 5, 4. / 5}},
	}
	for _, tc := range cases {
		check.For("%v.Normalize", tc.in).That(tc.in.Normalize()).Equals(tc.want)
	}
}
// TestV4DXYZ checks the projection of a Vec4 onto its first three components.
func TestV4DXYZ(t *testing.T) {
	check := assert.To(t)
	cases := []struct {
		in   f32.Vec4
		want f32.Vec3
	}{
		{f32.Vec4{0, 0, 0, 0}, f32.Vec3{0, 0, 0}},
		{f32.Vec4{1, 2, 3, 4}, f32.Vec3{1, 2, 3}},
	}
	for _, tc := range cases {
		check.For("%v.V3D", tc.in).That(tc.in.XYZ()).Equals(tc.want)
	}
}
// TestAdd4D checks component-wise vector addition, including zero and inverse pairs.
func TestAdd4D(t *testing.T) {
	check := assert.To(t)
	cases := []struct {
		lhs, rhs, want f32.Vec4
	}{
		{f32.Vec4{0, 0, 0, 0}, f32.Vec4{0, 0, 0, 0}, f32.Vec4{0, 0, 0, 0}},
		{f32.Vec4{1, 2, 3, 4}, f32.Vec4{0, 0, 0, 0}, f32.Vec4{1, 2, 3, 4}},
		{f32.Vec4{0, 0, 0, 0}, f32.Vec4{4, 3, 2, 1}, f32.Vec4{4, 3, 2, 1}},
		{f32.Vec4{1, 2, 3, 4}, f32.Vec4{-1, -2, -3, -4}, f32.Vec4{0, 0, 0, 0}},
	}
	for _, tc := range cases {
		check.For("Add4D(%v, %v)", tc.lhs, tc.rhs).
			That(f32.Add4D(tc.lhs, tc.rhs)).Equals(tc.want)
	}
}
// TestSub4D checks component-wise vector subtraction, including zero operands.
func TestSub4D(t *testing.T) {
	check := assert.To(t)
	cases := []struct {
		lhs, rhs, want f32.Vec4
	}{
		{f32.Vec4{0, 0, 0, 0}, f32.Vec4{0, 0, 0, 0}, f32.Vec4{0, 0, 0, 0}},
		{f32.Vec4{1, 2, 3, 4}, f32.Vec4{0, 0, 0, 0}, f32.Vec4{1, 2, 3, 4}},
		{f32.Vec4{0, 0, 0, 0}, f32.Vec4{4, 3, 2, 1}, f32.Vec4{-4, -3, -2, -1}},
		{f32.Vec4{1, 2, 3, 4}, f32.Vec4{-1, -2, -3, -4}, f32.Vec4{2, 4, 6, 8}},
	}
	for _, tc := range cases {
		check.For("Sub4D(%v, %v)", tc.lhs, tc.rhs).
			That(f32.Sub4D(tc.lhs, tc.rhs)).Equals(tc.want)
	}
}
| Qining/gapid | core/math/f32/vec4_test.go | GO | apache-2.0 | 3,798 |
// Assembly-level metadata for this project. The attributes below are embedded
// into the compiled assembly and control its identity, COM visibility and
// type-library GUID.
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("535a68d1-d545-4e49-95e2-a5db29b76f20")]
// Version information for an assembly consists of the following four values:
//
// Major Version
// Minor Version
// Build Number
// Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
| halcwb/FSharp.Formatting | src/CSharpFormat/Properties/LocalAssemblyInfo.cs | C# | apache-2.0 | 1,144 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Form Validation : CodeIgniter User Guide</title>
<style type='text/css' media='all'>@import url('../userguide.css');</style>
<link rel='stylesheet' type='text/css' media='all' href='../userguide.css' />
<script type="text/javascript" src="../nav/nav.js"></script>
<script type="text/javascript" src="../nav/prototype.lite.js"></script>
<script type="text/javascript" src="../nav/moo.fx.js"></script>
<script type="text/javascript" src="../nav/user_guide_menu.js"></script>
<meta http-equiv='expires' content='-1' />
<meta http-equiv= 'pragma' content='no-cache' />
<meta name='robots' content='all' />
<meta name='author' content='ExpressionEngine Dev Team' />
<meta name='description' content='CodeIgniter User Guide' />
</head>
<body>
<!-- START NAVIGATION -->
<div id="nav"><div id="nav_inner"><script type="text/javascript">create_menu('../');</script></div></div>
<div id="nav2"><a name="top"></a><a href="javascript:void(0);" onclick="myHeight.toggle();"><img src="../images/nav_toggle_darker.jpg" width="154" height="43" border="0" title="Toggle Table of Contents" alt="Toggle Table of Contents" /></a></div>
<div id="masthead">
<table cellpadding="0" cellspacing="0" border="0" style="width:100%">
<tr>
<td><h1>CodeIgniter User Guide Version 1.7.3</h1></td>
<td id="breadcrumb_right"><a href="../toc.html">Table of Contents Page</a></td>
</tr>
</table>
</div>
<!-- END NAVIGATION -->
<!-- START BREADCRUMB -->
<table cellpadding="0" cellspacing="0" border="0" style="width:100%">
<tr>
<td id="breadcrumb">
<a href="http://codeigniter.com/">CodeIgniter Home</a> ›
<a href="../index.html">User Guide Home</a> ›
Form Validation
</td>
<td id="searchbox"><form method="get" action="http://www.google.com/search"><input type="hidden" name="as_sitesearch" id="as_sitesearch" value="codeigniter.com/user_guide/" />Search User Guide <input type="text" class="input" style="width:200px;" name="q" id="q" size="31" maxlength="255" value="" /> <input type="submit" class="submit" name="sa" value="Go" /></form></td>
</tr>
</table>
<!-- END BREADCRUMB -->
<br clear="all" />
<!-- START CONTENT -->
<div id="content">
<p class="important">
This library has been deprecated. Use of the form_validation library is encouraged.
</p>
<h1>Form Validation</h1>
<p>Before explaining CodeIgniter's approach to data validation, let's describe the ideal scenario:</p>
<ol>
<li>A form is displayed.</li>
<li>You fill it in and submit it.</li>
<li>If you submitted something invalid, or perhaps missed a required item, the form is redisplayed containing your data along with an error message describing the problem.</li>
<li>This process continues until you have submitted a valid form.</li>
</ol>
<p>On the receiving end, the script must:</p>
<ol>
<li>Check for required data.</li>
<li>Verify that the data is of the correct type, and meets the correct criteria. (For example, if a username is submitted
it must be validated to contain only permitted characters. It must be of a minimum length,
and not exceed a maximum length. The username can't be someone else's existing username, or perhaps even a reserved word. Etc.)</li>
<li>Sanitize the data for security.</li>
<li>Pre-format the data if needed (Does the data need to be trimmed? HTML encoded? Etc.)</li>
<li>Prep the data for insertion in the database.</li>
</ol>
<p>Although there is nothing complex about the above process, it usually requires a significant
amount of code, and to display error messages, various control structures are usually placed within the form HTML.
Form validation, while simple to create, is generally very messy and tedious to implement.</p>
<dfn>CodeIgniter provides a comprehensive validation framework that truly minimizes the amount of code you'll write.
It also removes all control structures from your form HTML, permitting it to be clean and free of code.</dfn>
<h2>Overview</h2>
<p>In order to implement CodeIgniter's form validation you'll need three things:</p>
<ol>
<li>A <a href="../general/views.html">View</a> file containing the form.</li>
<li>A View file containing a "success" message to be displayed upon successful submission.</li>
<li>A <a href="../general/controllers.html">controller</a> function to receive and process the submitted data.</li>
</ol>
<p>Let's create those three things, using a member sign-up form as the example.</p>
<h2>The Form</h2>
<p>Using a text editor, create a form called <dfn>myform.php</dfn>. In it, place this code and save it to your <samp>applications/views/</samp>
folder:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="30"><html>
<head>
<title>My Form</title>
</head>
<body>
<?php echo $this->validation->error_string; ?>
<?php echo form_open('form'); ?>
<h5>Username</h5>
<input type="text" name="username" value="" size="50" />
<h5>Password</h5>
<input type="text" name="password" value="" size="50" />
<h5>Password Confirm</h5>
<input type="text" name="passconf" value="" size="50" />
<h5>Email Address</h5>
<input type="text" name="email" value="" size="50" />
<div><input type="submit" value="Submit" /></div>
</form>
</body>
</html>
</textarea>
<h2>The Success Page</h2>
<p>Using a text editor, create a form called <dfn>formsuccess.php</dfn>. In it, place this code and save it to your <samp>applications/views/</samp>
folder:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="14">
<html>
<head>
<title>My Form</title>
</head>
<body>
<h3>Your form was successfully submitted!</h3>
<p><?php echo anchor('form', 'Try it again!'); ?></p>
</body>
</html>
</textarea>
<h2>The Controller</h2>
<p>Using a text editor, create a controller called <dfn>form.php</dfn>. In it, place this code and save it to your <samp>applications/controllers/</samp>
folder:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="21"><?php
class Form extends Controller {
function index()
{
$this->load->helper(array('form', 'url'));
$this->load->library('validation');
if ($this->validation->run() == FALSE)
{
$this->load->view('myform');
}
else
{
$this->load->view('formsuccess');
}
}
}
?></textarea>
<h2>Try it!</h2>
<p>To try your form, visit your site using a URL similar to this one:</p>
<code>example.com/index.php/<var>form</var>/</code>
<p><strong>If you submit the form you should simply see the form reload. That's because you haven't set up any validation
rules yet, which we'll get to in a moment.</strong></p>
<h2>Explanation</h2>
<p>You'll notice several things about the above pages:</p>
<p>The <dfn>form</dfn> (myform.php) is a standard web form with a couple exceptions:</p>
<ol>
<li>It uses a <dfn>form helper</dfn> to create the form opening.
Technically, this isn't necessary. You could create the form using standard HTML. However, the benefit of using the helper
is that it generates the action URL for you, based on the URL in your config file. This makes your application more portable
and flexible in the event your URLs change.</li>
<li>At the top of the form you'll notice the following variable:
<code><?php echo $this->validation->error_string; ?></code>
<p>This variable will display any error messages sent back by the validator. If there are no messages it returns nothing.</p>
</li>
</ol>
<p>The <dfn>controller</dfn> (form.php) has one function: <dfn>index()</dfn>. This function initializes the validation class and
loads the <var>form helper</var> and <var>URL helper</var> used by your view files. It also <samp>runs</samp>
the validation routine. Based on
whether the validation was successful it either presents the form or the success page.</p>
<p><strong>Since you haven't told the validation class to validate anything yet, it returns "false" (boolean false) by default. The <samp>run()</samp>
function only returns "true" if it has successfully applied your rules without any of them failing.</strong></p>
<h2>Setting Validation Rules</h2>
<p>CodeIgniter lets you set as many validation rules as you need for a given field, cascading them in order, and it even lets you prep and pre-process the field data
at the same time. Let's see it in action, we'll explain it afterwards.</p>
<p>In your <dfn>controller</dfn> (form.php), add this code just below the validation initialization function:</p>
<code>$rules['username'] = "required";<br />
$rules['password'] = "required";<br />
$rules['passconf'] = "required";<br />
$rules['email'] = "required";<br />
<br />
$this->validation->set_rules($rules);</code>
<p>Your controller should now look like this:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="28"><?php
class Form extends Controller {
function index()
{
$this->load->helper(array('form', 'url'));
$this->load->library('validation');
$rules['username'] = "required";
$rules['password'] = "required";
$rules['passconf'] = "required";
$rules['email'] = "required";
$this->validation->set_rules($rules);
if ($this->validation->run() == FALSE)
{
$this->load->view('myform');
}
else
{
$this->load->view('formsuccess');
}
}
}
?></textarea>
<p><dfn>Now submit the form with the fields blank and you should see the error message.
If you submit the form with all the fields populated you'll see your success page.</dfn></p>
<p class="important"><strong>Note:</strong> The form fields are not yet being re-populated with the data when
there is an error. We'll get to that shortly, once we're through explaining the validation rules.</p>
<h2>Changing the Error Delimiters</h2>
<p>By default, the system adds a paragraph tag (<p>) around each error message shown. You can easily change these delimiters with
this code, placed in your controller:</p>
<code>$this->validation->set_error_delimiters('<kbd><div class="error"></kbd>', '<kbd></div></kbd>');</code>
<p>In this example, we've switched to using div tags.</p>
<h2>Cascading Rules</h2>
<p>CodeIgniter lets you pipe multiple rules together. Let's try it. Change your rules array like this:</p>
<code>$rules['username'] = "required|min_length[5]|max_length[12]";<br />
$rules['password'] = "required|matches[passconf]";<br />
$rules['passconf'] = "required";<br />
$rules['email'] = "required|valid_email";</code>
<p>The above code requires that:</p>
<ol>
<li>The username field be no shorter than 5 characters and no longer than 12.</li>
<li>The password field must match the password confirmation field.</li>
<li>The email field must contain a valid email address.</li>
</ol>
<p>Give it a try!</p>
<p class="important"><strong>Note:</strong> There are numerous rules available which you can read about in the validation reference.</p>
<h2>Prepping Data</h2>
<p>In addition to the validation functions like the ones we used above, you can also prep your data in various ways.
For example, you can set up rules like this:</p>
<code>$rules['username'] = "<kbd>trim</kbd>|required|min_length[5]|max_length[12]|<kbd>xss_clean</kbd>";<br />
$rules['password'] = "<kbd>trim</kbd>|required|matches[passconf]|<kbd>md5</kbd>";<br />
$rules['passconf'] = "<kbd>trim</kbd>|required";<br />
$rules['email'] = "<kbd>trim</kbd>|required|valid_email";</code>
<p>In the above example, we are "trimming" the fields, converting the password to MD5, and running the username through
the "xss_clean" function, which removes malicious data.</p>
<p class="important"><strong>Any native PHP function that accepts one parameter can be used as a rule, like <dfn>htmlspecialchars</dfn>,
<dfn>trim</dfn>, <dfn>MD5</dfn>, etc.</strong></p>
<p><strong>Note:</strong> You will generally want to use the prepping functions <strong>after</strong>
the validation rules so if there is an error, the original data will be shown in the form.</p>
<h2>Callbacks: Your own Validation Functions</h2>
<p>The validation system supports callbacks to your own validation functions. This permits you to extend the validation class
to meet your needs. For example, if you need to run a database query to see if the user is choosing a unique username, you can
create a callback function that does that. Let's create a simple example.</p>
<p>In your controller, change the "username" rule to this:</p>
<code>$rules['username'] = "callback_username_check"; </code>
<p>Then add a new function called <dfn>username_check</dfn> to your controller. Here's how your controller should look:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="44"><?php
class Form extends Controller {
function index()
{
$this->load->helper(array('form', 'url'));
$this->load->library('validation');
$rules['username'] = "callback_username_check";
$rules['password'] = "required";
$rules['passconf'] = "required";
$rules['email'] = "required";
$this->validation->set_rules($rules);
if ($this->validation->run() == FALSE)
{
$this->load->view('myform');
}
else
{
$this->load->view('formsuccess');
}
}
function username_check($str)
{
if ($str == 'test')
{
$this->validation->set_message('username_check', 'The %s field can not be the word "test"');
return FALSE;
}
else
{
return TRUE;
}
}
}
?></textarea>
<p>Reload your form and submit it with the word "test" as the username. You can see that the form field data was passed to your
callback function for you to process.</p>
<p><strong>To invoke a callback just put the function name in a rule, with "callback_" as the rule prefix.</strong></p>
<p>The error message was set using the <dfn>$this->validation->set_message</dfn> function.
Just remember that the message key (the first parameter) must match your function name.</p>
<p class="important"><strong>Note:</strong> You can apply your own custom error messages to any rule, just by setting the
message similarly. For example, to change the message for the "required" rule you will do this:</p>
<code>$this->validation->set_message('required', 'Your custom message here');</code>
<h2>Re-populating the form</h2>
<p>Thus far we have only been dealing with errors. It's time to repopulate the form field with the submitted data.
This is done similarly to your rules. Add the following code to your controller, just below your rules:</p>
<code>$fields['username'] = 'Username';<br />
$fields['password'] = 'Password';<br />
$fields['passconf'] = 'Password Confirmation';<br />
$fields['email'] = 'Email Address';<br />
<br />
$this->validation->set_fields($fields);</code>
<p>The array keys are the actual names of the form fields, the value represents the full name that you want shown in the
error message.</p>
<p>The index function of your controller should now look like this:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="30">function index()
{
$this->load->helper(array('form', 'url'));
$this->load->library('validation');
$rules['username'] = "required";
$rules['password'] = "required";
$rules['passconf'] = "required";
$rules['email'] = "required";
$this->validation->set_rules($rules);
$fields['username'] = 'Username';
$fields['password'] = 'Password';
$fields['passconf'] = 'Password Confirmation';
$fields['email'] = 'Email Address';
$this->validation->set_fields($fields);
if ($this->validation->run() == FALSE)
{
$this->load->view('myform');
}
else
{
$this->load->view('formsuccess');
}
}</textarea>
<p>Now open your <dfn>myform.php</dfn> view file and update the value in each field so that it has an attribute corresponding to its name:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="30">
<html>
<head>
<title>My Form</title>
</head>
<body>
<?php echo $this->validation->error_string; ?>
<?php echo form_open('form'); ?>
<h5>Username</h5>
<input type="text" name="username" value="<?php echo $this->validation->username;?>" size="50" />
<h5>Password</h5>
<input type="text" name="password" value="<?php echo $this->validation->password;?>" size="50" />
<h5>Password Confirm</h5>
<input type="text" name="passconf" value="<?php echo $this->validation->passconf;?>" size="50" />
<h5>Email Address</h5>
<input type="text" name="email" value="<?php echo $this->validation->email;?>" size="50" />
<div><input type="submit" value="Submit" /></div>
</form>
</body>
</html>
</textarea>
<p>Now reload your page and submit the form so that it triggers an error. Your form fields should be populated
and the error messages will contain a more relevant field name.</p>
<h2>Showing Errors Individually</h2>
<p>If you prefer to show an error message next to each form field, rather than as a list, you can change your form so that it looks like this:</p>
<textarea class="textarea" style="width:100%" cols="50" rows="20">
<h5>Username</h5>
<?php echo $this->validation->username_error; ?>
<input type="text" name="username" value="<?php echo $this->validation->username;?>" size="50" />
<h5>Password</h5>
<?php echo $this->validation->password_error; ?>
<input type="text" name="password" value="<?php echo $this->validation->password;?>" size="50" />
<h5>Password Confirm</h5>
<?php echo $this->validation->passconf_error; ?>
<input type="text" name="passconf" value="<?php echo $this->validation->passconf;?>" size="50" />
<h5>Email Address</h5>
<?php echo $this->validation->email_error; ?>
<input type="text" name="email" value="<?php echo $this->validation->email;?>" size="50" /></textarea>
<p>If there are no errors, nothing will be shown. If there is an error, the message will appear, wrapped in the delimiters you
have set (<p> tags by default).</p>
<p class="important"><strong>Note: </strong>To display errors this way you must remember to set your fields using the <kbd>$this->validation->set_fields</kbd>
function described earlier. The errors will be turned into variables that have "_error" after your field name.
For example, your "username" error will be available at:<br /><dfn>$this->validation->username_error</dfn>.</p>
<h2>Rule Reference</h2>
<p>The following is a list of all the native rules that are available to use:</p>
<table cellpadding="0" cellspacing="1" border="0" style="width:100%" class="tableborder">
<tr>
<th>Rule</th>
<th>Parameter</th>
<th>Description</th>
<th>Example</th>
</tr><tr>
<td class="td"><strong>required</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the form element is empty.</td>
<td class="td"> </td>
</tr><tr>
<td class="td"><strong>matches</strong></td>
<td class="td">Yes</td>
<td class="td">Returns FALSE if the form element does not match the one in the parameter.</td>
<td class="td">matches[form_item]</td>
</tr><tr>
<td class="td"><strong>min_length</strong></td>
<td class="td">Yes</td>
<td class="td">Returns FALSE if the form element is shorter than the parameter value.</td>
<td class="td">min_length[6]</td>
</tr><tr>
<td class="td"><strong>max_length</strong></td>
<td class="td">Yes</td>
<td class="td">Returns FALSE if the form element is longer than the parameter value.</td>
<td class="td">max_length[12]</td>
</tr><tr>
<td class="td"><strong>exact_length</strong></td>
<td class="td">Yes</td>
<td class="td">Returns FALSE if the form element is not exactly the parameter value.</td>
<td class="td">exact_length[8]</td>
</tr><tr>
<td class="td"><strong>alpha</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the form element contains anything other than alphabetical characters.</td>
<td class="td"> </td>
</tr><tr>
<td class="td"><strong>alpha_numeric</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the form element contains anything other than alpha-numeric characters.</td>
<td class="td"> </td>
</tr><tr>
<td class="td"><strong>alpha_dash</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the form element contains anything other than alpha-numeric characters, underscores or dashes.</td>
<td class="td"> </td>
</tr>
<tr>
<td class="td"><strong>numeric</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the form element contains anything other than numeric characters.</td>
<td class="td"> </td>
</tr>
<tr>
<td class="td"><strong>integer</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the form element contains anything other than an integer.</td>
<td class="td"> </td>
</tr><tr>
<td class="td"><strong>valid_email</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the form element does not contain a valid email address.</td>
<td class="td"> </td>
</tr>
<tr>
<td class="td"><strong>valid_emails</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if any value provided in a comma separated list is not a valid email.</td>
<td class="td"> </td>
</tr>
<tr>
<td class="td"><strong>valid_ip</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the supplied IP is not valid.</td>
<td class="td"> </td>
</tr>
<tr>
<td class="td"><strong>valid_base64</strong></td>
<td class="td">No</td>
<td class="td">Returns FALSE if the supplied string contains anything other than valid Base64 characters.</td>
<td class="td"> </td>
</tr>
</table>
<p><strong>Note:</strong> These rules can also be called as discrete functions. For example:</p>
<code>$this->validation->required($string);</code>
<p class="important"><strong>Note:</strong> You can also use any native PHP functions that permit one parameter.</p>
<h2>Prepping Reference</h2>
<p>The following is a list of all the prepping functions that are available to use:</p>
<table cellpadding="0" cellspacing="1" border="0" style="width:100%" class="tableborder">
<tr>
<th>Name</th>
<th>Parameter</th>
<th>Description</th>
</tr><tr>
<td class="td"><strong>xss_clean</strong></td>
<td class="td">No</td>
<td class="td">Runs the data through the XSS filtering function, described in the <a href="input.html">Input Class</a> page.</td>
</tr><tr>
<td class="td"><strong>prep_for_form</strong></td>
<td class="td">No</td>
<td class="td">Converts special characters so that HTML data can be shown in a form field without breaking it.</td>
</tr><tr>
<td class="td"><strong>prep_url</strong></td>
<td class="td">No</td>
<td class="td">Adds "http://" to URLs if missing.</td>
</tr><tr>
<td class="td"><strong>strip_image_tags</strong></td>
<td class="td">No</td>
<td class="td">Strips the HTML from image tags leaving the raw URL.</td>
</tr><tr>
<td class="td"><strong>encode_php_tags</strong></td>
<td class="td">No</td>
<td class="td">Converts PHP tags to entities.</td>
</tr>
</table>
<p class="important"><strong>Note:</strong> You can also use any native PHP functions that permit one parameter,
like <kbd>trim</kbd>, <kbd>htmlspecialchars</kbd>, <kbd>urldecode</kbd>, etc.</p>
<h2>Setting Custom Error Messages</h2>
<p>All of the native error messages are located in the following language file: <dfn>language/english/validation_lang.php</dfn></p>
<p>To set your own custom message you can either edit that file, or use the following function:</p>
<code>$this->validation->set_message('<var>rule</var>', '<var>Error Message</var>');</code>
<p>Where <var>rule</var> corresponds to the name of a particular rule, and <var>Error Message</var> is the text you would like displayed.</p>
<h2>Dealing with Select Menus, Radio Buttons, and Checkboxes</h2>
<p>If you use select menus, radio buttons or checkboxes, you will want the state of
these items to be retained in the event of an error. The Validation class has three functions that help you do this:</p>
<h2>set_select()</h2>
<p>Permits you to display the menu item that was selected. The first parameter
must contain the name of the select menu, the second parameter must contain the value of
each item. Example:</p>
<code>
<select name="myselect"><br />
<option value="one" <dfn><?php echo $this->validation->set_select('myselect', 'one'); ?></dfn> >One</option><br />
<option value="two" <dfn><?php echo $this->validation->set_select('myselect', 'two'); ?></dfn> >Two</option><br />
<option value="three" <dfn><?php echo $this->validation->set_select('myselect', 'three'); ?></dfn> >Three</option><br />
</select>
</code>
<h2>set_checkbox()</h2>
<p>Permits you to display a checkbox in the state it was submitted. The first parameter
must contain the name of the checkbox, the second parameter must contain its value. Example:</p>
<code><input type="checkbox" name="mycheck" value="1" <dfn><?php echo $this->validation->set_checkbox('mycheck', '1'); ?></dfn> /></code>
<h2>set_radio()</h2>
<p>Permits you to display radio buttons in the state they were submitted. The first parameter
must contain the name of the radio button, the second parameter must contain its value. Example:</p>
<code><input type="radio" name="myradio" value="1" <dfn><?php echo $this->validation->set_radio('myradio', '1'); ?></dfn> /></code>
</div>
<!-- END CONTENT -->
<div id="footer">
<p>
Previous Topic: <a href="user_agent.html">User Agent Class</a>
·
<a href="#top">Top of Page</a> ·
<a href="../index.html">User Guide Home</a> ·
Next Topic: <a href="xmlrpc.html">XML-RPC Class</a>
</p>
<p><a href="http://codeigniter.com">CodeIgniter</a> · Copyright © 2006-2010 · <a href="http://ellislab.com/">Ellislab, Inc.</a></p>
</div>
</body>
</html> | prashants/webzash-v1-defunct | user_guide/libraries/validation.html | HTML | apache-2.0 | 26,145 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.worker
import java.io.File
import java.io.IOException
import java.text.SimpleDateFormat
import java.util.{Date, Locale, UUID}
import java.util.concurrent._
import java.util.concurrent.{Future => JFuture, ScheduledFuture => JScheduledFuture}
import scala.collection.mutable.{HashMap, HashSet, LinkedHashMap}
import scala.concurrent.ExecutionContext
import scala.util.Random
import scala.util.control.NonFatal
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.{Command, ExecutorDescription, ExecutorState}
import org.apache.spark.deploy.DeployMessages._
import org.apache.spark.deploy.ExternalShuffleService
import org.apache.spark.deploy.master.{DriverState, Master}
import org.apache.spark.deploy.worker.ui.WorkerWebUI
import org.apache.spark.internal.Logging
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.rpc._
import org.apache.spark.util.{SparkUncaughtExceptionHandler, ThreadUtils, Utils}
private[deploy] class Worker(
override val rpcEnv: RpcEnv,
webUiPort: Int,
cores: Int,
memory: Int,
masterRpcAddresses: Array[RpcAddress],
endpointName: String,
workDirPath: String = null,
val conf: SparkConf,
val securityMgr: SecurityManager)
extends ThreadSafeRpcEndpoint with Logging {
private val host = rpcEnv.address.host
private val port = rpcEnv.address.port
Utils.checkHost(host)
assert (port > 0)
// A scheduled executor used to send messages at the specified time.
private val forwordMessageScheduler =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("worker-forward-message-scheduler")
// A separated thread to clean up the workDir and the directories of finished applications.
// Used to provide the implicit parameter of `Future` methods.
private val cleanupThreadExecutor = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonSingleThreadExecutor("worker-cleanup-thread"))
// For worker and executor IDs
private def createDateFormat = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US)
// Send a heartbeat every (heartbeat timeout) / 4 milliseconds
private val HEARTBEAT_MILLIS = conf.getLong("spark.worker.timeout", 60) * 1000 / 4
// Model retries to connect to the master, after Hadoop's model.
// The first six attempts to reconnect are in shorter intervals (between 5 and 15 seconds)
// Afterwards, the next 10 attempts are between 30 and 90 seconds.
// A bit of randomness is introduced so that not all of the workers attempt to reconnect at
// the same time.
private val INITIAL_REGISTRATION_RETRIES = 6
private val TOTAL_REGISTRATION_RETRIES = INITIAL_REGISTRATION_RETRIES + 10
private val FUZZ_MULTIPLIER_INTERVAL_LOWER_BOUND = 0.500
private val REGISTRATION_RETRY_FUZZ_MULTIPLIER = {
val randomNumberGenerator = new Random(UUID.randomUUID.getMostSignificantBits)
randomNumberGenerator.nextDouble + FUZZ_MULTIPLIER_INTERVAL_LOWER_BOUND
}
private val INITIAL_REGISTRATION_RETRY_INTERVAL_SECONDS = (math.round(10 *
REGISTRATION_RETRY_FUZZ_MULTIPLIER))
private val PROLONGED_REGISTRATION_RETRY_INTERVAL_SECONDS = (math.round(60
* REGISTRATION_RETRY_FUZZ_MULTIPLIER))
private val CLEANUP_ENABLED = conf.getBoolean("spark.worker.cleanup.enabled", false)
// How often worker will clean up old app folders
private val CLEANUP_INTERVAL_MILLIS =
conf.getLong("spark.worker.cleanup.interval", 60 * 30) * 1000
// TTL for app folders/data; after TTL expires it will be cleaned up
private val APP_DATA_RETENTION_SECONDS =
conf.getLong("spark.worker.cleanup.appDataTtl", 7 * 24 * 3600)
private val testing: Boolean = sys.props.contains("spark.testing")
private var master: Option[RpcEndpointRef] = None
/**
* Whether to use the master address in `masterRpcAddresses` if possible. If it's disabled, Worker
* will just use the address received from Master.
*/
private val preferConfiguredMasterAddress =
conf.getBoolean("spark.worker.preferConfiguredMasterAddress", false)
/**
* The master address to connect in case of failure. When the connection is broken, worker will
* use this address to connect. This is usually just one of `masterRpcAddresses`. However, when
* a master is restarted or takes over leadership, it will be an address sent from master, which
* may not be in `masterRpcAddresses`.
*/
private var masterAddressToConnect: Option[RpcAddress] = None
private var activeMasterUrl: String = ""
private[worker] var activeMasterWebUiUrl : String = ""
private var workerWebUiUrl: String = ""
private val workerUri = RpcEndpointAddress(rpcEnv.address, endpointName).toString
private var registered = false
private var connected = false
private val workerId = generateWorkerId()
private val sparkHome =
if (testing) {
assert(sys.props.contains("spark.test.home"), "spark.test.home is not set!")
new File(sys.props("spark.test.home"))
} else {
new File(sys.env.get("SPARK_HOME").getOrElse("."))
}
var workDir: File = null
val finishedExecutors = new LinkedHashMap[String, ExecutorRunner]
val drivers = new HashMap[String, DriverRunner]
val executors = new HashMap[String, ExecutorRunner]
val finishedDrivers = new LinkedHashMap[String, DriverRunner]
val appDirectories = new HashMap[String, Seq[String]]
val finishedApps = new HashSet[String]
val retainedExecutors = conf.getInt("spark.worker.ui.retainedExecutors",
WorkerWebUI.DEFAULT_RETAINED_EXECUTORS)
val retainedDrivers = conf.getInt("spark.worker.ui.retainedDrivers",
WorkerWebUI.DEFAULT_RETAINED_DRIVERS)
// The shuffle service is not actually started unless configured.
private val shuffleService = new ExternalShuffleService(conf, securityMgr)
private val publicAddress = {
val envVar = conf.getenv("SPARK_PUBLIC_DNS")
if (envVar != null) envVar else host
}
private var webUi: WorkerWebUI = null
private var connectionAttemptCount = 0
private val metricsSystem = MetricsSystem.createMetricsSystem("worker", conf, securityMgr)
private val workerSource = new WorkerSource(this)
private var registerMasterFutures: Array[JFuture[_]] = null
private var registrationRetryTimer: Option[JScheduledFuture[_]] = None
// A thread pool for registering with masters. Because registering with a master is a blocking
// action, this thread pool must be able to create "masterRpcAddresses.size" threads at the same
// time so that we can register with all masters.
private val registerMasterThreadPool = ThreadUtils.newDaemonCachedThreadPool(
"worker-register-master-threadpool",
masterRpcAddresses.length // Make sure we can register with all masters at the same time
)
var coresUsed = 0
var memoryUsed = 0
def coresFree: Int = cores - coresUsed
def memoryFree: Int = memory - memoryUsed
private def createWorkDir() {
workDir = Option(workDirPath).map(new File(_)).getOrElse(new File(sparkHome, "work"))
try {
// This sporadically fails - not sure why ... !workDir.exists() && !workDir.mkdirs()
// So attempting to create and then check if directory was created or not.
workDir.mkdirs()
if ( !workDir.exists() || !workDir.isDirectory) {
logError("Failed to create work directory " + workDir)
System.exit(1)
}
assert (workDir.isDirectory)
} catch {
case e: Exception =>
logError("Failed to create work directory " + workDir, e)
System.exit(1)
}
}
override def onStart() {
assert(!registered)
logInfo("Starting Spark worker %s:%d with %d cores, %s RAM".format(
host, port, cores, Utils.megabytesToString(memory)))
logInfo(s"Running Spark version ${org.apache.spark.SPARK_VERSION}")
logInfo("Spark home: " + sparkHome)
createWorkDir()
shuffleService.startIfEnabled()
webUi = new WorkerWebUI(this, workDir, webUiPort)
webUi.bind()
workerWebUiUrl = s"http://$publicAddress:${webUi.boundPort}"
registerWithMaster()
metricsSystem.registerSource(workerSource)
metricsSystem.start()
// Attach the worker metrics servlet handler to the web ui after the metrics system is started.
metricsSystem.getServletHandlers.foreach(webUi.attachHandler)
}
/**
* Change to use the new master.
*
* @param masterRef the new master ref
* @param uiUrl the new master Web UI address
* @param masterAddress the new master address which the worker should use to connect in case of
* failure
*/
private def changeMaster(masterRef: RpcEndpointRef, uiUrl: String, masterAddress: RpcAddress) {
// activeMasterUrl it's a valid Spark url since we receive it from master.
activeMasterUrl = masterRef.address.toSparkURL
activeMasterWebUiUrl = uiUrl
masterAddressToConnect = Some(masterAddress)
master = Some(masterRef)
connected = true
if (conf.getBoolean("spark.ui.reverseProxy", false)) {
logInfo(s"WorkerWebUI is available at $activeMasterWebUiUrl/proxy/$workerId")
}
// Cancel any outstanding re-registration attempts because we found a new master
cancelLastRegistrationRetry()
}
private def tryRegisterAllMasters(): Array[JFuture[_]] = {
masterRpcAddresses.map { masterAddress =>
registerMasterThreadPool.submit(new Runnable {
override def run(): Unit = {
try {
logInfo("Connecting to master " + masterAddress + "...")
val masterEndpoint = rpcEnv.setupEndpointRef(masterAddress, Master.ENDPOINT_NAME)
sendRegisterMessageToMaster(masterEndpoint)
} catch {
case ie: InterruptedException => // Cancelled
case NonFatal(e) => logWarning(s"Failed to connect to master $masterAddress", e)
}
}
})
}
}
/**
* Re-register with the master because a network failure or a master failure has occurred.
* If the re-registration attempt threshold is exceeded, the worker exits with error.
* Note that for thread-safety this should only be called from the rpcEndpoint.
*/
private def reregisterWithMaster(): Unit = {
Utils.tryOrExit {
connectionAttemptCount += 1
if (registered) {
cancelLastRegistrationRetry()
} else if (connectionAttemptCount <= TOTAL_REGISTRATION_RETRIES) {
logInfo(s"Retrying connection to master (attempt # $connectionAttemptCount)")
/**
* Re-register with the active master this worker has been communicating with. If there
* is none, then it means this worker is still bootstrapping and hasn't established a
* connection with a master yet, in which case we should re-register with all masters.
*
* It is important to re-register only with the active master during failures. Otherwise,
* if the worker unconditionally attempts to re-register with all masters, the following
* race condition may arise and cause a "duplicate worker" error detailed in SPARK-4592:
*
* (1) Master A fails and Worker attempts to reconnect to all masters
* (2) Master B takes over and notifies Worker
* (3) Worker responds by registering with Master B
* (4) Meanwhile, Worker's previous reconnection attempt reaches Master B,
* causing the same Worker to register with Master B twice
*
* Instead, if we only register with the known active master, we can assume that the
* old master must have died because another master has taken over. Note that this is
* still not safe if the old master recovers within this interval, but this is a much
* less likely scenario.
*/
master match {
case Some(masterRef) =>
// registered == false && master != None means we lost the connection to master, so
// masterRef cannot be used and we need to recreate it again. Note: we must not set
// master to None due to the above comments.
if (registerMasterFutures != null) {
registerMasterFutures.foreach(_.cancel(true))
}
val masterAddress =
if (preferConfiguredMasterAddress) masterAddressToConnect.get else masterRef.address
registerMasterFutures = Array(registerMasterThreadPool.submit(new Runnable {
override def run(): Unit = {
try {
logInfo("Connecting to master " + masterAddress + "...")
val masterEndpoint = rpcEnv.setupEndpointRef(masterAddress, Master.ENDPOINT_NAME)
sendRegisterMessageToMaster(masterEndpoint)
} catch {
case ie: InterruptedException => // Cancelled
case NonFatal(e) => logWarning(s"Failed to connect to master $masterAddress", e)
}
}
}))
case None =>
if (registerMasterFutures != null) {
registerMasterFutures.foreach(_.cancel(true))
}
// We are retrying the initial registration
registerMasterFutures = tryRegisterAllMasters()
}
// We have exceeded the initial registration retry threshold
// All retries from now on should use a higher interval
if (connectionAttemptCount == INITIAL_REGISTRATION_RETRIES) {
registrationRetryTimer.foreach(_.cancel(true))
registrationRetryTimer = Some(
forwordMessageScheduler.scheduleAtFixedRate(new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
self.send(ReregisterWithMaster)
}
}, PROLONGED_REGISTRATION_RETRY_INTERVAL_SECONDS,
PROLONGED_REGISTRATION_RETRY_INTERVAL_SECONDS,
TimeUnit.SECONDS))
}
} else {
logError("All masters are unresponsive! Giving up.")
System.exit(1)
}
}
}
/**
* Cancel last registeration retry, or do nothing if no retry
*/
private def cancelLastRegistrationRetry(): Unit = {
if (registerMasterFutures != null) {
registerMasterFutures.foreach(_.cancel(true))
registerMasterFutures = null
}
registrationRetryTimer.foreach(_.cancel(true))
registrationRetryTimer = None
}
private def registerWithMaster() {
// onDisconnected may be triggered multiple times, so don't attempt registration
// if there are outstanding registration attempts scheduled.
registrationRetryTimer match {
case None =>
registered = false
registerMasterFutures = tryRegisterAllMasters()
connectionAttemptCount = 0
registrationRetryTimer = Some(forwordMessageScheduler.scheduleAtFixedRate(
new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
Option(self).foreach(_.send(ReregisterWithMaster))
}
},
INITIAL_REGISTRATION_RETRY_INTERVAL_SECONDS,
INITIAL_REGISTRATION_RETRY_INTERVAL_SECONDS,
TimeUnit.SECONDS))
case Some(_) =>
logInfo("Not spawning another attempt to register with the master, since there is an" +
" attempt scheduled already.")
}
}
private def sendRegisterMessageToMaster(masterEndpoint: RpcEndpointRef): Unit = {
masterEndpoint.send(RegisterWorker(
workerId,
host,
port,
self,
cores,
memory,
workerWebUiUrl,
masterEndpoint.address))
}
private def handleRegisterResponse(msg: RegisterWorkerResponse): Unit = synchronized {
msg match {
case RegisteredWorker(masterRef, masterWebUiUrl, masterAddress) =>
if (preferConfiguredMasterAddress) {
logInfo("Successfully registered with master " + masterAddress.toSparkURL)
} else {
logInfo("Successfully registered with master " + masterRef.address.toSparkURL)
}
registered = true
changeMaster(masterRef, masterWebUiUrl, masterAddress)
forwordMessageScheduler.scheduleAtFixedRate(new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
self.send(SendHeartbeat)
}
}, 0, HEARTBEAT_MILLIS, TimeUnit.MILLISECONDS)
if (CLEANUP_ENABLED) {
logInfo(
s"Worker cleanup enabled; old application directories will be deleted in: $workDir")
forwordMessageScheduler.scheduleAtFixedRate(new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
self.send(WorkDirCleanup)
}
}, CLEANUP_INTERVAL_MILLIS, CLEANUP_INTERVAL_MILLIS, TimeUnit.MILLISECONDS)
}
val execs = executors.values.map { e =>
new ExecutorDescription(e.appId, e.execId, e.cores, e.state)
}
masterRef.send(WorkerLatestState(workerId, execs.toList, drivers.keys.toSeq))
case RegisterWorkerFailed(message) =>
if (!registered) {
logError("Worker registration failed: " + message)
System.exit(1)
}
case MasterInStandby =>
// Ignore. Master not yet ready.
}
}
override def receive: PartialFunction[Any, Unit] = synchronized {
case msg: RegisterWorkerResponse =>
handleRegisterResponse(msg)
case SendHeartbeat =>
if (connected) { sendToMaster(Heartbeat(workerId, self)) }
case WorkDirCleanup =>
// Spin up a separate thread (in a future) to do the dir cleanup; don't tie up worker
// rpcEndpoint.
// Copy ids so that it can be used in the cleanup thread.
val appIds = executors.values.map(_.appId).toSet
val cleanupFuture = concurrent.Future {
val appDirs = workDir.listFiles()
if (appDirs == null) {
throw new IOException("ERROR: Failed to list files in " + appDirs)
}
appDirs.filter { dir =>
// the directory is used by an application - check that the application is not running
// when cleaning up
val appIdFromDir = dir.getName
val isAppStillRunning = appIds.contains(appIdFromDir)
dir.isDirectory && !isAppStillRunning &&
!Utils.doesDirectoryContainAnyNewFiles(dir, APP_DATA_RETENTION_SECONDS)
}.foreach { dir =>
logInfo(s"Removing directory: ${dir.getPath}")
Utils.deleteRecursively(dir)
}
}(cleanupThreadExecutor)
cleanupFuture.onFailure {
case e: Throwable =>
logError("App dir cleanup failed: " + e.getMessage, e)
}(cleanupThreadExecutor)
case MasterChanged(masterRef, masterWebUiUrl) =>
logInfo("Master has changed, new master is at " + masterRef.address.toSparkURL)
changeMaster(masterRef, masterWebUiUrl, masterRef.address)
val execs = executors.values.
map(e => new ExecutorDescription(e.appId, e.execId, e.cores, e.state))
masterRef.send(WorkerSchedulerStateResponse(workerId, execs.toList, drivers.keys.toSeq))
case ReconnectWorker(masterUrl) =>
logInfo(s"Master with url $masterUrl requested this worker to reconnect.")
registerWithMaster()
case LaunchExecutor(masterUrl, appId, execId, appDesc, cores_, memory_) =>
if (masterUrl != activeMasterUrl) {
logWarning("Invalid Master (" + masterUrl + ") attempted to launch executor.")
} else {
try {
logInfo("Asked to launch executor %s/%d for %s".format(appId, execId, appDesc.name))
// Create the executor's working directory
val executorDir = new File(workDir, appId + "/" + execId)
if (!executorDir.mkdirs()) {
throw new IOException("Failed to create directory " + executorDir)
}
// Create local dirs for the executor. These are passed to the executor via the
// SPARK_EXECUTOR_DIRS environment variable, and deleted by the Worker when the
// application finishes.
val appLocalDirs = appDirectories.getOrElse(appId, {
val localRootDirs = Utils.getOrCreateLocalRootDirs(conf)
val dirs = localRootDirs.flatMap { dir =>
try {
val appDir = Utils.createDirectory(dir, namePrefix = "executor")
Utils.chmod700(appDir)
Some(appDir.getAbsolutePath())
} catch {
case e: IOException =>
logWarning(s"${e.getMessage}. Ignoring this directory.")
None
}
}.toSeq
if (dirs.isEmpty) {
throw new IOException("No subfolder can be created in " +
s"${localRootDirs.mkString(",")}.")
}
dirs
})
appDirectories(appId) = appLocalDirs
val manager = new ExecutorRunner(
appId,
execId,
appDesc.copy(command = Worker.maybeUpdateSSLSettings(appDesc.command, conf)),
cores_,
memory_,
self,
workerId,
host,
webUi.boundPort,
publicAddress,
sparkHome,
executorDir,
workerUri,
conf,
appLocalDirs, ExecutorState.RUNNING)
executors(appId + "/" + execId) = manager
manager.start()
coresUsed += cores_
memoryUsed += memory_
sendToMaster(ExecutorStateChanged(appId, execId, manager.state, None, None))
} catch {
case e: Exception =>
logError(s"Failed to launch executor $appId/$execId for ${appDesc.name}.", e)
if (executors.contains(appId + "/" + execId)) {
executors(appId + "/" + execId).kill()
executors -= appId + "/" + execId
}
sendToMaster(ExecutorStateChanged(appId, execId, ExecutorState.FAILED,
Some(e.toString), None))
}
}
case executorStateChanged @ ExecutorStateChanged(appId, execId, state, message, exitStatus) =>
handleExecutorStateChanged(executorStateChanged)
case KillExecutor(masterUrl, appId, execId) =>
if (masterUrl != activeMasterUrl) {
logWarning("Invalid Master (" + masterUrl + ") attempted to kill executor " + execId)
} else {
val fullId = appId + "/" + execId
executors.get(fullId) match {
case Some(executor) =>
logInfo("Asked to kill executor " + fullId)
executor.kill()
case None =>
logInfo("Asked to kill unknown executor " + fullId)
}
}
case LaunchDriver(driverId, driverDesc) =>
logInfo(s"Asked to launch driver $driverId")
val driver = new DriverRunner(
conf,
driverId,
workDir,
sparkHome,
driverDesc.copy(command = Worker.maybeUpdateSSLSettings(driverDesc.command, conf)),
self,
workerUri,
securityMgr)
drivers(driverId) = driver
driver.start()
coresUsed += driverDesc.cores
memoryUsed += driverDesc.mem
case KillDriver(driverId) =>
logInfo(s"Asked to kill driver $driverId")
drivers.get(driverId) match {
case Some(runner) =>
runner.kill()
case None =>
logError(s"Asked to kill unknown driver $driverId")
}
case driverStateChanged @ DriverStateChanged(driverId, state, exception) =>
handleDriverStateChanged(driverStateChanged)
case ReregisterWithMaster =>
reregisterWithMaster()
case ApplicationFinished(id) =>
finishedApps += id
maybeCleanupApplication(id)
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case RequestWorkerState =>
context.reply(WorkerStateResponse(host, port, workerId, executors.values.toList,
finishedExecutors.values.toList, drivers.values.toList,
finishedDrivers.values.toList, activeMasterUrl, cores, memory,
coresUsed, memoryUsed, activeMasterWebUiUrl))
}
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
if (master.exists(_.address == remoteAddress) ||
masterAddressToConnect.exists(_ == remoteAddress)) {
logInfo(s"$remoteAddress Disassociated !")
masterDisconnected()
}
}
private def masterDisconnected() {
logError("Connection to master failed! Waiting for master to reconnect...")
connected = false
registerWithMaster()
}
private def maybeCleanupApplication(id: String): Unit = {
val shouldCleanup = finishedApps.contains(id) && !executors.values.exists(_.appId == id)
if (shouldCleanup) {
finishedApps -= id
appDirectories.remove(id).foreach { dirList =>
concurrent.Future {
logInfo(s"Cleaning up local directories for application $id")
dirList.foreach { dir =>
Utils.deleteRecursively(new File(dir))
}
}(cleanupThreadExecutor).onFailure {
case e: Throwable =>
logError(s"Clean up app dir $dirList failed: ${e.getMessage}", e)
}(cleanupThreadExecutor)
}
shuffleService.applicationRemoved(id)
}
}
/**
* Send a message to the current master. If we have not yet registered successfully with any
* master, the message will be dropped.
*/
private def sendToMaster(message: Any): Unit = {
master match {
case Some(masterRef) => masterRef.send(message)
case None =>
logWarning(
s"Dropping $message because the connection to master has not yet been established")
}
}
private def generateWorkerId(): String = {
"worker-%s-%s-%d".format(createDateFormat.format(new Date), host, port)
}
override def onStop() {
cleanupThreadExecutor.shutdownNow()
metricsSystem.report()
cancelLastRegistrationRetry()
forwordMessageScheduler.shutdownNow()
registerMasterThreadPool.shutdownNow()
executors.values.foreach(_.kill())
drivers.values.foreach(_.kill())
shuffleService.stop()
webUi.stop()
metricsSystem.stop()
}
private def trimFinishedExecutorsIfNecessary(): Unit = {
// do not need to protect with locks since both WorkerPage and Restful server get data through
// thread-safe RpcEndPoint
if (finishedExecutors.size > retainedExecutors) {
finishedExecutors.take(math.max(finishedExecutors.size / 10, 1)).foreach {
case (executorId, _) => finishedExecutors.remove(executorId)
}
}
}
private def trimFinishedDriversIfNecessary(): Unit = {
// do not need to protect with locks since both WorkerPage and Restful server get data through
// thread-safe RpcEndPoint
if (finishedDrivers.size > retainedDrivers) {
finishedDrivers.take(math.max(finishedDrivers.size / 10, 1)).foreach {
case (driverId, _) => finishedDrivers.remove(driverId)
}
}
}
private[worker] def handleDriverStateChanged(driverStateChanged: DriverStateChanged): Unit = {
val driverId = driverStateChanged.driverId
val exception = driverStateChanged.exception
val state = driverStateChanged.state
state match {
case DriverState.ERROR =>
logWarning(s"Driver $driverId failed with unrecoverable exception: ${exception.get}")
case DriverState.FAILED =>
logWarning(s"Driver $driverId exited with failure")
case DriverState.FINISHED =>
logInfo(s"Driver $driverId exited successfully")
case DriverState.KILLED =>
logInfo(s"Driver $driverId was killed by user")
case _ =>
logDebug(s"Driver $driverId changed state to $state")
}
sendToMaster(driverStateChanged)
val driver = drivers.remove(driverId).get
finishedDrivers(driverId) = driver
trimFinishedDriversIfNecessary()
memoryUsed -= driver.driverDesc.mem
coresUsed -= driver.driverDesc.cores
}
private[worker] def handleExecutorStateChanged(executorStateChanged: ExecutorStateChanged):
Unit = {
sendToMaster(executorStateChanged)
val state = executorStateChanged.state
if (ExecutorState.isFinished(state)) {
val appId = executorStateChanged.appId
val fullId = appId + "/" + executorStateChanged.execId
val message = executorStateChanged.message
val exitStatus = executorStateChanged.exitStatus
executors.get(fullId) match {
case Some(executor) =>
logInfo("Executor " + fullId + " finished with state " + state +
message.map(" message " + _).getOrElse("") +
exitStatus.map(" exitStatus " + _).getOrElse(""))
executors -= fullId
finishedExecutors(fullId) = executor
trimFinishedExecutorsIfNecessary()
coresUsed -= executor.cores
memoryUsed -= executor.memory
case None =>
logInfo("Unknown Executor " + fullId + " finished with state " + state +
message.map(" message " + _).getOrElse("") +
exitStatus.map(" exitStatus " + _).getOrElse(""))
}
maybeCleanupApplication(appId)
}
}
}
private[deploy] object Worker extends Logging {
val SYSTEM_NAME = "sparkWorker"
val ENDPOINT_NAME = "Worker"
def main(argStrings: Array[String]) {
Thread.setDefaultUncaughtExceptionHandler(new SparkUncaughtExceptionHandler(
exitOnUncaughtException = false))
Utils.initDaemon(log)
val conf = new SparkConf
val args = new WorkerArguments(argStrings, conf)
val rpcEnv = startRpcEnvAndEndpoint(args.host, args.port, args.webUiPort, args.cores,
args.memory, args.masters, args.workDir, conf = conf)
// With external shuffle service enabled, if we request to launch multiple workers on one host,
// we can only successfully launch the first worker and the rest fails, because with the port
// bound, we may launch no more than one external shuffle service on each host.
// When this happens, we should give explicit reason of failure instead of fail silently. For
// more detail see SPARK-20989.
val externalShuffleServiceEnabled = conf.getBoolean("spark.shuffle.service.enabled", false)
val sparkWorkerInstances = scala.sys.env.getOrElse("SPARK_WORKER_INSTANCES", "1").toInt
require(externalShuffleServiceEnabled == false || sparkWorkerInstances <= 1,
"Starting multiple workers on one host is failed because we may launch no more than one " +
"external shuffle service on each host, please set spark.shuffle.service.enabled to " +
"false or set SPARK_WORKER_INSTANCES to 1 to resolve the conflict.")
rpcEnv.awaitTermination()
}
def startRpcEnvAndEndpoint(
host: String,
port: Int,
webUiPort: Int,
cores: Int,
memory: Int,
masterUrls: Array[String],
workDir: String,
workerNumber: Option[Int] = None,
conf: SparkConf = new SparkConf): RpcEnv = {
// The LocalSparkCluster runs multiple local sparkWorkerX RPC Environments
val systemName = SYSTEM_NAME + workerNumber.map(_.toString).getOrElse("")
val securityMgr = new SecurityManager(conf)
val rpcEnv = RpcEnv.create(systemName, host, port, conf, securityMgr)
val masterAddresses = masterUrls.map(RpcAddress.fromSparkURL(_))
rpcEnv.setupEndpoint(ENDPOINT_NAME, new Worker(rpcEnv, webUiPort, cores, memory,
masterAddresses, ENDPOINT_NAME, workDir, conf, securityMgr))
rpcEnv
}
def isUseLocalNodeSSLConfig(cmd: Command): Boolean = {
val pattern = """\-Dspark\.ssl\.useNodeLocalConf\=(.+)""".r
val result = cmd.javaOpts.collectFirst {
case pattern(_result) => _result.toBoolean
}
result.getOrElse(false)
}
def maybeUpdateSSLSettings(cmd: Command, conf: SparkConf): Command = {
val prefix = "spark.ssl."
val useNLC = "spark.ssl.useNodeLocalConf"
if (isUseLocalNodeSSLConfig(cmd)) {
val newJavaOpts = cmd.javaOpts
.filter(opt => !opt.startsWith(s"-D$prefix")) ++
conf.getAll.collect { case (key, value) if key.startsWith(prefix) => s"-D$key=$value" } :+
s"-D$useNLC=true"
cmd.copy(javaOpts = newJavaOpts)
} else {
cmd
}
}
}
| ajaysaini725/spark | core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala | Scala | apache-2.0 | 32,894 |
body {
font-family: Source Sans Pro, Ubuntu, sans-serif;
background-color: #fffdfa;
color: #073642;
font-size: 17px;
}
a.button {
font-weight: bold;
}
.homeContainer {
background-color: #073642;
}
.homeContainer, .projectTitle {
color: #eee8d5;
padding-top: 25px;
padding-bottom: 25px;
}
.homeContainer a.button {
background-color: #cb4b16;
border-color: #cb4b16;
color: #eee8d5;
}
.darkBackground {
background-color: #073642;
}
.darkBackground a {
color: #268bd2;
}
.lightBackground {
color: #073642;
background-color: #fffdfa;
}
.projectTitle {
font-weight: 800;
max-width: 800px;
margin: auto;
}
.projectTitle small {
line-height: 1.5em;
}
.imageAlignLeft .blockImage {
max-width: 300px;
}
.imageAlignRight .blockImage {
max-width: 300px;
}
.useCases .gridBlock {
max-width: 920px;
margin: auto;
padding-top: 40px;
padding-bottom: 40px;
}
.useCases .imageAlignLeft .blockImage {
/* max-width: 200px; */
/* margin: auto; */
}
.useCases .imageAlignRight .blockImage {
/* max-width: 200px; */
/* margin: auto; */
}
.useCases h2 {
/* font-size: 20px; */
}
.logos {
max-width: 640px;
margin: auto;
}
.fixedHeaderContainer {
background-color: #f0f0f0;
}
.fixedHeaderContainer a h3 {
color: #707482;
text-decoration: none;
border-bottom: 1px solid #2d7de4;
}
.navigationSlider .slidingNav ul li a {
color: #707482;
font-weight: 600;
letter-spacing: 0.35px;
font-size: 16px;
border-bottom: 3px solid transparent;
margin-left: 6px;
margin-right: 10px;
padding-left: 0;
padding-right: 0;
}
.navigationSlider .slidingNav ul li.siteNavGroupActive a,
.navigationSlider .slidingNav ul li.siteNavItemActive a,
.navigationSlider .slidingNav ul li a:focus,
.navigationSlider .slidingNav ul li a:hover {
color: #707482 !important;
border-bottom: 3px solid #2d7de4;
background: none;
}
.nav-footer {
background-color: #002b36;
}
.nav-footer .sitemap .nav-home {
height: 34px;
opacity: 0.8;
padding: 10px;
transition: opacity 0.15s ease-in-out;
width: 200px;
}
.blog .wrapper {
max-width: 1400px;
}
.blog .post {
max-width: 780px;
}
.versionsContainer {
max-width: 780px;
}
.card {
display: inline-block;
vertical-align: top;
border: 2px solid #e6e6e6;
margin-top: 1em;
margin-bottom: 1em;
margin-right: 1em;
}
.card p {
padding: 1em;
}
.card header {
background: #e6e6e6;
display: block;
text-align: center;
font-weight: 600;
line-height: 2em;
}
.card .buttonWrapper {
text-align: center;
margin: 10px;
}
@media only screen and (min-device-width: 360px) and (max-device-width: 736px) {
.card {
max-width: 350px;
}
}
@media only screen and (min-width: 1024px) {
.card {
max-width: 510px;
}
}
@media only screen and (max-width: 1023px) {
.card {
max-width: 510px;
}
}
@media only screen and (min-width: 1400px) {
.card {
max-width: 510px;
}
}
@media only screen and (min-width: 1500px) {
.card {
max-width: 650px;
}
}
| gurbuzali/hazelcast-jet | site/website/static/css/custom.css | CSS | apache-2.0 | 3,018 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.harmony.lang.management.tests.java.lang.management;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import junit.framework.TestCase;
/**
 * Tests for the platform {@link java.lang.management.MemoryMXBean}:
 * heap/non-heap usage invariants, the pending-finalization counter and
 * the verbose GC flag toggle.
 */
public class MemoryMXBeanTest extends TestCase {

    /** Platform memory bean under test; obtained fresh for every test. */
    private MemoryMXBean mb;

    protected void setUp() throws Exception {
        super.setUp();
        mb = ManagementFactory.getMemoryMXBean();
        assertNotNull(mb);
    }

    protected void tearDown() throws Exception {
        super.tearDown();
    }

    /*
     * Test method for 'java.lang.management.MemoryMXBean.getHeapMemoryUsage()'
     */
    public void testGetHeapMemoryUsage() {
        MemoryUsage mu = mb.getHeapMemoryUsage();
        assertNotNull(mu);
        assertTrue(mu.getCommitted() >= mu.getUsed());
        // MemoryUsage.getMax() returns -1 when the maximum is undefined;
        // comparing against -1 would spuriously fail, so guard the checks
        // the same way the non-heap test below does.
        if (mu.getMax() != -1) {
            assertTrue(mu.getCommitted() <= mu.getMax());
            assertTrue(mu.getUsed() <= mu.getMax());
        }
    }

    /*
     * Test method for 'java.lang.management.MemoryMXBean.getNonHeapMemoryUsage()'
     */
    public void testGetNonHeapMemoryUsage() {
        MemoryUsage mu = mb.getNonHeapMemoryUsage();
        assertNotNull(mu);
        assertTrue(mu.getCommitted() >= mu.getUsed());
        if (mu.getMax() != -1) {
            // If max is defined then used and committed will always
            // be less than or equal to it
            assertTrue(mu.getCommitted() <= mu.getMax());
            assertTrue(mu.getUsed() <= mu.getMax());
        }
    }

    /*
     * Test method for 'java.lang.management.MemoryMXBean.getObjectPendingFinalizationCount()'
     */
    public void testGetObjectPendingFinalizationCount() {
        // The count is never negative.
        assertTrue(mb.getObjectPendingFinalizationCount() >= 0);
    }

    /*
     * Test method for 'java.lang.management.MemoryMXBean.setVerbose(boolean)'
     */
    public void testSetVerbose() {
        boolean initialVal = mb.isVerbose();
        try {
            mb.setVerbose(!initialVal);
            assertTrue(mb.isVerbose() != initialVal);
        } finally {
            // Always restore the original flag so a failed assertion does
            // not leave verbose GC logging toggled for later tests.
            mb.setVerbose(initialVal);
        }
        assertTrue(mb.isVerbose() == initialVal);
    }
}
| freeVM/freeVM | enhanced/java/classlib/modules/lang-management/src/test/api/java/org/apache/harmony/lang/management/tests/java/lang/management/MemoryMXBeanTest.java | Java | apache-2.0 | 2,895 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_65) on Sun Mar 29 01:32:41 JST 2015 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class twitter4j.StatusAdapter (twitter4j-stream 4.0.3 API)</title>
<meta name="date" content="2015-03-29">
<link rel="stylesheet" type="text/css" href="../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class twitter4j.StatusAdapter (twitter4j-stream 4.0.3 API)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../twitter4j/package-summary.html">Package</a></li>
<li><a href="../../twitter4j/StatusAdapter.html" title="class in twitter4j">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../index-all.html">Index</a></li>
<li><a href="../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../index.html?twitter4j/class-use/StatusAdapter.html" target="_top">Frames</a></li>
<li><a href="StatusAdapter.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class twitter4j.StatusAdapter" class="title">Uses of Class<br>twitter4j.StatusAdapter</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="twitter4j">
<!-- -->
</a>
<h3>Uses of <a href="../../twitter4j/StatusAdapter.html" title="class in twitter4j">StatusAdapter</a> in <a href="../../twitter4j/package-summary.html">twitter4j</a></h3>
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing subclasses, and an explanation">
<caption><span>Subclasses of <a href="../../twitter4j/StatusAdapter.html" title="class in twitter4j">StatusAdapter</a> in <a href="../../twitter4j/package-summary.html">twitter4j</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Class and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>class </code></td>
<td class="colLast"><code><strong><a href="../../twitter4j/UserStreamAdapter.html" title="class in twitter4j">UserStreamAdapter</a></strong></code> </td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../twitter4j/package-summary.html">Package</a></li>
<li><a href="../../twitter4j/StatusAdapter.html" title="class in twitter4j">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../index-all.html">Index</a></li>
<li><a href="../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../index.html?twitter4j/class-use/StatusAdapter.html" target="_top">Frames</a></li>
<li><a href="StatusAdapter.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2015. All Rights Reserved.</small></p>
</body>
</html>
| egeyman/Final-Project | twitter4j-stream/javadoc/twitter4j/class-use/StatusAdapter.html | HTML | apache-2.0 | 4,984 |
/*
* Copyright (C) 2010 Toni Menzel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ops4j.pax.exam.container.remote;
import static org.ops4j.pax.exam.OptionUtils.filter;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.container.remote.options.RBCLookupTimeoutOption;
import org.ops4j.pax.exam.container.remote.options.RBCPortOption;
/**
* Minimal parser for the rbcremote fragment.
*/
public class Parser {

    // Host of the remote bundle context's RMI registry; taken from the
    // last RBCPortOption seen.
    private String host;
    // Port of the remote bundle context's RMI registry; taken from the
    // last RBCPortOption seen.
    private Integer port;
    // RMI registry lookup timeout in milliseconds; stays 0 when no
    // RBCLookupTimeoutOption is present.
    private long timeout;

    /**
     * Scans the given options for remote-bundle-context settings.
     *
     * @param options options to scan; must contain at least one
     *                {@code RBCPortOption} (when several are present, the
     *                last one wins)
     */
    public Parser(Option[] options) {
        extractArguments(filter(RBCPortOption.class, options));
        extractArguments(filter(RBCLookupTimeoutOption.class, options));
        assert port != null : "Port should never be null.";
        assert host != null : "Host should never be null.";
    }

    // Last RBCLookupTimeoutOption wins.
    private void extractArguments(RBCLookupTimeoutOption[] options) {
        for (RBCLookupTimeoutOption op : options) {
            timeout = op.getTimeout();
        }
    }

    // Last RBCPortOption wins.
    private void extractArguments(RBCPortOption[] rbcPortOptions) {
        for (RBCPortOption op : rbcPortOptions) {
            host = op.getHost();
            port = op.getPort();
        }
    }

    public String getHost() {
        return host;
    }

    public Integer getRMIPort() {
        return port;
    }

    /**
     * @return the RMI registry lookup timeout in milliseconds
     */
    public long getRMILookupTimeout() {
        return timeout;
    }

    /**
     * @return the RMI registry lookup timeout in milliseconds
     * @deprecated misspelled accessor kept for backward compatibility;
     *             use {@link #getRMILookupTimeout()} instead
     */
    @Deprecated
    public long getRMILookupTimpout() {
        return getRMILookupTimeout();
    }

    public Integer getPort() {
        return port;
    }
}
| ops4j/org.ops4j.pax.exam2 | containers/pax-exam-container-remote/src/main/java/org/ops4j/pax/exam/container/remote/Parser.java | Java | apache-2.0 | 1,966 |
/*
* Copyright (c) 2012, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.store;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import com.salesforce.androidsdk.smartstore.store.SmartStore.Type;
/**
* Simple class to represent index spec
*/
public class IndexSpec {
public final String path;
public final Type type;
public final String columnName;
public IndexSpec(String path, Type type) {
this.path = path;
this.type = type;
this.columnName = null; // undefined
}
public IndexSpec(String path, Type type, String columnName) {
this.path = path;
this.type = type;
this.columnName = columnName;
}
@Override
public int hashCode() {
int result = 17;
result = 31 * result + path.hashCode();
result = 31 * result + type.hashCode();
if (columnName != null)
result = 31 * result + columnName.hashCode();
return result;
}
@Override
public boolean equals(Object obj) {
if (obj == null)
return false;
if (obj == this)
return true;
if (!(obj instanceof IndexSpec))
return false;
IndexSpec rhs = (IndexSpec) obj;
boolean result = true;
result = result && path.equals(rhs.path);
result = result && type.equals(rhs.type);
if (columnName == null)
result = result && (columnName == rhs.columnName);
else
result = result && columnName.equals(rhs.columnName);
return result;
}
/**
* @return path | type
*/
public String getPathType() {
return path + "|" + type;
}
/**
* @return JSONObject for this IndexSpec
* @throws JSONException
*/
public JSONObject toJSON() throws JSONException {
JSONObject json = new JSONObject();
json.put("path", path);
json.put("type", type);
json.put("columnName", columnName);
return json;
}
/**
* @param indexSpecs
* @return JSONArray for the array of IndexSpec's
* @throws JSONException
*/
public static JSONArray toJSON(IndexSpec[] indexSpecs) throws JSONException {
JSONArray json = new JSONArray();
for(IndexSpec indexSpec : indexSpecs) {
json.put(indexSpec.toJSON());
}
return json;
}
/**
* @param jsonArray
* @return IndexSpec[] from a JSONArray
* @throws JSONException
*/
public static IndexSpec[] fromJSON(JSONArray jsonArray) throws JSONException {
List<IndexSpec> list = new ArrayList<IndexSpec>();
for(int i=0; i<jsonArray.length(); i++) {
list.add(IndexSpec.fromJSON(jsonArray.getJSONObject(i)));
}
return list.toArray(new IndexSpec[0]);
}
/**
* Return IndexSpec given JSONObject
* @param json
* @return
* @throws JSONException
*/
public static IndexSpec fromJSON(JSONObject json) throws JSONException {
return new IndexSpec(json.getString("path"), Type.valueOf(json.getString("type")), json.optString("columnName"));
}
/**
* @param indexSpecs
* @return map index spec path to index spec
*/
public static Map<String, IndexSpec> mapForIndexSpecs(IndexSpec[] indexSpecs) {
Map<String, IndexSpec> map = new HashMap<String, IndexSpec>();
for (IndexSpec indexSpec : indexSpecs) {
map.put(indexSpec.path, indexSpec);
}
return map;
}
/**
* @param indexSpecs
* @return true if at least one of the indexSpec is of type full_text
*/
public static boolean hasFTS(IndexSpec[] indexSpecs) {
for (IndexSpec indexSpec : indexSpecs) {
if (indexSpec.type == Type.full_text) {
return true;
}
}
return false;
}
} | huminzhi/SalesforceMobileSDK-Android | libs/SmartStore/src/com/salesforce/androidsdk/smartstore/store/IndexSpec.java | Java | apache-2.0 | 5,296 |
---
id: admin-api-tenants
title: Managing Tenants
sidebar_label: "Tenants"
original_id: admin-api-tenants
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
> **Important**
>
> This page only shows **some frequently used operations**.
>
> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
>
> - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
>
> - For the latest and complete information about `Java admin API`, including classes, methods, descriptions, and more, see [Java admin API doc](https://pulsar.apache.org/api/admin/).
Tenants, like namespaces, can be managed using the [admin API](admin-api-overview). There are currently two configurable aspects of tenants:
* Admin roles
* Allowed clusters
## Tenant resources
### List
You can list all of the tenants associated with an [instance](reference-terminology.md#instance).
<Tabs
defaultValue="pulsar-admin"
values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"JAVA","value":"JAVA"}]}>
<TabItem value="pulsar-admin">
Use the [`list`](reference-pulsar-admin.md#tenants-list) subcommand.
```shell
$ pulsar-admin tenants list
my-tenant-1
my-tenant-2
```
</TabItem>
<TabItem value="REST API">
{@inject: endpoint|GET|/admin/v2/tenants|operation/getTenants?version=@pulsar:version_number@}
</TabItem>
<TabItem value="JAVA">
```java
admin.tenants().getTenants();
```
</TabItem>
</Tabs>
### Create
You can create a new tenant.
<Tabs
defaultValue="pulsar-admin"
values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"JAVA","value":"JAVA"}]}>
<TabItem value="pulsar-admin">
Use the [`create`](reference-pulsar-admin.md#tenants-create) subcommand:
```shell
$ pulsar-admin tenants create my-tenant
```
When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
```shell
$ pulsar-admin tenants create my-tenant \
--admin-roles role1,role2,role3
$ pulsar-admin tenants create my-tenant \
-r role1
```
</TabItem>
<TabItem value="REST API">
{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
</TabItem>
<TabItem value="JAVA">
```java
admin.tenants().createTenant(tenantName, tenantInfo);
```
</TabItem>
</Tabs>
### Get configuration
You can fetch the [configuration](reference-configuration) for an existing tenant at any time.
<Tabs
defaultValue="pulsar-admin"
values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"JAVA","value":"JAVA"}]}>
<TabItem value="pulsar-admin">
Use the [`get`](reference-pulsar-admin.md#tenants-get) subcommand and specify the name of the tenant. Here's an example:
```shell
$ pulsar-admin tenants get my-tenant
{
"adminRoles": [
"admin1",
"admin2"
],
"allowedClusters": [
"cl1",
"cl2"
]
}
```
</TabItem>
<TabItem value="REST API">
{@inject: endpoint|GET|/admin/v2/tenants/:tenant|operation/getTenant?version=@pulsar:version_number@}
</TabItem>
<TabItem value="JAVA">
```java
admin.tenants().getTenantInfo(tenantName);
```
</TabItem>
</Tabs>
### Delete
Tenants can be deleted from a Pulsar [instance](reference-terminology.md#instance).
<Tabs
defaultValue="pulsar-admin"
values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"JAVA","value":"JAVA"}]}>
<TabItem value="pulsar-admin">
Use the [`delete`](reference-pulsar-admin.md#tenants-delete) subcommand and specify the name of the tenant.
```shell
$ pulsar-admin tenants delete my-tenant
```
</TabItem>
<TabItem value="REST API">
{@inject: endpoint|DELETE|/admin/v2/tenants/:tenant|operation/deleteTenant?version=@pulsar:version_number@}
</TabItem>
<TabItem value="JAVA">
```java
admin.tenants().deleteTenant(tenantName);
```
</TabItem>
</Tabs>
### Update
You can update a tenant's configuration.
<Tabs
defaultValue="pulsar-admin"
values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"JAVA","value":"JAVA"}]}>
<TabItem value="pulsar-admin">
Use the [`update`](reference-pulsar-admin.md#tenants-update) subcommand.
```shell
$ pulsar-admin tenants update my-tenant
```
</TabItem>
<TabItem value="REST API">
{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/updateTenant?version=@pulsar:version_number@}
</TabItem>
<TabItem value="JAVA">
```java
admin.tenants().updateTenant(tenantName, tenantInfo);
```
</TabItem>
</Tabs>
| massakam/pulsar | site2/website-next/versioned_docs/version-2.5.2/admin-api-tenants.md | Markdown | apache-2.0 | 4,881 |
<!--- Licensed to the Apache Software Foundation (ASF) under one -->
<!--- or more contributor license agreements. See the NOTICE file -->
<!--- distributed with this work for additional information -->
<!--- regarding copyright ownership. The ASF licenses this file -->
<!--- to you under the Apache License, Version 2.0 (the -->
<!--- "License"); you may not use this file except in compliance -->
<!--- with the License. You may obtain a copy of the License at -->
<!--- http://www.apache.org/licenses/LICENSE-2.0 -->
<!--- Unless required by applicable law or agreed to in writing, -->
<!--- software distributed under the License is distributed on an -->
<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
<!--- KIND, either express or implied. See the License for the -->
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->
# Running inference on MXNet/Gluon from an ONNX model
[Open Neural Network Exchange (ONNX)](https://github.com/onnx/onnx) provides an open source format for AI models. It defines an extensible computation graph model, as well as definitions of built-in operators and standard data types.
In this tutorial we will:
- learn how to load a pre-trained .onnx model file into MXNet/Gluon
- learn how to test this model using the sample input/output
- learn how to test the model on custom images
## Pre-requisite
To run the tutorial you will need to have installed the following python modules:
- [MXNet > 1.1.0](https://mxnet.apache.org/get_started)
- [onnx](https://github.com/onnx/onnx) (follow the install guide)
- matplotlib
```{.python .input}
import numpy as np
import mxnet as mx
from mxnet.contrib import onnx as onnx_mxnet
from mxnet import gluon, nd
%matplotlib inline
import matplotlib.pyplot as plt
import tarfile, os
import json
import logging
logging.basicConfig(level=logging.INFO)
```
### Downloading supporting files
These are images and a visualization script
```{.python .input}
image_folder = "images"
utils_file = "utils.py" # contain utils function to plot nice visualization
image_net_labels_file = "image_net_labels.json"
images = ['apron.jpg', 'hammerheadshark.jpg', 'dog.jpg', 'wrench.jpg', 'dolphin.jpg', 'lotus.jpg']
base_url = "https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/doc/tutorials/onnx/{}?raw=true"
for image in images:
mx.test_utils.download(base_url.format("{}/{}".format(image_folder, image)), fname=image,dirname=image_folder)
mx.test_utils.download(base_url.format(utils_file), fname=utils_file)
mx.test_utils.download(base_url.format(image_net_labels_file), fname=image_net_labels_file)
from utils import *
```
## Downloading a model from the ONNX model zoo
We download a pre-trained model, in our case the [GoogleNet](https://arxiv.org/abs/1409.4842) model, trained on [ImageNet](http://www.image-net.org/) from the [ONNX model zoo](https://github.com/onnx/models). The model comes packaged in an archive `tar.gz` file containing an `model.onnx` model file.
```{.python .input}
base_url = "https://s3.amazonaws.com/download.onnx/models/opset_3/"
current_model = "bvlc_googlenet"
model_folder = "model"
archive = "{}.tar.gz".format(current_model)
archive_file = os.path.join(model_folder, archive)
url = "{}{}".format(base_url, archive)
```
Download and extract pre-trained model
```{.python .input}
mx.test_utils.download(url, dirname = model_folder)
if not os.path.isdir(os.path.join(model_folder, current_model)):
print('Extracting model...')
tar = tarfile.open(archive_file, "r:gz")
tar.extractall(model_folder)
tar.close()
print('Extracted')
```
The models have been pre-trained on ImageNet, let's load the label mapping of the 1000 classes.
```{.python .input}
categories = json.load(open(image_net_labels_file, 'r'))
```
## Loading the model into MXNet Gluon
```{.python .input}
onnx_path = os.path.join(model_folder, current_model, "model.onnx")
```
We get the symbol and parameter objects
```{.python .input}
sym, arg_params, aux_params = onnx_mxnet.import_model(onnx_path)
```
We pick a device, CPU is fine for inference, switch to mx.gpu() if you want to use your GPU.
```{.python .input}
device = mx.cpu()
```
We obtain the data names of the inputs to the model by using the model metadata API:
```{.python .input}
model_metadata = onnx_mxnet.get_model_metadata(onnx_path)
print(model_metadata)
```
```
{'output_tensor_data': [(u'gpu_0/softmax_1', (1L, 1000L))],
'input_tensor_data': [(u'gpu_0/data_0', (1L, 3L, 224L, 224L))]}
```
```{.python .input}
data_names = [inputs[0] for inputs in model_metadata.get('input_tensor_data')]
print(data_names)
```
And load them into a MXNet Gluon symbol block.
```{.python .input}
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
net = gluon.nn.SymbolBlock(outputs=sym, inputs=mx.sym.var('data_0'))
net_params = net.collect_params()
for param in arg_params:
if param in net_params:
net_params[param]._load_init(arg_params[param], device=device)
for param in aux_params:
if param in net_params:
net_params[param]._load_init(aux_params[param], device=device)
```
We can now cache the computational graph through [hybridization](https://mxnet.apache.org/versions/master/api/python/docs/tutorials/packages/gluon/blocks/hybridize.html) to gain some performance
```{.python .input}
net.hybridize()
```
We can visualize the network (requires graphviz installed)
```{.python .input}
mx.visualization.plot_network(sym, node_attrs={"shape":"oval","fixedsize":"false"})
```
<!--notebook-skip-line-->
This is a helper function to run M batches of data of batch-size N through the net and collate the outputs into an array of shape (K, 1000) where K=MxN is the total number of examples (number of batches x batch-size) run through the network.
```{.python .input}
def run_batch(net, data):
results = []
for batch in data:
outputs = net(batch)
results.extend([o for o in outputs.asnumpy()])
return np.array(results)
```
## Test using real images
```{.python .input}
TOP_P = 3 # How many top guesses we show in the visualization
```
Transform function to set the data into the format the network expects, (N, 3, 224, 224) where N is the batch size.
```{.python .input}
def transform(img):
return np.expand_dims(np.transpose(img, (2,0,1)),axis=0).astype(np.float32)
```
We load two sets of images in memory
```{.python .input}
image_net_images = [plt.imread('{}/{}.jpg'.format(image_folder, path)) for path in ['apron', 'hammerheadshark','dog']]
caltech101_images = [plt.imread('{}/{}.jpg'.format(image_folder, path)) for path in ['wrench', 'dolphin','lotus']]
images = image_net_images + caltech101_images
```
And run them as a batch through the network to get the predictions
```{.python .input}
batch = nd.array(np.concatenate([transform(img) for img in images], axis=0), device=device)
result = run_batch(net, [batch])
```
```{.python .input}
plot_predictions(image_net_images, result[:3], categories, TOP_P)
```
<!--notebook-skip-line-->
**Well done!** Looks like it is doing a pretty good job at classifying pictures when the category is a ImageNet label
Let's now see the results on the 3 other images
```{.python .input}
plot_predictions(caltech101_images, result[3:7], categories, TOP_P)
```
<!--notebook-skip-line-->
**Hmm, not so good...** Even though predictions are close, they are not accurate, which is due to the fact that the ImageNet dataset does not contain `wrench`, `dolphin`, or `lotus` categories and our network has been trained on ImageNet.
Lucky for us, the [Caltech101 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech101/) has them, let's see how we can fine-tune our network to classify these categories correctly.
We show that in our next tutorial:
- [Fine-tuning an ONNX Model using the modern imperative MXNet/Gluon](https://mxnet.apache.org/versions/master/api/python/docs/tutorials/packages/onnx/fine_tuning_gluon.html)
<!-- INSERT SOURCE DOWNLOAD BUTTONS -->
| szha/mxnet | docs/python_docs/python/tutorials/packages/onnx/inference_on_onnx_model.md | Markdown | apache-2.0 | 8,432 |
#!/bin/sh
# $id:$
# $log:$
#
# Activate (and optionally create) the TPC-DS referential-integrity
# constraints for the configured DBMS. With no argument the constraints
# are only activated; with any argument tpcds_ri.sql is run first.
#
# Configuration (overridable via the environment):
V_DATABASE=${DATABASE:-"tpcds"} # name of the database to be created
V_DBMS=${DBMS:-"db2"}           # dbms flavor to use
                                # must match xx_setup.sh file name
V_DATA_DIR=${DATA_DIR:-"/data"} # flat file directory

# The per-DBMS setup script provides connect_to, run_query and
# activate_constraints.
if [ ! -f "${V_DBMS}_setup.sh" ]
then
   echo "Cannot find setup file for DBMS: ${V_DBMS}_setup.sh"
   exit 1
fi

. "./${V_DBMS}_setup.sh"

connect_to "$V_DATABASE"

# Guard the cd: running the SQL from the wrong directory would operate on
# the wrong files.
cd temp_build || exit 1

# A FAILED marker from an earlier build step means there is nothing valid
# to constrain; propagate the failure instead of exiting 0.
if [ -f FAILED ]
then
   exit 1
fi

if [ -z "$1" ]
then
   activate_constraints tpcds_ri.sql
else
   run_query "$V_DATABASE" tpcds_ri.sql
   activate_constraints tpcds_ri.sql
fi
/*
 * author: the5fire
 * blog: the5fire.com
 * date: 2014-03-16
 * */
// Entry point: everything runs inside the DOM-ready callback.
$(function(){
// Globals read by the socket.io Flash WebSocket fallback (web-socket-js).
WEB_SOCKET_SWF_LOCATION = "/static/WebSocketMain.swf";
WEB_SOCKET_DEBUG = true;
// Single socket.io connection to the serving host, shared by all views.
var socket = io.connect();
socket.on('connect', function(){
console.log('connected');
});
// Disconnect cleanly before the page unloads.
$(window).bind("beforeunload", function() {
socket.disconnect();
});
// Current user; fetched from /user, created via POST (registration).
var User = Backbone.Model.extend({
urlRoot: '/user',
});
// A chat room / discussion topic.
var Topic = Backbone.Model.extend({
urlRoot: '/topic',
});
// A single chat message inside a topic.
var Message = Backbone.Model.extend({
urlRoot: '/message',
// Override sync so that newly created messages travel over the socket
// instead of a normal POST; all other methods use the default sync.
sync: function(method, model, options) {
if (method === 'create') {
socket.emit('message', model.attributes);
// Error handling is not implemented (no ack from the server).
$('#comment').val('');
} else {
return Backbone.sync(method, model, options);
};
},
});
// All topics (GET /topic).
var Topics = Backbone.Collection.extend({
url: '/topic',
model: Topic,
});
// Messages of one topic; fetched with ?topic_id=... as request data.
var Messages = Backbone.Collection.extend({
url: '/message',
model: Message,
});
// Shared topic collection rendered by AppView.
var topics = new Topics;
// Renders one topic entry in the topic list.
var TopicView = Backbone.View.extend({
// NOTE(review): Backbone's tagName is normally a bare element name;
// embedding the attribute here depends on jQuery-style element
// creation -- confirm it still works with the bundled Backbone.
tagName: "div class='column'",
templ: _.template($('#topic-template').html()),
// Render the topic list-item template.
render: function() {
$(this.el).html(this.templ(this.model.toJSON()));
return this;
},
});
// Module-level default collection (per-topic collections live in
// AppView.message_pool).
var messages = new Messages;
// Renders one chat message line.
var MessageView = Backbone.View.extend({
tagName: "div class='comment'",
templ: _.template($('#message-template').html()),
// Render the message template.
render: function() {
$(this.el).html(this.templ(this.model.toJSON()));
return this;
},
});
// Root application view: owns the topic list, the per-topic chat panel
// and the comment form. All referenced elements must exist in the page.
var AppView = Backbone.View.extend({
    el: "#main",
    topic_list: $("#topic_list"),
    topic_section: $("#topic_section"),
    message_section: $("#message_section"),
    message_list: $("#message_list"),
    message_head: $("#message_head"),
    events: {
        'click .submit': 'saveMessage',
        'click .submit_topic': 'saveTopic',
        'keypress #comment': 'saveMessageEvent',
    },
    initialize: function() {
        // Keep `this` bound to the view when these run as collection
        // event handlers.
        _.bindAll(this, 'addTopic', 'addMessage');
        topics.bind('add', this.addTopic);
        // One message collection per topic so that messages of different
        // topics never mix.
        this.message_pool = {};
        this.socket = null;
        this.message_list_div = document.getElementById('message_list');
    },
    // Render one topic and append it to the topic list.
    addTopic: function(topic) {
        var view = new TopicView({model: topic});
        this.topic_list.append(view.render().el);
    },
    // Render one message, append it and keep the newest line visible.
    addMessage: function(message) {
        var view = new MessageView({model: message});
        this.message_list.append(view.render().el);
        // Bound via _.bindAll, so `this` is the view here (the original
        // code relied on an implicit global `self` for this line).
        this.message_list.scrollTop(this.message_list_div.scrollHeight);
    },
    // Enter in the comment box submits the message.
    saveMessageEvent: function(evt) {
        if (evt.keyCode == 13) {
            this.saveMessage(evt);
        }
    },
    // Validate and send the comment box content to the current topic.
    saveMessage: function(evt) {
        var comment_box = $('#comment');
        var content = comment_box.val();
        if (content == '') {
            alert('内容不能为空');
            return false;
        }
        var topic_id = comment_box.attr('topic_id');
        var message = new Message({
            content: content,
            topic_id: topic_id,
        });
        // Relies on the Message.sync override above, which emits the
        // message over the socket instead of POSTing it.
        message.save();
    },
    // Validate and create a new topic; the topics 'add' handler renders it.
    saveTopic: function(evt) {
        var topic_title = $('#topic_title');
        if (topic_title.val() == '') {
            alert('主题不能为空!');
            return false;
        }
        var topic = new Topic({
            title: topic_title.val(),
        });
        topic.save(null, {
            success: function(model, response, options) {
                topics.add(response);
                topic_title.val('');
            },
            error: function(model, resp, options) {
                alert(resp.responseText);
            }
        });
    },
    // Show the topic list, hide the chat panel and leave any joined room.
    showTopic: function() {
        topics.fetch();
        this.topic_section.show();
        this.message_section.hide();
        this.message_list.html('');
        this.goOut();
    },
    // Leave the current chat room and drop the socket message listener.
    goOut: function() {
        socket.emit('go_out');
        socket.removeAllListeners('message');
    },
    // Create and register a fresh message collection for a topic.
    initMessage: function(topic_id) {
        var messages = new Messages;
        messages.bind('add', this.addMessage);
        this.message_pool[topic_id] = messages;
    },
    // Show the chat panel for one topic: join its room, listen for
    // incoming messages and load the message history.
    showMessage: function(topic_id) {
        var self = this;
        this.initMessage(topic_id);
        this.message_section.show();
        this.topic_section.hide();
        this.showMessageHead(topic_id);
        $('#comment').attr('topic_id', topic_id);
        var messages = this.message_pool[topic_id];
        // Join the room for this topic.
        socket.emit('topic', topic_id);
        // Incoming chat lines are added to the collection; the 'add'
        // handler renders them.
        socket.on('message', function(response) {
            messages.add(response);
        });
        messages.fetch({
            data: {topic_id: topic_id},
            success: function(resp) {
                self.message_list.scrollTop(self.message_list_div.scrollHeight);
            },
            error: function(model, resp, options) {
                alert(resp.responseText);
            }
        });
    },
    // Load the topic title and put it in the chat panel heading.
    showMessageHead: function(topic_id) {
        var self = this;
        var topic = new Topic({id: topic_id});
        topic.fetch({
            // NOTE(review): Backbone passes (model, response, options);
            // the parameter names here are swapped, so `model` is really
            // the raw response object -- works, but confusing.
            success: function(resp, model, options) {
                self.message_head.html(model.title);
            },
            error: function(model, resp, options) {
                alert(resp.responseText);
            }
        });
    },
});
// Login / registration panel. Shown while there is no user session and
// hidden once authentication or registration succeeds.
var LoginView = Backbone.View.extend({
    el: "#login",
    wrapper: $('#wrapper'),
    events: {
        'keypress #login_pwd': 'loginEvent',
        'click .login_submit': 'login',
        'keypress #reg_pwd_repeat': 'registeEvent',
        'click .registe_submit': 'registe',
    },
    // Hide the whole login wrapper.
    hide: function() {
        this.wrapper.hide();
    },
    // Show the whole login wrapper.
    show: function() {
        this.wrapper.show();
    },
    // Enter in the password box submits the login form.
    loginEvent: function(evt) {
        if (evt.keyCode !== 13) {
            return;
        }
        this.login(evt);
    },
    // POST the credentials to /login; on success remember the user and
    // route to the index page.
    login: function(evt) {
        var credentials = new User({
            username: $('#login_username').val(),
            password: $('#login_pwd').val(),
        });
        credentials.save(null, {
            url: '/login',
            success: function(model, resp, options) {
                g_user = resp;
                // Jump to the index page.
                appRouter.navigate('index', {trigger: true});
            },
            error: function(model, resp, options) {
                alert(resp.responseText);
            }
        });
    },
    // Enter in the repeat-password box submits the registration form.
    registeEvent: function(evt) {
        if (evt.keyCode !== 13) {
            return;
        }
        this.registe(evt);
    },
    // Create a new account; on success remember the user and route to
    // the index page.
    registe: function(evt) {
        var newUser = new User({
            username: $('#reg_username').val(),
            password: $('#reg_pwd').val(),
            password_repeat: $('#reg_pwd_repeat').val(),
        });
        newUser.save(null, {
            success: function(model, resp, options) {
                g_user = resp;
                // Jump to the index page.
                appRouter.navigate('index', {trigger: true});
            },
            error: function(model, resp, options) {
                alert(resp.responseText);
            }
        });
    },
});
// Small header widget that displays the logged-in user's name.
var UserView = Backbone.View.extend({
    el: "#user_info",
    username: $('#username'),
    // Render the given name and reveal the user-info area.
    show: function(name) {
        this.username.html(name);
        this.$el.show();
    },
});
var AppRouter = Backbone.Router.extend({
    routes: {
        "login": "login",
        "index": "index",
        "topic/:id" : "topic",
    },
    initialize: function() {
        // Build the top-level views once; routes only toggle between them.
        this.appView = new AppView();
        this.loginView = new LoginView();
        this.userView = new UserView();
        this.indexFlag = false;
    },
    // True when a user object with an id has been loaded.
    _isAuthed: function() {
        return Boolean(g_user && g_user.id != undefined);
    },
    // Chrome shared by every authenticated page: user name shown, login
    // form hidden, and the "main page reached" flag set.
    _showAuthedChrome: function() {
        this.userView.show(g_user.username);
        this.loginView.hide();
        this.indexFlag = true;
    },
    login: function() {
        this.loginView.show();
    },
    index: function() {
        if (!this._isAuthed()) {
            return;
        }
        this.appView.showTopic();
        this._showAuthedChrome();
    },
    topic: function(topic_id) {
        if (!this._isAuthed()) {
            return;
        }
        this.appView.showMessage(topic_id);
        this._showAuthedChrome();
    },
});
var appRouter = new AppRouter();
// Fetch the current session's user, then start hash-based routing.
var g_user = new User();
g_user.fetch({
    success: function(model, resp, options) {
        g_user = resp;
        // Fix: the original passed {pustState: true} — a misspelling of
        // Backbone's `pushState` option — so it was silently ignored and the
        // app has always run with hash-based routing. The dead option is
        // removed; enabling real pushState would require server-side route
        // support, so it is deliberately not turned on here.
        Backbone.history.start();
        if (g_user === null || g_user.id === undefined) {
            // Not logged in: go to the login page.
            appRouter.navigate('login', {trigger: true});
        } else if (appRouter.indexFlag == false) {
            // Logged in but no route has rendered yet: go to the index page.
            appRouter.navigate('index', {trigger: true});
        }
    },
    error: function(model, resp, options) {
        alert(resp.responseText);
    }
}); // fetch the current user
});
| yhbyun/wechat-1 | src/static/js/chat.js | JavaScript | apache-2.0 | 10,736 |
<?php
namespace metastore;
/**
* Autogenerated by Thrift Compiler (0.14.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
use Thrift\Base\TBase;
use Thrift\Type\TType;
use Thrift\Type\TMessageType;
use Thrift\Exception\TException;
use Thrift\Exception\TProtocolException;
use Thrift\Protocol\TProtocol;
use Thrift\Protocol\TBinaryProtocolAccelerated;
use Thrift\Exception\TApplicationException;
/**
 * Thrift argument wrapper for ThriftHiveMetastore.create_wm_pool.
 *
 * Generated code: the field ids and the $_TSPEC layout mirror the Thrift IDL
 * and must stay in sync with it.
 */
class ThriftHiveMetastore_create_wm_pool_args
{
    static public $isValidate = false;

    // Thrift type specification: field id 1 is the optional request struct.
    static public $_TSPEC = array(
        1 => array(
            'var' => 'request',
            'isRequired' => false,
            'type' => TType::STRUCT,
            'class' => '\metastore\WMCreatePoolRequest',
        ),
    );

    /**
     * @var \metastore\WMCreatePoolRequest
     */
    public $request = null;

    /**
     * @param array|null $vals Optional construction values; only the
     *                         'request' key is recognized.
     */
    public function __construct($vals = null)
    {
        if (is_array($vals)) {
            if (isset($vals['request'])) {
                $this->request = $vals['request'];
            }
        }
    }

    /** @return string The Thrift struct name. */
    public function getName()
    {
        return 'ThriftHiveMetastore_create_wm_pool_args';
    }

    /**
     * Deserializes this struct from the given protocol.
     *
     * @param TProtocol $input
     * @return int Number of bytes read.
     */
    public function read($input)
    {
        $xfer = 0;
        $fname = null;
        $ftype = 0;
        $fid = 0;
        $xfer += $input->readStructBegin($fname);
        while (true) {
            $xfer += $input->readFieldBegin($fname, $ftype, $fid);
            if ($ftype == TType::STOP) {
                break;
            }
            switch ($fid) {
                case 1:
                    if ($ftype == TType::STRUCT) {
                        $this->request = new \metastore\WMCreatePoolRequest();
                        $xfer += $this->request->read($input);
                    } else {
                        // Field 1 present with an unexpected wire type: skip it.
                        $xfer += $input->skip($ftype);
                    }
                    break;
                default:
                    // Unknown field id: skip for forward compatibility.
                    $xfer += $input->skip($ftype);
                    break;
            }
            $xfer += $input->readFieldEnd();
        }
        $xfer += $input->readStructEnd();
        return $xfer;
    }

    /**
     * Serializes this struct to the given protocol.
     *
     * @param TProtocol $output
     * @return int Number of bytes written.
     * @throws TProtocolException If $request is set but is not an object.
     */
    public function write($output)
    {
        $xfer = 0;
        $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_wm_pool_args');
        if ($this->request !== null) {
            if (!is_object($this->request)) {
                throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
            }
            $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
            $xfer += $this->request->write($output);
            $xfer += $output->writeFieldEnd();
        }
        $xfer += $output->writeFieldStop();
        $xfer += $output->writeStructEnd();
        return $xfer;
    }
}
| sankarh/hive | standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_create_wm_pool_args.php | PHP | apache-2.0 | 2,776 |
/***************************************************************************//**
* \file cyip_crypto.h
*
* \brief
* CRYPTO IP definitions
*
* \note
* Generator version: 1.3.0.1146
* Database revision: rev#1050929
*
********************************************************************************
* \copyright
* Copyright 2016-2018, Cypress Semiconductor Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*******************************************************************************/
#ifndef _CYIP_CRYPTO_H_
#define _CYIP_CRYPTO_H_
#include "cyip_headers.h"
/*******************************************************************************
* CRYPTO
*******************************************************************************/
#define CRYPTO_SECTION_SIZE 0x00010000UL
/**
* \brief Cryptography component (CRYPTO)
*/
/* Generated register map: field order, widths and RESERVED padding encode the
 * hardware offsets shown in the trailing comments — do not reorder or resize. */
typedef struct {
  __IOM uint32_t CTL;                           /*!< 0x00000000 Control */
  __IM  uint32_t STATUS;                        /*!< 0x00000004 Status */
  __IOM uint32_t RAM_PWRUP_DELAY;               /*!< 0x00000008 Power up delay used for SRAM power domain */
   __IM uint32_t RESERVED[5];
  __IM  uint32_t ERROR_STATUS0;                 /*!< 0x00000020 Error status 0 */
  __IOM uint32_t ERROR_STATUS1;                 /*!< 0x00000024 Error status 1 */
   __IM uint32_t RESERVED1[6];
  __IOM uint32_t INSTR_FF_CTL;                  /*!< 0x00000040 Instruction FIFO control */
  __IM  uint32_t INSTR_FF_STATUS;               /*!< 0x00000044 Instruction FIFO status */
  __OM  uint32_t INSTR_FF_WR;                   /*!< 0x00000048 Instruction FIFO write */
   __IM uint32_t RESERVED2[13];
  __IM  uint32_t RF_DATA[16];                   /*!< 0x00000080 Register-file */
   __IM uint32_t RESERVED3[16];
  __IOM uint32_t AES_CTL;                       /*!< 0x00000100 AES control */
   __IM uint32_t RESERVED4[31];
  __IM  uint32_t STR_RESULT;                    /*!< 0x00000180 String result */
   __IM uint32_t RESERVED5[31];
  __IOM uint32_t PR_LFSR_CTL0;                  /*!< 0x00000200 Pseudo random LFSR control 0 */
  __IOM uint32_t PR_LFSR_CTL1;                  /*!< 0x00000204 Pseudo random LFSR control 1 */
  __IOM uint32_t PR_LFSR_CTL2;                  /*!< 0x00000208 Pseudo random LFSR control 2 */
   __IM uint32_t RESERVED6;
  __IOM uint32_t PR_RESULT;                     /*!< 0x00000210 Pseudo random result */
   __IM uint32_t RESERVED7[27];
  __IOM uint32_t TR_CTL0;                       /*!< 0x00000280 True random control 0 */
  __IOM uint32_t TR_CTL1;                       /*!< 0x00000284 True random control 1 */
  __IOM uint32_t TR_RESULT;                     /*!< 0x00000288 True random result */
   __IM uint32_t RESERVED8[5];
  __IOM uint32_t TR_GARO_CTL;                   /*!< 0x000002A0 True random GARO control */
  __IOM uint32_t TR_FIRO_CTL;                   /*!< 0x000002A4 True random FIRO control */
   __IM uint32_t RESERVED9[6];
  __IOM uint32_t TR_MON_CTL;                    /*!< 0x000002C0 True random monitor control */
   __IM uint32_t RESERVED10;
  __IOM uint32_t TR_MON_CMD;                    /*!< 0x000002C8 True random monitor command */
   __IM uint32_t RESERVED11;
  __IOM uint32_t TR_MON_RC_CTL;                 /*!< 0x000002D0 True random monitor RC control */
   __IM uint32_t RESERVED12;
  __IM  uint32_t TR_MON_RC_STATUS0;             /*!< 0x000002D8 True random monitor RC status 0 */
  __IM  uint32_t TR_MON_RC_STATUS1;             /*!< 0x000002DC True random monitor RC status 1 */
  __IOM uint32_t TR_MON_AP_CTL;                 /*!< 0x000002E0 True random monitor AP control */
   __IM uint32_t RESERVED13;
  __IM  uint32_t TR_MON_AP_STATUS0;             /*!< 0x000002E8 True random monitor AP status 0 */
  __IM  uint32_t TR_MON_AP_STATUS1;             /*!< 0x000002EC True random monitor AP status 1 */
   __IM uint32_t RESERVED14[4];
  __IOM uint32_t SHA_CTL;                       /*!< 0x00000300 SHA control */
   __IM uint32_t RESERVED15[63];
  __IOM uint32_t CRC_CTL;                       /*!< 0x00000400 CRC control */
   __IM uint32_t RESERVED16[3];
  __IOM uint32_t CRC_DATA_CTL;                  /*!< 0x00000410 CRC data control */
   __IM uint32_t RESERVED17[3];
  __IOM uint32_t CRC_POL_CTL;                   /*!< 0x00000420 CRC polynomial control */
   __IM uint32_t RESERVED18[3];
  __IOM uint32_t CRC_LFSR_CTL;                  /*!< 0x00000430 CRC LFSR control */
   __IM uint32_t RESERVED19[3];
  __IOM uint32_t CRC_REM_CTL;                   /*!< 0x00000440 CRC remainder control */
   __IM uint32_t RESERVED20;
  __IM  uint32_t CRC_REM_RESULT;                /*!< 0x00000448 CRC remainder result */
   __IM uint32_t RESERVED21[13];
  __IOM uint32_t VU_CTL0;                       /*!< 0x00000480 Vector unit control 0 */
  __IOM uint32_t VU_CTL1;                       /*!< 0x00000484 Vector unit control 1 */
   __IM uint32_t RESERVED22[2];
  __IM  uint32_t VU_STATUS;                     /*!< 0x00000490 Vector unit status */
   __IM uint32_t RESERVED23[203];
  __IOM uint32_t INTR;                          /*!< 0x000007C0 Interrupt register */
  __IOM uint32_t INTR_SET;                      /*!< 0x000007C4 Interrupt set register */
  __IOM uint32_t INTR_MASK;                     /*!< 0x000007C8 Interrupt mask register */
  __IM  uint32_t INTR_MASKED;                   /*!< 0x000007CC Interrupt masked register */
   __IM uint32_t RESERVED24[3596];
  __IOM uint32_t MEM_BUFF[4096];                /*!< 0x00004000 Memory buffer */
} CRYPTO_V1_Type;                               /*!< Size = 32768 (0x8000) */
/* CRYPTO.CTL */
#define CRYPTO_CTL_PWR_MODE_Pos 0UL
#define CRYPTO_CTL_PWR_MODE_Msk 0x3UL
#define CRYPTO_CTL_ENABLED_Pos 31UL
#define CRYPTO_CTL_ENABLED_Msk 0x80000000UL
/* CRYPTO.STATUS */
#define CRYPTO_STATUS_AES_BUSY_Pos 0UL
#define CRYPTO_STATUS_AES_BUSY_Msk 0x1UL
#define CRYPTO_STATUS_DES_BUSY_Pos 1UL
#define CRYPTO_STATUS_DES_BUSY_Msk 0x2UL
#define CRYPTO_STATUS_SHA_BUSY_Pos 2UL
#define CRYPTO_STATUS_SHA_BUSY_Msk 0x4UL
#define CRYPTO_STATUS_CRC_BUSY_Pos 3UL
#define CRYPTO_STATUS_CRC_BUSY_Msk 0x8UL
#define CRYPTO_STATUS_STR_BUSY_Pos 4UL
#define CRYPTO_STATUS_STR_BUSY_Msk 0x10UL
#define CRYPTO_STATUS_PR_BUSY_Pos 5UL
#define CRYPTO_STATUS_PR_BUSY_Msk 0x20UL
#define CRYPTO_STATUS_TR_BUSY_Pos 6UL
#define CRYPTO_STATUS_TR_BUSY_Msk 0x40UL
#define CRYPTO_STATUS_VU_BUSY_Pos 7UL
#define CRYPTO_STATUS_VU_BUSY_Msk 0x80UL
#define CRYPTO_STATUS_CMD_FF_BUSY_Pos 31UL
#define CRYPTO_STATUS_CMD_FF_BUSY_Msk 0x80000000UL
/* CRYPTO.RAM_PWRUP_DELAY */
#define CRYPTO_RAM_PWRUP_DELAY_PWRUP_DELAY_Pos 0UL
#define CRYPTO_RAM_PWRUP_DELAY_PWRUP_DELAY_Msk 0x3FFUL
/* CRYPTO.ERROR_STATUS0 */
#define CRYPTO_ERROR_STATUS0_DATA32_Pos 0UL
#define CRYPTO_ERROR_STATUS0_DATA32_Msk 0xFFFFFFFFUL
/* CRYPTO.ERROR_STATUS1 */
#define CRYPTO_ERROR_STATUS1_DATA23_Pos 0UL
#define CRYPTO_ERROR_STATUS1_DATA23_Msk 0xFFFFFFUL
#define CRYPTO_ERROR_STATUS1_IDX_Pos 24UL
#define CRYPTO_ERROR_STATUS1_IDX_Msk 0x7000000UL
#define CRYPTO_ERROR_STATUS1_VALID_Pos 31UL
#define CRYPTO_ERROR_STATUS1_VALID_Msk 0x80000000UL
/* CRYPTO.INSTR_FF_CTL */
#define CRYPTO_INSTR_FF_CTL_EVENT_LEVEL_Pos 0UL
#define CRYPTO_INSTR_FF_CTL_EVENT_LEVEL_Msk 0x7UL
#define CRYPTO_INSTR_FF_CTL_CLEAR_Pos 16UL
#define CRYPTO_INSTR_FF_CTL_CLEAR_Msk 0x10000UL
#define CRYPTO_INSTR_FF_CTL_BLOCK_Pos 17UL
#define CRYPTO_INSTR_FF_CTL_BLOCK_Msk 0x20000UL
/* CRYPTO.INSTR_FF_STATUS */
#define CRYPTO_INSTR_FF_STATUS_USED_Pos 0UL
#define CRYPTO_INSTR_FF_STATUS_USED_Msk 0xFUL
#define CRYPTO_INSTR_FF_STATUS_EVENT_Pos 16UL
#define CRYPTO_INSTR_FF_STATUS_EVENT_Msk 0x10000UL
#define CRYPTO_INSTR_FF_STATUS_BUSY_Pos 31UL
#define CRYPTO_INSTR_FF_STATUS_BUSY_Msk 0x80000000UL
/* CRYPTO.INSTR_FF_WR */
#define CRYPTO_INSTR_FF_WR_DATA32_Pos 0UL
#define CRYPTO_INSTR_FF_WR_DATA32_Msk 0xFFFFFFFFUL
/* CRYPTO.RF_DATA */
#define CRYPTO_RF_DATA_DATA32_Pos 0UL
#define CRYPTO_RF_DATA_DATA32_Msk 0xFFFFFFFFUL
/* CRYPTO.AES_CTL */
#define CRYPTO_AES_CTL_KEY_SIZE_Pos 0UL
#define CRYPTO_AES_CTL_KEY_SIZE_Msk 0x3UL
/* CRYPTO.STR_RESULT */
#define CRYPTO_STR_RESULT_MEMCMP_Pos 0UL
#define CRYPTO_STR_RESULT_MEMCMP_Msk 0x1UL
/* CRYPTO.PR_LFSR_CTL0 */
#define CRYPTO_PR_LFSR_CTL0_LFSR32_Pos 0UL
#define CRYPTO_PR_LFSR_CTL0_LFSR32_Msk 0xFFFFFFFFUL
/* CRYPTO.PR_LFSR_CTL1 */
#define CRYPTO_PR_LFSR_CTL1_LFSR31_Pos 0UL
#define CRYPTO_PR_LFSR_CTL1_LFSR31_Msk 0x7FFFFFFFUL
/* CRYPTO.PR_LFSR_CTL2 */
#define CRYPTO_PR_LFSR_CTL2_LFSR29_Pos 0UL
#define CRYPTO_PR_LFSR_CTL2_LFSR29_Msk 0x1FFFFFFFUL
/* CRYPTO.PR_RESULT */
#define CRYPTO_PR_RESULT_DATA32_Pos 0UL
#define CRYPTO_PR_RESULT_DATA32_Msk 0xFFFFFFFFUL
/* CRYPTO.TR_CTL0 */
#define CRYPTO_TR_CTL0_SAMPLE_CLOCK_DIV_Pos 0UL
#define CRYPTO_TR_CTL0_SAMPLE_CLOCK_DIV_Msk 0xFFUL
#define CRYPTO_TR_CTL0_RED_CLOCK_DIV_Pos 8UL
#define CRYPTO_TR_CTL0_RED_CLOCK_DIV_Msk 0xFF00UL
#define CRYPTO_TR_CTL0_INIT_DELAY_Pos 16UL
#define CRYPTO_TR_CTL0_INIT_DELAY_Msk 0xFF0000UL
#define CRYPTO_TR_CTL0_VON_NEUMANN_CORR_Pos 24UL
#define CRYPTO_TR_CTL0_VON_NEUMANN_CORR_Msk 0x1000000UL
#define CRYPTO_TR_CTL0_STOP_ON_AP_DETECT_Pos 28UL
#define CRYPTO_TR_CTL0_STOP_ON_AP_DETECT_Msk 0x10000000UL
#define CRYPTO_TR_CTL0_STOP_ON_RC_DETECT_Pos 29UL
#define CRYPTO_TR_CTL0_STOP_ON_RC_DETECT_Msk 0x20000000UL
/* CRYPTO.TR_CTL1 */
#define CRYPTO_TR_CTL1_RO11_EN_Pos 0UL
#define CRYPTO_TR_CTL1_RO11_EN_Msk 0x1UL
#define CRYPTO_TR_CTL1_RO15_EN_Pos 1UL
#define CRYPTO_TR_CTL1_RO15_EN_Msk 0x2UL
#define CRYPTO_TR_CTL1_GARO15_EN_Pos 2UL
#define CRYPTO_TR_CTL1_GARO15_EN_Msk 0x4UL
#define CRYPTO_TR_CTL1_GARO31_EN_Pos 3UL
#define CRYPTO_TR_CTL1_GARO31_EN_Msk 0x8UL
#define CRYPTO_TR_CTL1_FIRO15_EN_Pos 4UL
#define CRYPTO_TR_CTL1_FIRO15_EN_Msk 0x10UL
#define CRYPTO_TR_CTL1_FIRO31_EN_Pos 5UL
#define CRYPTO_TR_CTL1_FIRO31_EN_Msk 0x20UL
/* CRYPTO.TR_RESULT */
#define CRYPTO_TR_RESULT_DATA32_Pos 0UL
#define CRYPTO_TR_RESULT_DATA32_Msk 0xFFFFFFFFUL
/* CRYPTO.TR_GARO_CTL */
#define CRYPTO_TR_GARO_CTL_POLYNOMIAL31_Pos 0UL
#define CRYPTO_TR_GARO_CTL_POLYNOMIAL31_Msk 0x7FFFFFFFUL
/* CRYPTO.TR_FIRO_CTL */
#define CRYPTO_TR_FIRO_CTL_POLYNOMIAL31_Pos 0UL
#define CRYPTO_TR_FIRO_CTL_POLYNOMIAL31_Msk 0x7FFFFFFFUL
/* CRYPTO.TR_MON_CTL */
#define CRYPTO_TR_MON_CTL_BITSTREAM_SEL_Pos 0UL
#define CRYPTO_TR_MON_CTL_BITSTREAM_SEL_Msk 0x3UL
/* CRYPTO.TR_MON_CMD */
#define CRYPTO_TR_MON_CMD_START_AP_Pos 0UL
#define CRYPTO_TR_MON_CMD_START_AP_Msk 0x1UL
#define CRYPTO_TR_MON_CMD_START_RC_Pos 1UL
#define CRYPTO_TR_MON_CMD_START_RC_Msk 0x2UL
/* CRYPTO.TR_MON_RC_CTL */
#define CRYPTO_TR_MON_RC_CTL_CUTOFF_COUNT8_Pos 0UL
#define CRYPTO_TR_MON_RC_CTL_CUTOFF_COUNT8_Msk 0xFFUL
/* CRYPTO.TR_MON_RC_STATUS0 */
#define CRYPTO_TR_MON_RC_STATUS0_BIT_Pos 0UL
#define CRYPTO_TR_MON_RC_STATUS0_BIT_Msk 0x1UL
/* CRYPTO.TR_MON_RC_STATUS1 */
#define CRYPTO_TR_MON_RC_STATUS1_REP_COUNT_Pos 0UL
#define CRYPTO_TR_MON_RC_STATUS1_REP_COUNT_Msk 0xFFUL
/* CRYPTO.TR_MON_AP_CTL */
#define CRYPTO_TR_MON_AP_CTL_CUTOFF_COUNT16_Pos 0UL
#define CRYPTO_TR_MON_AP_CTL_CUTOFF_COUNT16_Msk 0xFFFFUL
#define CRYPTO_TR_MON_AP_CTL_WINDOW_SIZE_Pos 16UL
#define CRYPTO_TR_MON_AP_CTL_WINDOW_SIZE_Msk 0xFFFF0000UL
/* CRYPTO.TR_MON_AP_STATUS0 */
#define CRYPTO_TR_MON_AP_STATUS0_BIT_Pos 0UL
#define CRYPTO_TR_MON_AP_STATUS0_BIT_Msk 0x1UL
/* CRYPTO.TR_MON_AP_STATUS1 */
#define CRYPTO_TR_MON_AP_STATUS1_OCC_COUNT_Pos 0UL
#define CRYPTO_TR_MON_AP_STATUS1_OCC_COUNT_Msk 0xFFFFUL
#define CRYPTO_TR_MON_AP_STATUS1_WINDOW_INDEX_Pos 16UL
#define CRYPTO_TR_MON_AP_STATUS1_WINDOW_INDEX_Msk 0xFFFF0000UL
/* CRYPTO.SHA_CTL */
#define CRYPTO_SHA_CTL_MODE_Pos 0UL
#define CRYPTO_SHA_CTL_MODE_Msk 0x7UL
/* CRYPTO.CRC_CTL */
#define CRYPTO_CRC_CTL_DATA_REVERSE_Pos 0UL
#define CRYPTO_CRC_CTL_DATA_REVERSE_Msk 0x1UL
#define CRYPTO_CRC_CTL_REM_REVERSE_Pos 8UL
#define CRYPTO_CRC_CTL_REM_REVERSE_Msk 0x100UL
/* CRYPTO.CRC_DATA_CTL */
#define CRYPTO_CRC_DATA_CTL_DATA_XOR_Pos 0UL
#define CRYPTO_CRC_DATA_CTL_DATA_XOR_Msk 0xFFUL
/* CRYPTO.CRC_POL_CTL */
#define CRYPTO_CRC_POL_CTL_POLYNOMIAL_Pos 0UL
#define CRYPTO_CRC_POL_CTL_POLYNOMIAL_Msk 0xFFFFFFFFUL
/* CRYPTO.CRC_LFSR_CTL */
#define CRYPTO_CRC_LFSR_CTL_LFSR32_Pos 0UL
#define CRYPTO_CRC_LFSR_CTL_LFSR32_Msk 0xFFFFFFFFUL
/* CRYPTO.CRC_REM_CTL */
#define CRYPTO_CRC_REM_CTL_REM_XOR_Pos 0UL
#define CRYPTO_CRC_REM_CTL_REM_XOR_Msk 0xFFFFFFFFUL
/* CRYPTO.CRC_REM_RESULT */
#define CRYPTO_CRC_REM_RESULT_REM_Pos 0UL
#define CRYPTO_CRC_REM_RESULT_REM_Msk 0xFFFFFFFFUL
/* CRYPTO.VU_CTL0 */
#define CRYPTO_VU_CTL0_ALWAYS_EXECUTE_Pos 0UL
#define CRYPTO_VU_CTL0_ALWAYS_EXECUTE_Msk 0x1UL
/* CRYPTO.VU_CTL1 */
#define CRYPTO_VU_CTL1_ADDR_Pos 14UL
#define CRYPTO_VU_CTL1_ADDR_Msk 0xFFFFC000UL
/* CRYPTO.VU_STATUS */
#define CRYPTO_VU_STATUS_CARRY_Pos 0UL
#define CRYPTO_VU_STATUS_CARRY_Msk 0x1UL
#define CRYPTO_VU_STATUS_EVEN_Pos 1UL
#define CRYPTO_VU_STATUS_EVEN_Msk 0x2UL
#define CRYPTO_VU_STATUS_ZERO_Pos 2UL
#define CRYPTO_VU_STATUS_ZERO_Msk 0x4UL
#define CRYPTO_VU_STATUS_ONE_Pos 3UL
#define CRYPTO_VU_STATUS_ONE_Msk 0x8UL
/* CRYPTO.INTR */
#define CRYPTO_INTR_INSTR_FF_LEVEL_Pos 0UL
#define CRYPTO_INTR_INSTR_FF_LEVEL_Msk 0x1UL
#define CRYPTO_INTR_INSTR_FF_OVERFLOW_Pos 1UL
#define CRYPTO_INTR_INSTR_FF_OVERFLOW_Msk 0x2UL
#define CRYPTO_INTR_TR_INITIALIZED_Pos 2UL
#define CRYPTO_INTR_TR_INITIALIZED_Msk 0x4UL
#define CRYPTO_INTR_TR_DATA_AVAILABLE_Pos 3UL
#define CRYPTO_INTR_TR_DATA_AVAILABLE_Msk 0x8UL
#define CRYPTO_INTR_PR_DATA_AVAILABLE_Pos 4UL
#define CRYPTO_INTR_PR_DATA_AVAILABLE_Msk 0x10UL
#define CRYPTO_INTR_INSTR_OPC_ERROR_Pos 16UL
#define CRYPTO_INTR_INSTR_OPC_ERROR_Msk 0x10000UL
#define CRYPTO_INTR_INSTR_CC_ERROR_Pos 17UL
#define CRYPTO_INTR_INSTR_CC_ERROR_Msk 0x20000UL
#define CRYPTO_INTR_BUS_ERROR_Pos 18UL
#define CRYPTO_INTR_BUS_ERROR_Msk 0x40000UL
#define CRYPTO_INTR_TR_AP_DETECT_ERROR_Pos 19UL
#define CRYPTO_INTR_TR_AP_DETECT_ERROR_Msk 0x80000UL
#define CRYPTO_INTR_TR_RC_DETECT_ERROR_Pos 20UL
#define CRYPTO_INTR_TR_RC_DETECT_ERROR_Msk 0x100000UL
/* CRYPTO.INTR_SET */
#define CRYPTO_INTR_SET_INSTR_FF_LEVEL_Pos 0UL
#define CRYPTO_INTR_SET_INSTR_FF_LEVEL_Msk 0x1UL
#define CRYPTO_INTR_SET_INSTR_FF_OVERFLOW_Pos 1UL
#define CRYPTO_INTR_SET_INSTR_FF_OVERFLOW_Msk 0x2UL
#define CRYPTO_INTR_SET_TR_INITIALIZED_Pos 2UL
#define CRYPTO_INTR_SET_TR_INITIALIZED_Msk 0x4UL
#define CRYPTO_INTR_SET_TR_DATA_AVAILABLE_Pos 3UL
#define CRYPTO_INTR_SET_TR_DATA_AVAILABLE_Msk 0x8UL
#define CRYPTO_INTR_SET_PR_DATA_AVAILABLE_Pos 4UL
#define CRYPTO_INTR_SET_PR_DATA_AVAILABLE_Msk 0x10UL
#define CRYPTO_INTR_SET_INSTR_OPC_ERROR_Pos 16UL
#define CRYPTO_INTR_SET_INSTR_OPC_ERROR_Msk 0x10000UL
#define CRYPTO_INTR_SET_INSTR_CC_ERROR_Pos 17UL
#define CRYPTO_INTR_SET_INSTR_CC_ERROR_Msk 0x20000UL
#define CRYPTO_INTR_SET_BUS_ERROR_Pos 18UL
#define CRYPTO_INTR_SET_BUS_ERROR_Msk 0x40000UL
#define CRYPTO_INTR_SET_TR_AP_DETECT_ERROR_Pos 19UL
#define CRYPTO_INTR_SET_TR_AP_DETECT_ERROR_Msk 0x80000UL
#define CRYPTO_INTR_SET_TR_RC_DETECT_ERROR_Pos 20UL
#define CRYPTO_INTR_SET_TR_RC_DETECT_ERROR_Msk 0x100000UL
/* CRYPTO.INTR_MASK */
#define CRYPTO_INTR_MASK_INSTR_FF_LEVEL_Pos 0UL
#define CRYPTO_INTR_MASK_INSTR_FF_LEVEL_Msk 0x1UL
#define CRYPTO_INTR_MASK_INSTR_FF_OVERFLOW_Pos 1UL
#define CRYPTO_INTR_MASK_INSTR_FF_OVERFLOW_Msk 0x2UL
#define CRYPTO_INTR_MASK_TR_INITIALIZED_Pos 2UL
#define CRYPTO_INTR_MASK_TR_INITIALIZED_Msk 0x4UL
#define CRYPTO_INTR_MASK_TR_DATA_AVAILABLE_Pos 3UL
#define CRYPTO_INTR_MASK_TR_DATA_AVAILABLE_Msk 0x8UL
#define CRYPTO_INTR_MASK_PR_DATA_AVAILABLE_Pos 4UL
#define CRYPTO_INTR_MASK_PR_DATA_AVAILABLE_Msk 0x10UL
#define CRYPTO_INTR_MASK_INSTR_OPC_ERROR_Pos 16UL
#define CRYPTO_INTR_MASK_INSTR_OPC_ERROR_Msk 0x10000UL
#define CRYPTO_INTR_MASK_INSTR_CC_ERROR_Pos 17UL
#define CRYPTO_INTR_MASK_INSTR_CC_ERROR_Msk 0x20000UL
#define CRYPTO_INTR_MASK_BUS_ERROR_Pos 18UL
#define CRYPTO_INTR_MASK_BUS_ERROR_Msk 0x40000UL
#define CRYPTO_INTR_MASK_TR_AP_DETECT_ERROR_Pos 19UL
#define CRYPTO_INTR_MASK_TR_AP_DETECT_ERROR_Msk 0x80000UL
#define CRYPTO_INTR_MASK_TR_RC_DETECT_ERROR_Pos 20UL
#define CRYPTO_INTR_MASK_TR_RC_DETECT_ERROR_Msk 0x100000UL
/* CRYPTO.INTR_MASKED */
#define CRYPTO_INTR_MASKED_INSTR_FF_LEVEL_Pos 0UL
#define CRYPTO_INTR_MASKED_INSTR_FF_LEVEL_Msk 0x1UL
#define CRYPTO_INTR_MASKED_INSTR_FF_OVERFLOW_Pos 1UL
#define CRYPTO_INTR_MASKED_INSTR_FF_OVERFLOW_Msk 0x2UL
#define CRYPTO_INTR_MASKED_TR_INITIALIZED_Pos 2UL
#define CRYPTO_INTR_MASKED_TR_INITIALIZED_Msk 0x4UL
#define CRYPTO_INTR_MASKED_TR_DATA_AVAILABLE_Pos 3UL
#define CRYPTO_INTR_MASKED_TR_DATA_AVAILABLE_Msk 0x8UL
#define CRYPTO_INTR_MASKED_PR_DATA_AVAILABLE_Pos 4UL
#define CRYPTO_INTR_MASKED_PR_DATA_AVAILABLE_Msk 0x10UL
#define CRYPTO_INTR_MASKED_INSTR_OPC_ERROR_Pos 16UL
#define CRYPTO_INTR_MASKED_INSTR_OPC_ERROR_Msk 0x10000UL
#define CRYPTO_INTR_MASKED_INSTR_CC_ERROR_Pos 17UL
#define CRYPTO_INTR_MASKED_INSTR_CC_ERROR_Msk 0x20000UL
#define CRYPTO_INTR_MASKED_BUS_ERROR_Pos 18UL
#define CRYPTO_INTR_MASKED_BUS_ERROR_Msk 0x40000UL
#define CRYPTO_INTR_MASKED_TR_AP_DETECT_ERROR_Pos 19UL
#define CRYPTO_INTR_MASKED_TR_AP_DETECT_ERROR_Msk 0x80000UL
#define CRYPTO_INTR_MASKED_TR_RC_DETECT_ERROR_Pos 20UL
#define CRYPTO_INTR_MASKED_TR_RC_DETECT_ERROR_Msk 0x100000UL
/* CRYPTO.MEM_BUFF */
#define CRYPTO_MEM_BUFF_DATA32_Pos 0UL
#define CRYPTO_MEM_BUFF_DATA32_Msk 0xFFFFFFFFUL
#endif /* _CYIP_CRYPTO_H_ */
/* [] END OF FILE */
| ldts/zephyr | ext/hal/cypress/PDL/3.1.0/devices/psoc6/include/ip/cyip_crypto.h | C | apache-2.0 | 18,869 |
/*
Copyright 2016-present the Material Components for iOS authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#import <CoreGraphics/CoreGraphics.h>
#import <Foundation/Foundation.h>
#import "MDCButton.h"
/**
Shapes for Material Floating buttons.
The mini size should only be used when required for visual continuity with other elements on the
screen.
*/
typedef NS_ENUM(NSInteger, MDCFloatingButtonShape) {
  /** The default floating button shape. */
  MDCFloatingButtonShapeDefault,
  /** The mini shape; use only when needed for visual continuity with other on-screen elements. */
  MDCFloatingButtonShapeMini
};
/**
A "floating" MDCButton.
Floating action buttons are circular, float a considerable amount above their parent, have
their own background color, and also raise briefly when touched. Floating action buttons should
only be used rarely, for the main action of a screen.
@see http://www.google.com/design/spec/components/buttons.html#buttons-main-buttons
*/
@interface MDCFloatingButton : MDCButton

/**
 Returns a MDCFloatingButton with default colors and the given @c shape.

 @param shape Button shape.
 @return Button with shape.
 */
+ (nonnull instancetype)floatingButtonWithShape:(MDCFloatingButtonShape)shape;

/**
 @return The default floating button size dimension.
 */
+ (CGFloat)defaultDimension;

/**
 @return The mini floating button size dimension.
 */
+ (CGFloat)miniDimension;

/**
 Initializes self to a button with the given @c shape.

 This is the designated initializer; the other initializers funnel into it.

 @param frame Button frame.
 @param shape Button shape.
 @return Button with shape.
 */
- (nonnull instancetype)initWithFrame:(CGRect)frame
                                shape:(MDCFloatingButtonShape)shape NS_DESIGNATED_INITIALIZER;

/**
 Initializes self to a button with the MDCFloatingButtonShapeDefault shape.

 @param frame Button frame.
 @return Button with MDCFloatingButtonShapeDefault shape.
 */
- (nonnull instancetype)initWithFrame:(CGRect)frame;

/** Designated initializer for unarchiving from a nib/storyboard. */
- (nullable instancetype)initWithCoder:(nonnull NSCoder *)aDecoder NS_DESIGNATED_INITIALIZER;

#pragma mark - Deprecations

/** Deprecated alias of +floatingButtonWithShape:. */
+ (nonnull instancetype)buttonWithShape:(MDCFloatingButtonShape)shape
    __deprecated_msg("Use floatingButtonWithShape: instead.");

@end
| chriscox/material-components-ios | demos/Codelabs/BuildingBeautifulApps/ObjectiveC/Starter/Pods/MaterialComponents/components/Buttons/src/MDCFloatingButton.h | C | apache-2.0 | 2,578 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.registry.ui;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.openqa.selenium.Alert;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.NoAlertPresentException;
import org.openqa.selenium.NoSuchElementException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.interactions.Actions;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;
import io.github.bonigarcia.wdm.WebDriverManager;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
/**
 * Selenium integration test for the NiFi Registry UI: creating a second bucket
 * with an already-used name ("ABC") must surface an error dialog and leave
 * exactly one bucket in the workflow administration list.
 */
public class ITCreateDuplicateBucket {
    private WebDriver driver;
    private String baseUrl;
    // Whether closeAlertAndGetItsText() should accept (true) or dismiss (false)
    // the next browser alert.
    private boolean acceptNextAlert = true;
    private WebDriverWait wait;
    private StringBuffer verificationErrors = new StringBuffer();

    /** Launches a local Chrome session pointed at the registry UI. */
    @Before
    public void setUp() throws Exception {
        WebDriverManager.chromedriver().setup();
        driver = new ChromeDriver();
        baseUrl = "http://localhost:18080/nifi-registry";
        wait = new WebDriverWait(driver, 30);
    }

    /**
     * Creates bucket "ABC", attempts to create it a second time, dismisses the
     * duplicate-name error dialog, and asserts the bucket count is still 1.
     */
    @Test
    public void testCreateDuplicateBucket() throws Exception {
        // go directly to settings by URL
        driver.get(baseUrl + "/#/administration/workflow");
        // wait for administration route to load
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("[data-automation-id='no-buckets-message']")));
        // confirm new bucket button exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("[data-automation-id='new-bucket-button']")));
        // select new bucket button
        WebElement newBucketButton = driver.findElement(By.cssSelector("[data-automation-id='new-bucket-button']"));
        newBucketButton.click();
        // wait for new bucket dialog
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("#nifi-registry-admin-create-bucket-dialog")));
        // confirm bucket name field exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("#nifi-registry-admin-create-bucket-dialog input")));
        // place cursor in bucket name field
        WebElement bucketNameInput = driver.findElement(By.cssSelector("#nifi-registry-admin-create-bucket-dialog input"));
        bucketNameInput.clear();
        // name the bucket ABC
        bucketNameInput.sendKeys("ABC");
        // confirm create bucket button exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("[data-automation-id='create-new-bucket-button']")));
        // select create bucket button
        WebElement createNewBucketButton = driver.findElement(By.cssSelector("[data-automation-id='create-new-bucket-button']"));
        createNewBucketButton.click();
        // wait for create bucket dialog to close
        wait.until(ExpectedConditions.invisibilityOfElementLocated(By.cssSelector("#nifi-registry-admin-create-bucket-dialog")));
        // verify bucket added
        List<WebElement> bucketCount = driver.findElements(By.cssSelector("#nifi-registry-workflow-administration-buckets-list-container > div"));
        assertEquals(1, bucketCount.size());
        // confirm new bucket button exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("[data-automation-id='new-bucket-button']")));
        // select new bucket button
        newBucketButton = driver.findElement(By.cssSelector("[data-automation-id='new-bucket-button']"));
        newBucketButton.click();
        // wait for new bucket dialog
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("#nifi-registry-admin-create-bucket-dialog")));
        // confirm bucket name field exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("#nifi-registry-admin-create-bucket-dialog input")));
        // place cursor in bucket name field
        bucketNameInput = wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("#nifi-registry-admin-create-bucket-dialog input")));
        bucketNameInput.clear();
        // name the bucket ABC again
        bucketNameInput.sendKeys("ABC");
        // confirm create bucket button exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("[data-automation-id='create-new-bucket-button']")));
        // select create bucket button
        createNewBucketButton = driver.findElement(By.cssSelector("[data-automation-id='create-new-bucket-button']"));
        createNewBucketButton.click();
        // wait for the new bucket dialog to close
        wait.until(ExpectedConditions.invisibilityOfElementLocated(By.cssSelector("#nifi-registry-admin-create-bucket-dialog")));
        // wait for error dialog
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("div.cdk-overlay-pane")));
        // confirm the duplicate bucket error
        WebElement selectOKButton = wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("div.cdk-overlay-pane")));
        Actions actions = new Actions(driver);
        actions.moveToElement(selectOKButton).click().build().perform();
        // wait for the confirm dialog to close
        wait.until(ExpectedConditions.invisibilityOfElementLocated(By.cssSelector("div.cdk-overlay-pane")));
        // verify bucket ABC still there
        bucketCount = driver.findElements(By.cssSelector("#nifi-registry-workflow-administration-buckets-list-container > div"));
        assertEquals(1, bucketCount.size());
    }

    /**
     * Deletes every bucket via the select-all checkbox and the Actions menu,
     * quits the browser, and fails the test if verification errors were
     * accumulated.
     */
    @After
    public void tearDown() throws Exception {
        // bucket cleanup
        // confirm all buckets checkbox exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("#nifi-registry-workflow-administration-buckets-list-container-column-header div.mat-checkbox-inner-container")));
        // select all buckets checkbox
        WebElement selectAllCheckbox = driver.findElement(By.cssSelector("#nifi-registry-workflow-administration-buckets-list-container-column-header div.mat-checkbox-inner-container"));
        selectAllCheckbox.click();
        // confirm actions drop down menu exists
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("#nifi-registry-workflow-administration-perspective-buckets-container button.mat-fds-primary")));
        // select actions drop down
        WebElement selectActions = driver.findElement(By.cssSelector("#nifi-registry-workflow-administration-perspective-buckets-container button.mat-fds-primary"));
        selectActions.click();
        // select delete
        WebElement selectDeleteBucket = driver.findElement(By.cssSelector("div.mat-menu-content button.mat-menu-item"));
        // JS click is used because the menu item can be obscured during the animation.
        JavascriptExecutor executor = (JavascriptExecutor)driver;
        executor.executeScript("arguments[0].click();", selectDeleteBucket);
        // verify bucket deleted
        WebElement confirmDelete = wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("div.fds-dialog-actions button.mat-fds-warn")));
        confirmDelete.click();
        wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("[data-automation-id='no-buckets-message']")));
        driver.quit();
        String verificationErrorString = verificationErrors.toString();
        if (!"".equals(verificationErrorString)) {
            fail(verificationErrorString);
        }
    }

    // NOTE(review): the three helpers below are not called anywhere in this
    // test; presumably leftovers from a Selenium IDE export — confirm before
    // removing.

    /** @return true if an element matching {@code by} exists on the page. */
    private boolean isElementPresent(By by) {
        try {
            driver.findElement(by);
            return true;
        } catch (NoSuchElementException e) {
            return false;
        }
    }

    /** @return true if a browser alert is currently open. */
    private boolean isAlertPresent() {
        try {
            driver.switchTo().alert();
            return true;
        } catch (NoAlertPresentException e) {
            return false;
        }
    }

    /**
     * Accepts or dismisses the open alert (per {@code acceptNextAlert}) and
     * returns its text; always resets {@code acceptNextAlert} to true.
     */
    private String closeAlertAndGetItsText() {
        try {
            Alert alert = driver.switchTo().alert();
            String alertText = alert.getText();
            if (acceptNextAlert) {
                alert.accept();
            } else {
                alert.dismiss();
            }
            return alertText;
        } finally {
            acceptNextAlert = true;
        }
    }
} | MikeThomsen/nifi | nifi-registry/nifi-registry-core/nifi-registry-web-ui/src/test/java/org/apache/nifi/registry/ui/ITCreateDuplicateBucket.java | Java | apache-2.0 | 9,295 |
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.siddhi.core.util.extension.holder;
import org.wso2.siddhi.core.config.ExecutionPlanContext;
import org.wso2.siddhi.core.query.processor.stream.window.WindowProcessor;
/**
 * Lazily-initialized singleton holder for {@link WindowProcessor} extensions,
 * scoped by the execution plan context supplied on first use.
 */
public class WindowProcessorExtensionHolder extends AbstractExtensionHolder {

    private static WindowProcessorExtensionHolder instance;

    private WindowProcessorExtensionHolder(ExecutionPlanContext executionPlanContext) {
        super(WindowProcessor.class, executionPlanContext);
    }

    /**
     * Returns the shared holder, creating it on first call.
     * <p>
     * NOTE(review): the holder is built with the {@code executionPlanContext} of
     * the first caller; subsequent callers get the same instance regardless of
     * the context they pass — confirm this scoping is intended.
     *
     * @param executionPlanContext context used to initialise the holder on first call
     * @return the singleton holder instance
     */
    public static synchronized WindowProcessorExtensionHolder getInstance(ExecutionPlanContext executionPlanContext) {
        // synchronized: the previous unsynchronized check-then-act could create two
        // instances (and publish a partially-constructed one) under concurrent access.
        if (instance == null) {
            instance = new WindowProcessorExtensionHolder(executionPlanContext);
        }
        return instance;
    }
}
| keizer619/siddhi | modules/siddhi-core/src/main/java/org/wso2/siddhi/core/util/extension/holder/WindowProcessorExtensionHolder.java | Java | apache-2.0 | 1,383 |
package com.vaadin.tests.elements.abstracttextfield;
import com.vaadin.server.VaadinRequest;
import com.vaadin.tests.components.AbstractTestUI;
import com.vaadin.ui.AbstractField;
import com.vaadin.ui.AbstractMultiSelect;
import com.vaadin.ui.AbstractSingleSelect;
import com.vaadin.ui.CheckBox;
import com.vaadin.ui.CheckBoxGroup;
import com.vaadin.ui.ComboBox;
import com.vaadin.ui.DateField;
import com.vaadin.ui.ListSelect;
import com.vaadin.ui.NativeSelect;
import com.vaadin.ui.PasswordField;
import com.vaadin.ui.RadioButtonGroup;
import com.vaadin.ui.RichTextArea;
import com.vaadin.ui.Slider;
import com.vaadin.ui.TextArea;
import com.vaadin.ui.TextField;
import com.vaadin.ui.TwinColSelect;
public class AbstractFieldElementSetValueReadOnly extends AbstractTestUI {

    // Single-value field components exercised by the test.
    private AbstractField<?>[] fields = { new TextArea(), new TextField(),
            new DateField(), new PasswordField(), new CheckBox(),
            new RichTextArea(), new Slider() };
    // Multi-select components exercised by the test.
    private AbstractMultiSelect<?>[] multiSelects = { new ListSelect(),
            new CheckBoxGroup(), new TwinColSelect() };
    // Single-select components exercised by the test.
    private AbstractSingleSelect<?>[] singleSelects = { new ComboBox(),
            new NativeSelect(), new RadioButtonGroup() };

    @Override
    protected void setup(VaadinRequest request) {
        // Each component is switched to read-only before being attached, so a
        // later setValue() on it should be rejected.
        for (AbstractField<?> component : fields) {
            component.setReadOnly(true);
            addComponent(component);
        }
        for (AbstractMultiSelect<?> component : multiSelects) {
            component.setReadOnly(true);
            addComponent(component);
        }
        for (AbstractSingleSelect<?> component : singleSelects) {
            component.setReadOnly(true);
            addComponent(component);
        }
    }

    @Override
    protected String getTestDescription() {
        return "When vaadin element is set ReadOnly, setValue() method should raise an exception";
    }

    @Override
    protected Integer getTicketNumber() {
        return 14068;
    }
}
| peterl1084/framework | uitest/src/main/java/com/vaadin/tests/elements/abstracttextfield/AbstractFieldElementSetValueReadOnly.java | Java | apache-2.0 | 1,989 |
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.rules.query;
import com.facebook.buck.query.CachingQueryEvaluator;
import com.facebook.buck.query.QueryEvaluator;
import com.facebook.buck.query.QueryException;
import com.facebook.buck.query.QueryExpression;
import com.facebook.buck.rules.TargetGraph;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
/** Cache that evaluates and stores the result of a dependency {@link Query}. */
/** Cache that evaluates and stores the result of a dependency {@link Query}. */
public class QueryCache {

  // One caching evaluator per target graph, built lazily by the loader.
  private final LoadingCache<TargetGraph, CachingQueryEvaluator> evaluators;

  public QueryCache() {
    this.evaluators =
        CacheBuilder.newBuilder().build(CacheLoader.from(CachingQueryEvaluator::new));
  }

  /** Returns (creating if necessary) the evaluator associated with the given graph. */
  QueryEvaluator getQueryEvaluator(TargetGraph targetGraph) {
    try {
      return evaluators.get(targetGraph);
    } catch (ExecutionException e) {
      throw new RuntimeException("Failed to obtain query evaluator", e);
    }
  }

  /** Reports whether the given query has already been evaluated for this graph. */
  @VisibleForTesting
  boolean isPresent(TargetGraph targetGraph, GraphEnhancementQueryEnvironment env, Query query)
      throws ExecutionException, QueryException {
    CachingQueryEvaluator cached = evaluators.getIfPresent(targetGraph);
    if (cached == null) {
      return false;
    }
    return cached.isPresent(QueryExpression.parse(query.getQuery(), env));
  }
}
| dsyang/buck | src/com/facebook/buck/rules/query/QueryCache.java | Java | apache-2.0 | 2,062 |
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.wso2.andes.server.management;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.util.Set;
import javax.management.Attribute;
import javax.management.JMException;
import javax.management.MBeanInfo;
import javax.management.MBeanOperationInfo;
import javax.management.MBeanServer;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.ObjectName;
import javax.management.remote.JMXConnectionNotification;
import javax.management.remote.JMXPrincipal;
import javax.management.remote.MBeanServerForwarder;
import javax.security.auth.Subject;
import org.apache.log4j.Logger;
import org.wso2.andes.server.logging.actors.ManagementActor;
import org.wso2.andes.server.logging.messages.ManagementConsoleMessages;
import org.wso2.andes.server.registry.ApplicationRegistry;
import org.wso2.andes.server.security.SecurityManager;
import org.wso2.andes.server.security.access.Operation;
/**
* This class can be used by the JMXConnectorServer as an InvocationHandler for the mbean operations. It delegates
* JMX access decisions to the SecurityPlugin.
*/
public class MBeanInvocationHandlerImpl implements InvocationHandler, NotificationListener
{
    private static final Logger _logger = Logger.getLogger(MBeanInvocationHandlerImpl.class);

    // ObjectName of the MBeanServer delegate; calls targeting it bypass authorization.
    private final static String DELEGATE = "JMImplementation:type=MBeanServerDelegate";

    // The real MBeanServer that authorized calls are forwarded to.
    private MBeanServer _mbs;
    private static ManagementActor _logActor;

    /**
     * Creates a dynamic proxy implementing {@link MBeanServerForwarder} whose
     * invocations are routed through this handler for authorization checks.
     *
     * @return the proxy to install in front of the MBeanServer
     */
    public static MBeanServerForwarder newProxyInstance()
    {
        final InvocationHandler handler = new MBeanInvocationHandlerImpl();
        final Class<?>[] interfaces = new Class[] { MBeanServerForwarder.class };
        _logActor = new ManagementActor(ApplicationRegistry.getInstance().getRootMessageLogger());
        Object proxy = Proxy.newProxyInstance(MBeanServerForwarder.class.getClassLoader(), interfaces, handler);
        return MBeanServerForwarder.class.cast(proxy);
    }

    /**
     * Intercepts every MBeanServer call, authorizes it against the security
     * manager, and forwards it when permitted.
     * <p>
     * Decision order matters here: forwarder plumbing (get/setMBeanServer) is
     * handled first, then unauthenticated/local calls are allowed through,
     * then blanket denials, and only then per-method ACCESS/UPDATE/EXECUTE checks.
     *
     * @throws SecurityException when the caller is not authorized
     */
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable
    {
        // Resolve the "real" operation name (e.g. getAttribute -> getFoo).
        final String methodName = getMethodName(method, args);
        if (methodName.equals("getMBeanServer"))
        {
            return _mbs;
        }
        if (methodName.equals("setMBeanServer"))
        {
            if (args[0] == null)
            {
                throw new IllegalArgumentException("Null MBeanServer");
            }
            if (_mbs != null)
            {
                throw new IllegalArgumentException("MBeanServer object already initialized");
            }
            _mbs = (MBeanServer) args[0];
            return null;
        }
        // Retrieve Subject from current AccessControlContext
        AccessControlContext acc = AccessController.getContext();
        Subject subject = Subject.getSubject(acc);
        try
        {
            // Allow operations performed locally on behalf of the connector server itself
            if (subject == null)
            {
                return method.invoke(_mbs, args);
            }
            // Calls with no arguments, or calls aimed at the MBeanServer delegate,
            // are forwarded without further checks.
            if (args == null || DELEGATE.equals(args[0]))
            {
                return method.invoke(_mbs, args);
            }
            // Restrict access to "createMBean" and "unregisterMBean" to any user
            if (methodName.equals("createMBean") || methodName.equals("unregisterMBean"))
            {
                _logger.debug("User trying to create or unregister an MBean");
                throw new SecurityException("Access denied: " + methodName);
            }
            // Allow querying available object names
            if (methodName.equals("queryNames"))
            {
                return method.invoke(_mbs, args);
            }
            // Retrieve JMXPrincipal from Subject
            Set<JMXPrincipal> principals = subject.getPrincipals(JMXPrincipal.class);
            if (principals == null || principals.isEmpty())
            {
                throw new SecurityException("Access denied: no JMX principal");
            }
            // Save the subject so downstream security checks on this thread see it.
            SecurityManager.setThreadSubject(subject);
            // Get the component, type and impact, which may be null
            String type = getType(method, args);
            String vhost = getVirtualHost(method, args);
            int impact = getImpact(method, args);
            // Get the security manager for the virtual host (if set)
            SecurityManager security;
            if (vhost == null)
            {
                security = ApplicationRegistry.getInstance().getSecurityManager();
            }
            else
            {
                security = ApplicationRegistry.getInstance().getVirtualHostRegistry().getVirtualHost(vhost).getSecurityManager();
            }
            // Classify the call as read-only (ACCESS), property change (UPDATE),
            // or operation invocation (EXECUTE) and authorize accordingly.
            if (isAccessMethod(methodName) || impact == MBeanOperationInfo.INFO)
            {
                // Check for read-only method invocation permission
                if (!security.authoriseMethod(Operation.ACCESS, type, methodName))
                {
                    throw new SecurityException("Permission denied: Access " + methodName);
                }
            }
            else if (isUpdateMethod(methodName))
            {
                // Check for setting properties permission
                if (!security.authoriseMethod(Operation.UPDATE, type, methodName))
                {
                    throw new SecurityException("Permission denied: Update " + methodName);
                }
            }
            else
            {
                // Check for invoking/executing method action/operation permission
                if (!security.authoriseMethod(Operation.EXECUTE, type, methodName))
                {
                    throw new SecurityException("Permission denied: Execute " + methodName);
                }
            }
            // Actually invoke the method
            return method.invoke(_mbs, args);
        }
        catch (InvocationTargetException e)
        {
            // Unwrap so callers see the MBean's real exception, not the reflective wrapper.
            throw e.getTargetException();
        }
    }

    /**
     * Extracts the "type" key property from the target ObjectName, if the first
     * argument is one; returns null otherwise.
     */
    private String getType(Method method, Object[] args)
    {
        if (args[0] instanceof ObjectName)
        {
            ObjectName object = (ObjectName) args[0];
            String type = object.getKeyProperty("type");
            return type;
        }
        return null;
    }

    /**
     * Extracts the "VirtualHost" key property from the target ObjectName,
     * unquoting it when necessary; returns null when not applicable.
     */
    private String getVirtualHost(Method method, Object[] args)
    {
        if (args[0] instanceof ObjectName)
        {
            ObjectName object = (ObjectName) args[0];
            String vhost = object.getKeyProperty("VirtualHost");
            if(vhost != null)
            {
                try
                {
                    //if the name is quoted in the ObjectName, unquote it
                    vhost = ObjectName.unquote(vhost);
                }
                catch(IllegalArgumentException e)
                {
                    //ignore, this just means the name is not quoted
                    //and can be left unchanged
                }
            }
            return vhost;
        }
        return null;
    }

    /**
     * Maps generic MBeanServer entry points (getAttribute/setAttribute/invoke)
     * to the effective operation name used for authorization.
     */
    private String getMethodName(Method method, Object[] args)
    {
        String methodName = method.getName();
        // if arguments are set, try and work out real method name
        if (args != null && args.length >= 1 && args[0] instanceof ObjectName)
        {
            if (methodName.equals("getAttribute"))
            {
                methodName = "get" + (String) args[1];
            }
            else if (methodName.equals("setAttribute"))
            {
                methodName = "set" + ((Attribute) args[1]).getName();
            }
            else if (methodName.equals("invoke"))
            {
                methodName = (String) args[1];
            }
        }
        return methodName;
    }

    /**
     * Looks up the declared JMX impact (INFO/ACTION/...) of an invoked MBean
     * operation from its MBeanInfo; returns -1 when it cannot be determined.
     */
    private int getImpact(Method method, Object[] args)
    {
        //handle invocation of other methods on mbeans
        if ((args[0] instanceof ObjectName) && (method.getName().equals("invoke")))
        {
            //get invoked method name
            String mbeanMethod = (args.length > 1) ? (String) args[1] : null;
            if (mbeanMethod == null)
            {
                return -1;
            }
            try
            {
                //Get the impact attribute
                MBeanInfo mbeanInfo = _mbs.getMBeanInfo((ObjectName) args[0]);
                if (mbeanInfo != null)
                {
                    MBeanOperationInfo[] opInfos = mbeanInfo.getOperations();
                    for (MBeanOperationInfo opInfo : opInfos)
                    {
                        if (opInfo.getName().equals(mbeanMethod))
                        {
                            return opInfo.getImpact();
                        }
                    }
                }
            }
            catch (JMException ex)
            {
                _logger.error("Unable to determine mbean impact for method : " + mbeanMethod, ex);
            }
        }
        return -1;
    }

    /** True for read-only accessors (query*/get*/is*). */
    private boolean isAccessMethod(String methodName)
    {
        //handle standard get/query/is methods from MBeanServer
        return (methodName.startsWith("query") || methodName.startsWith("get") || methodName.startsWith("is"));
    }

    /** True for property mutators (set*). */
    private boolean isUpdateMethod(String methodName)
    {
        //handle standard set methods from MBeanServer
        return methodName.startsWith("set");
    }

    /**
     * Logs management-console open/close events for remote JMX connections.
     * The connection id is expected to look like "rmi://host user number",
     * so index 1 is the user — presumably set by the RMI connector; verify
     * against the connector configuration.
     */
    public void handleNotification(Notification notification, Object handback)
    {
        assert notification instanceof JMXConnectionNotification;
        // Only RMI connections are serviced here; local API attachments are not
        // expected to pass through this listener — TODO confirm.
        // Example connection id: rmi://169.24.29.116 guest 3
        String[] connectionData = ((JMXConnectionNotification) notification).getConnectionId().split(" ");
        String user = connectionData[1];
        if (notification.getType().equals(JMXConnectionNotification.OPENED))
        {
            _logActor.message(ManagementConsoleMessages.OPEN(user));
        }
        else if (notification.getType().equals(JMXConnectionNotification.CLOSED) ||
                 notification.getType().equals(JMXConnectionNotification.FAILED))
        {
            _logActor.message(ManagementConsoleMessages.CLOSE());
        }
    }
}
| akalankapagoda/andes | modules/andes-core/broker/src/main/java/org/wso2/andes/server/management/MBeanInvocationHandlerImpl.java | Java | apache-2.0 | 11,273 |
//
// UriSection.cs
//
// Authors:
// Gonzalo Paniagua Javier (gonzalo@novell.com)
//
// (c) 2009 Novell, Inc. (http://www.novell.com)
//
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
#if CONFIGURATION_DEP
using System.Configuration;
namespace System.Configuration
{
public sealed class UriSection : ConfigurationSection
{
	// Property metadata for this section, built once per type.
	static ConfigurationPropertyCollection properties;
	static ConfigurationProperty idnProp;
	static ConfigurationProperty iriParsingProp;

	static UriSection ()
	{
		idnProp = new ConfigurationProperty ("idn", typeof (IdnElement), null);
		iriParsingProp = new ConfigurationProperty ("iriParsing", typeof (IriParsingElement), null);

		properties = new ConfigurationPropertyCollection ();
		properties.Add (idnProp);
		properties.Add (iriParsingProp);
	}

	public UriSection ()
	{
	}

	// The <idn> child element of the <uri> configuration section.
	[ConfigurationProperty ("idn")]
	public IdnElement Idn {
		get { return (IdnElement) base [idnProp]; }
	}

	// The <iriParsing> child element of the <uri> configuration section.
	[ConfigurationProperty ("iriParsing")]
	public IriParsingElement IriParsing {
		get { return (IriParsingElement) base [iriParsingProp]; }
	}

	protected override ConfigurationPropertyCollection Properties {
		get { return properties; }
	}
}
#endif
| symplified/Symplified.Auth | lib/mono/mcs/class/System/System.Configuration/UriSection.cs | C# | apache-2.0 | 2,389 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Cauchy distribution"""
__all__ = ['Cauchy']
from numbers import Number
from numpy import nan, pi
from .constraint import Real
from .distribution import Distribution
from .utils import sample_n_shape_converter
from .... import np
class Cauchy(Distribution):
    r"""Create a relaxed Cauchy distribution object.

    Parameters
    ----------
    loc : Tensor or scalar, default 0
        mode or median of the distribution
    scale : Tensor or scalar, default 1
        half width at half maximum
    """
    # pylint: disable=abstract-method

    has_grad = True
    support = Real()
    arg_constraints = {'loc': Real(), 'scale': Real()}

    def __init__(self, loc=0.0, scale=1.0, validate_args=None):
        self.loc = loc
        self.scale = scale
        super(Cauchy, self).__init__(
            event_dim=0, validate_args=validate_args)

    @property
    def mean(self):
        # The Cauchy distribution has no defined mean.
        return nan

    @property
    def variance(self):
        # The Cauchy distribution has no defined variance.
        return nan

    def sample(self, size=None):
        """Draw a sample of shape `size` via inverse-CDF transform sampling."""
        # TODO: Implement sampling op in the backend.
        # `np.zeros_like` does not support scalar at this moment.
        # Idiom fix: the original compared a tuple of isinstance results
        # against (True, True); a plain `and` expresses the same condition.
        if isinstance(self.loc, Number) and isinstance(self.scale, Number):
            # Both parameters are plain scalars; no broadcasting needed.
            u = np.random.uniform(size=size)
        else:
            # Broadcast against loc/scale by sampling over a zero tensor of
            # the broadcast shape of (loc + scale).
            u = np.random.uniform(np.zeros_like(  # pylint: disable=too-many-function-args
                self.loc + self.scale), size=size)
        return self.icdf(u)

    def sample_n(self, size=None):
        """Draw samples with an explicit leading sample dimension of `size`."""
        return self.sample(sample_n_shape_converter(size))

    def log_prob(self, value):
        """Return the log-density of the distribution evaluated at `value`."""
        if self._validate_args:
            self._validate_samples(value)
        return (-np.log(pi) - np.log(self.scale) -
                np.log(1 + ((value - self.loc) / self.scale) ** 2))

    def cdf(self, value):
        """Return the cumulative distribution function evaluated at `value`."""
        if self._validate_args:
            self._validate_samples(value)
        return np.arctan((value - self.loc) / self.scale) / pi + 0.5

    def icdf(self, value):
        """Return the quantile (inverse CDF) evaluated at `value` in (0, 1)."""
        return np.tan(pi * (value - 0.5)) * self.scale + self.loc

    def entropy(self):
        """Return the differential entropy, log(4 * pi * scale)."""
        return np.log(4 * pi) + np.log(self.scale)
| szha/mxnet | python/mxnet/gluon/probability/distributions/cauchy.py | Python | apache-2.0 | 2,935 |
package org.batfish.datamodel.vendor_family.juniper;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.annotations.VisibleForTesting;
import java.io.Serializable;
import java.util.Collections;
import java.util.SortedMap;
import java.util.TreeMap;
import org.batfish.datamodel.AaaAuthenticationLoginList;
import org.batfish.datamodel.AuthenticationMethod;
import org.batfish.datamodel.Line;
public class JuniperFamily implements Serializable {

  private static final String PROP_LINES = "lines";

  private static final String PROP_ROOT_AUTHENTICATION_ENCRYPTED_PASSWORD =
      "rootAuthenticationEncryptedPassword";

  private static final String PROP_SYSTEM_AUTHENTICATION_ORDER = "systemAuthenticationOrder";

  private static final String PROP_TACPLUS_SERVERS = "tacplusServers";

  @VisibleForTesting public static final String CONSOLE_LINE_NAME = "console";

  @VisibleForTesting public static final String AUXILIARY_LINE_NAME = "auxiliary";

  private SortedMap<String, Line> _lines;

  private String _rootAuthenticationEncryptedPassword;

  private AaaAuthenticationLoginList _systemAuthenticationOrder;

  private SortedMap<String, TacplusServer> _tacplusServers;

  public JuniperFamily() {
    _lines = new TreeMap<>();
    _tacplusServers = new TreeMap<>();
    // The default authentication order is password authentication only.
    _systemAuthenticationOrder =
        new AaaAuthenticationLoginList(
            Collections.singletonList(AuthenticationMethod.PASSWORD), true);
    // Juniper has by default the console and aux lines enabled
    enableDefaultLine(CONSOLE_LINE_NAME);
    enableDefaultLine(AUXILIARY_LINE_NAME);
  }

  /** Creates a line using the default authentication order and registers it. */
  private void enableDefaultLine(String lineName) {
    Line line = new Line(lineName);
    line.setAaaAuthenticationLoginList(_systemAuthenticationOrder);
    _lines.put(lineName, line);
  }

  @JsonProperty(PROP_LINES)
  public SortedMap<String, Line> getLines() {
    return _lines;
  }

  @JsonProperty(PROP_ROOT_AUTHENTICATION_ENCRYPTED_PASSWORD)
  public String getRootAuthenticationEncryptedPassword() {
    return _rootAuthenticationEncryptedPassword;
  }

  @JsonProperty(PROP_SYSTEM_AUTHENTICATION_ORDER)
  public AaaAuthenticationLoginList getSystemAuthenticationOrder() {
    return _systemAuthenticationOrder;
  }

  @JsonProperty(PROP_TACPLUS_SERVERS)
  public SortedMap<String, TacplusServer> getTacplusServers() {
    return _tacplusServers;
  }

  @JsonProperty(PROP_LINES)
  public void setLines(SortedMap<String, Line> newLines) {
    _lines = newLines;
  }

  @JsonProperty(PROP_ROOT_AUTHENTICATION_ENCRYPTED_PASSWORD)
  public void setRootAuthenticationEncryptedPassword(String newPassword) {
    _rootAuthenticationEncryptedPassword = newPassword;
  }

  @JsonProperty(PROP_SYSTEM_AUTHENTICATION_ORDER)
  public void setSystemAuthenticationOrder(AaaAuthenticationLoginList newOrder) {
    _systemAuthenticationOrder = newOrder;
  }

  @JsonProperty(PROP_TACPLUS_SERVERS)
  public void setTacplusServers(SortedMap<String, TacplusServer> newServers) {
    _tacplusServers = newServers;
  }
}
| batfish/batfish | projects/batfish-common-protocol/src/main/java/org/batfish/datamodel/vendor_family/juniper/JuniperFamily.java | Java | apache-2.0 | 3,192 |
package client
import (
"runtime"
"text/template"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
Cli "github.com/docker/docker/cli"
"github.com/docker/docker/dockerversion"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/utils"
)
// versionTemplate is the default Go text/template used by CmdVersion to render
// client (and, when reachable, server) version details. The Experimental lines
// are emitted only when the corresponding flag is set, and the whole Server
// section only when .ServerOK is true.
var versionTemplate = `Client:
Version: {{.Client.Version}}
API version: {{.Client.APIVersion}}
Go version: {{.Client.GoVersion}}
Git commit: {{.Client.GitCommit}}
Built: {{.Client.BuildTime}}
OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}}
Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}}
Server:
Version: {{.Server.Version}}
API version: {{.Server.APIVersion}}
Go version: {{.Server.GoVersion}}
Git commit: {{.Server.GitCommit}}
Built: {{.Server.BuildTime}}
OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}}
Experimental: {{.Server.Experimental}}{{end}}{{end}}`
// CmdVersion shows Docker version information.
//
// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch.
//
// Usage: docker version
func (cli *DockerCli) CmdVersion(args ...string) (err error) {
	cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true)
	tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
	cmd.Require(flag.Exact, 0)
	cmd.ParseFlags(args, true)
	// Use the user-supplied template if one was given, otherwise the
	// package-level default.
	templateFormat := versionTemplate
	if *tmplStr != "" {
		templateFormat = *tmplStr
	}
	var tmpl *template.Template
	if tmpl, err = template.New("").Funcs(funcMap).Parse(templateFormat); err != nil {
		return Cli.StatusError{StatusCode: 64,
			Status: "Template parsing error: " + err.Error()}
	}
	vd := types.VersionResponse{
		Client: &types.Version{
			Version: dockerversion.Version,
			APIVersion: api.Version,
			GoVersion: runtime.Version(),
			GitCommit: dockerversion.GitCommit,
			BuildTime: dockerversion.BuildTime,
			Os: runtime.GOOS,
			Arch: runtime.GOARCH,
			Experimental: utils.ExperimentalBuild(),
		},
	}
	// Server details are best-effort: if the daemon query fails, err is kept
	// (to be returned below) but the client section is still rendered.
	serverVersion, err := cli.client.ServerVersion()
	if err == nil {
		vd.Server = &serverVersion
	}
	// first we need to make BuildTime more human friendly
	t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime)
	if errTime == nil {
		vd.Client.BuildTime = t.Format(time.ANSIC)
	}
	if vd.ServerOK() {
		t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime)
		if errTime == nil {
			vd.Server.BuildTime = t.Format(time.ANSIC)
		}
	}
	// A template-execution error is reported only when no earlier error
	// (e.g. the failed server query) takes precedence.
	if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil {
		err = err2
	}
	cli.out.Write([]byte{'\n'})
	return err
}
| mauidev/docker | api/client/version.go | GO | apache-2.0 | 2,847 |
/* */
"format cjs";
/**
 * Determines whether two sequences are equal by comparing the elements pairwise using a specified equality comparer.
 *
 * @example
 * 1 - res = source.sequenceEqual([1,2,3]);
 * 2 - res = source.sequenceEqual([{ value: 42 }], function (x, y) { return x.value === y.value; });
 * 3 - res = source.sequenceEqual(Rx.Observable.returnValue(42));
 * 4 - res = source.sequenceEqual(Rx.Observable.returnValue({ value: 42 }), function (x, y) { return x.value === y.value; });
 * @param {Observable} second Second observable sequence or array to compare.
 * @param {Function} [comparer] Comparer used to compare elements of both sequences.
 * @returns {Observable} An observable sequence that contains a single element which indicates whether both sequences are of equal length and their corresponding elements are equal according to the specified equality comparer.
 */
observableProto.sequenceEqual = function (second, comparer) {
  var first = this;
  comparer || (comparer = defaultComparer);
  return new AnonymousObservable(function (o) {
    // donel/doner record completion of first/second; ql/qr buffer elements
    // that arrived on one side before their counterpart arrived on the other.
    var donel = false, doner = false, ql = [], qr = [];
    var subscription1 = first.subscribe(function (x) {
      var equal, v;
      if (qr.length > 0) {
        // The other side is ahead: compare against its oldest buffered element.
        v = qr.shift();
        try {
          equal = comparer(v, x);
        } catch (e) {
          o.onError(e);
          return;
        }
        if (!equal) {
          o.onNext(false);
          o.onCompleted();
        }
      } else if (doner) {
        // The other sequence already completed, so it is strictly shorter.
        o.onNext(false);
        o.onCompleted();
      } else {
        // This side is ahead: buffer the element for a later comparison.
        ql.push(x);
      }
    }, function(e) { o.onError(e); }, function () {
      donel = true;
      if (ql.length === 0) {
        // Nothing left unmatched on this side; the verdict depends on whether
        // the other side still has buffered elements or has also completed.
        if (qr.length > 0) {
          o.onNext(false);
          o.onCompleted();
        } else if (doner) {
          o.onNext(true);
          o.onCompleted();
        }
      }
    });
    // Normalize arrays/iterables/promises into observables before subscribing.
    (isArrayLike(second) || isIterable(second)) && (second = observableFrom(second));
    isPromise(second) && (second = observableFromPromise(second));
    var subscription2 = second.subscribe(function (x) {
      // Mirror image of the first subscription's element handler.
      var equal;
      if (ql.length > 0) {
        var v = ql.shift();
        try {
          equal = comparer(v, x);
        } catch (exception) {
          o.onError(exception);
          return;
        }
        if (!equal) {
          o.onNext(false);
          o.onCompleted();
        }
      } else if (donel) {
        o.onNext(false);
        o.onCompleted();
      } else {
        qr.push(x);
      }
    }, function(e) { o.onError(e); }, function () {
      doner = true;
      if (qr.length === 0) {
        if (ql.length > 0) {
          o.onNext(false);
          o.onCompleted();
        } else if (donel) {
          o.onNext(true);
          o.onCompleted();
        }
      }
    });
    // Dispose both subscriptions together when the result is unsubscribed.
    return new CompositeDisposable(subscription1, subscription2);
  }, first);
};
| cfraz89/moonrock-js-starter | jspm_packages/npm/rx@2.5.3/src/core/linq/observable/sequenceequal.js | JavaScript | apache-2.0 | 3,046 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2010 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
/**
* Entries which are stored in data containers. This package contains different implementations of
* entries based on the information needed to store an entry. Certain entries need more information - such as timestamps
* and lifespans, if they are used - than others, and the appropriate implementation is selected dynamically. This
* helps minimize Infinispan's memory requirements without storing unnecessary metadata.
*/
package org.infinispan.container.entries; | nmldiegues/stibt | infinispan/core/src/main/java/org/infinispan/container/entries/package-info.java | Java | apache-2.0 | 1,532 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.