repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
dostodabsi/jasp-desktop | JASP-Desktop/widgets/tablemodelpairsassigned.cpp | 7073 | //
// Copyright (C) 2013-2016 University of Amsterdam
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public
// License along with this program. If not, see
// <http://www.gnu.org/licenses/>.
//
#include "tablemodelpairsassigned.h"
#include "column.h"
#include <QMimeData>
#include <QTimer>
#include <QDebug>
using namespace std;
TableModelPairsAssigned::TableModelPairsAssigned(QObject *parent)
	: TableModel(parent)
{
	// Start out unbound and without a source model; both are wired up
	// later through bindTo() and setSource().
	_source = NULL;
	_boundTo = NULL;

	// By default no variable type is explicitly allowed, while nominal,
	// ordinal and scale variables are suggested.
	_variableTypesAllowed = 0;
	_variableTypesSuggested = Column::ColumnTypeNominal | Column::ColumnTypeOrdinal | Column::ColumnTypeScale;
}
// Binds this model to the given option so that assigned pairs are kept in
// sync with the option's value. The option must be an OptionVariablesGroups
// and setSource() must have been called beforehand; otherwise binding is
// silently skipped (only a debug message is emitted).
void TableModelPairsAssigned::bindTo(Option *option)
{
	_boundTo = dynamic_cast<OptionVariablesGroups *>(option);

	if (_boundTo == NULL)
	{
		qDebug() << "TableModelPairsAssigned::bindTo(); Could not bind to option";
		return;
	}

	if (_source == NULL)
	{
		qDebug() << "TableModelPairsAssigned::bindTo(); source not set";
		return;
	}

	// Mirror the option's current value into the local pair list; wrap in a
	// model reset so attached views repopulate completely.
	beginResetModel();
	Terms terms = _boundTo->value();
	_values = terms.asQListOfQLists();
	endResetModel();
}
// Remembers where available variables come from and uses that same model as
// the provider of variable meta information (type lookups in isAllowed()).
void TableModelPairsAssigned::setSource(TableModelVariablesAvailable *source)
{
	this->setInfoProvider(source);
	_source = source;
}
// One table row per assigned pair; the parent index is irrelevant for this
// flat (non-hierarchical) model.
int TableModelPairsAssigned::rowCount(const QModelIndex &parent) const
{
	Q_UNUSED(parent);

	return _values.count();
}
// Pairs always consist of exactly two variables, hence a fixed column count.
int TableModelPairsAssigned::columnCount(const QModelIndex &parent) const
{
	Q_UNUSED(parent);

	static const int pairSize = 2;
	return pairSize;
}
// Serves only the display role of valid cells: the variable name stored at
// the cell's row/column. Everything else yields an invalid QVariant.
QVariant TableModelPairsAssigned::data(const QModelIndex &index, int role) const
{
	if (index.isValid() && role == Qt::DisplayRole)
		return _values.at(index.row()).at(index.column());

	return QVariant();
}
// Dropping is always possible (either onto a cell or onto the free area
// below the rows); selecting and dragging additionally require a valid cell.
Qt::ItemFlags TableModelPairsAssigned::flags(const QModelIndex &index) const
{
	Qt::ItemFlags itemFlags = Qt::ItemIsEnabled | Qt::ItemIsDropEnabled;

	if (index.isValid())
		itemFlags |= Qt::ItemIsSelectable | Qt::ItemIsDragEnabled;

	return itemFlags;
}
// Variables dropped onto this model are copied from the source list.
Qt::DropActions TableModelPairsAssigned::supportedDropActions() const
{
	return Qt::CopyAction;
}
// Dragging a variable out of this model moves it (removal is handled by
// mimeDataMoved()).
Qt::DropActions TableModelPairsAssigned::supportedDragActions() const
{
	return Qt::MoveAction;
}
// Only the application internal variable-list format is understood.
QStringList TableModelPairsAssigned::mimeTypes() const
{
	return QStringList() << "application/vnd.list.variable";
}
// Produces a placeholder payload for drags that originate from this model.
// The encoded content itself carries no information (a single 0); the actual
// bookkeeping happens in mimeDataMoved() once the drag completes.
QMimeData *TableModelPairsAssigned::mimeData(const QModelIndexList &indexes) const
{
	Q_UNUSED(indexes);

	QByteArray payload;
	QDataStream stream(&payload, QIODevice::WriteOnly);
	stream << 0;

	QMimeData *mimeData = new QMimeData();
	mimeData->setData("application/vnd.list.variable", payload);

	return mimeData;
}
// Handles a drop of one or two variables. Three cases are distinguished:
//  1. drop onto an existing cell        -> overwrite that cell,
//  2. drop below the rows while the last row still has an empty second
//     slot                              -> fill that empty slot,
//  3. otherwise                         -> insert a new row (half filled
//     when only one variable was dragged).
// After any change the bound option is updated via assignToOption().
bool TableModelPairsAssigned::dropMimeData(const QMimeData *data, Qt::DropAction action, int row, int column, const QModelIndex &parent)
{
	if (action == Qt::IgnoreAction)
		return true;

	if ( ! canDropMimeData(data, action, row, column, parent))
		return false;

	if (mimeTypes().contains("application/vnd.list.variable"))
	{
		QByteArray encodedData = data->data("application/vnd.list.variable");
		QDataStream stream(&encodedData, QIODevice::ReadOnly);

		// Decode the dragged variable names.
		// NOTE(review): terms.at(0) below assumes a non-empty payload -
		// presumably guaranteed by the drag source; confirm.
		Terms terms;
		terms.set(encodedData);

		if (parent.isValid()) // drop into cell
		{
			// Note: this QStringList 'row' intentionally shadows the int
			// 'row' parameter; only the pair under 'parent' is touched.
			QStringList row = _values.at(parent.row());
			row.replace(parent.column(), terms.at(0).asQString());
			_values.replace(parent.row(), row);
			emit dataChanged(parent, parent);
		}
		else if (row == -1 && _values.size() > 0 && _values.last().last() == "")
		{
			// Drop below the rows: complete the half-filled last pair.
			int row = _values.length() - 1;
			int column = _values.at(row).length() - 1;
			_values.last().last() = terms.at(0).asQString();
			emit dataChanged(index(row, column), index(row, column));
		}
		else
		{
			// Insert a brand-new row, either at the indicated position or
			// appended at the end when no position was given.
			int beginRow;
			if (row != -1)
				beginRow = row;
			else
				beginRow = rowCount(QModelIndex());
			beginInsertRows(QModelIndex(), beginRow, beginRow);
			if (terms.size() == 1)
			{
				// Single variable dragged: second slot stays empty.
				QList<QString> newRow;
				newRow.append(terms.at(0).asQString());
				newRow.append("");
				_values.insert(beginRow, newRow);
			}
			else
			{
				QList<QString> newRow;
				newRow.append(terms.at(0).asQString());
				newRow.append(terms.at(1).asQString());
				_values.insert(beginRow, newRow);
			}
			endInsertRows();
		}

		// Push the modified pair list back into the bound option.
		assignToOption();
		return true;
	}

	return false;
}
// A drop is acceptable when the payload uses the variable-list format and
// every contained variable is of an allowed type.
bool TableModelPairsAssigned::canDropMimeData(const QMimeData *data, Qt::DropAction, int row, int column, const QModelIndex &parent) const
{
	Q_UNUSED(row);
	Q_UNUSED(column);
	Q_UNUSED(parent);

	if ( ! mimeTypes().contains("application/vnd.list.variable"))
		return false;

	QByteArray encodedData = data->data("application/vnd.list.variable");

	Terms variables;
	variables.set(encodedData);

	foreach (const Term &variable, variables)
	{
		if ( ! isAllowed(variable))
			return false;
	}

	return true;
}
bool TableModelPairsAssigned::isAllowed(const Term &term) const
{
QVariant v = requestInfo(term, VariableInfo::VariableType);
int variableType = v.toInt();
return variableType == 0 || variableType & _variableTypesAllowed;
}
bool TableModelPairsAssigned::insertRows(int row, int count, const QModelIndex &parent)
{
beginInsertRows(parent, row, row + count - 1);
for (int i = 0; i < count; i++)
{
QList<QString> newRow;
newRow.append("");
newRow.append("");
_values.insert(row, newRow);
}
endInsertRows();
return true;
}
// Called after a drag out of this model completed: removes the dragged rows.
// The indexes are processed in descending order so that removing a row does
// not shift the positions of rows still pending removal, and consecutive
// indexes of the same row (one per column) only remove the row once.
void TableModelPairsAssigned::mimeDataMoved(const QModelIndexList &indexes)
{
	beginResetModel();
	QModelIndexList sorted = indexes;
	int lastRowDeleted = -1;
	qSort(sorted.begin(), sorted.end(), qGreater<QModelIndex>());
	foreach (const QModelIndex &index, sorted)
	{
		int row = index.row();
		if (row != lastRowDeleted)
			_values.removeAt(row);
		lastRowDeleted = row;
	}
	endResetModel();
	// Keep the bound option in sync with the shrunken pair list.
	assignToOption();
}
// Writes the current pair list back into the bound option (no-op while the
// model is not bound). Each QStringList pair is converted into a
// vector<string> of its two variable names.
void TableModelPairsAssigned::assignToOption()
{
	if (_boundTo == NULL)
		return;

	vector<vector<string> > pairs;

	foreach (const QStringList &assignedPair, _values)
	{
		vector<string> pair;
		pair.push_back(assignedPair.at(0).toStdString());
		pair.push_back(assignedPair.at(1).toStdString());
		pairs.push_back(pair);
	}

	_boundTo->setValue(pairs);
}
// Accessors for the bit masks that describe which variable types are
// suggested respectively allowed for assignment to this model.

int TableModelPairsAssigned::variableTypesSuggested() const
{
	return _variableTypesSuggested;
}

void TableModelPairsAssigned::setVariableTypesSuggested(int variableTypesSuggested)
{
	_variableTypesSuggested = variableTypesSuggested;
}

int TableModelPairsAssigned::variableTypesAllowed() const
{
	return _variableTypesAllowed;
}

void TableModelPairsAssigned::setVariableTypesAllowed(int variableTypesAllowed)
{
	_variableTypesAllowed = variableTypesAllowed;
}
| agpl-3.0 |
freyes/juju | api/backups/info_test.go | 1091 | // Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package backups_test
import (
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
"github.com/juju/juju/api/backups"
apiserverbackups "github.com/juju/juju/apiserver/facades/client/backups"
"github.com/juju/juju/apiserver/params"
)
// infoSuite bundles the fixtures for testing the backups client's Info call.
type infoSuite struct {
	baseSuite
}

var _ = gc.Suite(&infoSuite{})

// TestInfo verifies that Client.Info issues an "Info" facade call carrying
// the requested backup ID and correctly unmarshals the metadata result.
func (s *infoSuite) TestInfo(c *gc.C) {
	// Intercept the facade call instead of talking to a real API server.
	cleanup := backups.PatchClientFacadeCall(s.client,
		func(req string, paramsIn interface{}, resp interface{}) error {
			c.Check(req, gc.Equals, "Info")
			c.Assert(paramsIn, gc.FitsTypeOf, params.BackupsInfoArgs{})

			p := paramsIn.(params.BackupsInfoArgs)
			c.Check(p.ID, gc.Equals, "spam")

			// Fill the output structure the way the server side would.
			if result, ok := resp.(*params.BackupsMetadataResult); ok {
				*result = apiserverbackups.CreateResult(s.Meta, "test-filename")
			} else {
				c.Fatalf("wrong output structure")
			}
			return nil
		},
	)
	defer cleanup()

	result, err := s.client.Info("spam")
	c.Assert(err, jc.ErrorIsNil)
	s.checkMetadataResult(c, result, s.Meta)
}
| agpl-3.0 |
tobiasd/friendica | src/Model/Search.php | 1326 | <?php
/**
* @copyright Copyright (C) 2010-2021, the Friendica project
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
namespace Friendica\Model;
use Friendica\Database\DBA;
/**
* Model for DB specific logic for the search entity
*/
/**
 * Model for DB specific logic for the search entity
 */
class Search
{
	/**
	 * Returns the list of user defined tags (e.g. #Friendica)
	 *
	 * @return array tag names, lower-cased and stripped of the leading hash
	 *
	 * @throws \Exception
	 */
	public static function getUserTags()
	{
		$tags = [];

		$statement = DBA::p("SELECT DISTINCT(`term`) FROM `search`");
		while ($record = DBA::fetch($statement)) {
			$tags[] = trim(mb_strtolower($record['term']), '#');
		}
		DBA::close($statement);

		return $tags;
	}
}
| agpl-3.0 |
wlwwt/shopware | themes/Backend/ExtJs/backend/index/controller/main.js | 24405 | /**
* Shopware 5
* Copyright (c) shopware AG
*
* According to our dual licensing model, this program can be used either
* under the terms of the GNU Affero General Public License, version 3,
* or under a proprietary license.
*
* The texts of the GNU Affero General Public License with an additional
* permission and of our proprietary license can be found at and
* in the LICENSE file you have received along with this program.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* "Shopware" is a registered trademark of shopware AG.
* The licensing of the program under the AGPLv3 does not imply a
* trademark license. Therefore any rights, title and interest in
* our trademarks remain entirely with us.
*/
/**
* SHOPWARE UI - Index Controller
*
* This file contains the index application which represents
* the basic backend structure.
*/
//{namespace name=backend/index/controller/main}
//{block name="backend/index/controller/main"}
Ext.define('Shopware.apps.Index.controller.Main', {
    extend: 'Ext.app.Controller',

    /**
     * Creates the necessary event listener for this
     * specific controller and opens a new Ext.window.Window
     * to display the subapplication.
     *
     * Depending on the backend state this either resumes the first run
     * wizard or boots the regular backend desktop and the optional
     * feedback / benchmark teaser applications.
     *
     * @public
     * @return void
     */
    init: function() {
        var me = this,
            firstRunWizardStep = Ext.util.Cookies.get('firstRunWizardStep'),
            firstRunWizardEnabled = me.subApplication.firstRunWizardEnabled,
            enableInstallationFeedback = me.subApplication.enableInstallationFeedback,
            enableBetaFeedback = me.subApplication.enableBetaFeedback,
            biOverviewEnabled = me.subApplication.biOverviewEnabled;

        // Determine which wizard step to resume; a disabled wizard forces step 0.
        if (!firstRunWizardEnabled) {
            firstRunWizardStep = 0;
        } else if (Ext.isEmpty(firstRunWizardStep)) {
            firstRunWizardStep = firstRunWizardEnabled;
        }

        if (firstRunWizardStep > 0) {
            // Persist the current step and start the wizard once the hidden
            // plugin manager sub application has been initialized.
            Ext.util.Cookies.set('firstRunWizardStep', firstRunWizardStep);

            Shopware.app.Application.addSubApplication({
                name: 'Shopware.apps.PluginManager',
                params: {
                    hidden: true
                }
            },
            undefined,
            function() {
                Shopware.app.Application.addSubApplication({
                    name: 'Shopware.apps.FirstRunWizard'
                });
            }
            );
        } else {
            me.initBackendDesktop();

            if (enableInstallationFeedback) {
                // Ask for installation feedback shortly after start up.
                Ext.Function.defer(function() {
                    Shopware.app.Application.addSubApplication({
                        name: 'Shopware.apps.Feedback',
                        params: {
                            installationFeedback: true
                        }
                    });
                }, 2000);
            }

            if (enableBetaFeedback && (typeof Storage !== "undefined")) {
                // Only ask for beta feedback while the user has not dismissed
                // the dialog before (flag kept in local storage).
                var item = window.localStorage.getItem("hideBetaFeedback");
                if (!item) {
                    Ext.Function.defer(function() {
                        Shopware.app.Application.addSubApplication({
                            name: 'Shopware.apps.Feedback',
                            params: {
                                previewFeedback: true
                            }
                        });
                    }, 2000);
                }
            }

            /*{if {acl_is_allowed privilege=manage resource=benchmark}}*/
            if (biOverviewEnabled) {
                Ext.Function.defer(function() {
                    Shopware.app.Application.addSubApplication({
                        name: 'Shopware.apps.Benchmark',
                        params: {
                            isTeaser: true
                        }
                    });
                }, 2000);
            }
            /* {/if} */
        }
    },
    /**
     * Builds the backend desktop: creates the viewport hosting the main
     * menu and the footer, installs the keyboard shortcuts and starts the
     * periodic login check.
     *
     * @return void
     */
    initBackendDesktop: function() {
        var me = this,
            mainApp = Shopware.app.Application,
            viewport = mainApp.viewport = Ext.create('Shopware.container.Viewport');

        /** Create our menu and footer */
        me.menu = me.getView('Menu').create();
        me.footer = me.getView('Footer').create();

        viewport.add(me.menu);
        viewport.add(me.footer);

        me.addKeyboardEvents();
        me.checkLoginStatus();

        /*{if {acl_is_allowed privilege=submit resource=benchmark}}*/
        if (me.subApplication.biIsActive) {
            me.checkBenchmarksStatus();
        }
        /*{/if}*/
    },
    /**
     * This method provides experimental support
     * for shortcuts in the Shopware Backend.
     *
     * Each shortcut opens a backend module; cache shortcuts trigger the
     * performance module with a preselected action. Template conditions
     * ensure only shortcuts the user may use are registered.
     *
     * @return void
     */
    addKeyboardEvents: function() {
        var me = this, map,
            msg = Shopware.Notification;

        map = new Ext.util.KeyMap(document, [
            /*{if {acl_is_allowed privilege=read resource=article}}*/
            // New article - CTRL + ALT + N
            {
                key: 'n',
                ctrl: true,
                alt: true,
                fn: function() {
                    msg.createGrowlMessage('{s name=title/key_pressed}{/s}', '{s name=content/article_open}{/s}');
                    openNewModule('Shopware.apps.Article', {
                        params: {
                            articleId: null
                        }
                    });
                }
            },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=read resource=articlelist}}*/
            // Article overview - CTRL + ALT + O
            {
                key: "o",
                ctrl: true,
                alt: true,
                fn: function(){
                    msg.createGrowlMessage('{s name=title/key_pressed}{/s}', '{s name=content/article_overview_open}Article overview module will be opened.{/s}');
                    openNewModule('Shopware.apps.ArticleList');
                }
            },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=read resource=order}}*/
            // Order overview - CTRL + ALT + B
            {
                key: "b",
                ctrl: true,
                alt: true,
                fn: function() {
                    msg.createGrowlMessage('{s name=title/key_pressed}{/s}', '{s name=content/order_open}{/s}');
                    openNewModule('Shopware.apps.Order');
                }
            },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=read resource=customer}}*/
            // Customer overview - CTRL + ALT + K
            {
                key: "k",
                ctrl: true,
                alt: true,
                fn: function(){
                    msg.createGrowlMessage('{s name=title/key_pressed}{/s}', '{s name=content/customer_open}{/s}');
                    openNewModule('Shopware.apps.Customer');
                }
            },
            /*{/if}*/
            // Keymap Overview - CTRL + ALT + H
            {
                key: 'h',
                ctrl: true,
                alt: true,
                fn: function() {
                    createKeyNavOverlay();
                }
            },
            /*{if {acl_is_allowed privilege=read resource=pluginmanager}}*/
            // Plugin Manager - CTRL + ALT + P
            {
                key: 'p',
                ctrl: true,
                alt: true,
                fn: function() {
                    msg.createGrowlMessage('{s name=title/key_pressed}{/s}', '{s name=content/plugin_open}{/s}');
                    openNewModule('Shopware.apps.PluginManager');
                }
            },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=clear resource=performance}}*/
            // Cache Manager - CTRL + ALT + TFX
            {
                key: 'tfx',
                ctrl: true,
                alt: true,
                handler: function(keyCode, e) {
                    // The repeated "var action" declarations below are
                    // function scoped and hoisted, so the assignments in the
                    // case labels work as intended.
                    switch(keyCode) {
                        // Frontend Cache - CTRL + ALT + F
                        case 70: var action = 'Frontend'; break;
                        // Template Cache - CTRL + ALT + T
                        case 84: var action = 'Template'; break;
                        // Config Cache - CTRL + ALT + X
                        case 88: var action = 'Config'; break;
                        default: return;
                    }
                    Shopware.app.Application.addSubApplication({
                        name: 'Shopware.apps.Performance',
                        action: action
                    });
                }
            }
            /*{/if}*/
        ]);
    },
    /**
     * Helper method which checks every 30 seconds whether the user is
     * still logged in. When the session has expired, or the status request
     * itself fails, the browser is redirected to the login page.
     *
     * @private
     * @return void
     */
    checkLoginStatus: function () {
        Ext.TaskManager.start({
            interval: 30000,
            run: function () {
                Ext.Ajax.request({
                    url: '{url controller=login action=getLoginStatus}',
                    success: function(response) {
                        var json = Ext.decode(response.responseText);

                        // Session no longer valid: back to the login page.
                        if(!json.success) {
                            window.location.href = '{url controller=index}';
                        }
                    },
                    failure: function() {
                        window.location.href = '{url controller=index}';
                    }
                });
            }
        });
    },
    /**
     * Helper method which checks for new Benchmark data periodically (every 10 seconds).
     * The polling interval adapts to the server response and the next poll
     * is scheduled from within the success handler. A cookie suppresses
     * polling entirely during a server-requested back-off period.
     *
     * @private
     * @return void
     */
    checkBenchmarksStatus: function () {
        var interval = 10000,
            checkBenchmarksFn = function () {
                Ext.Ajax.request({
                    url: '{url controller=benchmark action=checkBenchmarks}',
                    success: function(response) {
                        var res = Ext.decode(response.responseText);
                        interval = 10000;

                        // Set interval to 5 minutes if all data was sent
                        if (!res.statistics && res.bi) {
                            interval = 300000;
                        }

                        // If we received new BI statistics, we print a growl message
                        if (res.bi) {
                            Shopware.Notification.createStickyGrowlMessage({
                                title: '{s name=title/new_benchmark}{/s}',
                                text: '{s name=content/new_benchmark}{/s}',
                                btnDetail: {
                                    text: '{s name=open}{/s}',
                                    callback: function () {
                                        Shopware.app.Application.addSubApplication({
                                            name: 'Shopware.apps.Benchmark',
                                            params: {
                                                shopId: res.shopId
                                            }
                                        });
                                    }
                                }
                            });
                        }

                        // If neither sending nor receiving is necessary, set interval to 12 hours
                        if (!res.statistics && !res.bi && !res.message) {
                            interval = 43200000;
                        }

                        if (!res.success) {
                            // Server reported an error: pause for 12 hours and
                            // set a 15 minute back-off cookie.
                            interval = 43200000;
                            var cur = new Date();
                            cur.setSeconds(cur.getSeconds() + 900);
                            Ext.util.Cookies.set('benchmarkWait', '1', cur);
                        }

                        window.setTimeout(checkBenchmarksFn, interval);
                    }
                });
            };

        // Respect an active back-off period.
        if (Ext.util.Cookies.get('benchmarkWait')) {
            return;
        }

        window.setTimeout(checkBenchmarksFn, interval);
    }
});
// Transparent full-size desktop panel that fills the center region of the
// backend viewport and hosts the module windows.
Ext.define('Shopware.apps.Index.view.Main', {
    extend: 'Ext.panel.Panel',
    alias: 'widget.index-desktoppanel',
    cls: 'main-backend-holder',
    height: '100%',
    width: '100%',
    border: false,
    plain: true,
    frame: false,
    region: 'center',
    layout: 'fit',
    bodyStyle: 'background: transparent'
});
/**
 * Wrapper method which allows opening deprecated modules within the new
 * ExtJS 4 structure.
 *
 * Note that this method is only an alias and is not needed for new
 * modules; those are loaded through Shopware.app.Application.addSubApplication
 * or the shorthand openNewModule().
 *
 * @param [string] module - the module to load
 * @param [boolean] forceNewWindow - has no impact
 * @param [object] requestConfig - additional params which will passed to the module
 * @return void
 */
loadSkeleton = function(module, forceNewWindow, requestConfig) {
    // Delegate to the generic "Deprecated" wrapper sub application.
    Shopware.app.Application.addSubApplication({
        name: 'Shopware.apps.Deprecated',
        moduleName: module,
        requestConfig: requestConfig || { }
    });
};
/**
 * Wrapper method which loads newer modules. This method
 * is mostly used by backend modules which are shipped
 * within a plugin.
 *
 * @param [string] controller - the controllername to load
 * @param [string] action - the action to dispatch on the controller
 * @return void
 */
openAction = function(controller, action) {
    Shopware.app.Application.addSubApplication({
        name: 'Shopware.apps.Deprecated',
        controllerName: controller,
        actionName: action
    });
};
/**
 * Initialize a new sub application. This method
 * will be used in the future to load new
 * backend modules.
 *
 * @param [string] subapp - the complete name of the controller
 * @param [object] options - additional options
 * @return void
 *
 * @example openNewModule('Shopware.apps.Auth')
 */
openNewModule = function(subapp, options) {
    var config = options || { };
    config.name = subapp;
    Shopware.app.Application.addSubApplication(config);
};
// Opens a modal overlay that lists all available keyboard shortcuts. The
// list is assembled from the user's ACL permissions; when no shortcut is
// permitted at all a placeholder message is rendered instead.
createKeyNavOverlay = function() {
    var store = Ext.create('Ext.data.Store', {
        fields: [ 'name', 'key', 'alt', 'ctrl' ],
        data: [
            /*{if {acl_is_allowed privilege=read resource=article}}*/
            { name: '{s name=title/article}Article{/s}', key: 'n', alt: true , ctrl: true },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=read resource=articlelist}}*/
            { name: '{s name=title/article_overview}Article overview{/s}', key: 'o', alt: true , ctrl: true },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=read resource=order}}*/
            { name: '{s name=title/order}Order{/s}', key: 'b', alt: true , ctrl: true },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=read resource=customer}}*/
            { name: '{s name=title/customer}Customer{/s}', key: 'k', alt: true , ctrl: true },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=read resource=pluginmanager}}*/
            { name: '{s name=title/plugin_manager}Plugin manager{/s}', key: 'p', alt: true , ctrl: true },
            /*{/if}*/
            /*{if {acl_is_allowed privilege=clear resource=performance}}*/
            { name: '{s name=title/cache_template}Clear template cache{/s}', key: 't', alt: true , ctrl: true },
            { name: '{s name=title/cache_config}Clear config cache{/s}', key: 'x', alt: true , ctrl: true },
            { name: '{s name=title/cache_frontend}Clear shop cache{/s}', key: 'f', alt: true , ctrl: true }
            /*{/if}*/
        ]
    }),
    tpl = new Ext.XTemplate(
        '{literal}<tpl for=".">',
        '<div class="row">',
        '<span class="title">{name}:</span>',
        '<div class="keys">',
        // Ctrl key
        '<tpl if="ctrl === true">',
        '<span class="sprite-key_ctrl_alternative">ctrl</span>',
        '</tpl>',
        // Alt key
        '<tpl if="alt === true">',
        '<span class="key_sep">+</span>',
        '<span class="sprite-key_alt_alternative">alt</span>',
        '</tpl>',
        // Output the actual key
        '<span class="key_sep">+</span>',
        '<span class="sprite-key_{key}">{key}</span>',
        '</div>',
        '</div>',
        '</tpl>{/literal}'
    ),
    emptyTpl = '<span class="no-shortcuts">{s name=shortcuts/no_shortcuts_acl}Due to your permissions, there are no shortcuts available{/s}</span>',
    itemCount = store.totalCount,
    dataView = Ext.create('Ext.view.View', {
        store: store,
        tpl: itemCount ? tpl : emptyTpl
    });

    var win = Ext.create('Ext.window.Window', {
        modal: true,
        layout: 'fit',
        title: '{s name=title/keyboard_shortcuts}Keyboard shortcuts{/s}',
        width: 500,
        height: 400,
        bodyPadding: 20,
        autoScroll: true,
        cls: Ext.baseCSSPrefix + 'shortcut-overlay',
        items: [ dataView ]
    });
    win.show();
};
/**
 * Proxy method which opens up the specific module
 * if the user clicks on an entry in the search result.
 *
 * @public
 * @param [string] module - Name of the module
 * @param [integer] id - id of the item
 * @return [boolean]
 */
openSearchResult = function(module, id) {
    // Force the id to be an integer value.
    id = ~~(1 * id);

    // Hide the search drop down shortly after the result was clicked.
    Ext.defer(function() {
        Shopware.searchField.searchDropDown.hide();
    }, 100);

    if (module === 'articles') {
        Shopware.app.Application.addSubApplication({
            name: 'Shopware.apps.Article',
            action: 'detail',
            params: {
                articleId: id
            }
        });
    } else if (module === 'customers') {
        Shopware.app.Application.addSubApplication({
            name: 'Shopware.apps.Customer',
            action: 'detail',
            params: {
                customerId: id
            }
        });
    } else if (module === 'orders') {
        Shopware.app.Application.addSubApplication({
            name: 'Shopware.apps.Order',
            params: {
                orderId: id
            }
        });
    }

    return false;
};
/**
 * Proxy method which just shows a growl like
 * message with the current version of Shopware.
 *
 * The window is destroyed again by a single-shot click listener on the
 * document body (see the end of this function).
 *
 * @public
 * @return void
 */
createShopwareVersionMessage = function() {
    var aboutWindow = Ext.create('Ext.window.Window', {
        autoShow: true,
        unstyled: true,
        baseCls: Ext.baseCSSPrefix + 'about-shopware',
        layout: 'border',
        width: 402,
        header: false,
        height: 302,
        resizable: false,
        closable: false,
        items: [{
            region: 'north',
            xtype: 'container',
            height: 126,
            cls: Ext.baseCSSPrefix + 'about-shopware-header-logo'
        }, {
            height: 35,
            xtype: 'container',
            region: 'south',
            cls: Ext.baseCSSPrefix + 'about-shopware-footer',
            html: '<a href="https://www.shopware.com" target="_blank">{s name=about/footer}Copyright © shopware AG. All rights reserved.{/s}</a>'
        }, {
            xtype: 'container',
            region: 'center',
            padding: '15 75',
            autoScroll: true,
            cls: Ext.baseCSSPrefix + 'about-shopware-content',
            html: '<p>' +
                '<strong>Shopware {$SHOPWARE_VERSION} {$SHOPWARE_VERSION_TEXT}</strong>' +
                '<span>Build Rev {$SHOPWARE_REVISION}</span></p>' +
                '{if $product == "CE"}<p><strong>Community Edition under <a href="http://www.gnu.org/licenses/agpl.html" target="_blank">AGPL license</a></strong><span>No support included in this shopware package.</span></p>{else}' +
                '<p><strong>{if $product == "PE"}Professional Edition{elseif $product == "PP"}Professional Plus Edition{elseif $product == "EE"}Enterprise Edition{elseif $product == "EB"}Enterprise Business Edition{elseif $product == "EC"}Enterprise Cluster Edition{/if} under commercial / proprietary license</strong><span>See <a href="https://api.shopware.com/gtc/en_GB.html" target="_blank">TOS</a> for details</span></p>{/if}' +
                '<p><strong>Shopware 5 uses the following components</strong></p>' +
                '<p><strong>Enlight 2</strong><span>BSD License</span><span> Origin: shopware AG</span></p>' +
                '<p><strong>Zend Framework</strong><span>New BSD License</span><span> Origin: Zend Technologies</span></p>' +
                '<p><strong>ExtJS 4</strong><span>GPL v3 License</span><span> Origin: Sencha Corp.</span></p>' +
                '<p>If you want to develop proprietary extensions that makes use of ExtJS (ie extensions that are not licensed under the GNU Affero General Public License, version 3, or a compatible license), you´ll need to license shopware SDK to get the necessary rights for the distribution of your extensions / plugins.</p>' +
                '<p><strong>Doctrine 2</strong><span>MIT License</span><span> Origin: http://www.doctrine-project.org/</span></p>' +
                '<p><strong>TinyMCE 3</strong><span>LGPL 2.1 License</span><span> Origin: Moxiecode Systems AB.</span></p>' +
                '<p><strong>Symfony 3</strong><span>MIT License</span><span> Origin: SensioLabs</span></p>' +
                '<p><strong>Smarty 3</strong><span>LGPL 2.1 License</span><span> Origin: New Digital Group, Inc.</span></p>' +
                '<p><strong>CodeMirror</strong><span>BSD License</span><span> Origin: http://codemirror.net/</span></p>' +
                '<p><strong>MPDF</strong><span>GPL License</span><span> Origin: https://mpdf.github.io</span></p>' +
                '<p><strong>FPDF</strong><span>License</span><span> Origin: http://www.fpdf.org/</span></p>' +
                '<p><strong>Guzzle</strong><span>MIT License</span><span> Origin: http://guzzlephp.org</span></p>' +
                '<p><strong>Less.php</strong><span>Apache-2.0</span><span> Origin: http://lessphp.gpeasy.com</span></p>' +
                '<p><strong>Monolog</strong><span>MIT License</span><span> Origin: https://github.com/Seldaek/monolog</span></p>' +
                '<p><strong>ElasticSearch</strong><span>LGPL License</span><span> Origin: https://github.com/elastic/elasticsearch-php</span></p>' +
                '<p><strong>ongr/elasticsearch-dsl</strong><span>License</span><span> Origin: https://github.com/ongr-io/ElasticsearchDSL</span></p>' +
                '<p><strong>egulias/email-validator</strong><span>MIT License</span><span> Origin: https://github.com/egulias/EmailValidator</span></p>' +
                '<p><strong>Flysystem</strong><span>MIT License</span><span> Origin: http://flysystem.thephpleague.com</span></p>' +
                '<p><strong>paragonie/random_compat</strong><span>MIT License</span><span> Origin: https://github.com/paragonie/random_compat</span></p>' +
                '<p><strong>beberlei/assert</strong><span>License</span><span> Origin: https://github.com/beberlei/assert</span></p>' +
                "</p>"
        }]
    });

    // Add event listener method closes the about window
    Ext.getBody().on('click', function() {
        this.destroy();
    }, aboutWindow, {
        single: true
    });
};
//{/block}
| agpl-3.0 |
ivansenic/inspectIT | inspectit.shared.cs/src/main/java/rocks/inspectit/shared/cs/ci/sensor/method/impl/HttpSensorConfig.java | 3370 | package rocks.inspectit.shared.cs.ci.sensor.method.impl;
import java.util.Map;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import rocks.inspectit.shared.all.instrumentation.config.PriorityEnum;
import rocks.inspectit.shared.cs.ci.sensor.StringConstraintSensorConfig;
import rocks.inspectit.shared.cs.ci.sensor.method.IMethodSensorConfig;
/**
 * HTTP sensor configuration.
 * <p>
 * Holds the JAXB mapped options that control which additional HTTP data
 * (session, request attributes, request parameters) the agent captures.
 *
 * @author Ivan Senic
 *
 */
@XmlRootElement(name = "http-sensor-config")
public class HttpSensorConfig extends StringConstraintSensorConfig implements IMethodSensorConfig {

	/**
	 * Sensor name.
	 */
	public static final String SENSOR_NAME = "HTTP Sensor";

	/**
	 * Implementing class name.
	 */
	public static final String CLASS_NAME = "rocks.inspectit.agent.java.sensor.method.http.HttpSensor";

	/**
	 * Session capture option.
	 */
	@XmlAttribute(name = "sessionCapture")
	private Boolean sessionCapture = Boolean.FALSE;

	/**
	 * Whether attributes should be captured.
	 */
	@XmlAttribute(name = "attributesCapture")
	private Boolean attributesCapture = Boolean.FALSE;

	/**
	 * Whether parameters should be captured.
	 */
	@XmlAttribute(name = "parametersCapture")
	private Boolean parametersCapture = Boolean.FALSE;

	/**
	 * No-args constructor. Uses a string length constraint of 500.
	 */
	public HttpSensorConfig() {
		super(500);
	}

	/**
	 * {@inheritDoc}
	 */
	@Override
	public String getName() {
		return SENSOR_NAME;
	}

	/**
	 * {@inheritDoc}
	 */
	@Override
	public String getClassName() {
		return CLASS_NAME;
	}

	/**
	 * {@inheritDoc}
	 */
	@Override
	public PriorityEnum getPriority() {
		return PriorityEnum.MAX;
	}

	/**
	 * {@inheritDoc}
	 */
	@Override
	public boolean isAdvanced() {
		return false;
	}

	/**
	 * {@inheritDoc}
	 * <p>
	 * Adds the activated capture flags to the inherited parameter map. Uses
	 * null-safe comparison instead of auto-unboxing so that a {@code null}
	 * flag (possible through the {@link Boolean} accepting setters) behaves
	 * like {@code false} instead of raising a {@link NullPointerException}.
	 */
	@Override
	public Map<String, Object> getParameters() {
		Map<String, Object> parameters = super.getParameters();
		if (Boolean.TRUE.equals(sessionCapture)) {
			parameters.put("sessioncapture", "true");
		}
		if (Boolean.TRUE.equals(attributesCapture)) {
			parameters.put("attributescapture", "true");
		}
		if (Boolean.TRUE.equals(parametersCapture)) {
			parameters.put("parameterscapture", "true");
		}
		return parameters;
	}

	/**
	 * Gets {@link #sessionCapture}.
	 *
	 * @return {@link #sessionCapture} as primitive; a {@code null} field is reported as {@code false}
	 */
	public boolean isSessionCapture() {
		return Boolean.TRUE.equals(sessionCapture);
	}

	/**
	 * Sets {@link #sessionCapture}.
	 *
	 * @param sessionCapture
	 *            New value for {@link #sessionCapture}
	 */
	public void setSessionCapture(boolean sessionCapture) {
		this.sessionCapture = Boolean.valueOf(sessionCapture);
	}

	/**
	 * Gets {@link #attributesCapture}.
	 *
	 * @return {@link #attributesCapture}
	 */
	public Boolean isAttributesCapture() {
		return this.attributesCapture;
	}

	/**
	 * Sets {@link #attributesCapture}.
	 *
	 * @param attributesCapture
	 *            New value for {@link #attributesCapture}
	 */
	public void setAttributesCapture(Boolean attributesCapture) {
		this.attributesCapture = attributesCapture;
	}

	/**
	 * Gets {@link #parametersCapture}.
	 *
	 * @return {@link #parametersCapture}
	 */
	public Boolean isParametersCapture() {
		return this.parametersCapture;
	}

	/**
	 * Sets {@link #parametersCapture}.
	 *
	 * @param parametersCapture
	 *            New value for {@link #parametersCapture}
	 */
	public void setParametersCapture(Boolean parametersCapture) {
		this.parametersCapture = parametersCapture;
	}
}
| agpl-3.0 |
ExpandeNegocio/crm | custom/modulebuilder/packages/ExpandeFran/modules/Franquicia/Dashlets/Expan_FranquiciaDashlet/Expan_FranquiciaDashlet.php | 3203 | <?php
if(!defined('sugarEntry') || !sugarEntry) die('Not A Valid Entry Point');
/*********************************************************************************
* SugarCRM Community Edition is a customer relationship management program developed by
* SugarCRM, Inc. Copyright (C) 2004-2013 SugarCRM Inc.
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License version 3 as published by the
* Free Software Foundation with the addition of the following permission added
* to Section 15 as permitted in Section 7(a): FOR ANY PART OF THE COVERED WORK
* IN WHICH THE COPYRIGHT IS OWNED BY SUGARCRM, SUGARCRM DISCLAIMS THE WARRANTY
* OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
* details.
*
* You should have received a copy of the GNU Affero General Public License along with
* this program; if not, see http://www.gnu.org/licenses or write to the Free
* Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*
* You can contact SugarCRM, Inc. headquarters at 10050 North Wolfe Road,
* SW2-130, Cupertino, CA 95014, USA. or at email address contact@sugarcrm.com.
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Public License version 3.
*
* In accordance with Section 7(b) of the GNU Affero General Public License version 3,
* these Appropriate Legal Notices must retain the display of the "Powered by
* SugarCRM" logo. If the display of the logo is not reasonably feasible for
* technical reasons, the Appropriate Legal Notices must display the words
* "Powered by SugarCRM".
********************************************************************************/
/*********************************************************************************
* Description: Defines the English language pack for the base application.
* Portions created by SugarCRM are Copyright (C) SugarCRM, Inc.
* All Rights Reserved.
* Contributor(s): ______________________________________..
********************************************************************************/
require_once('include/Dashlets/DashletGeneric.php');
require_once('modules/Expan_Franquicia/Expan_Franquicia.php');
class Expan_FranquiciaDashlet extends DashletGeneric {
/**
 * Dashlet constructor (PHP4-style: method named after the class).
 *
 * NOTE(review): PHP4-style constructors are deprecated in PHP 7 and removed
 * in PHP 8; `parent::DashletGeneric(...)` likewise assumes the parent still
 * exposes its PHP4-style constructor — confirm the target PHP/SugarCRM
 * version before upgrading.
 *
 * @param string|int $id  Dashlet instance id.
 * @param array|null $def Saved dashlet definition; when it carries no
 *                        'title', the module's LBL_HOMEPAGE_TITLE is used.
 */
function Expan_FranquiciaDashlet($id, $def = null) {
    global $current_user, $app_strings;
    // Loads $dashletData (search field / column metadata consumed below).
    require('modules/Expan_Franquicia/metadata/dashletviewdefs.php');
    parent::DashletGeneric($id, $def);
    if(empty($def['title'])) $this->title = translate('LBL_HOMEPAGE_TITLE', 'Expan_Franquicia');
    $this->searchFields = $dashletData['Expan_FranquiciaDashlet']['searchFields'];
    $this->columns = $dashletData['Expan_FranquiciaDashlet']['columns'];
    // Seed bean drives the generic dashlet's list query.
    $this->seedBean = new Expan_Franquicia();
}
} | agpl-3.0 |
LinxHQ/LinxCloud | linxbooks/protected/modules/lbBankAccount/controllers/DefaultController.php | 117 | <?php
/**
 * Default controller for the lbBankAccount module.
 */
class DefaultController extends Controller
{
    /**
     * Default action: renders the module's 'index' view.
     */
    public function actionIndex()
    {
        $this->render('index');
    }
}
| agpl-3.0 |
admpub/nging | vendor/github.com/admpub/frp/pkg/util/metric/metrics.go | 998 | // Copyright 2020 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
	Inc()        // increment the gauge by 1
	Dec()        // decrement the gauge by 1
	Set(float64) // set the gauge to an arbitrary value
}
// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
	Inc() // increment the counter by 1
}
// HistogramMetric counts individual observations.
type HistogramMetric interface {
	Observe(float64) // record a single observation
}
| agpl-3.0 |
arno01/rainloop-webmail | dev/Model/Filter.js | 7436 |
(function () {
'use strict';
var
_ = require('_'),
ko = require('ko'),
Enums = require('Common/Enums'),
Utils = require('Common/Utils'),
Translator = require('Common/Translator'),
Cache = require('Common/Cache'),
FilterConditionModel = require('Model/FilterCondition'),
AbstractModel = require('Knoin/AbstractModel')
;
/**
* @constructor
*/
/**
 * Sieve-style mail filter view model.
 *
 * Holds the filter's name, its match conditions and the single action to
 * execute, all as Knockout observables so the settings screen can bind to
 * them directly.
 *
 * @constructor
 */
function FilterModel()
{
	AbstractModel.call(this, 'FilterModel');

	this.enabled = ko.observable(true);

	// Server-side identifier; '' until generateID() or parse() fills it in.
	this.id = '';

	this.name = ko.observable('');
	this.name.error = ko.observable(false);
	this.name.focused = ko.observable(false);

	// FilterConditionModel list plus the Any/All matching mode.
	this.conditions = ko.observableArray([]);

	this.conditionsType = ko.observable(Enums.FilterRulesType.Any);

	// Actions
	this.actionValue = ko.observable('');
	this.actionValue.error = ko.observable(false);

	this.actionValueSecond = ko.observable('');
	this.actionValueThird = ko.observable('');

	this.actionMarkAsRead = ko.observable(false);
	this.actionKeep = ko.observable(true);
	this.actionNoStop = ko.observable(false);

	this.actionType = ko.observable(Enums.FiltersAction.MoveTo);

	// Switching the action type invalidates any previously entered values.
	this.actionType.subscribe(function () {
		this.actionValue('');
		this.actionValue.error(false);
		this.actionValueSecond('');
		this.actionValueThird('');
	}, this);

	// Maps a raw folder full name to a display path ("parent / child").
	// NOTE(review): String.replace without the /g flag rewrites only the
	// first delimiter occurrence — confirm whether deeply nested folders
	// should show every separator as " / ".
	var fGetRealFolderName = function (sFolderFullNameRaw) {
		var oFolder = Cache.getFolderFromCacheList(sFolderFullNameRaw);
		return oFolder ? oFolder.fullName.replace(
			'.' === oFolder.delimiter ? /\./ : /[\\\/]+/, ' / ') : sFolderFullNameRaw;
	};

	// Human-readable summary of the configured action, e.g. "(Move to X)".
	this.nameSub = ko.computed(function () {

		var
			sResult = '',
			sActionValue = this.actionValue()
		;

		switch (this.actionType())
		{
			case Enums.FiltersAction.MoveTo:
				sResult = Translator.i18n('SETTINGS_FILTERS/SUBNAME_MOVE_TO', {
					'FOLDER': fGetRealFolderName(sActionValue)
				});
				break;
			case Enums.FiltersAction.Forward:
				sResult = Translator.i18n('SETTINGS_FILTERS/SUBNAME_FORWARD_TO', {
					'EMAIL': sActionValue
				});
				break;
			case Enums.FiltersAction.Vacation:
				sResult = Translator.i18n('SETTINGS_FILTERS/SUBNAME_VACATION_MESSAGE');
				break;
			case Enums.FiltersAction.Reject:
				sResult = Translator.i18n('SETTINGS_FILTERS/SUBNAME_REJECT');
				break;
			case Enums.FiltersAction.Discard:
				sResult = Translator.i18n('SETTINGS_FILTERS/SUBNAME_DISCARD');
				break;
		}

		return sResult ? '(' + sResult + ')' : '';

	}, this);

	// Name of the knockout template used to edit the selected action type.
	this.actionTemplate = ko.computed(function () {

		var sTemplate = '';
		switch (this.actionType())
		{
			default:
			case Enums.FiltersAction.MoveTo:
				sTemplate = 'SettingsFiltersActionMoveToFolder';
				break;
			case Enums.FiltersAction.Forward:
				sTemplate = 'SettingsFiltersActionForward';
				break;
			case Enums.FiltersAction.Vacation:
				sTemplate = 'SettingsFiltersActionVacation';
				break;
			case Enums.FiltersAction.Reject:
				sTemplate = 'SettingsFiltersActionReject';
				break;
			case Enums.FiltersAction.None:
				sTemplate = 'SettingsFiltersActionNone';
				break;
			case Enums.FiltersAction.Discard:
				sTemplate = 'SettingsFiltersActionDiscard';
				break;
		}

		return sTemplate;

	}, this);

	// Registered subscriptions/computeds are disposed with the model.
	this.regDisposables(this.conditions.subscribe(Utils.windowResizeCallback));

	// Empty name / action value immediately flag the matching error state.
	this.regDisposables(this.name.subscribe(function (sValue) {
		this.name.error('' === sValue);
	}, this));

	this.regDisposables(this.actionValue.subscribe(function (sValue) {
		this.actionValue.error('' === sValue);
	}, this));

	this.regDisposables([this.actionNoStop, this.actionTemplate]);

	this.deleteAccess = ko.observable(false);
	this.canBeDeleted = ko.observable(true);
}

// Mix AbstractModel's prototype members into FilterModel.
_.extend(FilterModel.prototype, AbstractModel.prototype);
/**
 * Assigns a new client-generated (fake MD5) identifier to this filter.
 */
FilterModel.prototype.generateID = function ()
{
	this.id = Utils.fakeMd5();
};
/**
 * Validates the filter before saving.
 *
 * Flags the name / action-value observables with their error state when
 * invalid and asks every condition to verify itself.
 *
 * @returns {boolean} true when the filter can be saved
 */
FilterModel.prototype.verify = function ()
{
	var
		sName = this.name(),
		sValue = this.actionValue(),
		mType = this.actionType()
	;

	if ('' === sName)
	{
		this.name.error(true);
		return false;
	}

	// Every condition must pass its own verification.
	if (0 < this.conditions().length &&
		_.find(this.conditions(), function (oCondition) {
			return oCondition && !oCondition.verify();
		}))
	{
		return false;
	}

	// These action types cannot work without a value (folder, address, ...).
	if ('' === sValue &&
		-1 < Utils.inArray(mType, [
			Enums.FiltersAction.MoveTo,
			Enums.FiltersAction.Forward,
			Enums.FiltersAction.Reject,
			Enums.FiltersAction.Vacation
		]))
	{
		this.actionValue.error(true);
		return false;
	}

	// Forward targets must at least look like an e-mail address.
	if (Enums.FiltersAction.Forward === mType && -1 === sValue.indexOf('@'))
	{
		this.actionValue.error(true);
		return false;
	}

	this.name.error(false);
	this.actionValue.error(false);

	return true;
};
/**
 * Serializes the filter into the plain object shape the backend expects.
 * Boolean flags are encoded as '1'/'0' strings; note that 'Stop' is the
 * inverse of the actionNoStop observable.
 *
 * @returns {Object}
 */
FilterModel.prototype.toJson = function ()
{
	var self = this;
	return {
		'ID': self.id,
		'Enabled': self.enabled() ? '1' : '0',
		'Name': self.name(),
		'ConditionsType': self.conditionsType(),
		'Conditions': _.map(self.conditions(), function (oCondition) {
			return oCondition.toJson();
		}),
		'ActionValue': self.actionValue(),
		'ActionValueSecond': self.actionValueSecond(),
		'ActionValueThird': self.actionValueThird(),
		'ActionType': self.actionType(),
		'Stop': self.actionNoStop() ? '0' : '1',
		'Keep': self.actionKeep() ? '1' : '0',
		'MarkAsRead': self.actionMarkAsRead() ? '1' : '0'
	};
};
/**
 * Appends a fresh, empty condition row to the filter.
 */
FilterModel.prototype.addCondition = function ()
{
	this.conditions.push(new FilterConditionModel());
};

/**
 * Removes the given condition and hands it to Utils.delegateRunOnDestroy
 * for cleanup.
 *
 * @param {FilterConditionModel} oConditionToDelete
 */
FilterModel.prototype.removeCondition = function (oConditionToDelete)
{
	this.conditions.remove(oConditionToDelete);
	Utils.delegateRunOnDestroy(oConditionToDelete);
};
/**
 * Populates this model from a server payload ('@Object' === 'Object/Filter').
 *
 * @param {Object} oItem raw server data
 * @returns {boolean} true when the payload was recognized and applied
 */
FilterModel.prototype.parse = function (oItem)
{
	// Reject anything that is not a filter payload up front.
	if (!oItem || 'Object/Filter' !== oItem['@Object'])
	{
		return false;
	}

	this.id = Utils.pString(oItem['ID']);
	this.name(Utils.pString(oItem['Name']));
	this.enabled(!!oItem['Enabled']);

	this.conditionsType(Utils.pString(oItem['ConditionsType']));

	// Rebuild the condition list, silently skipping unparsable entries.
	this.conditions([]);
	if (Utils.isNonEmptyArray(oItem['Conditions']))
	{
		this.conditions(_.compact(_.map(oItem['Conditions'], function (aData) {
			var oCondition = new FilterConditionModel();
			return oCondition && oCondition.parse(aData) ? oCondition : null;
		})));
	}

	// Action type must be applied before the values (its subscriber clears them).
	this.actionType(Utils.pString(oItem['ActionType']));
	this.actionValue(Utils.pString(oItem['ActionValue']));
	this.actionValueSecond(Utils.pString(oItem['ActionValueSecond']));
	this.actionValueThird(Utils.pString(oItem['ActionValueThird']));

	this.actionNoStop(!oItem['Stop']);
	this.actionKeep(!!oItem['Keep']);
	this.actionMarkAsRead(!!oItem['MarkAsRead']);

	return true;
};
/**
 * Deep-copies this filter (conditions included) into a new FilterModel.
 *
 * @returns {FilterModel}
 */
FilterModel.prototype.cloneSelf = function ()
{
	var
		self = this,
		oClone = new FilterModel()
	;

	oClone.id = self.id;

	// Plain observables are copied in declaration order; actionType comes
	// before actionValue so its subscriber cannot clobber the copied value.
	_.each(['enabled', 'name', 'conditionsType', 'actionMarkAsRead', 'actionType',
		'actionValue', 'actionValueSecond', 'actionValueThird', 'actionKeep', 'actionNoStop'
	], function (sProperty) {
		oClone[sProperty](self[sProperty]());
	});

	oClone.name.error(self.name.error());
	oClone.actionValue.error(self.actionValue.error());

	oClone.conditions(_.map(self.conditions(), function (oCondition) {
		return oCondition.cloneSelf();
	}));

	return oClone;
};
module.exports = FilterModel;
}()); | agpl-3.0 |
ankithbti/canvas_lms | public/javascripts/learning_outcomes.js | 23237 | /**
* Copyright (C) 2011 Instructure, Inc.
*
* This file is part of Canvas.
*
* Canvas is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation, version 3 of the License.
*
* Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
* details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
define([
'i18n!learning_outcomes',
'jquery' /* $ */,
'find_outcome',
'jquery.ajaxJSON' /* ajaxJSON */,
'jquery.instructure_forms' /* formSubmit, fillFormData, formErrors */,
'jqueryui/dialog',
'jquery.instructure_misc_helpers' /* replaceTags, /\$\.underscore/ */,
'jquery.instructure_misc_plugins' /* confirmDelete, showIf */,
'jquery.loadingImg' /* loadingImage */,
'jquery.templateData' /* fillTemplateData, getTemplateData */,
'compiled/tinymce',
'tinymce.editor_box' /* editorBox */,
'vendor/jquery.scrollTo' /* /\.scrollTo/ */,
'jqueryui/sortable' /* /\.sortable/ */
], function(I18n, $, find_outcome) {
var outcomes = {
// Monotonic counter used to give every rating form row a unique input index.
ratingCounter: 0,
updateOutcome: function(outcome, $outcome) {
if(!$outcome || $outcome.length === 0) {
$outcome = $("#outcome_" + outcome.id);
}
if(!$outcome || $outcome.length === 0) {
$outcome = $("#outcome_blank").clone(true).removeAttr('id');
$("#outcomes .outcome_group:first").append($outcome.show());
$("#outcomes .outcome_group:first .child_outcomes").sortable('refresh');
}
outcome.asset_string = $.underscore("learning_outcome_" + outcome.id);
$outcome.find("textarea.description").val(outcome.description);
$outcome.fillTemplateData({
data: outcome,
id: "outcome_" + outcome.id,
htmlValues: ['description'],
hrefValues: ['id']
});
$outcome.addClass('loaded');
$outcome.find(".rubric_criterion .rating:not(.blank)").remove();
if(outcome.data && outcome.data.rubric_criterion) {
for(var idx in outcome.data.rubric_criterion.ratings) {
var rating = outcome.data.rubric_criterion.ratings[idx]
var $rating = $outcome.find(".rubric_criterion .rating.blank:first").clone(true).removeClass('blank');
var jdx = outcomes.ratingCounter++;
$rating.find(".description").text(rating.description);
$rating.find(".points").text(rating.points);
$outcome.find(".add_holder").before($rating.show());
}
$outcome.find(".mastery_points").text(outcome.data.rubric_criterion.mastery_points);
$outcome.find(".points_possible").text(outcome.data.rubric_criterion.points_possible);
}
if(outcome.permissions) {
$outcome.find(".edit_outcome_link").showIf(outcome.permissions.update && for_context);
var for_context = (outcome.context_code == $("#find_outcome_dialog .context_code").text());
$outcome.find(".really_delete_outcome_link").showIf(for_context);
$outcome.find(".remove_outcome_link").showIf(!for_context);
}
return $outcome;
},
// No-op hook invoked after rating rows are added or removed; kept so the
// call sites below stay in place.
sizeRatings: function() {
},
hideEditOutcome: function() {
// remove .prev('.outcome') if id is 'outcome_new'
$("#edit_outcome_form textarea").editorBox('destroy');
var $outcome = $("#outcomes #edit_outcome_form").prev(".learning_outcome");
$("body").append($("#edit_outcome_form").hide());
if($outcome.attr('id') == 'outcome_new') {
$outcome.remove();
} else {
$outcome.show();
}
},
// Opens the inline editor for an outcome. If the row exists but its details
// are not loaded yet, the details load is triggered first and this method
// re-enters once it finishes. With no $outcome given, a blank "outcome_new"
// placeholder row is appended and the form is set up for a POST (create)
// instead of a PUT (update).
editOutcome: function($outcome, $group) {
  if($outcome && $outcome.length > 0 && !$outcome.hasClass('loaded')) {
    $outcome.find(".show_details_link").triggerHandler('click', function() {
      outcomes.editOutcome($outcome, $group);
    });
    return;
  }
  outcomes.hideEditOutcome();
  if(!$outcome || $outcome.length === 0) {
    // set id to "outcome_new" so save/cancel can tell it's an add
    $outcome = $("#outcome_blank").clone(true).attr('id', 'outcome_new');
    if(!$group || $group.length == 0) {
      $group = $("#outcomes .outcome_group:first");
    }
    // NOTE(review): the new row is appended to the *first* .child_outcomes
    // in #outcomes while the sortable refresh targets $group — confirm
    // whether the append should use $group's own .child_outcomes instead.
    $('#outcomes .child_outcomes:first').append($outcome.show());
    $group.find('.child_outcomes').sortable('refresh');
  }
  var $form = $("#edit_outcome_form");
  $form.attr('action', $outcome.find(".edit_outcome_link").attr('href'));
  $form.attr('method', 'PUT');
  if($outcome.attr('id') == 'outcome_new') {
    $form.attr('action', $("#outcome_links .add_outcome_url").attr('href'));
    $form.attr('method', 'POST');
  }
  var data = $outcome.getTemplateData({textValues: ['short_description', 'description', 'mastery_points']});
  // the OR here is because of a wierdness in chrome where .val() is an
  // empty string but .html() is the actual imputed html that we want
  data.description = $outcome.find("textarea.description").val() || $outcome.find("textarea.description").html();
  $form.fillFormData(data, {object_name: 'learning_outcome'});
  $form.find("#outcome_include_rubric_example").attr('checked', true).change();
  $form.find(".rubric_criterion .rating:not(.blank)").remove();
  // Copy each existing rating row into the form, giving the inputs uniquely
  // indexed names (ratingCounter guarantees uniqueness across rebuilds).
  $outcome.find(".rubric_criterion .rating:not(.blank)").each(function() {
    $form.find("#outcome_include_rubric_example").attr('checked', true);
    var $rating = $form.find(".rubric_criterion .rating.blank:first").clone(true).removeClass('blank');
    var ratingData = $(this).getTemplateData({textValues: ['description', 'points']});
    var idx = outcomes.ratingCounter++;
    $rating.find(".outcome_rating_description").val(ratingData.description).attr('name', 'learning_outcome[rubric_criterion][ratings][' + idx + '][description]');
    $rating.find(".outcome_rating_points").val(ratingData.points).attr('name', 'learning_outcome[rubric_criterion][ratings][' + idx + '][points]');
    $form.find(".add_holder").before($rating.show());
  });
  $form.find(".mastery_points").val(data.mastery_points);
  $form.find("#outcome_include_rubric_example").change();
  // Swap the row out for the form and focus the first field.
  $outcome.after($form.show());
  $outcome.hide();
  $form.find(":text:visible:first").focus().select();
  $form.find("textarea").editorBox();
},
deleteOutcome: function($outcome) {
$outcome.confirmDelete({
message: I18n.t("remove_learning_outcome", "Are you sure you want to remove this learning outcome?"),
url: $outcome.find(".delete_outcome_link").attr('href'),
success: function() {
$(this).slideUp(function() {
$(this).remove();
});
}
});
},
// Renders/refreshes a single outcome-group node, creating it from
// #group_blank (and wiring up its own sortable child list) when it does
// not exist yet.
//
// group  - plain group object from the server
// $group - optional jQuery node to update in place
// Returns the (possibly newly created) jQuery node.
updateOutcomeGroup: function(group, $group) {
  if(!$group || $group.length === 0) {
    $group = $("#group_" + group.id);
  }
  if(!$group || $group.length === 0) {
    $group = $("#group_blank").clone(true).removeAttr('id');
    $("#outcomes .outcome_group:first").append($group.show());
    $("#outcomes .outcome_group:first .child_outcomes").sortable('refresh');
    $group.find('.child_outcomes').sortable(outcomes.sortableOptions);
    // Allow dragging items between every group's list.
    $("#outcomes .outcome_group .child_outcomes").sortable('option', 'connectWith', '.child_outcomes');
  }
  group.asset_string = $.underscore("learning_outcome_group_" + group.id);
  $group.find("textarea.description").val(group.description);
  $group.fillTemplateData({
    data: group,
    id: "group_" + group.id,
    hrefValues: ['id'],
    htmlValues: ['description']
  });
  return $group;
},
hideEditOutcomeGroup: function() {
// remove .prev('.group') if id is 'group_new'
$("#edit_outcome_group_form textarea").editorBox('destroy');
var $group = $("#outcomes #edit_outcome_group_form").prev(".outcome_group");
$("body").append($("#edit_outcome_group_form").hide());
if($group.attr('id') == 'group_new') {
$group.remove();
} else {
$group.show();
}
},
// Opens the inline editor for an outcome group. With no $group given, a
// blank "group_new" placeholder is appended and the form switches to a
// POST (create) instead of a PUT (update).
editOutcomeGroup: function($group) {
  // set id to "group_new" so save/cancel can tell it's an add
  outcomes.hideEditOutcomeGroup();
  if(!$group || $group.length === 0) {
    $group = $("#group_blank").clone(true).attr('id', 'group_new');
    // NOTE(review): here the sortable refresh runs *before* the append,
    // while updateOutcomeGroup refreshes after appending — confirm the
    // intended order.
    $("#outcomes .outcome_group:first .child_outcomes").sortable('refresh');
    $("#outcomes .child_outcomes:first").append($group.show());
    $group.find('.child_outcomes').sortable(outcomes.sortableOptions);
    $("#outcomes .outcome_group .child_outcomes").sortable('option', 'connectWith', '.child_outcomes');
  }
  var $form = $("#edit_outcome_group_form");
  $form.attr('action', $group.find(".edit_group_link").attr('href'));
  $form.attr('method', 'PUT');
  if($group.attr('id') == 'group_new') {
    $form.attr('action', $("#outcome_links .add_outcome_group_url").attr('href'));
    $form.attr('method', 'POST');
  }
  var data = $group.getTemplateData({textValues: ['title', 'description']});
  data.description = $group.find("textarea.description").val();
  $form.fillFormData(data, {object_name: 'learning_outcome_group'});
  // Swap the group header out for the form and focus the first field.
  $group.after($form.show());
  $group.hide();
  $form.find(":text:visible:first").focus().select();
  $form.find("textarea").editorBox();
},
deleteOutcomeGroup: function($group) {
$group.confirmDelete({
message: I18n.t("remove_outcome_group", "Are you sure you want to remove this learning outcome group and all its outcomes?"),
url: $group.find(".delete_group_link").attr('href'),
success: function() {
$(this).slideUp(function() {
$(this).remove();
});
}
});
},
// Shared jQuery UI sortable config for every group's child list. After a
// drag completes, the new ordering of the destination group's children
// (ordering[asset_string] => position) is POSTed to the reorder endpoint.
sortableOptions: {
  axis: 'y',
  connectWith: '#outcomes .child_outcomes',
  containment: '#outcomes',
  handle: '.reorder_link',
  update: function(event, ui) {
    var $group = $(ui.item).parent().closest('.outcome_group'),
        id = $group.children('.header').getTemplateData({ textValues: [ 'asset_string', 'id' ] }).id,
        data = {},
        url = $.replaceTags($("#outcome_links .reorder_items_url").attr('href'), 'id', id),
        assets = $group.children('.child_outcomes').children('.learning_outcome, .outcome_group').map(function(i, el){
          return $(el).children('.header').getTemplateData({ textValues: [ 'asset_string', 'id' ] }).asset_string;
        });
    for (var _i = 0, _max = assets.length; _i < _max; _i++){
      data['ordering[' + assets[_i] + ']'] = _i;
    }
    $.ajaxJSON(url, 'POST', data);
  }
}
};
$(document).ready(function() {
// Opens the explanatory dialog about outcome criteria.
$("#outcome_information_link").click(function(event) {
  var dialogOptions = {
    width: 400,
    title: I18n.t("outcome_criterion", "Learning Outcome Criterion")
  };
  event.preventDefault();
  $("#outcome_criterion_dialog").dialog(dialogOptions);
});
// Expand/collapse an outcome row. Expanding an outcome whose details are
// not loaded yet fetches them first; the optional callback (used by
// editOutcome's triggerHandler) runs once the details are in.
// NOTE(review): the "loading details..." text is not translated, unlike
// the other user-facing strings in this handler.
$(".show_details_link,.hide_details_link").click(function(event, callback) {
  event.preventDefault();
  var $outcome = $(this).closest(".learning_outcome");
  if($(this).hasClass('show_details_link')) {
    if($outcome.hasClass('loaded')) {
      $outcome.addClass('expanded');
    } else {
      var $link = $(this);
      $link.text("loading details...");
      var url = $outcome.find("a.show_details_link").attr('href');
      $.ajaxJSON(url, 'GET', {}, function(data) {
        $link.text(I18n.t("show_details", "show details"));
        outcomes.updateOutcome(data.learning_outcome, $outcome);
        $outcome.addClass('expanded');
        if(callback && $.isFunction(callback)) {
          callback();
        }
      }, function(data) {
        $link.text(I18n.t("details_failed_to_load", "details failed to load, please try again"));
      });
    }
  } else {
    $outcome.removeClass('expanded');
  }
});
// Make every group's child list sortable, and wire the per-group actions.
$('#outcomes .child_outcomes').sortable(outcomes.sortableOptions);
$('.delete_group_link').click(function(event) {
  var $group = $(this).closest('.outcome_group');
  event.preventDefault();
  outcomes.deleteOutcomeGroup($group);
});
$('.edit_group_link').click(function(event) {
  var $group = $(this).closest('.outcome_group');
  event.preventDefault();
  outcomes.editOutcomeGroup($group);
});
// "Import selected outcomes" in the find-outcome dialog: for every checked
// outcome, clone its data into the main outcomes tree immediately and POST
// the association to the server, rolling the row back if the request fails.
$("#find_outcome_dialog .select_outcomes_link").click(function(event) {
  event.preventDefault();
  $("#find_outcome_dialog .select_outcome_checkbox:checked").each(function() {
    var $outcome_select = $(this).parents(".outcomes_dialog_select");
    // Fix: 'id' and '$outcome' were each declared twice with var in this
    // scope (and 'id' was redundantly re-read from the dialog node); the
    // dialog preview node now has its own name so every variable is
    // declared exactly once.
    var id = $outcome_select.getTemplateData({textValues: ['id']}).id;
    var $dialog_outcome = $("#outcome_dialog_" + id);
    var group_id = $("#outcomes .outcome_group:first > .header").getTemplateData({textValues: ['id']}).id;
    var url = $.replaceTags($("#find_outcome_dialog .add_outcome_url").attr('href'), 'learning_outcome_id', id);
    url = $.replaceTags(url, 'learning_outcome_group_id', group_id);
    var data = $dialog_outcome.getTemplateData({textValues: ['id', 'short_description', 'description']});
    data.permissions = {};
    // Optimistically render the outcome in the tree while the POST runs.
    var $outcome = outcomes.updateOutcome(data);
    $("html,body").scrollTo($outcome);
    $outcome.loadingImage();
    $("#find_outcome_dialog").dialog('close');
    $.ajaxJSON(url, 'POST', {}, function(data) {
      $outcome.loadingImage('remove');
      outcomes.updateOutcome(data.learning_outcome);
    }, function() {
      // Server rejected the add: undo the optimistic insert.
      $outcome.loadingImage('remove');
      $outcome.remove();
    });
  });
});
// Row-level actions for individual outcomes plus the two "add" links.
$(".edit_outcome_link").click(function(event) {
  event.preventDefault();
  outcomes.editOutcome($(this).parents(".learning_outcome"));
});
$(".delete_outcome_link").click(function(event) {
  event.preventDefault();
  outcomes.deleteOutcome($(this).parents(".learning_outcome"));
});
$(".add_outcome_link").click(function(event) {
  event.preventDefault();
  var $group = $(this).closest(".outcome_group");
  // Strict comparison, consistent with the other .length checks in this
  // file; null tells editOutcome to fall back to the first group.
  if($group.length === 0) { $group = null; }
  outcomes.editOutcome(null, $group);
});
$(".add_outcome_group_link").click(function(event) {
  event.preventDefault();
  outcomes.editOutcomeGroup();
});
// Cancel buttons simply dismiss the corresponding inline editor.
$("#edit_outcome_group_form .cancel_button").click(function(event) {
  outcomes.hideEditOutcomeGroup();
});
$("#edit_outcome_form .cancel_button").click(function(event) {
  outcomes.hideEditOutcome();
});
// Side-tab selection in the find-outcome dialog: highlight the clicked tab
// and reveal only the matching outcome preview pane. Clicks landing on the
// checkbox itself are ignored so it can still be toggled.
$("#find_outcome_dialog .outcomes_dialog_select").click(function(event) {
  if($(event.target).closest("input").length > 0) { return; }
  event.preventDefault();
  var $tab = $(this),
      $dialog = $("#find_outcome_dialog"),
      id = $tab.getTemplateData({textValues: ['id']}).id;
  $dialog.find(".outcomes_dialog_select.selected_side_tab").removeClass('selected_side_tab');
  $tab.addClass('selected_side_tab');
  $dialog.find(".outcomes_dialog_outcome").hide();
  $dialog.find("#outcome_dialog_" + id).show();
});
// "Find existing outcome" dialog: lazily loads the outcome catalog the
// first time the dialog opens and renders both the side-tab list and the
// per-outcome preview panes.
$(".find_outcome_link").click(function(event) {
  var $dialog = $("#find_outcome_dialog");
  event.preventDefault();
  $dialog.dialog({
    width: 600,
    height: 350,
    title: I18n.t("find_existing_outcome", 'Find Existing Outcome')
  });
  if(!$dialog.hasClass('loaded')) {
    $dialog.find(".loading_message").text(I18n.t("loading_outcomes", "Loading outcomes..."));
    var url = $dialog.find(".outcomes_url").attr('href');
    $.ajaxJSON(url, 'GET', {}, function(data) {
      // Fix: the loading message used to be removed unconditionally
      // *before* the empty check, so the "No outcomes found" text was
      // written to an element no longer in the DOM and could never be
      // seen. Only remove it when there is something to render, and
      // translate the empty-state text like the other messages here.
      if(data.length === 0) {
        $dialog.find(".loading_message").text(I18n.t("no_outcomes_found", "No outcomes found"));
      } else {
        $dialog.find(".loading_message").remove();
      }
      for(var idx in data) {
        var outcome = data[idx].learning_outcome;
        var $outcome_select = $dialog.find(".outcomes_dialog_select.blank:first").clone(true);
        $outcome_select.fillTemplateData({data: outcome}).removeClass('blank');
        $dialog.find(".outcomes_dialog_outcomes_select").append($outcome_select.show());
        var $outcome = $dialog.find(".outcomes_dialog_outcome.blank:first").clone(true);
        $outcome.removeClass('blank');
        $outcome.data('outcome', outcome);
        $outcome.find(".criterion.blank").hide();
        outcome.outcome_total = outcome.points_possible;
        $outcome.fillTemplateData({
          data: outcome,
          htmlValues: ['description'],
          id: 'outcome_dialog_' + outcome.id
        });
        $dialog.find(".outcomes_dialog_outcomes").append($outcome);
      }
      $dialog.find(".outcomes_dialog_holder").show();
      // Pre-select the first visible tab.
      $dialog.find(".outcomes_dialog_outcomes_select .outcomes_dialog_select:visible:first").click();
      $dialog.addClass('loaded');
    }, function(data) {
      $dialog.find(".loading_message").text(I18n.t("loading_outcomes_failed", "Loading outcomes failed, please try again"));
    });
  }
});
// Saves the outcome editor form. The owning group id is read from the
// group the form currently sits inside; a brand-new outcome's placeholder
// row is re-labelled "outcome_adding" while the request is in flight.
$("#edit_outcome_form").formSubmit({
  processData: function(data) {
    data['learning_outcome_group_id'] = $(this).closest(".outcome_group").find(".header").first().getTemplateData({textValues: ['id']}).id;
    return data;
  },
  beforeSubmit: function(data) {
    // NOTE(review): this looks for .prev(".outcome") while success() uses
    // .prev(".learning_outcome") — confirm which class the row actually
    // carries; if it is .learning_outcome, this branch never fires.
    var $outcome = $(this).prev(".outcome");
    if($outcome.attr('id') == 'outcome_new') {
      $outcome.attr('id', 'outcome_adding');
    }
    $(this).loadingImage();
  },
  success: function(data) {
    $(this).loadingImage('remove');
    outcomes.updateOutcome(data.learning_outcome, $(this).prev(".learning_outcome"));
    outcomes.hideEditOutcome();
  },
  error: function(data) {
    $(this).loadingImage('remove');
    $(this).formErrors(data);
  }
});
// Saves the outcome-group editor form. The parent group id travels as
// learning_outcome_group[learning_outcome_group_id]; a brand-new group's
// placeholder is re-labelled "group_adding" while the request is in flight.
$("#edit_outcome_group_form").formSubmit({
  processData: function(data) {
    var group_id = $(this).parent().closest(".outcome_group").children(".header").getTemplateData({textValues: ['id']}).id;
    data['learning_outcome_group[learning_outcome_group_id]'] = group_id;
    return data;
  },
  beforeSubmit: function(data) {
    var $group = $(this).prev(".outcome_group");
    if($group.attr('id') == 'group_new') {
      $group.attr('id', 'group_adding');
    }
    $(this).loadingImage();
  },
  success: function(data) {
    $(this).loadingImage('remove');
    outcomes.updateOutcomeGroup(data.learning_outcome_group, $(this).prev(".outcome_group"));
    outcomes.hideEditOutcomeGroup();
  },
  error: function(data) {
    $(this).loadingImage('remove');
    $(this).formErrors(data);
  }
});
// Toggles the outcome description editor between rich-text and raw HTML.
$("#edit_outcome_form .switch_views_link").click(function(event) {
  event.preventDefault();
  $("#edit_outcome_form textarea:first").editorBox('toggle');
});
// Toggles the rubric-example section of the outcome form. When it is shown
// for the first time (no rating rows yet), the three default ratings are
// seeded and mastery is preset to 3.
$("#outcome_include_rubric_example").change(function() {
  var $form = $(this).parents("form");
  $form.find(".rubric_criterion").showIf($(this).attr('checked'));
  $form.find(".outcome_rating_points:first").blur();
  if(!$form.find(".outcome_criterion_title").val()) {
    $form.find(".outcome_criterion_title").val($form.find(".outcome_short_description").val());
  }
  if($form.find(".rating:not(.blank)").length === 0) {
    // Seed the default rating rows. This used to be three copy-pasted
    // blocks; the same rows are now driven from a [description, points]
    // table (ratingCounter still hands each row a unique input index).
    $.each([
      [I18n.t("criteria.exceeds_expectations", "Exceeds Expectations"), "5"],
      [I18n.t("criteria.meets_expectations", "Meets Expectations"), "3"],
      [I18n.t("criteria.does_not_meet_expectations", "Does Not Meet Expectations"), "0"]
    ], function(_i, pair) {
      var idx = outcomes.ratingCounter++;
      var $rating = $form.find(".rating.blank:first").clone(true).removeClass('blank');
      $rating.find(".outcome_rating_description").val(pair[0]).attr('name', 'learning_outcome[rubric_criterion][ratings][' + idx + '][description]');
      $rating.find(".outcome_rating_points").val(pair[1]).attr('name', 'learning_outcome[rubric_criterion][ratings][' + idx + '][points]');
      $form.find(".add_holder").before($rating.show());
    });
    $form.find(".mastery_points").val("3");
  }
  $form.find(".outcome_rating_points:first").blur();
});
// Normalizes a rating's point value on blur and recomputes "points
// possible" as the maximum of all rating point values.
$("#edit_outcome_form .outcome_rating_points").blur(function() {
  var maxPoints = 0;
  $(this).val(parseFloat($(this).val()));
  $("#edit_outcome_form .rating:not(.blank) .outcome_rating_points").each(function() {
    // Fix: parseFloat takes no radix argument (that's parseInt); the
    // stray ", 10" has been dropped.
    var points = parseFloat($(this).val());
    if(points) {
      maxPoints = Math.max(points, maxPoints);
    }
  });
  $("#edit_outcome_form .points_possible").text(maxPoints);
});
// Mastery points are coerced to a number, defaulting to 0.
$("#edit_outcome_form .mastery_points").blur(function() {
  $(this).val(parseFloat($(this).val()) || 0);
});
// Adds a new rating row by cloning the first visible one (or the blank
// template when none are visible), wiring uniquely indexed input names and
// bumping the cloned point value by one.
$("#edit_outcome_form .add_rating_link").click(function(event) {
  event.preventDefault();
  var $rating = $(this).parents("table").find("tr.rating:visible:first").clone(true).removeClass('blank');
  if($rating.length === 0) {
    $rating = $(this).parents("table").find("tr.rating.blank").clone(true).removeClass('blank');
  }
  $(this).parents("table").find(".criterion_title").after($rating.show());
  var idx = outcomes.ratingCounter++;
  $rating.find(".outcome_rating_description").attr('name', 'learning_outcome[rubric_criterion][ratings][' + idx + '][description]');
  $rating.find(".outcome_rating_points").attr('name', 'learning_outcome[rubric_criterion][ratings][' + idx + '][points]');
  // Fix: parseFloat takes no radix argument; the stray ", 10" was dropped.
  $rating.find(".outcome_rating_points").val(parseFloat($rating.find(".outcome_rating_points").val()) + 1);
  $rating.find(".outcome_rating_points:first").blur();
  outcomes.sizeRatings();
});
// Removes a rating row entirely.
$("#edit_outcome_form .delete_rating_link").click(function(event) {
  event.preventDefault();
  $(this).parents("tr").remove();
  outcomes.sizeRatings();
});
});
});
| agpl-3.0 |
rbramwell/runbook | src/web/templates/monitors/http-post.js | 456 | <script type="text/javascript">
// Attach Bootstrap help popovers to every http-post monitor form field.
$(document).ready(function() {
  var fieldSelectors = [
    '#http-post-url',
    '#http-post-host',
    '#http-post-payload',
    '#http-post-extra_headers',
    '#http-post-response_regex',
    '#http-post-response_headers'
  ];
  var popoverOptions = {
    placement: 'auto bottom',
    container: 'body',
    trigger: 'click focus'
  };
  for (var i = 0; i < fieldSelectors.length; i++) {
    $(fieldSelectors[i]).popover(popoverOptions);
  }
});
</script>
| agpl-3.0 |
JimMackin/SuiteCRM | modules/Contacts/ContactsJjwg_MapsLogicHook.php | 811 | <?php
// custom/modules/Contacts/ContactsJjwg_MapsLogicHook.php
if (!defined('sugarEntry') || !sugarEntry) {
die('Not A Valid Entry Point');
}
/**
 * Contacts logic hooks that keep jjwg_Maps geocode data in sync.
 *
 * Both hooks are no-ops unless the Maps module's 'logic_hooks_enabled'
 * setting is turned on.
 */
class ContactsJjwg_MapsLogicHook
{
    // jjwg_Maps module object, resolved once via get_module_info().
    public $jjwg_Maps;

    public function __construct()
    {
        $this->jjwg_Maps = get_module_info('jjwg_Maps');
    }

    /**
     * before_save hook: refresh the contact's own geocode fields.
     */
    public function updateGeocodeInfo(&$bean, $event, $arguments)
    {
        $maps = $this->jjwg_Maps;
        if ($maps->settings['logic_hooks_enabled']) {
            $maps->updateGeocodeInfo($bean);
        }
    }

    /**
     * after_save hook: refresh geocode info on the contact's related meetings.
     */
    public function updateRelatedMeetingsGeocodeInfo(&$bean, $event, $arguments)
    {
        $maps = $this->jjwg_Maps;
        if ($maps->settings['logic_hooks_enabled']) {
            $maps->updateRelatedMeetingsGeocodeInfo($bean);
        }
    }
}
| agpl-3.0 |
kaltura/server | api_v3/lib/types/enums/KalturaBaseEntryCloneOptions.php | 257 | <?php
/**
* @package api
* @subpackage enum
*/
class KalturaBaseEntryCloneOptions extends KalturaDynamicEnum implements BaseEntryCloneOptions
{
    /**
     * @return string Name of the server-side enum class whose constants
     *                this dynamic API enum exposes.
     */
    public static function getEnumClass()
    {
        return 'BaseEntryCloneOptions';
    }
}
| agpl-3.0 |
marwoodandrew/superdesk-client-core | scripts/apps/desks/controllers/DeskListController.ts | 1718 | import _ from 'lodash';
DeskListController.$inject = ['$scope', 'desks', 'superdesk', 'privileges', 'tasks', 'api', 'betaService'];

/**
 * Controller for the desks management list: loads desks, stages, roles and
 * the current user's desks, and exposes view-switching helpers on $scope.
 */
export function DeskListController($scope, desks, superdesk, privileges, tasks, api, beta) {
    var userDesks;

    // Returns the _items of an API listing, sorted by name (locale-aware).
    function sorted(result) {
        var items = result._items || [];

        items.sort(compareNames);
        return items;

        function compareNames(a, b) {
            return a.name.localeCompare(b.name);
        }
    }

    desks.initialize()
        .then(() => {
            $scope.desks = desks.desks;
            $scope.deskStages = desks.deskStages;

            desks.fetchCurrentUserDesks().then((deskList) => {
                userDesks = deskList;
            });
        });

    $scope.statuses = tasks.statuses;
    $scope.online_users = false;

    api('roles').query()
        .then((result) => {
            $scope.roles = sorted(result);
        });

    $scope.privileges = privileges.privileges;

    // The "tasks" view is only offered on beta instances.
    beta.isBeta().then((isBeta) => {
        var views = ['content', 'users', 'sluglines'];

        if (isBeta) {
            views = ['content', 'tasks', 'users', 'sluglines'];
        }

        $scope.$applyAsync(() => {
            $scope.views = views;
            $scope.view = $scope.views[0];
        });
    });

    $scope.setView = function(view) {
        $scope.view = view;
    };

    $scope.changeOnlineUsers = function(value) {
        $scope.online_users = value;
    };

    $scope.isMemberOf = function(desk) {
        // Fix: lodash's _.find returns `undefined` (never `null`) when no
        // element matches, so the old `!== null` comparison was always
        // true and every desk looked like one of the user's desks. Coerce
        // the match to a boolean instead; this is also safe before
        // userDesks has loaded (_.find(undefined, ...) -> undefined).
        return Boolean(_.find(userDesks, {_id: desk._id}));
    };

    $scope.openDeskView = function(desk, target) {
        desks.setCurrentDeskId(desk._id);
        superdesk.intent('view', target);
    };
}
| agpl-3.0 |
Khamull/CommunityServer | module/ASC.Mail.Server/DnsCheckerTests/DnsCheckerTests.cs | 2880 | /*
*
* (c) Copyright Ascensio System Limited 2010-2015
*
* This program is freeware. You can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) version 3 as published by the Free Software Foundation (https://www.gnu.org/copyleft/gpl.html).
* In accordance with Section 7(a) of the GNU GPL its Section 15 shall be amended to the effect that
* Ascensio System SIA expressly excludes the warranty of non-infringement of any third-party rights.
*
* THIS PROGRAM IS DISTRIBUTED WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR
* FITNESS FOR A PARTICULAR PURPOSE. For more details, see GNU GPL at https://www.gnu.org/copyleft/gpl.html
*
* You can contact Ascensio System SIA by email at sales@onlyoffice.com
*
* The interactive user interfaces in modified source and object code versions of ONLYOFFICE must display
* Appropriate Legal Notices, as required under Section 5 of the GNU GPL version 3.
*
* Pursuant to Section 7 § 3(b) of the GNU GPL you must retain the original ONLYOFFICE logo which contains
* relevant author attributions when distributing the software. If the display of the logo in its graphic
* form is not reasonably feasible for technical reasons, you must include the words "Powered by ONLYOFFICE"
* in every copy of the program you distribute.
* Pursuant to Section 7 § 3(e) we decline to grant you any rights under trademark law for use of our trademarks.
*
*/
using ASC.Mail.Server.DnsChecker;
using NUnit.Framework;
namespace DnsCheckerTests
{
[TestFixture]
public class DnsCheckerTests
{
    // Domain whose DNS records the tests query.
    // NOTE(review): these are live integration tests — they perform real DNS
    // lookups and will break if the records for this domain ever change.
    private const string DOMAIN = "teamlab.io";
    [Test]
    public void CheckMxExistance()
    {
        // A known-good MX host should be accepted.
        Assert.IsTrue(DnsChecker.IsMxRecordCorrect(DOMAIN, "mx2.teamlab.info"));
    }
    [Test]
    public void CheckMxNonExistance()
    {
        // An arbitrary non-MX value should be rejected.
        Assert.IsFalse(DnsChecker.IsMxRecordCorrect(DOMAIN, "test"));
    }
    [Test]
    public void CheckSpfExistance()
    {
        // The domain's published SPF TXT record should match exactly.
        Assert.IsTrue(DnsChecker.IsTxtRecordCorrect(DOMAIN, "v=spf1 +mx ~all"));
    }
    [Test]
    public void CheckSpfNonExistance()
    {
        Assert.IsFalse(DnsChecker.IsTxtRecordCorrect(DOMAIN, "test"));
    }
    [Test]
    public void CheckDkimExistance()
    {
        // DKIM selector record lookup ("dkim._domainkey.<domain>") with the full public key.
        Assert.IsTrue(DnsChecker.IsTxtRecordCorrect("dkim._domainkey." + DOMAIN, "k=rsa; p=MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgLC1+u4z0rZV7tfQm595DXrZ1CDKPgAuIgqg619Li160wvInhgcX5JGjxofoiRS0A6y7SDokaLhXJU1VKTA55g47rwEi2Sd9v630t8NtfkQxKlaMvP+Ai04W1aeqkZM9k4TJTtA5b2wibFxAH1k8fcY671N3cEjYCe0zx25fpxJjAgMBAAE="));
    }
    [Test]
    public void CheckDkimNonExistance()
    {
        Assert.IsFalse(DnsChecker.IsTxtRecordCorrect("dkim._domainkey." + DOMAIN, "test"));
    }
}
}
| agpl-3.0 |
ecino/compassion-modules | crm_compassion/wizards/portal_wizard.py | 1558 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models
class PortalWizard(models.TransientModel):
    """Create analytic accounts for newly granted portal users."""
    _inherit = 'portal.wizard'

    @api.multi
    def action_apply(self):
        # Delegate the actual portal-access work to the parent wizard first.
        self.ensure_one()
        result = super(PortalWizard, self).action_apply()
        # Lookups are done in en_US so names match regardless of user language.
        user_model = self.env['res.users'].with_context(lang='en_US')
        analytic_model = self.env['account.analytic.account'].with_context(
            lang='en_US')
        for wizard_user in self.user_ids:
            partner = wizard_user.partner_id
            matching_users = user_model.search([('name', '=', partner.name)])
            account_name = partner.name
            if partner and partner.parent_id:
                # Prefix with the parent company name: "Company, Contact".
                account_name = partner.parent_id.name + ", " + account_name
            existing = analytic_model.search([('name', '=', account_name)])
            if not existing and matching_users:
                partner_tag = self.env.ref('crm_compassion.tag_partners')
                analytic_model.create({
                    'name': account_name,
                    'tag_ids': [(4, partner_tag.id)],
                })
        return result
| agpl-3.0 |
Chilastra-Reborn/Chilastra-source-code | bin/scripts/mobile/lair/creature_dynamic/lok_spined_snake_pack2_neutral_none.lua | 340 | lok_spined_snake_pack2_neutral_none = Lair:new {
mobiles = {{"giant_spined_snake",1}},
spawnLimit = 15,
buildingsVeryEasy = {},
buildingsEasy = {},
buildingsMedium = {},
buildingsHard = {},
buildingsVeryHard = {},
buildingType = "none"
}
addLairTemplate("lok_spined_snake_pack2_neutral_none", lok_spined_snake_pack2_neutral_none)
| agpl-3.0 |
fritzSF/superdesk-client-core | spec/ingest_settings_spec.js | 5017 |
var openUrl = require('./helpers/utils').open,
ingestSettings = require('./helpers/pages').ingestSettings,
utils = require('./helpers/utils');
// End-to-end (protractor) tests for the Ingest settings page, focused on the
// routing-scheme editor under the Routing tab.
describe('ingest_settings', () => {
    beforeEach((done) => {
        openUrl('/#/settings/ingest').then(done);
    });
    // NOTE: disabled test (xit) — kept for reference; verifies that selecting
    // the blank option in the Action pane dropdowns clears the previous choice.
    xit('unselecting options in dropdown lists on the Actions pane', () => {
        var deskList, // dropdown list for choosing a desk
            macroList, // dropdown list for choosing a macro
            stageList, // dropdown list for choosing a desk stage
            ruleSettings;
        // open the routing scheme edit modal under the Routing tab, add a new
        // routing rule and open its Action settings pane
        ingestSettings.tabs.routingTab.click();
        ingestSettings.newSchemeBtn.click();
        ingestSettings.writeTextToSchemeName('Test Scheme');
        ingestSettings.newRoutingRuleBtn.click();
        ruleSettings = ingestSettings.routingRuleSettings;
        ruleSettings.tabAction.click();
        // Select values in the three dropdown lists under the FETCH section,
        // then try to deselect them, i.e. select an empty option. If the
        // latter exists, the value of the selected options in all lists should
        // be empty.
        ruleSettings.showFetchBtn.click();
        deskList = ruleSettings.fetchDeskList;
        utils.getListOption(deskList, 2).click();
        stageList = ruleSettings.fetchStageList;
        utils.getListOption(stageList, 2).click();
        macroList = ruleSettings.fetchMacroList;
        utils.getListOption(macroList, 2).click();
        // now select first options and then check that they are all blank
        utils.getListOption(deskList, 1).click();
        utils.getListOption(stageList, 1).click();
        utils.getListOption(macroList, 1).click();
        expect(deskList.$('option:checked').getAttribute('value')).toEqual('');
        expect(stageList.$('option:checked').getAttribute('value')).toEqual('');
        expect(macroList.$('option:checked').getAttribute('value')).toEqual('');
        // We now perform the same check for the dropdown menus under the
        // PUBLISH section
        ruleSettings.showPublishBtn.click();
        deskList = ruleSettings.publishDeskList;
        utils.getListOption(deskList, 2).click();
        stageList = ruleSettings.publishStageList;
        utils.getListOption(stageList, 2).click();
        macroList = ruleSettings.publishMacroList;
        utils.getListOption(macroList, 2).click();
        utils.getListOption(deskList, 1).click();
        utils.getListOption(stageList, 1).click();
        utils.getListOption(macroList, 1).click();
        expect(deskList.$('option:checked').getAttribute('value')).toEqual('');
        expect(stageList.$('option:checked').getAttribute('value')).toEqual('');
        expect(macroList.$('option:checked').getAttribute('value')).toEqual('');
    });
    it('contains the Schedule tab for editing routing schedules', () => {
        var ruleSettings,
            tzOption;
        // Open the routing scheme edit modal under the Routing tab and set
        // routing scheme name.
        // Then add a new routing rule and set its name, and open the Schedule
        // settings pane
        ingestSettings.tabs.routingTab.click();
        ingestSettings.newSchemeBtn.click();
        ingestSettings.schemeNameInput.sendKeys('My Routing Scheme');
        ruleSettings = ingestSettings.routingRuleSettings;
        ingestSettings.newRoutingRuleBtn.click();
        ruleSettings.ruleNameInput.sendKeys('Routing Rule 1');
        ruleSettings.tabSchedule.click();
        // one the Schedule tab now, set a few scheduling options...
        // de-select Saturday and Sunday
        ruleSettings.daysButtons.sat.click();
        ruleSettings.daysButtons.sun.click();
        // pick the time zone
        ruleSettings.timezoneLabel.click();
        ruleSettings.timezoneDeleteBtn.click();
        ruleSettings.timezoneInput.sendKeys('Asia/Singapore');
        tzOption = ruleSettings.timezoneList.first();
        // wait for the typeahead suggestion list to render before clicking
        browser.wait(() => ruleSettings.timezoneList.first().isDisplayed(), 3000);
        tzOption.click();
        // save the routing scheme and check that it was successfull
        ingestSettings.saveBtn.click();
        utils.assertToastMsg('success', 'Routing scheme saved');
    });
    it('cannot save a routing scheme with blank rule', () => {
        // Save must stay disabled until the new rule is given a name.
        ingestSettings.tabs.routingTab.click();
        ingestSettings.newSchemeBtn.click();
        ingestSettings.writeTextToSchemeName('Test Scheme');
        ingestSettings.newRoutingRuleBtn.click();
        expect(ingestSettings.getTextfromRuleName()).toBe('');
        expect(ingestSettings.saveBtn.getAttribute('disabled')).toBeTruthy();
        ingestSettings.writeTextToRuleName('Test Rule');
        expect(ingestSettings.getTextfromRuleName()).toBe('Test Rule');
        expect(ingestSettings.saveBtn.getAttribute('disabled')).toBeFalsy();
    });
});
| agpl-3.0 |
winni67/AndroidAPS | app/src/main/java/info/nightscout/androidaps/plugins/general/automation/triggers/TriggerBg.java | 5778 | package info.nightscout.androidaps.plugins.general.automation.triggers;
import android.widget.LinearLayout;
import androidx.fragment.app.FragmentManager;
import com.google.common.base.Optional;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import info.nightscout.androidaps.Constants;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.data.Profile;
import info.nightscout.androidaps.logging.L;
import info.nightscout.androidaps.plugins.general.automation.elements.Comparator;
import info.nightscout.androidaps.plugins.general.automation.elements.InputBg;
import info.nightscout.androidaps.plugins.general.automation.elements.LabelWithElement;
import info.nightscout.androidaps.plugins.general.automation.elements.LayoutBuilder;
import info.nightscout.androidaps.plugins.general.automation.elements.StaticLabel;
import info.nightscout.androidaps.plugins.iob.iobCobCalculator.GlucoseStatus;
import info.nightscout.androidaps.utils.DateUtil;
import info.nightscout.androidaps.utils.JsonHelper;
import info.nightscout.androidaps.utils.T;
/**
 * Automation trigger that fires when the current glucose value compares
 * (above/below/equal/unavailable) against a configured threshold.
 * Serialized to/from JSON so triggers survive restarts.
 */
public class TriggerBg extends Trigger {
    private static Logger log = LoggerFactory.getLogger(L.AUTOMATION);
    // Threshold value + units the user configured.
    private InputBg bg = new InputBg();
    // Comparison operator (>, <, ==, "is not available", ...).
    private Comparator comparator = new Comparator();
    public TriggerBg() {
        super();
    }
    // Copy constructor used by duplicate().
    private TriggerBg(TriggerBg triggerBg) {
        super();
        bg = new InputBg(triggerBg.bg);
        comparator = new Comparator(triggerBg.comparator);
        lastRun = triggerBg.lastRun;
    }
    public double getValue() {
        return bg.getValue();
    }
    public Comparator getComparator() {
        return comparator;
    }
    public String getUnits() {
        return bg.getUnits();
    }
    public long getLastRun() {
        return lastRun;
    }
    /**
     * @return true when the configured comparison currently holds.
     * Rate-limited: never fires more than once per 5 minutes. A null
     * GlucoseStatus only matches the IS_NOT_AVAILABLE comparator.
     */
    @Override
    public synchronized boolean shouldRun() {
        GlucoseStatus glucoseStatus = GlucoseStatus.getGlucoseStatusData();
        if (lastRun > DateUtil.now() - T.mins(5).msecs()) {
            if (L.isEnabled(L.AUTOMATION))
                log.debug("NOT ready for execution: " + friendlyDescription());
            return false;
        }
        if (glucoseStatus == null && comparator.getValue().equals(Comparator.Compare.IS_NOT_AVAILABLE)) {
            if (L.isEnabled(L.AUTOMATION))
                log.debug("Ready for execution: " + friendlyDescription());
            return true;
        }
        if (glucoseStatus == null) {
            if (L.isEnabled(L.AUTOMATION))
                log.debug("NOT ready for execution: " + friendlyDescription());
            return false;
        }
        // Threshold is converted to mg/dl so the comparison is unit-independent.
        boolean doRun = comparator.getValue().check(glucoseStatus.glucose, Profile.toMgdl(bg.getValue(), bg.getUnits()));
        if (doRun) {
            if (L.isEnabled(L.AUTOMATION))
                log.debug("Ready for execution: " + friendlyDescription());
            return true;
        }
        if (L.isEnabled(L.AUTOMATION))
            log.debug("NOT ready for execution: " + friendlyDescription());
        return false;
    }
    /** Serialize type + configuration into the automation JSON envelope. */
    @Override
    public synchronized String toJSON() {
        JSONObject o = new JSONObject();
        try {
            o.put("type", TriggerBg.class.getName());
            JSONObject data = new JSONObject();
            data.put("bg", bg.getValue());
            data.put("lastRun", lastRun);
            data.put("comparator", comparator.getValue().toString());
            data.put("units", bg.getUnits());
            o.put("data", data);
        } catch (JSONException e) {
            log.error("Unhandled exception", e);
        }
        return o.toString();
    }
    /** Restore configuration from toJSON()'s "data" payload; units before value. */
    @Override
    Trigger fromJSON(String data) {
        try {
            JSONObject d = new JSONObject(data);
            bg.setUnits(JsonHelper.safeGetString(d, "units"));
            bg.setValue(JsonHelper.safeGetDouble(d, "bg"));
            lastRun = JsonHelper.safeGetLong(d, "lastRun");
            comparator.setValue(Comparator.Compare.valueOf(JsonHelper.safeGetString(d, "comparator")));
        } catch (Exception e) {
            log.error("Unhandled exception", e);
        }
        return this;
    }
    @Override
    public int friendlyName() {
        return R.string.glucose;
    }
    /** Human-readable summary shown in the automation UI. */
    @Override
    public String friendlyDescription() {
        if (comparator.getValue().equals(Comparator.Compare.IS_NOT_AVAILABLE))
            return MainApp.gs(R.string.glucoseisnotavailable);
        else {
            return MainApp.gs(bg.getUnits().equals(Constants.MGDL) ? R.string.glucosecomparedmgdl : R.string.glucosecomparedmmol, MainApp.gs(comparator.getValue().getStringRes()), bg.getValue(), bg.getUnits());
        }
    }
    @Override
    public Optional<Integer> icon() {
        return Optional.of(R.drawable.icon_cp_bgcheck);
    }
    @Override
    public Trigger duplicate() {
        return new TriggerBg(this);
    }
    // Fluent setters used by tests/builders.
    TriggerBg setValue(double value) {
        bg.setValue(value);
        return this;
    }
    TriggerBg lastRun(long lastRun) {
        this.lastRun = lastRun;
        return this;
    }
    TriggerBg comparator(Comparator.Compare compare) {
        this.comparator = new Comparator().setValue(compare);
        return this;
    }
    TriggerBg setUnits(String units) {
        bg.setUnits(units);
        return this;
    }
    /** Build the configuration row (label + comparator + value input) in the dialog. */
    @Override
    public void generateDialog(LinearLayout root, FragmentManager fragmentManager) {
        new LayoutBuilder()
                .add(new StaticLabel(R.string.glucose))
                .add(comparator)
                .add(new LabelWithElement(MainApp.gs(R.string.glucose_u, bg.getUnits()), "", bg))
                .build(root);
    }
}
| agpl-3.0 |
YannickB/vertical-community | __unreviewed__/account_wallet/__openerp__.py | 2328 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron. Copyright Yannick Buron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Wallet and Transaction',
'version': '1.0',
'category': 'Accounting',
'author': 'Yannick Buron',
'license': 'AGPL-3',
'description': """
Account Wallet.
===============
Allow each partners to have a wallet and make transactions between them.
------------------------------------------------------------------------
* Transactions between partners
* Based on accounting entries
* Multi-currency and configurable account chart
* Limits management
* Display balances on the partner record, with possible
override of limit and accounts used
""",
'website': 'https://github.com/YannickB/community-management',
'depends': [
'account_accountant',
'account_reversal',
'base_community',
],
'data': [
'security/account_wallet_security.xml',
'security/ir.model.access.csv',
'account_wallet_view.xml',
'res_config_view.xml',
'account_wallet_workflow.xml',
'data/account_wallet_data.xml',
],
'demo': ['data/account_wallet_demo.xml'],
'test': [
'tests/account_wallet_users.yml',
'tests/account_wallet_rights.yml',
'tests/account_wallet_moderator.yml',
'tests/account_wallet_external.yml',
'tests/account_wallet_limits.yml',
'tests/account_wallet_balances.yml',
],
'installable': True,
}
| agpl-3.0 |
wiiam/taylorswift.old | bot/info/User.java | 1454 | package bot.info;
import java.util.ArrayList;
/**
 * In-memory record of an IRC user: identity fields plus the list of channels
 * the bot has seen the user in.
 */
public class User {
    protected String host;
    protected String user;
    protected String modes;
    protected String realname;
    protected String nickname;
    private ArrayList<String> channels;

    /**
     * @param nick     current nickname
     * @param host     hostname/mask
     * @param user     ident/username
     * @param modes    user mode string (e.g. "+r")
     * @param realname real name (gecos) field
     */
    public User(String nick, String host, String user, String modes, String realname) {
        this.host = host;
        this.user = user;
        this.modes = modes;
        this.realname = realname;
        this.nickname = nick;
        this.channels = new ArrayList<String>();
    }

    public String getHost() {
        return host;
    }

    public String getModes() {
        return modes;
    }

    public ArrayList<String> getChannels() {
        return channels;
    }

    public String getUser() {
        return user;
    }

    public String getRealname() {
        return realname;
    }

    public String getNick() {
        return nickname;
    }

    /** Refresh all identity fields (channels are preserved). */
    public void update(String nick, String host, String user, String modes, String realname) {
        this.host = host;
        this.user = user;
        this.modes = modes;
        this.realname = realname;
        this.nickname = nick;
    }

    /**
     * Remove every occurrence of the channel.
     * Bug fix: the old index-based loop skipped the element that shifted into
     * the removed slot, so adjacent duplicates could survive.
     */
    public void removeChan(String chan) {
        while (channels.remove(chan)) {
            // keep removing until no occurrence is left
        }
    }

    public void addChan(String chan) {
        channels.add(chan);
    }

    /** @return true when the user is known to be in the given channel. */
    public boolean isIn(String chan) {
        return channels.contains(chan);
    }

    /**
     * Record a nick change and drop the registered ("r") user mode, since a
     * nick change de-identifies the user with services.
     * Bug fix: Java strings are immutable — the old code called
     * modes.replace("r", "") and discarded the result, so the flag was never
     * actually removed.
     */
    public void changeNick(String to) {
        nickname = to;
        modes = modes.replace("r", "");
    }
}
| agpl-3.0 |
feemjmeem/-tg-station | tools/WebhookProcessor/github_webhook_processor.php | 28594 | <?php
/*
* Github webhook In-game PR Announcer and Changelog Generator for /tg/Station13
* Author: MrStonedOne
* For documentation on the changelog generator see https://tgstation13.org/phpBB/viewtopic.php?f=5&t=5157
* To hide prs from being announced in game, place a [s] in front of the title
* All runtime errors are echo'ed to the webhook's logs in github
*/
/**CREDITS:
* GitHub webhook handler template.
*
* @see https://developer.github.com/webhooks/
* @author Miloslav Hula (https://github.com/milo)
*/
//CONFIGS ARE IN SECRET.PHP, THESE ARE JUST DEFAULTS!
// Shared secret used to verify GitHub's X-Hub-Signature header (must be overridden).
$hookSecret = '08ajh0qj93209qj90jfq932j32r';
// GitHub API token used for all authenticated API calls.
$apiKey = '209ab8d879c0f987d06a09b9d879c0f987d06a09b9d8787d0a089c';
$repoOwnerAndName = "tgstation/tgstation";
// Game servers that receive in-game PR announcements.
$servers = array();
// Self-update: when the tracked branch changes this script, pull the new copy.
$enable_live_tracking = true;
$path_to_script = 'tools/WebhookProcessor/github_webhook_processor.php';
// Fix/Feature PR balance tracking.
$trackPRBalance = true;
$prBalanceJson = '';
$startingPRBalance = 5;
// GitHub team id used by is_maintainer() for org-owned repos.
$maintainer_team_id = 133041;
// User validation mode for announcements: 'org', 'repo' or 'disable'.
$validation = "org";
$validation_count = 1;
// Fix: $tracked_branch was assigned twice (with the same value); keep one.
$tracked_branch = 'master';
$require_changelogs = false;
require_once 'secret.php';
//CONFIG END
// Convert every PHP error into an exception so nothing fails silently.
set_error_handler(function($severity, $message, $file, $line) {
	throw new \ErrorException($message, 0, $severity, $file, $line);
});
// Uncaught exceptions become an HTTP 500, are echoed back (visible in the
// GitHub webhook delivery log) and appended to a local error log.
set_exception_handler(function($e) {
	header('HTTP/1.1 500 Internal Server Error');
	echo "Error on line {$e->getLine()}: " . htmlSpecialChars($e->getMessage());
	file_put_contents('htwebhookerror.log', '['.date(DATE_ATOM).'] '."Error on line {$e->getLine()}: " . $e->getMessage().PHP_EOL, FILE_APPEND);
	die();
});
$rawPost = NULL;
// Refuse to run with the shipped default secret.
if (!$hookSecret || $hookSecret == '08ajh0qj93209qj90jfq932j32r')
	throw new \Exception("Hook secret is required and can not be default");
if (!isset($_SERVER['HTTP_X_HUB_SIGNATURE'])) {
	throw new \Exception("HTTP header 'X-Hub-Signature' is missing.");
} elseif (!extension_loaded('hash')) {
	throw new \Exception("Missing 'hash' extension to check the secret code validity.");
}
// Signature header format is "<algo>=<hmac hexdigest>".
list($algo, $hash) = explode('=', $_SERVER['HTTP_X_HUB_SIGNATURE'], 2) + array('', '');
if (!in_array($algo, hash_algos(), TRUE)) {
	throw new \Exception("Hash algorithm '$algo' is not supported.");
}
// Verify the payload HMAC against our shared secret before trusting anything.
$rawPost = file_get_contents('php://input');
if ($hash !== hash_hmac($algo, $rawPost, $hookSecret)) {
	throw new \Exception('Hook secret does not match.');
}
$contenttype = null;
//apache and nginx/fastcgi/phpfpm call this two different things.
if (!isset($_SERVER['HTTP_CONTENT_TYPE'])) {
	if (!isset($_SERVER['CONTENT_TYPE']))
		throw new \Exception("Missing HTTP 'Content-Type' header.");
	else
		$contenttype = $_SERVER['CONTENT_TYPE'];
} else {
	$contenttype = $_SERVER['HTTP_CONTENT_TYPE'];
}
if (!isset($_SERVER['HTTP_X_GITHUB_EVENT'])) {
	throw new \Exception("Missing HTTP 'X-Github-Event' header.");
}
// GitHub can deliver JSON directly or form-encoded under "payload".
switch ($contenttype) {
	case 'application/json':
		$json = $rawPost ?: file_get_contents('php://input');
		break;
	case 'application/x-www-form-urlencoded':
		$json = $_POST['payload'];
		break;
	default:
		throw new \Exception("Unsupported content type: $contenttype");
}
# Payload structure depends on triggered event
# https://developer.github.com/v3/activity/events/types/
$payload = json_decode($json, true);
// Dispatch on the event type; reviews by maintainers clear "Needs Review".
switch (strtolower($_SERVER['HTTP_X_GITHUB_EVENT'])) {
	case 'ping':
		echo 'pong';
		break;
	case 'pull_request':
		handle_pr($payload);
		break;
	case 'pull_request_review':
		if($payload['action'] == 'submitted'){
			$lower_state = strtolower($payload['review']['state']);
			if(($lower_state == 'approved' || $lower_state == 'changes_requested') && is_maintainer($payload, $payload['review']['user']['login']))
				remove_ready_for_review($payload);
		}
		break;
	default:
		header('HTTP/1.0 404 Not Found');
		echo "Event:$_SERVER[HTTP_X_GITHUB_EVENT] Payload:\n";
		print_r($payload); # For debug only. Can be found in GitHub hook log.
		die();
}
// Perform an authenticated GitHub API request and return the raw response body.
// Array content is JSON-encoded; string content is sent as-is.
function apisend($url, $method = 'GET', $content = NULL) {
	global $apiKey;
	if (is_array($content))
		$content = json_encode($content);
	$http_options = array(
		'method' => $method,
		'header' =>
			"Content-type: application/json\r\n".
			'Authorization: token ' . $apiKey,
		'ignore_errors' => true,
		'user_agent' => 'tgstation13.org-Github-Automation-Tools'
	);
	if ($content)
		$http_options['content'] = $content;
	$context = stream_context_create(array('http' => $http_options));
	return file_get_contents($url, false, $context);
}
// A PR author is "validated" (announceable) once they have enough merged PRs,
// counted via the GitHub issue search API. Scope depends on $validation:
// 'disable' (always true), 'repo' (this repo only) or default (whole org/user).
function validate_user($payload) {
	global $validation, $validation_count;
	if (empty($validation))
		$validation = 'org';
	$query = array();
	switch (strtolower($validation)) {
		case 'disable':
			return TRUE;
		case 'repo':
			$query['repo'] = $payload['pull_request']['base']['repo']['full_name'];
			break;
		default:
			$query['user'] = $payload['pull_request']['base']['repo']['owner']['login'];
			break;
	}
	$query['author'] = $payload['pull_request']['user']['login'];
	$query['is'] = 'merged';
	// Build "key:value" terms joined with '+' for the search endpoint.
	$terms = array();
	foreach ($query as $key => $value)
		$terms[] = urlencode($key) . ':' . urlencode($value);
	$response = apisend('https://api.github.com/search/issues?q=' . implode('+', $terms));
	$result = json_decode($response, TRUE);
	return $result['total_count'] >= (int)$validation_count;
}
// Fetch the PR's current issue labels and return just their names.
function get_labels($payload){
	$response = apisend($payload['pull_request']['issue_url'] . '/labels');
	$names = array();
	foreach (json_decode($response, true) as $label)
		$names[] = $label['name'];
	return $names;
}
// If the PR title contains $title_tag (case-insensitive), append $label to the
// given array (by reference) and return true; otherwise leave it untouched.
function check_tag_and_replace($payload, $title_tag, $label, &$array_to_add_label_to){
	$title = $payload['pull_request']['title'];
	if(stripos($title, $title_tag) === FALSE)
		return false;
	$array_to_add_label_to[] = $label;
	return true;
}
// Replace the PR's label set: existing labels plus $labels, minus $remove.
// Uses PUT so the resulting set is exactly what we compute here.
function set_labels($payload, $labels, $remove) {
	$combined = array_unique(array_merge($labels, get_labels($payload)));
	if ($remove) {
		$combined = array_diff($combined, $remove);
	}
	// Reindex so the payload serializes as a JSON list, not an object.
	$url = $payload['pull_request']['issue_url'] . '/labels';
	echo apisend($url, 'PUT', array_values($combined));
}
//rip bs-12
// Compute the label changes for a PR: returns array(labels_to_add, labels_to_remove).
// $opened == true means this is the first event for the PR, so one-shot labels
// (changelog-derived, Refactor/Revert/Removal from the title) are applied.
function tag_pr($payload, $opened) {
	//get the mergeable state
	$url = $payload['pull_request']['url'];
	$payload['pull_request'] = json_decode(apisend($url), TRUE);
	if($payload['pull_request']['mergeable'] == null) {
		//STILL not ready. Give it a bit, then try one more time
		sleep(10);
		$payload['pull_request'] = json_decode(apisend($url), TRUE);
	}
	$tags = array();
	$title = $payload['pull_request']['title'];
	if($opened) { //you only have one shot on these ones so as to not annoy maintainers
		$tags = checkchangelog($payload, false);
		if(strpos(strtolower($title), 'refactor') !== FALSE)
			$tags[] = 'Refactor';
		if(strpos(strtolower($title), 'revert') !== FALSE)
			$tags[] = 'Revert';
		if(strpos(strtolower($title), 'removes') !== FALSE)
			$tags[] = 'Removal';
	}
	$remove = array();
	// mergeable may still be null here; only definite TRUE/FALSE change the label.
	$mergeable = $payload['pull_request']['mergeable'];
	if($mergeable === TRUE) //only look for the false value
		$remove[] = 'Merge Conflict';
	else if ($mergeable === FALSE)
		$tags[] = 'Merge Conflict';
	// Path-based labels: $treetags are kept in sync (added and removed),
	// $addonlytags are only ever added.
	$treetags = array('_maps' => 'Map Edit', 'tools' => 'Tools', 'SQL' => 'SQL', '.github' => 'GitHub');
	$addonlytags = array('icons' => 'Sprites', 'sound' => 'Sound', 'config' => 'Config Update', 'code/controllers/configuration/entries' => 'Config Update', 'tgui' => 'UI');
	foreach($treetags as $tree => $tag)
		if(has_tree_been_edited($payload, $tree))
			$tags[] = $tag;
		else
			$remove[] = $tag;
	foreach($addonlytags as $tree => $tag)
		if(has_tree_been_edited($payload, $tree))
			$tags[] = $tag;
	// Title markers: [dnm] => Do Not Merge; [wip] => WIP; [ready] clears WIP
	// and flags the PR for review.
	check_tag_and_replace($payload, '[dnm]', 'Do Not Merge', $tags);
	if(!check_tag_and_replace($payload, '[wip]', 'Work In Progress', $tags) && check_tag_and_replace($payload, '[ready]', 'Work In Progress', $remove))
		$tags[] = 'Needs Review';
	return array($tags, $remove);
}
// Strip the "Needs Review" label (if present) and push the resulting set.
// $labels may be supplied by the caller to save an API round-trip.
function remove_ready_for_review($payload, $labels = null){
	if($labels == null)
		$labels = get_labels($payload);
	$position = array_search('Needs Review', $labels);
	if($position !== FALSE)
		unset($labels[$position]);
	apisend($payload['pull_request']['issue_url'] . '/labels', 'PUT', $labels);
}
// Dismiss a PR review by id, recording $reason as the dismissal message.
function dismiss_review($payload, $id, $reason){
	$url = $payload['pull_request']['url'] . '/reviews/' . $id . '/dismissals';
	apisend($url, 'PUT', array('message' => $reason));
}
// Fetch all reviews on the PR as a decoded array.
function get_reviews($payload){
	$raw = apisend($payload['pull_request']['url'] . '/reviews');
	return json_decode($raw, true);
}
// Decide whether the "Needs Review" label should be added or removed after a
// push. Returns array($labels, $remove) for set_labels(). Rules:
//  - PRs labeled DNM/WIP/Merge Conflict are never "Needs Review";
//  - stale maintainer approvals are dismissed (new commits arrived);
//  - outstanding (non-outdated) change requests block the label.
function check_ready_for_review($payload, $labels = null, $remove = array()){
	$r4rlabel = 'Needs Review';
	$labels_which_should_not_be_ready = array('Do Not Merge', 'Work In Progress', 'Merge Conflict');
	$has_label_already = false;
	$should_not_have_label = false;
	if($labels == null)
		$labels = get_labels($payload);
	$returned = array($labels, $remove);
	//if the label is already there we may need to remove it
	foreach($labels as $L){
		if(in_array($L, $labels_which_should_not_be_ready))
			$should_not_have_label = true;
		if($L == $r4rlabel)
			$has_label_already = true;
	}
	if($has_label_already && $should_not_have_label){
		$remove[] = $r4rlabel;
		return $returned;
	}
	//find all reviews to see if changes were requested at some point
	$reviews = get_reviews($payload);
	$reviews_ids_with_changes_requested = array();
	$dismissed_an_approved_review = false;
	// Only maintainer reviews count; approvals are dismissed as out of date
	// because this runs on new pushes.
	foreach($reviews as $R)
		if(is_maintainer($payload, $R['user']['login'])){
			$lower_state = strtolower($R['state']);
			if($lower_state == 'changes_requested')
				$reviews_ids_with_changes_requested[] = $R['id'];
			else if ($lower_state == 'approved'){
				dismiss_review($payload, $R['id'], 'Out of date review');
				$dismissed_an_approved_review = true;
			}
		}
	if(!$dismissed_an_approved_review && count($reviews_ids_with_changes_requested) == 0){
		if($has_label_already)
			$remove[] = $r4rlabel;
		return $returned; //no need to be here
	}
	if(count($reviews_ids_with_changes_requested) > 0){
		//now get the review comments for the offending reviews
		$review_comments = json_decode(apisend($payload['pull_request']['review_comments_url']), true);
		foreach($review_comments as $C){
			//make sure they are part of an offending review
			if(!in_array($C['pull_request_review_id'], $reviews_ids_with_changes_requested))
				continue;
			//review comments which are outdated have a null position
			if($C['position'] !== null){
				if($has_label_already)
					$remove[] = $r4rlabel;
				return $returned; //no need to tag
			}
		}
	}
	//finally, add it if necessary
	if(!$has_label_already){
		$labels[] = $r4rlabel;
	}
	return $returned;
}
// When changelogs are required, keep a bot review in sync with the PR's
// changelog state: request changes while the changelog is missing/malformed,
// dismiss that review once it is fixed.
function check_dismiss_changelog_review($payload){
	// Bug fix: the config variable is $require_changelogs (see the defaults at
	// the top of this file); the old "global $require_changelog" referenced an
	// undefined name, so this function always returned immediately.
	// NOTE(review): confirm secret.php does not still set the misspelled name.
	global $require_changelogs;
	global $no_changelog;
	if(!$require_changelogs)
		return;
	// checkchangelog() refreshes $no_changelog as a side effect.
	if(!$no_changelog)
		checkchangelog($payload, false);
	$review_message = 'Your changelog for this PR is either malformed or non-existent. Please create one to document your changes.';
	$reviews = get_reviews($payload);
	if($no_changelog){
		//check and see if we've already have this review
		foreach($reviews as $R)
			if($R['body'] == $review_message && strtolower($R['state']) == 'changes_requested')
				return;
		//otherwise make it ourself
		apisend($payload['pull_request']['url'] . '/reviews', 'POST', array('body' => $review_message, 'event' => 'REQUEST_CHANGES'));
	}
	else
		//kill previous reviews
		foreach($reviews as $R)
			if($R['body'] == $review_message && strtolower($R['state']) == 'changes_requested')
				dismiss_review($payload, $R['id'], 'Changelog added/fixed.');
}
// Entry point for pull_request webhook events: applies labels, runs the
// changelog/balance machinery on merge, and announces the PR to game servers
// unless the title carries the secret [s] marker or the author is unvalidated.
function handle_pr($payload) {
	global $no_changelog;
	$action = 'opened';
	$validated = validate_user($payload);
	switch ($payload["action"]) {
		case 'opened':
			list($labels, $remove) = tag_pr($payload, true);
			set_labels($payload, $labels, $remove);
			if($no_changelog)
				check_dismiss_changelog_review($payload);
			// Warn authors with a negative Fix/Feature balance on feature-ish PRs.
			if(get_pr_code_friendliness($payload) <= 0){
				$balances = pr_balances();
				$author = $payload['pull_request']['user']['login'];
				if(isset($balances[$author]) && $balances[$author] < 0 && !is_maintainer($payload, $author))
					create_comment($payload, 'You currently have a negative Fix/Feature pull request delta of ' . $balances[$author] . '. Maintainers may close this PR at will. Fixing issues or improving the codebase will improve this score.');
			}
			break;
		case 'edited':
			check_dismiss_changelog_review($payload);
			// Intentional fall-through: edits also re-run the labeling pass.
		case 'synchronize':
			list($labels, $remove) = tag_pr($payload, false);
			if($payload['action'] == 'synchronize')
				list($labels, $remove) = check_ready_for_review($payload, $labels, $remove);
			set_labels($payload, $labels, $remove);
			return;
		case 'reopened':
			$action = $payload['action'];
			break;
		case 'closed':
			if (!$payload['pull_request']['merged']) {
				$action = 'closed';
			}
			else {
				$action = 'merged';
				auto_update($payload);
				checkchangelog($payload, true);
				update_pr_balance($payload);
				$validated = TRUE; //pr merged events always get announced.
			}
			break;
		default:
			return;
	}
	if (strpos(strtolower($payload['pull_request']['title']), '[s]') !== false) {
		echo "PR Announcement Halted; Secret tag detected.\n";
		return;
	}
	if (!$validated) {
		echo "PR Announcement Halted; User not validated.\n";
		return;
	}
	$msg = '['.$payload['pull_request']['base']['repo']['full_name'].'] Pull Request '.$action.' by '.htmlSpecialChars($payload['sender']['login']).': <a href="'.$payload['pull_request']['html_url'].'">'.htmlSpecialChars('#'.$payload['pull_request']['number'].' '.$payload['pull_request']['user']['login'].' - '.$payload['pull_request']['title']).'</a>';
	sendtoallservers('?announce='.urlencode($msg), $payload);
}
//creates a comment on the payload issue
// Post an issue comment on the PR. The body is pre-encoded so apisend()
// transmits it verbatim.
function create_comment($payload, $comment){
	$body = json_encode(array('body' => $comment));
	apisend($payload['pull_request']['comments_url'], 'POST', $body);
}
//returns the payload issue's labels as a flat array
// Return the PR's label names as a flat array (fresh API fetch).
function get_pr_labels_array($payload){
	$decoded = json_decode(apisend($payload['pull_request']['issue_url'] . '/labels'), true);
	$names = array();
	foreach ($decoded as $entry)
		$names[] = $entry['name'];
	return $names;
}
//helper for getting the path the the balance json file
// Path to the PR-balance JSON store; falls back to a local default when the
// config leaves $prBalanceJson empty.
function pr_balance_json_path(){
	global $prBalanceJson;
	if ($prBalanceJson != '')
		return $prBalanceJson;
	return 'pr_balances.json';
}
//return the assoc array of login -> balance for prs
// Load the login => balance map from disk; empty map when no file exists yet.
function pr_balances(){
	$path = pr_balance_json_path();
	if (!file_exists($path))
		return array();
	return json_decode(file_get_contents($path), true);
}
//returns the difference in PR balance a pull request would cause
// Score the PR's effect on its author's Fix/Feature balance from its labels.
// Positive labels (Fix, Refactor, ...) take the best value seen; negative
// labels (Feature, Balance) apply unless a positive one is also present;
// 'PRB: No Update' zeroes the score outright. $oldbalance is only used by the
// 'PRB: Reset' label to snap the balance back to $startingPRBalance.
// Fix: dropped the unused local $is_neutral.
function get_pr_code_friendliness($payload, $oldbalance = null){
	global $startingPRBalance;
	if($oldbalance == null)
		$oldbalance = $startingPRBalance;
	$labels = get_pr_labels_array($payload);
	//anything not in this list defaults to 0
	$label_values = array(
		'Fix' => 2,
		'Refactor' => 2,
		'CI/Tests' => 3,
		'Code Improvement' => 1,
		'Grammar and Formatting' => 1,
		'Priority: High' => 4,
		'Priority: CRITICAL' => 5,
		'Logging' => 1,
		'Feedback' => 1,
		'Performance' => 3,
		'Feature' => -1,
		'Balance/Rebalance' => -1,
		'PRB: Reset' => $startingPRBalance - $oldbalance,
	);
	$affecting = 0;
	$found_something_positive = false;
	foreach($labels as $l){
		if($l == 'PRB: No Update') { //no effect on balance
			$affecting = 0;
			break;
		}
		else if(isset($label_values[$l])) {
			$friendliness = $label_values[$l];
			if($friendliness > 0)
				$found_something_positive = true;
			$affecting = $found_something_positive ? max($affecting, $friendliness) : $friendliness;
		}
	}
	return $affecting;
}
// True when $author is a maintainer: for org-owned repos with a configured
// team id, check team membership; otherwise fall back to repo collaborator
// permission (admin or write).
function is_maintainer($payload, $author){
	global $maintainer_team_id;
	$owner_is_org = $payload['pull_request']['base']['repo']['owner']['type'] == 'Organization';
	if($maintainer_team_id != null && $owner_is_org) {
		$membership_url = 'https://api.github.com/teams/' . $maintainer_team_id . '/memberships/' . $author;
		$membership = json_decode(apisend($membership_url), true);
		return isset($membership['state']) && $membership['state'] == 'active';
	}
	$collaburl = str_replace('{/collaborator}', '/' . $author, $payload['pull_request']['base']['repo']['collaborators_url']) . '/permission';
	$perms = json_decode(apisend($collaburl), true);
	return in_array($perms['permission'], array('admin', 'write'));
}
//payload is a merged pull request, updates the pr balances file with the correct positive or negative balance based on comments
//Side effects: may post a comment on the PR, and rewrites the balance JSON file.
function update_pr_balance($payload) {
global $startingPRBalance;
global $trackPRBalance;
//feature flag: balance tracking can be disabled entirely
if(!$trackPRBalance)
return;
$author = $payload['pull_request']['user']['login'];
$balances = pr_balances();
//first PR from this author: seed with the configured starting balance
if(!isset($balances[$author]))
$balances[$author] = $startingPRBalance;
//delta is computed from the PR's labels (see get_pr_code_friendliness)
$friendliness = get_pr_code_friendliness($payload, $balances[$author]);
$balances[$author] += $friendliness;
//maintainers are immune from the warning/congratulation comments
if(!is_maintainer($payload, $author)){ //immune
//still below zero and this PR made it worse: warn the author
if($balances[$author] < 0 && $friendliness < 0)
create_comment($payload, 'Your Fix/Feature pull request delta is currently below zero (' . $balances[$author] . '). Maintainers may close future Feature/Tweak/Balance PRs. Fixing issues or helping to improve the codebase will raise this score.');
//crossed from negative to non-negative with this PR: congratulate
else if($balances[$author] >= 0 && ($balances[$author] - $friendliness) < 0)
create_comment($payload, 'Your Fix/Feature pull request delta is now above zero (' . $balances[$author] . '). Feel free to make Feature/Tweak/Balance PRs.');
}
//persist the updated balances (overwrites the whole file)
$balances_file = fopen(pr_balance_json_path(), 'w');
fwrite($balances_file, json_encode($balances));
fclose($balances_file);
}
//process-wide cache for the PR diff so it is fetched at most once per request
$github_diff = null;
//fetches (and memoizes) the unified diff of the pull request
function get_diff($payload) {
    global $github_diff;
    if ($github_diff !== null) {
        return $github_diff;
    }
    //go to the diff url
    $github_diff = file_get_contents($payload['pull_request']['diff_url']);
    return $github_diff;
}
//self-update: when a merged PR on the tracked branch edits this script,
//download the new version from raw.githubusercontent, post a comment with
//the diff and the new code, and overwrite the local script file
function auto_update($payload){
    global $enable_live_tracking;
    global $path_to_script;
    global $repoOwnerAndName;
    global $tracked_branch;
    global $github_diff;
    if(!$enable_live_tracking || !has_tree_been_edited($payload, $path_to_script) || $payload['pull_request']['base']['ref'] != $tracked_branch)
        return;
    get_diff($payload);
    $content = file_get_contents('https://raw.githubusercontent.com/' . $repoOwnerAndName . '/' . $tracked_branch . '/'. $path_to_script);
    $content_diff = "### Diff not available. :slightly_frowning_face:";
    //extract just this script's section of the PR diff
    if($github_diff && preg_match('/(diff --git a\/' . preg_quote($path_to_script, '/') . '.+?)(?:\Rdiff|$)/s', $github_diff, $matches)) {
        $script_diff = $matches[1];
        if($script_diff) {
            $content_diff = "``" . "`DIFF\n" . $script_diff ."\n``" . "`";
        }
    }
    //bugfix: the trailing "\n</details>" must be double-quoted — in the original
    //single-quoted string, a literal backslash-n was emitted into the comment
    create_comment($payload, "Edit detected. Self updating... \n<details><summary>Here are my changes:</summary>\n\n" . $content_diff . "\n</details>\n<details><summary>Here is my new code:</summary>\n\n``" . "`HTML+PHP\n" . $content . "\n``" . "`\n</details>");
    $code_file = fopen(basename($path_to_script), 'w');
    fwrite($code_file, $content);
    fclose($code_file);
}
//true if the PR diff touches any file under the given tree (path prefix)
//e.g. diff --git a/_maps/map_files/Cerestation/cerestation.dmm b/_maps/map_files/Cerestation/cerestation.dmm
function has_tree_been_edited($payload, $tree){
    global $github_diff;
    get_diff($payload);
    //preg_match returns false on error, and false !== 0 would have counted as a
    //match in the original code; require an actual match with === 1 instead
    return ($github_diff !== FALSE) && (preg_match('/^diff --git a\/' . preg_quote($tree, '/') . '/m', $github_diff) === 1);
}
//set to true by checkchangelog when the PR body contains no usable changelog entries
$no_changelog = false;
//Parses the :cl:/🆑 changelog block out of a pull request body.
//Returns the list of label tags implied by the entries. When $compile is true
//and a changelog was found, also uploads an auto-generated YAML changelog file
//to the repository via the GitHub contents API.
function checkchangelog($payload, $compile = true) {
    global $no_changelog;
    if (!isset($payload['pull_request']) || !isset($payload['pull_request']['body'])) {
        return;
    }
    if (!isset($payload['pull_request']['user']) || !isset($payload['pull_request']['user']['login'])) {
        return;
    }
    $body = $payload['pull_request']['body'];
    $tags = array();
    if(preg_match('/(?i)(fix|fixes|fixed|resolve|resolves|resolved)\s*#[0-9]+/',$body)) //github autoclose syntax
        $tags[] = 'Fix';
    $body = str_replace("\r\n", "\n", $body);
    $body = explode("\n", $body);
    $username = $payload['pull_request']['user']['login'];
    $incltag = false;
    $changelogbody = array();
    $currentchangelogblock = array();
    $foundcltag = false;
    foreach ($body as $line) {
        $line = trim($line);
        //opening tag. 🆑 is 4 bytes in UTF-8, so compare 4 bytes — the original
        //substr($line,0,1) could never equal the 4-byte emoji string
        if (substr($line,0,4) == ':cl:' || substr($line,0,4) == '🆑') {
            $incltag = true;
            $foundcltag = true;
            //optional author override after the tag
            $pos = strpos($line, " ");
            if ($pos) {
                $tmp = substr($line, $pos+1);
                if (trim($tmp) != 'optional name here')
                    $username = $tmp;
            }
            continue;
        } else if (substr($line,0,5) == '/:cl:' || substr($line,0,6) == '/ :cl:' || substr($line,0,5) == ':/cl:' || substr($line,0,5) == '/🆑' || substr($line,0,6) == '/ 🆑' ) {
            //closing tag: commit the accumulated block
            $incltag = false;
            $changelogbody = array_merge($changelogbody, $currentchangelogblock);
            continue;
        }
        if (!$incltag)
            continue;
        //split the line into "prefix: body"
        $pos = strpos($line, " ");
        $item = '';
        if ($pos) {
            $firstword = trim(substr($line, 0, $pos));
            $item = trim(substr($line, $pos+1));
        } else {
            $firstword = $line;
        }
        if (!strlen($firstword)) {
            //blank line inside the block: continue the previous entry's body.
            //Guard against an empty block — the original indexed [-1] here.
            if (count($currentchangelogblock) > 0)
                $currentchangelogblock[count($currentchangelogblock)-1]['body'] .= "\n";
            continue;
        }
        //not a prefix line ($firstword is non-empty here),
        //so we add it to the last changelog entry as a separate line
        if ($firstword[strlen($firstword)-1] != ':') {
            if (count($currentchangelogblock) <= 0)
                continue;
            $currentchangelogblock[count($currentchangelogblock)-1]['body'] .= "\n".$line;
            continue;
        }
        //map the prefix to an entry type and label tag; placeholder bodies from
        //the PR template are ignored
        $cltype = strtolower(substr($firstword, 0, -1));
        switch ($cltype) {
            case 'fix':
            case 'fixes':
            case 'bugfix':
                if($item != 'fixed a few things') {
                    $tags[] = 'Fix';
                    $currentchangelogblock[] = array('type' => 'bugfix', 'body' => $item);
                }
                break;
            case 'rsctweak':
            case 'tweaks':
            case 'tweak':
                if($item != 'tweaked a few things') {
                    $tags[] = 'Tweak';
                    $currentchangelogblock[] = array('type' => 'tweak', 'body' => $item);
                }
                break;
            case 'soundadd':
                if($item != 'added a new sound thingy') {
                    $tags[] = 'Sound';
                    $currentchangelogblock[] = array('type' => 'soundadd', 'body' => $item);
                }
                break;
            case 'sounddel':
                if($item != 'removed an old sound thingy') {
                    $tags[] = 'Sound';
                    $tags[] = 'Removal';
                    $currentchangelogblock[] = array('type' => 'sounddel', 'body' => $item);
                }
                break;
            case 'add':
            case 'adds':
            case 'rscadd':
                if($item != 'Added new things' && $item != 'Added more things') {
                    $tags[] = 'Feature';
                    $currentchangelogblock[] = array('type' => 'rscadd', 'body' => $item);
                }
                break;
            case 'del':
            case 'dels':
            case 'rscdel':
                if($item != 'Removed old things') {
                    $tags[] = 'Removal';
                    $currentchangelogblock[] = array('type' => 'rscdel', 'body' => $item);
                }
                break;
            case 'imageadd':
                if($item != 'added some icons and images') {
                    $tags[] = 'Sprites';
                    $currentchangelogblock[] = array('type' => 'imageadd', 'body' => $item);
                }
                break;
            case 'imagedel':
                if($item != 'deleted some icons and images') {
                    $tags[] = 'Sprites';
                    $tags[] = 'Removal';
                    $currentchangelogblock[] = array('type' => 'imagedel', 'body' => $item);
                }
                break;
            case 'typo':
            case 'spellcheck':
                if($item != 'fixed a few typos') {
                    $tags[] = 'Grammar and Formatting';
                    $currentchangelogblock[] = array('type' => 'spellcheck', 'body' => $item);
                }
                break;
            case 'balance':
            case 'rebalance':
                if($item != 'rebalanced something'){
                    $tags[] = 'Balance/Rebalance';
                    $currentchangelogblock[] = array('type' => 'balance', 'body' => $item);
                }
                break;
            case 'tgs':
                $currentchangelogblock[] = array('type' => 'tgs', 'body' => $item);
                break;
            case 'code_imp':
            case 'code':
                if($item != 'changed some code'){
                    $tags[] = 'Code Improvement';
                    $currentchangelogblock[] = array('type' => 'code_imp', 'body' => $item);
                }
                break;
            case 'refactor':
                if($item != 'refactored some code'){
                    $tags[] = 'Refactor';
                    $currentchangelogblock[] = array('type' => 'refactor', 'body' => $item);
                }
                break;
            case 'config':
                if($item != 'changed some config setting'){
                    $tags[] = 'Config Update';
                    $currentchangelogblock[] = array('type' => 'config', 'body' => $item);
                }
                break;
            case 'admin':
                if($item != 'messed with admin stuff'){
                    $tags[] = 'Administration';
                    $currentchangelogblock[] = array('type' => 'admin', 'body' => $item);
                }
                break;
            case 'server':
                if($item != 'something server ops should know')
                    $currentchangelogblock[] = array('type' => 'server', 'body' => $item);
                break;
            default:
                //unknown prefix: we add it to the last changelog entry as a separate line
                if (count($currentchangelogblock) > 0)
                    $currentchangelogblock[count($currentchangelogblock)-1]['body'] .= "\n".$line;
                break;
        }
    }
    if(!count($changelogbody))
        $no_changelog = true;
    if ($no_changelog || !$compile)
        return $tags;
    //build the YAML changelog file; escape backslashes and double quotes
    $file = 'author: "'.trim(str_replace(array("\\", '"'), array("\\\\", "\\\""), $username)).'"'."\n";
    $file .= "delete-after: True\n";
    $file .= "changes: \n";
    foreach ($changelogbody as $changelogitem) {
        $type = $changelogitem['type'];
        $body = trim(str_replace(array("\\", '"'), array("\\\\", "\\\""), $changelogitem['body']));
        $file .= ' - '.$type.': "'.$body.'"';
        $file .= "\n";
    }
    //commit the generated file to the PR's base branch via the contents API
    $content = array (
        'branch' => $payload['pull_request']['base']['ref'],
        'message' => 'Automatic changelog generation for PR #'.$payload['pull_request']['number'].' [ci skip]',
        'content' => base64_encode($file)
    );
    $filename = '/html/changelogs/AutoChangeLog-pr-'.$payload['pull_request']['number'].'.yml';
    echo apisend($payload['pull_request']['base']['repo']['url'].'/contents'.$filename, 'PUT', $content);
}
//broadcasts a topic query to every configured game server, appending the
//JSON-encoded payload and each server's comms key when present
function sendtoallservers($str, $payload = null) {
    global $servers;
    if (!empty($payload)) {
        $str .= '&payload=' . urlencode(json_encode($payload));
    }
    foreach ($servers as $serverid => $server) {
        $message = $str;
        if (isset($server['comskey'])) {
            $message .= '&key=' . urlencode($server['comskey']);
        }
        $reply = export($server['address'], $server['port'], $message);
        echo "Server Number $serverid replied: $reply\n";
    }
}
//Sends a BYOND topic query to a game server over TCP and decodes the reply.
//Returns a float or string response, "" when the server sent nothing, or an
//"ERROR..." string on connection/send failure.
//Note: curly-brace string offsets ($str{0}) were deprecated in PHP 7.4 and
//removed in PHP 8.0 — replaced with bracket syntax throughout.
function export($addr, $port, $str) {
    // All queries must begin with a question mark (ie "?players")
    if($str[0] != '?') $str = ('?' . $str);
    /* --- Prepare a packet to send to the server (based on a reverse-engineered packet structure) --- */
    $query = "\x00\x83" . pack('n', strlen($str) + 6) . "\x00\x00\x00\x00\x00" . $str . "\x00";
    /* --- Create a socket and connect it to the server --- */
    $server = socket_create(AF_INET,SOCK_STREAM,SOL_TCP) or exit("ERROR");
    socket_set_option($server, SOL_SOCKET, SO_SNDTIMEO, array('sec' => 2, 'usec' => 0)); //sets connect and send timeout to 2 seconds
    if(!socket_connect($server,$addr,$port)) {
        return "ERROR: Connection failed";
    }
    /* --- Send bytes to the server. Loop until all bytes have been sent --- */
    $bytestosend = strlen($query);
    $bytessent = 0;
    while ($bytessent < $bytestosend) {
        $result = socket_write($server,substr($query,$bytessent),$bytestosend-$bytessent);
        if ($result===FALSE)
            return "ERROR: " . socket_strerror(socket_last_error());
        $bytessent += $result;
    }
    /* --- Idle for a while until recieved bytes from game server --- */
    $result = socket_read($server, 10000, PHP_BINARY_READ);
    socket_close($server); // we don't need this anymore
    if($result != "") {
        // NOTE(review): this accepts the packet if EITHER header byte matches;
        // a strict format check would use && — kept as-is to preserve behavior, confirm intent
        if($result[0] == "\x00" || $result[1] == "\x83") { // make sure it's the right packet format
            // Actually begin reading the output:
            $sizebytes = unpack('n', $result[2] . $result[3]); // array size of the type identifier and content
            $size = $sizebytes[1] - 1; // size of the string/floating-point (minus the size of the identifier byte)
            if($result[4] == "\x2a") { // 4-byte big-endian floating-point
                $unpackint = unpack('f', $result[5] . $result[6] . $result[7] . $result[8]); // 4 possible bytes: add them up together, unpack them as a floating-point
                return $unpackint[1];
            }
            else if($result[4] == "\x06") { // ASCII string
                $unpackstr = ""; // result string
                $index = 5; // string index
                while($size > 0) { // loop through the entire ASCII string
                    $size--;
                    $unpackstr .= $result[$index]; // add the string position to return string
                    $index++;
                }
                return $unpackstr;
            }
        }
    }
    return "";
}
?>
| agpl-3.0 |
flyingSkull/dl-shopware | engine/Shopware/Controllers/Backend/ModelManager.php | 10226 | <?php
/**
* Shopware 4.0
* Copyright © 2012 shopware AG
*
* According to our dual licensing model, this program can be used either
* under the terms of the GNU Affero General Public License, version 3,
* or under a proprietary license.
*
* The texts of the GNU Affero General Public License with an additional
* permission and of our proprietary license can be found at and
* in the LICENSE file you have received along with this program.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* "Shopware" is a registered trademark of shopware AG.
* The licensing of the program under the AGPLv3 does not imply a
* trademark license. Therefore any rights, title and interest in
* our trademarks remain entirely with us.
*
* @category Shopware
* @package Shopware_Controllers
* @subpackage ModelManager
* @copyright Copyright (c) 2012, shopware AG (http://www.shopware.de)
* @version $Id$
* @author Fabian Engels
* @author $Author$
*/
use DoctrineExtensions\Paginate\Paginate;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\Tools\EntityGenerator;
use Doctrine\ORM\Mapping\ClassMetadataFactory;
use Doctrine\ORM\Tools\DisconnectedClassMetadataFactory;
use Doctrine\ORM\Mapping\ClassMetadataInfo;
/**
* Shopware Controller ModelManager
*
* The model manager backend controller handles all actions concerning the model manager backend module
*/
class Shopware_Controllers_Backend_ModelManager extends Enlight_Controller_Action
{
    /**
     * default init method
     *
     * @codeCoverageIgnore
     * @return void
     */
    public function init()
    {
        $this->Front()->Plugins()->ScriptRenderer()->setRender();
        $this->Front()->Plugins()->JsonRequest()
            ->setParseInput()
            ->setParseParams(array('group', 'sort'))
            ->setPadding($this->Request()->targetField);
    }

    /**
     * since only json data will be renderer, set the json renderer as default
     *
     * @return void
     */
    public function preDispatch()
    {
        if (!in_array($this->Request()->getActionName(), array('index', 'load'))) {
            $this->Front()->Plugins()->Json()->setRenderer(true);
        }
    }

    /**
     * @codeCoverageIgnore
     * @return void|string
     */
    public function indexAction()
    {
    }

    /**
     * load action for the script renderer.
     */
    public function loadAction()
    {
    }

    /**
     * this function will generate a query to get tables for the center grid
     * per default, it will get all tables starting with "s_"
     * if a search was performed, thus, the filter variable is set, it will remove any occurrences of
     * "s_" in the search string itself and return all tables starting with "s_" AND also containing the searchstring afterwards
     *
     * Security: the request parameters (filter/start/limit) are untrusted. The
     * search string is quoted via the DB adapter and the LIMIT values are cast
     * to int before being embedded in the SQL — the original code interpolated
     * them raw, which allowed SQL injection from the backend request.
     */
    public function getTablesAction()
    {
        $start = $this->Request()->start;
        $limit = $this->Request()->limit;
        $searchString = $this->Request()->filter;
        $config = Shopware()->Db()->getConfig();
        //filter user input starting with "s_"
        if (substr($searchString,0,2) == 's_') {
            $searchString = substr_replace($searchString,'',0,2);
        }
        //if no search was performed, display all tables starting with "s_"
        //else display all tables containing the searchstring and starting with "s_"
        if (isset($searchString)) {
            //quote() escapes the user input and wraps it in quotes
            $like = "AND TABLE_NAME LIKE " . Shopware()->Db()->quote('s\_%' . $searchString . '%');
        } else {
            $like = "AND TABLE_NAME LIKE 's\_%' ";
        }
        $sql= "SELECT TABLE_NAME as name FROM `information_schema`.`TABLES` WHERE `TABLE_SCHEMA` = ? " . $like . " ORDER BY TABLE_NAME";
        //add start and limit, if necessary (cast to int — LIMIT cannot be bound as a parameter here)
        if (isset($start) && isset($limit)) {
            $sql .= " LIMIT " . (int) $start . ", " . (int) $limit;
        }
        $data = Shopware()->Db()->fetchAll($sql, array($config['dbname']));
        $total = Shopware()->Db()->fetchOne("SELECT COUNT(*) FROM `information_schema`.`TABLES` WHERE `TABLE_SCHEMA` = ? " . $like, array($config['dbname']));
        $this->View()->assign(array('success' => true, 'data' => $data, 'total' => $total));
    }

    /**
     * this will generate a doctrine model based on the requested tableName
     * to do this, it will take a table name like "s_articles_bundles" and
     * convert it into a valid string for the getEntityCode() function to accept (like "SArticlesBundles")
     * afterwards, it will replace all invalid php-types with the proper ones, remove the table name prefix,
     * the orm prefix and also multiple linebreaks, to get the code, which in turn will be handed to the view
     */
    public function getDoctrineModelAction()
    {
        $params = $this->Request()->getParams();
        $tableName = $params['tableName'];
        if (!empty($tableName)) {
            //format the table name, so the getEntityCode function will accept it
            //converts "s_articles_bundles" to "SArticlesBundles"
            $tableName = str_replace("_"," ",$tableName);
            $tableName = ucwords($tableName);
            $tableName = str_replace(" ","",$tableName);
            //generates the basic model
            $code = $this->getEntityCode($tableName);
            //since datetime is not a valid php type, replace it with DateTime
            $code = str_ireplace('DateTime', '\DateTime', $code);
            $code = str_ireplace('@var datetime', '@var \DateTime',$code);
            $code = str_ireplace('@param datetime', '@param \DateTime',$code);
            $code = str_ireplace('@return datetime', '@return \DateTime',$code);
            $code = str_ireplace('@var date', '@var \DateTime',$code);
            $code = str_ireplace('@param date', '@param \DateTime',$code);
            $code = str_ireplace('@return date', '@return \DateTime',$code);
            //since text is not a valid php type, replace it with string
            $code = str_ireplace('@var text', '@var string',$code);
            $code = str_ireplace('@param text', '@param string',$code);
            $code = str_ireplace('@return text', '@return string',$code);
            //removes the table prefix, since it's already set in the doctrine connection
            $code = str_replace('Table(name="s_s_', 'Table(name="', $code);
            //shouldn't be used since models would be automatically updated otherwise
            $code = str_replace('use Doctrine\ORM\Mapping as ORM;', '', $code);
            //remove the orm prefix
            $code = str_replace('ORM\\', '', $code);
            //remove multiple linebreaks to get cleaner code
            $code = preg_replace("/\n\n+/", "\n\n", $code);
            //generate the data array, which will be handed to the view for rendering
            $data = array('tableName' => $params['tableName'],'content' => $code);
            $this->View()->assign(array('success' => true, 'data' => $data));
        }
    }

    // /**
    //  * this will generate an ExtJS model based on the requested tableName
    //  */
    // public function getExtJsModel() {
    //
    //     $params = $this->Request()->getParams();
    //     $tableName = $params['tableName'];
    //
    //     $config = Shopware()->Db()->getConfig();
    //     $sql = "SELECT COLUMN_NAME, DATA_TYPE FROM `information_schema`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME`= ? ORDER BY POSITION";
    //     $columns = Shopware()->Db()->fetchAll($sql, array($config['dbname'], $tableName));
    //
    //     array (
    //         'datetime' => 'date',
    //         'date' => 'date',
    //         'varchar' => 'string',
    //         'char' => 'string',
    //         'longtext' => 'string',
    //         'integer' => 'int',
    //         'int' => 'int',
    //         'decimal' => 'int',
    //         'number' => 'int',
    //         'varchar' => 'float',
    //         'varchar' => 'object',
    //
    //     )
    //
    // }

    /**
     * returns the php code for the passed table name
     * expects that the table name has the follow format:
     * name in database : "s_articles"
     * expected name : "SArticles"
     *
     * @param $tableName
     * @return string
     */
    private function getEntityCode($tableName) {
        $generator = $this->getGenerator();
        $factory = $this->getFactoryWithDatabaseDriver();
        $metaData = $factory->getMetadataFor($tableName);
        $code = $generator->generateEntityClass($metaData);
        return $code;
    }

    /**
     * Creates a disconnected meta data factory with a database mapping driver
     * to get the meta data for the extension tables directly from the database.
     *
     * @return Doctrine\ORM\Tools\DisconnectedClassMetadataFactory
     */
    private function getFactoryWithDatabaseDriver() {
        $driver = $this->getDatabaseDriver();
        Shopware()->Models()->getConfiguration()->setMetadataDriverImpl($driver);
        $factory = new DisconnectedClassMetadataFactory();
        $factory->setEntityManager(Shopware()->Models());
        return $factory;
    }

    /**
     * @return Shopware\Components\Model\DatabaseDriver
     */
    private function getDatabaseDriver() {
        $platform = Shopware()->Models()->getConnection()->getDatabasePlatform();
        $platform->registerDoctrineTypeMapping('enum', 'string');
        $driver = new \Shopware\Components\Model\DatabaseDriver(
            Shopware()->Models()->getConnection()->getSchemaManager()
        );
        return $driver;
    }

    /**
     * Generates the Shopware\Components\Model\Generator
     * @return Shopware\Components\Model\Generator
     */
    private function getGenerator() {
        $generator = new \Shopware\Components\Model\Generator();
        $generator->setGenerateAnnotations(true);
        $generator->setGenerateStubMethods(true);
        $generator->setRegenerateEntityIfExists(true);
        $generator->setUpdateEntityIfExists(false);
        $generator->setBackupExisting(false);
        return $generator;
    }
} | agpl-3.0 |
kpawelski/ITManager | resources/lang/de/general.php | 8366 | <?php
// German (de) translation map for the application's general UI strings.
// Keys are referenced by the framework's trans()/Lang helpers; values are the
// displayed text. Do not change keys — only values may be localized.
// NOTE(review): several values below are still English (e.g. 'address',
// 'create', 'date', 'name', 'pending', 'purchase_cost', 'purchase_date',
// 'no_depreciation', 'signature', 'supplier', 'zip', 'debug_warning_text') —
// presumably untranslated; confirm with the translation team before changing,
// since altering these strings changes what users see at runtime.
return [
'accessories' => 'Zubehör',
'activated' => 'Aktiviert',
'accessory' => 'Zubehör',
'accessory_report' => 'Zubehör Bericht',
'action' => 'Aktion',
'activity_report' => 'Aktivitätsreport',
'address' => 'Supplier Address',
'admin' => 'Administrator',
'add_seats' => 'Lizenzen hinzugefügt',
'all_assets' => 'Alle Assets',
'all' => 'Alle',
'archived' => 'Archiviert',
'asset_models' => 'Asset Modelle',
'asset' => 'Asset',
'asset_report' => 'Asset Bericht',
'asset_tag' => 'Asset Tag',
'assets_available' => 'verfügbare Assets',
'assets' => 'Assets',
'avatar_delete' => 'Avatar löschen',
'avatar_upload' => 'Avatar hochladen',
'back' => 'Zurück',
'bad_data' => 'Nichts gefunden. Vielleicht defekte Daten?',
'bulk_checkout' => 'Massen-Checkout',
'cancel' => 'Abbrechen',
'categories' => 'Kategorien',
'category' => 'Kategorie',
'changeemail' => 'E-Mail Adresse ändern',
'changepassword' => 'Passwort ändern',
'checkin' => 'Checkin Asset',
'checkin_from' => 'Einchecken von',
'checkout' => 'Checkout Asset to User',
'city' => 'Stadt',
'companies' => 'Firmen',
'company' => 'Firma',
'component' => 'Komponente',
'components' => 'Komponenten',
'consumable' => 'Verbrauchsmaterial',
'consumables' => 'Verbrauchsmaterialien',
'country' => 'Land',
'create' => 'Create Location',
'created' => 'Eintrag erstellt',
'created_asset' => 'Asset angelegt',
'created_at' => 'Erstellt am',
'currency' => '€', // this is deprecated
'current' => 'Aktuell',
'custom_report' => 'Spezieller Asset Report',
'dashboard' => 'Dashboard',
'date' => 'Purchase Date',
'debug_warning' => 'Warning!',
'debug_warning_text' => 'This application is running in production mode with debugging enabled. This can expose sensitive data if your application is accessible to the outside world. Disable debug mode by setting the <code>APP_DEBUG</code> value in your <code>.env</code> file to <code>false</code>.',
'delete' => 'Löschen',
'deleted' => 'Gelöscht',
'delete_seats' => 'Gelöschte Lizenzen',
'deployed' => 'Herausgegeben',
'depreciation_report' => 'Abschreibunsgreport',
'download' => 'Download',
'depreciation' => 'Abschreibung',
'editprofile' => 'Profil bearbeiten',
'eol' => 'EOL',
'email_domain' => 'E-Mail-Domain',
'email_format' => 'E-Mail-Format',
'email_domain_help' => 'Dieses wird verwendet, um beim importieren E-Mail-Adressen zu generieren',
'filastname_format' => 'Initial des Vornamen + Nachname (jsmith@example.com)',
'firstname_lastname_format' => 'Vorname Nachname (jane.smith@example.com)',
'first' => 'Erstes',
'first_name' => 'Vorname',
'first_name_format' => 'Vorname (jane@example.com)',
'file_name' => 'Datei',
'file_uploads' => 'Datei-Uploads',
'generate' => 'Generieren',
'groups' => 'Gruppen',
'gravatar_email' => 'Gravatar E-Mail Adresse',
'history' => 'Historie',
'history_for' => 'Verlauf für',
'id' => 'Id',
'image_delete' => 'Bild löschen',
'image_upload' => 'Bild hinzufügen',
'import' => 'Import',
'import-history' => 'Import Verlauf',
'asset_maintenance' => 'Asset Wartung',
'asset_maintenance_report' => 'Asset Wartungsbericht',
'asset_maintenances' => 'Asset Wartungen',
'item' => 'Gegenstand',
'insufficient_permissions' => 'Unzureichende Berechtigungen!',
'language' => 'Sprache',
'last' => 'Letztes',
'last_name' => 'Familienname',
'license' => 'Lizenz',
'license_report' => 'Lizenz Report',
'licenses_available' => 'Verfügbare Lizenzen',
'licenses' => 'Lizenzen',
'list_all' => 'Alle auflisten',
'loading' => 'Am laden',
'lock_passwords' => 'Dieses Feld kann in dieser Installation nicht bearbeitet werden.',
'feature_disabled' => 'Dieses Feature wurde für die Demo-Installation deaktiviert.',
'location' => 'Standort',
'locations' => 'Standorte',
'logout' => 'Abmelden',
'lookup_by_tag' => 'Nach Asset Tag suchen',
'manufacturer' => 'Hersteller',
'manufacturers' => 'Hersteller',
'markdown' => 'Dieses Feld erlaubt <a href="https://help.github.com/articles/github-flavored-markdown/"> Github Flavored Markdown</a>.',
'min_amt' => 'Min. Anzahl',
'min_amt_help' => 'Die minimale Anzahl an verfügbaren Artikeln bevor ein Alarm ausgelöst wird.',
'model_no' => 'Modellnr.',
'months' => 'Monate',
'moreinfo' => 'Mehr Informationen',
'name' => 'Location Name',
'next' => 'Nächstes',
'new' => 'Neu!',
'no_depreciation' => 'Do Not Depreciate',
'no_results' => 'Keine Treffer.',
'no' => 'Nein',
'notes' => 'Notizen',
'order_number' => 'Order Number',
// _MENU_/_START_/_END_/_TOTAL_ are DataTables placeholders — keep them verbatim
'page_menu' => 'zeige _MENU_ Einträge',
'pagination_info' => 'Zeige _START_ bis _END_ von _TOTAL_ Einträgen',
'pending' => 'Pending Asset',
'people' => 'Personen',
'per_page' => 'Ergebnisse pro Seite',
'previous' => 'Vorherige',
'processing' => 'In Arbeit',
'profile' => 'Ihr Profil',
'purchase_cost' => 'Purchase Cost',
'purchase_date' => 'Purchase Date',
'qty' => 'St',
'quantity' => 'Anzahl',
'ready_to_deploy' => 'Fertig zum herausgeben',
'recent_activity' => 'Letzte Aktivität',
'remove_company' => 'Firmenzuordnung entfernen',
'reports' => 'Berichte',
'requested' => 'Angefragt',
'request_canceled' => 'Anfrage abgebrochen',
'save' => 'Speichern',
'select' => 'auswählen',
'search' => 'Suche',
'select_category' => 'Kategorie auswählen',
'select_depreciation' => 'Wähle einen Abschreibungstyp',
'select_location' => 'Wählen Sie einen Standort',
'select_manufacturer' => 'Wählen Sie einen Hersteller',
'select_model' => 'Wählen Sie ein Model',
'select_supplier' => 'wählen Sie einen Lieferant',
'select_user' => 'wähle einen Benutzer',
'select_date' => 'Datum auswählen',
'select_statuslabel' => 'Status auswählen',
'select_company' => 'Firma auswählen',
'select_asset' => 'Asset auswählen',
'settings' => 'Einstellungen',
'sign_in' => 'Einloggen',
'signature' => 'Signature',
'some_features_disabled' => 'Einige Funktionen sind für den DEMO-Modus deaktiviert.',
'site_name' => 'Seitenname',
'state' => 'Zustand',
'status_labels' => 'Statusbezeichnungen',
'status' => 'Status',
'supplier' => 'Supplier',
'suppliers' => 'Lieferanten',
'submit' => 'Abschicken',
'total_assets' => 'Gesamte Assets',
'total_licenses' => 'Lizenzen insgesamt',
'total_accessories' => 'gesamtes Zubehör',
'total_consumables' => 'gesamtes Verbrauchsmaterial',
'type' => 'Typ',
'undeployable' => 'Nicht herausgebbar',
'unknown_admin' => 'Unbekannter Administrator',
'username_format' => 'Format der Benutzernamen',
'update' => 'Aktualisieren',
'uploaded' => 'Hochgeladen',
'user' => 'Nutzer',
'accepted' => 'angenommen',
'declined' => 'abgelehnt',
'unaccepted_asset_report' => 'Nicht akzeptierte Assets',
'users' => 'Benutzer',
'viewassets' => 'Zugeordnete Assets anzeigen',
'website' => 'Webseite',
// :name is a runtime substitution placeholder — keep it verbatim
'welcome' => 'Wilkommen, :name',
'years' => 'Jahre',
'yes' => 'Ja',
'zip' => 'Postal Code',
'noimage' => 'Kein Bild hochgeladen oder kein Bild gefunden.',
'token_expired' => 'Ihre Formularsitzung ist abgelaufen. Bitte versuchen sie es erneut.',
];
| agpl-3.0 |
venturehive/canvas-lms | spec/selenium/helpers/files_common.rb | 6986 | #
# Copyright (C) 2012 - present Instructure, Inc.
#
# This file is part of Canvas.
#
# Canvas is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
require File.expand_path(File.dirname(__FILE__) + '/../common')
module FilesCommon
# This method adds the specified file to the course
# Params:
# - fixture: location of the file to be uploaded
# - context: course in which file would be uploaded
# - name: file name
# - folder: course folder it should go under (defaults to root folder)
def add_file(fixture, context, name, folder = Folder.root_folders(context).first)
context.attachments.create! do |attachment|
attachment.uploaded_data = fixture
attachment.filename = name
attachment.folder = folder
end
end
def edit_name_from_cog_icon(file_name_new, row_selected = 0)
ff('.al-trigger-gray')[row_selected].click
fln("Rename").click
expect(f(".ef-edit-name-cancel")).to be_displayed
file_name_textbox_el = f('.ef-edit-name-form__input')
replace_content(file_name_textbox_el, file_name_new)
file_name_textbox_el.send_keys(:return)
end
def delete(row_selected = 0, delete_using = :cog_icon)
if delete_using == :cog_icon
ff('.al-trigger')[row_selected].click
fln("Delete").click
elsif delete_using == :toolbar_menu
ff('.ef-item-row')[row_selected].click
f('.btn-delete').click
end
confirm_delete_on_dialog
end
def move(file_name, row_selected = 0, move_using = :cog_icon, destination = nil)
if move_using == :cog_icon
ff('.al-trigger')[row_selected].click
fln("Move").click
elsif move_using == :toolbar_menu
ff('.ef-item-row')[row_selected].click
f('.btn-move').click
end
expect(f(".ReactModal__Header-Title h4")).to include_text "Where would you like to move #{file_name}?"
if destination.present?
folders = destination.split('/')
folders.each do |folder|
fj(".ReactModal__Body .treeLabel span:contains('#{folder}')").click
end
else
ff(".treeLabel span")[3].click
end
driver.action.send_keys(:return).perform
wait_for_ajaximations
ff(".btn-primary")[1].click
end
def move_multiple_using_toolbar(files = [])
files.each do |file_name|
file = driver.find_element(xpath: "//span[contains(text(), '#{file_name}') and @class='ef-name-col__text']")
.find_element(xpath: "../..")
driver.action.key_down(:control).click(file).key_up(:control).perform
end
wait_for_ajaximations
f('.btn-move').click
wait_for_ajaximations
expect(f(".ReactModal__Header-Title h4").text).to eq "Where would you like to move these #{files.count} items?"
ff(".treeLabel span")[3].click
driver.action.send_keys(:return).perform
wait_for_ajaximations
ff(".btn-primary")[1].click
end
# This method sets permissions on files/folders
def set_item_permissions(permission_type = :publish, restricted_access_option = nil, set_permissions_from = :cloud_icon)
if set_permissions_from == :cloud_icon
f('.btn-link.published-status').click
elsif set_permissions_from == :toolbar_menu
ff('.ef-item-row')[0].click
f('.btn-restrict').click
end
wait_for_ajaximations
if permission_type == :publish
driver.find_elements(:name, 'permissions')[0].click
elsif permission_type == :unpublish
driver.find_elements(:name, 'permissions')[1].click
else
driver.find_elements(:name, 'permissions')[2].click
if restricted_access_option == :available_with_link
driver.find_elements(:name, 'restrict_options')[0].click
else
driver.find_elements(:name, 'restrict_options')[1].click
ff('.ui-datepicker-trigger.btn')[0].click
fln("15").click
ff('.ui-datepicker-trigger.btn')[1].click
fln("25").click
end
end
ff('.btn.btn-primary')[1].click
wait_for_ajaximations
end
def should_make_folders_in_the_menu_droppable
course_with_teacher_logged_in
get "/files"
wait_for_ajaximations
f(".add_folder_link").click
wait_for_ajaximations
expect(f("#files_content .add_folder_form #folder_name")).to be_displayed
f("#files_content .add_folder_form #folder_name").send_keys("my folder\n")
wait_for_ajaximations
expect(f(".node.folder span")).to have_class('ui-droppable')
# also make sure that it has a tooltip of the file name so that you can read really long names
expect(f(".node.folder .name[title='my folder']")).not_to be_nil
end
def should_show_students_link_to_download_zip_of_folder
course_with_student_logged_in
get "/courses/#{@course.id}/files"
link = f(".links a.download_zip_link")
wait_for_ajaximations
expect(link).to be_displayed
expect(link).to have_attribute('href', %r"/courses/#{@course.id}/folders/\d+/download")
end
def confirm_delete_on_dialog
driver.switch_to.alert.accept
wait_for_ajaximations
end
def cancel_delete_on_dialog
driver.switch_to.alert.dismiss
wait_for_ajaximations
end
# Create a folder via the new files UI, typing the name and confirming
# with the return key.
def add_folder(name = 'new folder')
  click_new_folder_button
  new_folder = f("input[aria-label='Folder Name']")
  new_folder.send_keys(name)
  new_folder.send_keys(:return)
  wait_for_ajaximations
end

# Press the "Add Folder" toolbar button in the new files UI.
def click_new_folder_button
  f("button[aria-label='Add Folder']").click
  wait_for_ajaximations
end

# Create a folder with the default name (submitting the edit-name form
# untouched) and return the first item row on the page.
def create_new_folder
  f('.btn-add-folder').click
  f('.ef-edit-name-form').submit
  wait_for_ajaximations
  all_files_folders.first
end

# All file/folder rows currently rendered (may be an empty list).
def all_files_folders
  # TODO: switch to ff once specs stop using this to find non-existence of stuff
  driver.find_elements(:class, 'ef-item-row')
end
# Drive the RCE sidebar to insert a file, then verify a link named
# "some test file" appears in the editor.
#
# insert_into:: :quiz and :discussion use those editors' own tab/button
#               indices; any other value assumes the generic RCE layout.
def insert_file_from_rce(insert_into = nil)
  if insert_into == :quiz
    # the quiz editor exposes the files tab at a different index
    ff(".ui-tabs-anchor")[6].click
  else
    file_tab = ff(".ui-tabs-anchor")[1]
    expect(file_tab).to be_displayed
    ff(".ui-tabs-anchor")[1].click
  end
  # drill three levels into the file tree (presumably root -> folder -> file;
  # confirm against the fixture data used by the calling specs)
  ff(".name.text")[0].click
  wait_for_ajaximations
  ff(".name.text")[1].click
  wait_for_ajaximations
  ff(".name.text")[2].click
  wait_for_ajaximations
  if insert_into == :quiz
    ff(".name.text")[3].click
    ff(".btn-primary")[3].click
  elsif insert_into == :discussion
    f("#edit_discussion_form_buttons .btn-primary").click
  else
    f(".btn-primary").click
  end
  wait_for_ajaximations
  expect(fln("some test file")).to be_displayed
end
end
| agpl-3.0 |
lucee/unoffical-Lucee-no-jre | source/java/core/src/lucee/transformer/bytecode/visitor/WhileVisitor.java | 2391 | /**
* Copyright (c) 2014, the Railo Company Ltd.
* Copyright (c) 2015, Lucee Assosication Switzerland
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
*/
package lucee.transformer.bytecode.visitor;
import lucee.transformer.Position;
import lucee.transformer.bytecode.BytecodeContext;
import lucee.transformer.bytecode.util.ExpressionUtil;
import org.objectweb.asm.Label;
import org.objectweb.asm.Opcodes;
/**
 * Emits the bytecode skeleton of a while loop: a begin label before the
 * condition, a conditional jump past the loop when the condition is false,
 * and a jump back to the begin label after the body. Also supplies the
 * jump targets used by break and continue statements inside the loop.
 */
public final class WhileVisitor implements LoopVisitor {

	// jump target at the start of the condition (also the continue target)
	private Label begin;
	// jump target just past the loop (also the break target)
	private Label end;

	/** Creates both labels and marks the loop entry point. */
	public void visitBeforeExpression(BytecodeContext bc) {
		begin = new Label();
		end = new Label();
		bc.getAdapter().visitLabel(begin);
	}

	/** With the condition value on the stack: jump to end when it is 0 (false). */
	public void visitAfterExpressionBeforeBody(BytecodeContext bc) {
		bc.getAdapter().ifZCmp(Opcodes.IFEQ, end);
	}

	/** After the body: loop back to the condition, then mark the end label. */
	public void visitAfterBody(BytecodeContext bc,Position endline) {
		bc.getAdapter().visitJumpInsn(Opcodes.GOTO, begin);
		bc.getAdapter().visitLabel(end);
		ExpressionUtil.visitLine(bc, endline);
	}

	/**
	 * continue == jump back to the condition check
	 * @see lucee.transformer.bytecode.visitor.LoopVisitor#visitContinue(org.objectweb.asm.commons.GeneratorAdapter)
	 */
	@Override
	public void visitContinue(BytecodeContext bc) {
		bc.getAdapter().visitJumpInsn(Opcodes.GOTO, begin);
	}

	/**
	 * break == jump past the loop
	 * @see lucee.transformer.bytecode.visitor.LoopVisitor#visitBreak(org.objectweb.asm.commons.GeneratorAdapter)
	 */
	@Override
	public void visitBreak(BytecodeContext bc) {
		bc.getAdapter().visitJumpInsn(Opcodes.GOTO, end);
	}

	/**
	 *
	 * @see lucee.transformer.bytecode.visitor.LoopVisitor#getContinueLabel()
	 */
	@Override
	public Label getContinueLabel() {
		return begin;
	}

	/**
	 *
	 * @see lucee.transformer.bytecode.visitor.LoopVisitor#getBreakLabel()
	 */
	@Override
	public Label getBreakLabel() {
		return end;
	}
} | lgpl-2.1 |
KDE/android-qt-creator | src/plugins/qmlprofiler/codaqmlprofilerrunner.cpp | 3032 | /**************************************************************************
**
** This file is part of Qt Creator
**
** Copyright (c) 2012 Nokia Corporation and/or its subsidiary(-ies).
**
** Contact: Nokia Corporation (qt-info@nokia.com)
**
**
** GNU Lesser General Public License Usage
**
** This file may be used under the terms of the GNU Lesser General Public
** License version 2.1 as published by the Free Software Foundation and
** appearing in the file LICENSE.LGPL included in the packaging of this file.
** Please review the following information to ensure the GNU Lesser General
** Public License version 2.1 requirements will be met:
** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Nokia gives you certain additional
** rights. These rights are described in the Nokia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** Other Usage
**
** Alternatively, this file may be used in accordance with the terms and
** conditions contained in a signed written agreement between you and Nokia.
**
** If you have questions regarding the use of this file, please contact
** Nokia at qt-info@nokia.com.
**
**************************************************************************/
#include "codaqmlprofilerrunner.h"
#include <utils/qtcassert.h>
#include <projectexplorer/projectexplorerconstants.h>
#include <projectexplorer/target.h>
#include <extensionsystem/pluginmanager.h>
#include <qt4projectmanager/qt-s60/s60deployconfiguration.h>
#include <projectexplorer/runconfiguration.h>
#include <analyzerbase/analyzerconstants.h>
#include <qt4projectmanager/qt-s60/codaruncontrol.h>
using namespace ProjectExplorer;
using namespace Qt4ProjectManager;
using namespace QmlProfiler::Internal;
// Construct a runner for the given S60 device run configuration. A
// CodaRunControl is created in QML-profiling mode; its completion and
// output signals are re-emitted through this runner's own signals.
CodaQmlProfilerRunner::CodaQmlProfilerRunner(S60DeviceRunConfiguration *configuration,
                                             QObject *parent) :
    AbstractQmlProfilerRunner(parent),
    m_configuration(configuration),
    m_runControl(new CodaRunControl(configuration, QmlProfilerRunMode))
{
    // forward run-control completion as this runner's stopped() signal
    connect(m_runControl, SIGNAL(finished()), this, SIGNAL(stopped()));
    connect(m_runControl,
            SIGNAL(appendMessage(ProjectExplorer::RunControl*,QString,Utils::OutputFormat)),
            this, SLOT(appendMessage(ProjectExplorer::RunControl*,QString,Utils::OutputFormat)));
}

// The runner owns its run control and releases it on destruction.
CodaQmlProfilerRunner::~CodaQmlProfilerRunner()
{
    delete m_runControl;
}

// Start profiling by launching the underlying run control.
void CodaQmlProfilerRunner::start()
{
    QTC_ASSERT(m_runControl, return);
    m_runControl->start();
}

// Request the underlying run control to stop; stopped() is emitted when
// the run control reports finished().
void CodaQmlProfilerRunner::stop()
{
    QTC_ASSERT(m_runControl, return);
    m_runControl->stop();
}

// Slot: drop the run-control pointer and re-emit only the message text
// and output format.
void CodaQmlProfilerRunner::appendMessage(ProjectExplorer::RunControl *, const QString &message,
                                          Utils::OutputFormat format)
{
    emit appendMessage(message, format);
}

// Port the QML debug server listens on, taken from the run configuration's
// debugger settings.
quint16 QmlProfiler::Internal::CodaQmlProfilerRunner::debugPort() const
{
    return m_configuration->debuggerAspect()->qmlDebugServerPort();
}
| lgpl-2.1 |
tjizep/treestore1 | src/poco-1.4.6p1-all/Foundation/src/HexBinaryDecoder.cpp | 3050 | //
// HexBinaryDecoder.cpp
//
// $Id: //poco/1.4/Foundation/src/HexBinaryDecoder.cpp#2 $
//
// Library: Foundation
// Package: Streams
// Module: HexBinary
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
//
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
#include "Poco/HexBinaryDecoder.h"
#include "Poco/Exception.h"
namespace Poco {
// Wrap the given input stream's buffer; decoded bytes are produced lazily
// by readFromDevice() as the decoder stream is read.
HexBinaryDecoderBuf::HexBinaryDecoderBuf(std::istream& istr):
	_buf(*istr.rdbuf())
{
}


HexBinaryDecoderBuf::~HexBinaryDecoderBuf()
{
}
namespace
{
	// Map an ASCII hex digit to its numeric value (0-15); any other
	// character is reported as a data format error.
	int hexDigitValue(int n)
	{
		if (n >= '0' && n <= '9')
			return n - '0';
		else if (n >= 'A' && n <= 'F')
			return n - 'A' + 10;
		else if (n >= 'a' && n <= 'f')
			return n - 'a' + 10;
		else
			throw DataFormatException();
	}
}


// Decode the next byte: read two hex digits (whitespace is skipped by
// readOne) and combine them, high nibble first. Returns -1 at end of
// input; an odd number of digits or a non-hex character throws
// DataFormatException. The duplicated digit-decoding logic of the
// original implementation is factored into hexDigitValue() above.
int HexBinaryDecoderBuf::readFromDevice()
{
	int n = readOne();
	if (n == -1) return -1;
	int c = hexDigitValue(n) << 4;
	if ((n = readOne()) == -1) throw DataFormatException();
	return c | hexDigitValue(n);
}
// Pull the next significant character from the wrapped stream buffer,
// transparently skipping spaces, tabs and CR/LF line breaks. Returns the
// stream buffer's end-of-file value when the input is exhausted.
int HexBinaryDecoderBuf::readOne()
{
	int ch;
	do
	{
		ch = _buf.sbumpc();
	}
	while (ch == ' ' || ch == '\r' || ch == '\t' || ch == '\n');
	return ch;
}
// Base-IOS class: owns the decoding stream buffer and initializes the
// ios base with it (standard Poco stream composition pattern).
HexBinaryDecoderIOS::HexBinaryDecoderIOS(std::istream& istr): _buf(istr)
{
	poco_ios_init(&_buf);
}


HexBinaryDecoderIOS::~HexBinaryDecoderIOS()
{
}


// Expose the decoder's stream buffer (std::ios interface convention).
HexBinaryDecoderBuf* HexBinaryDecoderIOS::rdbuf()
{
	return &_buf;
}


// The istream facade: reading from a HexBinaryDecoder yields the decoded
// bytes of the hex-encoded text in the wrapped input stream.
HexBinaryDecoder::HexBinaryDecoder(std::istream& istr): HexBinaryDecoderIOS(istr), std::istream(&_buf)
{
}


HexBinaryDecoder::~HexBinaryDecoder()
{
}
} // namespace Poco
| lgpl-2.1 |
modius/railo | railo-java/railo-core/src/railo/transformer/cfml/evaluator/func/impl/IsDefined.java | 2052 | package railo.transformer.cfml.evaluator.func.impl;
import railo.commons.lang.StringList;
import railo.runtime.exp.TemplateException;
import railo.runtime.interpreter.VariableInterpreter;
import railo.runtime.type.Collection;
import railo.runtime.type.scope.Scope;
import railo.runtime.type.util.ArrayUtil;
import railo.transformer.bytecode.expression.Expression;
import railo.transformer.bytecode.expression.type.CollectionKey;
import railo.transformer.bytecode.expression.type.CollectionKeyArray;
import railo.transformer.bytecode.expression.var.Argument;
import railo.transformer.bytecode.expression.var.BIF;
import railo.transformer.bytecode.literal.LitDouble;
import railo.transformer.bytecode.literal.LitString;
import railo.transformer.cfml.evaluator.FunctionEvaluator;
import railo.transformer.library.function.FunctionLibFunction;
/**
 * Compile-time evaluator for the isDefined() built-in function: when the
 * argument is a string literal, the variable path is parsed once at compile
 * time and the call is rewritten to take a numeric scope id plus pre-built
 * collection keys, avoiding string parsing at runtime.
 */
public class IsDefined implements FunctionEvaluator{

	public void evaluate(BIF bif, FunctionLibFunction flf) throws TemplateException {
		Argument arg = bif.getArguments()[0];
		Expression value = arg.getValue();
		// only literal string arguments can be analyzed at compile time
		if(value instanceof LitString) {
			String str=((LitString)value).getString();
			StringList sl = VariableInterpreter.parse(str,false);
			if(sl!=null){
				// scope: the first path element is mapped to a scope id;
				// when it is not a scope name (SCOPE_UNDEFINED) the list is
				// rewound so the element is treated as a regular key again
				str=sl.next();
				int scope = VariableInterpreter.scopeString2Int(str);
				if(scope==Scope.SCOPE_UNDEFINED)sl.reset();

				// remaining path elements become the key array (trimmed)
				String[] arr=sl.toArray();
				ArrayUtil.trim(arr);

				// replace the literal string argument with the numeric scope id
				arg.setValue(LitDouble.toExprDouble(scope),"number");

				// append a second argument: a single key or a key array,
				// depending on the depth of the variable path
				if(arr.length==1){
					Expression expr = new CollectionKey(arr[0]);//LitString.toExprString(str);
					arg=new Argument(expr,Collection.Key.class.getName());
					bif.addArgument(arg);
				}
				else {
					CollectionKeyArray expr=new CollectionKeyArray(arr);
					//LiteralStringArray expr = new LiteralStringArray(arr);
					arg=new Argument(expr,Collection.Key[].class.getName());
					bif.addArgument(arg);
				}
			}
		}
		//print.out("bif:"+arg.getValue().getClass().getName());
	}
}
| lgpl-2.1 |
echosting/xe-core14 | modules/module/lang/es.lang.php | 7311 | <?php
/**
* @archivo modules/module/lang/es.lang.php
* @autor NHN (developers@xpressengine.com)
* @sumario Paquete del idioma español
**/
// Module-administration UI strings for the Spanish (es) language pack.
// NOTE(review): several entries below are still in Korean (carried over,
// untranslated, from the ko pack) and a few are in English; they are left
// untouched here because they are runtime strings. Also note the duplicate
// $lang->category_title assignment and the typo "Múmero" (Número) in
// checked_count — flagged for the translators rather than changed.
$lang->virtual_site = "Virtual Site";
$lang->module_list = "Lista de Módulos";
$lang->module_index = "Lista de Módulos";
$lang->module_category = "Categoría de Módulos ";
$lang->module_info = "Información";
$lang->add_shortcut = "Añadir en el menú del administrador";
$lang->module_action = "Acción";
$lang->module_maker = "Módulo del desarrollador";
$lang->module_license = 'License';
$lang->module_history = "Historia de actualización";
$lang->category_title = "Título de categoría";
$lang->header_text = 'Encabezado';
$lang->footer_text = 'Pie de página';
$lang->use_category = 'Usar categoría';
// duplicate key: overrides the earlier category_title value above
$lang->category_title = 'Título de categoría';
$lang->checked_count = 'Múmero de los documentos selecionados';
$lang->skin_default_info = 'información del tema predefinido';
$lang->skin_author = 'Desarrollador de tema';
$lang->skin_license = 'License';
$lang->skin_history = 'Historia de actualización';
$lang->module_copy = "Copy Module";
$lang->module_selector = "Module Selector";
// --- untranslated (Korean) entries begin here ---
$lang->do_selected = "선택된 것들을...";
$lang->bundle_setup = "일괄 기본 설정";
$lang->bundle_addition_setup = "일괄 추가 설정";
$lang->bundle_grant_setup = "일괄 권한 설정";
$lang->lang_code = "언어 코드";
$lang->filebox = "파일박스";
$lang->access_type = '접속 방법';
$lang->access_domain = 'Domain 접속';
$lang->access_vid = 'Site ID 접속';
$lang->about_domain = 'Para crear un dominio privado de la site requiere. <br/> Independiente de dominio o subdominio, y XE está instalado, el camino que en su conjunto. <br /> Ej.) www.xpressengine.com / zbxe';
$lang->about_vid = '별도의 도메인이 아닌 http://XE주소/ID 로 접속할 수 있습니다. 모듈명(mid)와 중복될 수 없습니다.<br/>첫글자는 영문으로 시작해야 하고 영문과 숫자 그리고 _ 만 사용할 수 있습니다';
$lang->msg_already_registed_vid = '이미 등록된 사이트 ID 입니다. 게시판등의 mid와도 중복이 되지 않습니다. 다른 ID를 입력해주세요.';
$lang->msg_already_registed_domain = 'Ya es de dominio registrado. Por favor, use un dominio diferente';
$lang->header_script = "헤더 스크립트";
$lang->about_header_script = "html의 <header>와 </header> 사이에 들어가는 코드를 직접 입력할 수 있습니다.<br /><script, <style 또는 <meta 태그등을 이용하실 수 있습니다";
$lang->grant_access = "Access";
$lang->grant_manager = "Management";
$lang->grant_to_all = "All users";
$lang->grant_to_login_user = "Logged users";
$lang->grant_to_site_user = "Joined users";
$lang->grant_to_group = "Specification group users";
$lang->cmd_add_shortcut = "añadir acceso directo";
$lang->cmd_install = "Instalar";
$lang->cmd_update = "Actualizar";
$lang->cmd_manage_category = 'Manejo de categorías';
$lang->cmd_manage_grant = 'Manejo de atribuciones';
$lang->cmd_manage_skin = 'Manejo de temas';
$lang->cmd_manage_document = 'Manejo de documentos';
$lang->cmd_find_module = '모듈 찾기';
$lang->cmd_find_langcode = 'Find lang code';
$lang->msg_new_module = "Crear un nuevo módulo";
$lang->msg_update_module = "Modificar el módulo";
$lang->msg_module_name_exists = "El nombre ya existe. Por favor tratar con otro nombre.";
$lang->msg_category_is_null = 'No existe categoría registrada.';
$lang->msg_grant_is_null = 'No existe el objetivo de atribución registrado.';
$lang->msg_no_checked_document = 'No existe documento seleccionado.';
$lang->msg_move_failed = 'No pudo moverse';
$lang->msg_cannot_delete_for_child = 'No puede eliminar la categoría si posee subcategoría.';
$lang->msg_limit_mid ='모듈이름은 영문+[영문+숫자+_] 만 가능합니다.';
$lang->msg_extra_name_exists = '이미 존재하는 확장변수 이름입니다. 다른 이름을 입력해주세요.';
$lang->about_browser_title = "Esto es el valor que se mostrará en el título del navegador. También es usado en RSS/Trackback.";
$lang->about_mid = "El nombre del módulo será usado como http://direccion/?mid=NombreMódulo.\n(sólo alfabeto español+[alfabeto español, números y el gión abajo(_)] son permitidos. The maximum length is 40.)";
$lang->about_default = "Si selecciona esta opción, se mostrara de manera predefinida sin el valor de mid.";
$lang->about_module_category = "Es posible manejar a traves de categoría.\n EL URL para en manejo del módulo de categoría es <a href=\"./?module=admin&act=dispModuleAdminCategory\">Manejo de módulo > Módulo de categoría </a>.";
$lang->about_description= 'Descripción usada para la administración.';
// duplicate key: overrides the earlier about_default value above
$lang->about_default = 'Si selecciona esta opción, se mostrará de manera predefinida sin el valor de mid.';
$lang->about_header_text = 'El contenido se mostrará en la parte superior del módulo.(tags de html permitido)';
$lang->about_footer_text = 'El contenido se mostrará en la parte inferior del módulo.(tags de html permitido)';
$lang->about_skin = 'Usted puede elegir un tema del módulo.';
$lang->about_use_category = 'Si selecciona esta opción, la función de categoría sera activada.';
$lang->about_list_count = 'Usted puede definir el número límite de los documentos a mostrar en una página.(Predefinido es 20)';
$lang->about_search_list_count = 'Usted puede configurar el número de artículos que se exponen cuando se utiliza la función de búsqueda o categoría. (Por defecto es 20)';
$lang->about_page_count = 'Usted puede definir el número de página enlazada para mover páginas en un botón de la página.(Predefinido es 10)';
$lang->about_admin_id = 'Usted puede definir el administrador de atribuciones superiores al módulo.\n Usted puede asignar múltiples IDs.';
$lang->about_grant = 'Si usted desea desactivar a todos los objetos teniendo atribuciones especificas, incluso el usuario no conectado pueden tener atribuciones.';
$lang->about_grant_deatil = '가입한 사용자는 cafeXE등 분양형 가상 사이트에 가입을 한 로그인 사용자를 의미합니다';
$lang->about_module = "XE consiste de módulos excepto la librería básica.\n Módulo del Manejo del Módulo muestra todos los módulos instalados y ayuda el manejo de ellos.";
$lang->about_extra_vars_default_value = 'Si múltiples valores predefinidos son requeridos, usted puede enlazar con la coma(,).';
$lang->about_search_virtual_site = "가상 사이트(카페XE등)의 도메인을 입력하신 후 검색하세요.<br/>가상 사이트이외의 모듈은 내용을 비우고 검색하시면 됩니다. (http:// 는 제외)";
$lang->about_langcode = "언어별로 다르게 설정하고 싶으시면 언어코드 찾기를 이용해주세요";
$lang->about_file_extension= "%s 파일만 가능합니다.";
?>
| lgpl-2.1 |
ghostsintju/hivedb | src/main/java/org/hivedb/util/validators/NonNegativeValidator.java | 879 | package org.hivedb.util.validators;
import org.hivedb.HiveRuntimeException;
import org.hivedb.util.classgen.ReflectionTools;
/**
 * Validator asserting that a numeric bean property is non-negative.
 */
public class NonNegativeValidator implements Validator {

	/**
	 * Returns true when the named property of the given instance is a
	 * non-negative number.
	 *
	 * @param instance bean whose getter is invoked reflectively
	 * @param propertyName property to read
	 * @throws HiveRuntimeException if the property value is not a Number
	 */
	public boolean isValid(Object instance, String propertyName) {
		Object obj = ReflectionTools.invokeGetter(instance, propertyName);
		if (!(obj instanceof Number))
			throw new HiveRuntimeException("Expected an instance of type Number, but got " + obj.getClass().getSimpleName());
		Number num = (Number)obj;
		// invokeGetter always returns a boxed value, so the original test
		// obj.getClass().equals(long.class) could never be true; Long is the
		// one case needing an exact integer comparison (double cannot
		// represent all long values). All other Number types are checked in
		// double space, exactly as before.
		return num instanceof Long ? num.longValue() >= 0L : num.doubleValue() >= 0.0;
	}

	/**
	 * Reports a validation failure for the given property.
	 *
	 * @throws HiveRuntimeException always
	 */
	public void throwInvalid(Object instance, String propertyName) {
		throw new HiveRuntimeException(String.format("Property %s of class %s is negative for instance %s", propertyName, instance.getClass().getSimpleName(), instance.toString()));
	}
}
| lgpl-2.1 |
TheTimmy/spack | var/spack/repos/builtin/packages/xf86miscproto/package.py | 1753 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xf86miscproto(AutotoolsPackage):
    """This package includes the protocol definitions of the "XFree86-Misc"
    extension to the X11 protocol. The "XFree86-Misc" extension is
    supported by the XFree86 X server and versions of the Xorg X server
    prior to Xorg 1.6."""

    # Upstream project page and release-tarball download location.
    homepage = "http://cgit.freedesktop.org/xorg/proto/xf86miscproto"
    url = "https://www.x.org/archive/individual/proto/xf86miscproto-0.9.3.tar.gz"

    # Known release with its MD5 checksum (verified by Spack on fetch).
    version('0.9.3', 'c6432f04f84929c94fa05b3a466c489d')
fjardon/motif | config/util/mergelib.cpp | 2426 | XCOMM!/bin/sh
XCOMM
XCOMM $TOG: mergelib.cpp /main/5 1998/02/06 11:24:31 kaleb $
XCOMM
XCOMM Motif
XCOMM
XCOMM Copyright (c) 1987-2012, The Open Group. All rights reserved.
XCOMM
XCOMM These libraries and programs are free software; you can
XCOMM redistribute them and/or modify them under the terms of the GNU
XCOMM Lesser General Public License as published by the Free Software
XCOMM Foundation; either version 2 of the License, or (at your option)
XCOMM any later version.
XCOMM
XCOMM These libraries and programs are distributed in the hope that
XCOMM they will be useful, but WITHOUT ANY WARRANTY; without even the
XCOMM implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
XCOMM PURPOSE. See the GNU Lesser General Public License for more
XCOMM details.
XCOMM
XCOMM You should have received a copy of the GNU Lesser General Public
XCOMM License along with these librararies and programs; if not, write
XCOMM to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
XCOMM Floor, Boston, MA 02110-1301 USA
XCOMM
XCOMM Author: Jim Fulton, MIT X Consortium
XCOMM
XCOMM mergelib - merge one library into another; this is commonly used by X
XCOMM to add the extension library into the base Xlib.
XCOMM
usage="usage: $0 to-library from-library [object-filename-prefix]"
XCOMM Default prefix prepended to every extracted object file so its name
XCOMM cannot clash with members already present in the destination library.
objprefix=_

case $# in
2) ;;
3) objprefix=$3 ;;
*) echo "$usage" 1>&2; exit 1 ;;
esac

tolib=$1
fromlib=$2

if [ ! -f $fromlib ]; then
    echo "$0: no such from-library $fromlib" 1>&2
    exit 1
fi

if [ ! -f $tolib ]; then
    echo "$0: no such to-library $tolib" 1>&2
    exit 1
fi

XCOMM
XCOMM Create a temp directory, and figure out how to reference the
XCOMM object files from it (i.e. relative vs. absolute path names).
XCOMM
tmpdir=tmp.$$
origdir=..

mkdir $tmpdir
if [ ! -d $tmpdir ]; then
    echo "$0: unable to create temporary directory $tmpdir" 1>&2
    exit 1
fi

XCOMM A leading / means the path is absolute and works from inside the
XCOMM temp directory as-is; otherwise prepend ../ to climb back out.
case "$fromlib" in
/?*) upfrom= ;;
*) upfrom=../ ;;
esac

case "$tolib" in
/?*) upto= ;;
*) upto=../ ;;
esac

XCOMM
XCOMM In the temp directory, extract all of the object files and prefix
XCOMM them with some symbol to avoid name clashes with the base library.
XCOMM
cd $tmpdir
ar x ${upfrom}$fromlib
for i in *.o; do
    mv $i ${objprefix}$i
done

XCOMM
XCOMM Merge in the object modules, ranlib (if appropriate) and cleanup
XCOMM
ARCMD ${upto}$tolib *.o
RANLIB ${upto}$tolib
cd $origdir
rm -rf $tmpdir
| lgpl-2.1 |
bcosenza/patus | src/cetus/hir/Label.java | 1910 | package cetus.hir;
import java.io.*;
import java.lang.reflect.*;
import java.util.*;
/**
 * Represents a label for use with goto statements.
 */
public class Label extends Statement
{
	// Shared print method used by every Label instance unless overridden
	// via setClassPrintMethod().
	private static Method class_print_method;

	static
	{
		// Resolve defaultPrint(Label, PrintWriter) once, reflectively; the
		// IR's generic printing machinery invokes it through this handle.
		Class<?>[] params = new Class<?>[2];

		try {
			params[0] = Label.class;
			params[1] = PrintWriter.class;
			class_print_method = params[0].getMethod("defaultPrint", params);
		} catch (NoSuchMethodException e) {
			throw new InternalError();
		}
	}

	/**
	 * Creates a new label with the specified name ID.
	 *
	 * @param name the name of the label.
	 * @throws IllegalArgumentException if <b>name</b> is null.
	 * @throws NotAnOrphanException if <b>name</b> has a parent.
	 */
	public Label(IDExpression name)
	{
		object_print_method = class_print_method;
		// normalize any IDExpression into a NameID before storing it
		if (name != null && !(name instanceof NameID))
			name = new NameID(name.toString());
		addChild(name);
	}

	/**
	 * Prints a label to a stream, in "name:" form.
	 *
	 * @param l The label to print.
	 * @param o The writer on which to print the label.
	 */
	public static void defaultPrint(Label l, PrintWriter o)
	{
		l.getName().print(o);
		o.print(":");
	}

	/**
	 * Overrides the class print method, so that all subsequently
	 * created objects will use the supplied method.
	 *
	 * @param m The new print method.
	 */
	static public void setClassPrintMethod(Method m)
	{
		class_print_method = m;
	}

	/**
	 * Returns the name ID for this Label (stored as the first child).
	 */
	public IDExpression getName()
	{
		return (IDExpression)children.get(0);
	}

	/**
	 * Sets the name of the label with the specified name.
	 *
	 * @param name - name of the label.
	 * @throws IllegalArgumentException if <b>name</b> is null.
	 * @throws NotAnOrphanException if <b>name</b> has a parent.
	 */
	public void setName(IDExpression name)
	{
		setChild(0, name);
	}
}
| lgpl-2.1 |
davidfoerster/javaFlacEncoder | src/javaFlacEncoder/MetadataBlockHeader.java | 3099 | /*
* Copyright (C) 2010 Preston Lacey http://javaflacencoder.sourceforge.net/
* All Rights Reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package javaFlacEncoder;
/**
 * The MetadataBlockHeader class is used to creat FLAC compliant Metadata Block
 * Headers. See the FLAC specification for more information.
 *
 * @author Preston Lacey
 */
public class MetadataBlockHeader {

	/**
	 * Enum containing the different Metadata block types. See the FLAC spec
	 * for more information on the various types.
	 *
	 * Declaration order matters: each constant's ordinal is exactly the
	 * numeric block-type code defined by the FLAC format.
	 */
	public enum MetadataBlockType {
		/** A meta-block containing stream configuration information */
		STREAMINFO,
		/** A meta-block to pad the stream, allowing other meta-data to be
		 * written in the future without re-writing the entire stream.
		 */
		PADDING,
		/** Application meta-block*/
		APPLICATION,
		/** A meta-block which aids in seeking in the stream */
		SEEKTABLE,
		/** A meta-block for tags/comments */
		VORBIS_COMMENT,
		/** Cuesheet meta-block */
		CUESHEET,
		/** A meta-block to store an image, such as cover-art */
		PICTURE
	};

	/**
	 * Constructor. This class defines no instance variables and only static
	 * methods.
	 */
	public MetadataBlockHeader() {
	}

	/**
	 * Create a meta-data block header of the given type, and return the result
	 * in a new EncodedElement(so data is ready to be placed directly in FLAC
	 * stream)
	 *
	 * @param lastBlock True if this is the last meta-block in the stream. False
	 * otherwise.
	 *
	 * @param type enum indicating which type of block we're creating.
	 * @param length Length of the meta-data block which follows this header.
	 * @return EncodedElement containing the header.
	 */
	public static EncodedElement getMetadataBlockHeader(boolean lastBlock,
			MetadataBlockType type, int length) {
		EncodedElement ele = new EncodedElement(4, 0);
		// 1 bit: last-metadata-block flag
		ele.addInt(lastBlock ? 1 : 0, 1);
		// 7 bits: block type. The FLAC type code equals the constant's
		// position in the enum declaration, so the previous linear scan
		// over values() is replaced by Enum.ordinal(), which returns the
		// same index.
		ele.addInt(type.ordinal(), 7);
		// 24 bits: length of the metadata block that follows this header
		ele.addInt(length, 24);
		return ele;
	}
}
| lgpl-2.1 |
rpelisse/rhq-sync-tool | src/main/java/org/jboss/rhq/sync/tool/cli/QualifierTypeConverter.java | 1509 | /*
*
* RHQ Sync Tool
* Copyright (C) 2012-2013 Red Hat, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License,
* version 2.1, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License and the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* and the GNU Lesser General Public License along with this program;
* if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
package org.jboss.rhq.sync.tool.cli;
import com.beust.jcommander.IStringConverter;
import com.beust.jcommander.ParameterException;
/**
 * JCommander string-to-enum converter for {@link QualifierType} command-line
 * parameters. Matching against the enum constants is case-insensitive.
 *
 * @author Romain PELISSE - <belaran@redhat.com>
 *
 */
public class QualifierTypeConverter implements IStringConverter<QualifierType> {

	@Override
	public QualifierType convert(String parameter) {
		// Null or empty input is surfaced through JCommander's
		// ParameterException (wrapping an NPE as the cause).
		if ( parameter == null || "".equals(parameter) )
			throw new ParameterException(new NullPointerException());
		// Case-insensitive comparison against each constant's string form.
		for ( QualifierType qualifier : QualifierType.values())
			if ( qualifier.toString().equalsIgnoreCase(parameter) )
				return qualifier;
		throw new UnsupportedOperationException("Unsupported qualifier:" + parameter);
	}
}
| lgpl-2.1 |
4aiman/MineClone | mods/death/init.lua | 494 | --if minetest.setting_get("keepInventory") == false then
minetest.register_on_dieplayer(function(player)
local inv = player:get_inventory()
local pos = player:getpos()
for i,stack in ipairs(inv:get_list("main")) do
local x = math.random(0, 9)/3
local z = math.random(0, 9)/3
pos.x = pos.x + x
pos.z = pos.z + z
minetest.env:add_item(pos, stack)
stack:clear()
inv:set_stack("main", i, stack)
pos.x = pos.x - x
pos.z = pos.z - z
end
end)
--end | lgpl-2.1 |
martinlaz/lenskit | lenskit-eval/src/main/java/org/grouplens/lenskit/eval/traintest/ExternalEvalJob.java | 12635 | /*
* LensKit, an open source recommender systems toolkit.
* Copyright 2010-2014 LensKit Contributors. See CONTRIBUTORS.md.
* Work on LensKit has been funded by the National Science Foundation under
* grants IIS 05-34939, 08-08692, 08-12148, and 10-17697.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package org.grouplens.lenskit.eval.traintest;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import it.unimi.dsi.fastutil.longs.Long2DoubleMap;
import it.unimi.dsi.fastutil.longs.Long2DoubleOpenHashMap;
import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.text.StrTokenizer;
import org.grouplens.lenskit.Recommender;
import org.grouplens.lenskit.RecommenderBuildException;
import org.grouplens.lenskit.data.dao.EventDAO;
import org.grouplens.lenskit.data.dao.UserEventDAO;
import org.grouplens.lenskit.data.history.History;
import org.grouplens.lenskit.data.history.UserHistory;
import org.grouplens.lenskit.data.source.TextDataSource;
import org.grouplens.lenskit.data.text.DelimitedColumnEventFormat;
import org.grouplens.lenskit.data.text.EventFormat;
import org.grouplens.lenskit.eval.data.traintest.GenericTTDataSet;
import org.grouplens.lenskit.eval.data.traintest.TTDataSet;
import org.grouplens.lenskit.eval.metrics.topn.ItemSelector;
import org.grouplens.lenskit.scored.ScoredId;
import org.grouplens.lenskit.util.io.LoggingStreamSlurper;
import org.grouplens.lenskit.util.table.writer.CSVWriter;
import org.grouplens.lenskit.util.table.writer.TableWriter;
import org.grouplens.lenskit.vectors.ImmutableSparseVector;
import org.grouplens.lenskit.vectors.SparseVector;
import org.lenskit.data.events.Event;
import org.lenskit.data.ratings.Rating;
import org.lenskit.util.io.LineStream;
import org.lenskit.util.io.ObjectStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.UUID;
/**
 * Job implementation for external algorithms.
* @author <a href="http://www.grouplens.org">GroupLens Research</a>
*/
class ExternalEvalJob extends TrainTestJob {
    private static final Logger logger = LoggerFactory.getLogger(ExternalEvalJob.class);
    private final ExternalAlgorithm algorithm;
    // Random key used to give each job run its own staging directory name.
    private final UUID key;
    // Predictions read back from the external process's output, keyed by user ID.
    // Non-null only between buildRecommender() and cleanup().
    private Long2ObjectMap<SparseVector> userPredictions;
    // References to hold on to the user event DAOs for test users
    private UserEventDAO userTrainingEvents, userTestEvents;

    public ExternalEvalJob(TrainTestEvalTask task,
                           @Nonnull ExternalAlgorithm algo,
                           @Nonnull TTDataSet ds) {
        super(task, algo, ds);
        algorithm = algo;
        key = UUID.randomUUID();
    }

    /**
     * "Builds" the recommender by writing the train/test data to CSV files,
     * running the configured external command, and reading its prediction
     * output back into memory.
     *
     * @throws RecommenderBuildException if file preparation fails, the process
     *         cannot be started, exits nonzero, is interrupted, or produces no
     *         output file.
     */
    @Override
    protected void buildRecommender() throws RecommenderBuildException {
        Preconditions.checkState(userPredictions == null, "recommender already built");
        File dir = getStagingDir();
        logger.info("using output/staging directory {}", dir);
        if (!dir.exists()) {
            logger.info("creating directory {}", dir);
            dir.mkdirs();
        }
        final File train;
        try {
            train = trainingFile(dataSet);
        } catch (IOException e) {
            throw new RecommenderBuildException("error preparing training file", e);
        }
        final File test;
        try {
            test = testFile(dataSet);
        } catch (IOException e) {
            throw new RecommenderBuildException("error preparing test file", e);
        }
        final File output = getFile("predictions.csv");
        // Substitute the placeholder tokens in the configured command line.
        // Lists.transform is lazy, but the list is fully consumed below.
        List<String> args = Lists.transform(algorithm.getCommand(), new Function<String, String>() {
            @Nullable
            @Override
            public String apply(@Nullable String input) {
                if (input == null) {
                    throw new NullPointerException("command element");
                }
                String s = input.replace("{OUTPUT}", output.getAbsolutePath());
                s = s.replace("{TRAIN_DATA}", train.getAbsolutePath());
                s = s.replace("{TEST_DATA}", test.getAbsolutePath());
                return s;
            }
        });
        logger.info("running {}", StringUtils.join(args, " "));
        Process proc;
        try {
            proc = new ProcessBuilder().command(args)
                                       .directory(algorithm.getWorkDir())
                                       .start();
        } catch (IOException e) {
            throw new RecommenderBuildException("error creating process", e);
        }
        // Forward the subprocess's stderr to our logger so it is not lost.
        Thread listen = new LoggingStreamSlurper("external-algo", proc.getErrorStream(),
                                                 logger, "external: ");
        listen.start();
        int result = -1;
        boolean done = false;
        // Wait for the process, killing it if this thread is interrupted.
        while (!done) {
            try {
                result = proc.waitFor();
                done = true;
            } catch (InterruptedException e) {
                logger.info("thread interrupted, killing subprocess");
                proc.destroy();
                throw new RecommenderBuildException("recommender build interrupted", e);
            }
        }
        if (result != 0) {
            logger.error("external command exited with status {}", result);
            throw new RecommenderBuildException("recommender exited with code " + result);
        }
        Long2ObjectMap<SparseVector> vectors;
        try {
            vectors = readPredictions(output);
        } catch (FileNotFoundException e) {
            logger.error("cannot find expected output file {}", output);
            throw new RecommenderBuildException("recommender produced no output", e);
        }
        userPredictions = vectors;
        userTrainingEvents = dataSet.getTrainingData().getUserEventDAO();
        userTestEvents = dataSet.getTestData().getUserEventDAO();
    }

    @Override
    protected TestUser getUserResults(long uid) {
        Preconditions.checkState(userPredictions != null, "recommender not built");
        return new TestUserImpl(uid);
    }

    // Release the prediction map and DAO references acquired in buildRecommender().
    @Override
    protected void cleanup() {
        userPredictions = null;
        userTrainingEvents = null;
        userTestEvents = null;
    }

    // Staging directory is "<algo-name>-<uuid>" under the algorithm's work dir.
    private File getStagingDir() {
        String dirName = String.format("%s-%s", algorithm.getName(), key);
        return new File(algorithm.getWorkDir(), dirName);
    }

    /**
     * Create a fully-qualified algorithm file name.
     * @param fn The file name.
     * @return A file in the working directory.
     */
    private File getFile(String fn) {
        return new File(getStagingDir(), fn);
    }

    // Return a CSV file of training ratings, reusing the source file directly
    // when it is already a comma-delimited text file; otherwise write a copy.
    private File trainingFile(TTDataSet data) throws IOException {
        try {
            GenericTTDataSet gds = (GenericTTDataSet) data;
            TextDataSource csv = (TextDataSource) gds.getTrainingData();
            EventFormat fmt = csv.getFormat();
            String delim = fmt instanceof DelimitedColumnEventFormat
                    ? ((DelimitedColumnEventFormat) fmt).getDelimiter()
                    : null;
            if (",".equals(delim)) {
                File file = csv.getFile();
                logger.debug("using training file {}", file);
                return file;
            }
        } catch (ClassCastException e) {
            /* No-op - this is fine, we will make a file. */
        }
        File file = makeCSV(data.getTrainingDAO(), getFile("train.csv"), true);
        logger.debug("wrote training file {}", file);
        return file;
    }

    // Test data is always rewritten (user,item only — no rating values).
    private File testFile(TTDataSet data) throws IOException {
        File file = makeCSV(data.getTestDAO(), getFile("test.csv"), false);
        logger.debug("wrote test file {}", file);
        return file;
    }

    // Write the DAO's ratings to `file` as CSV rows of (user,item[,rating]).
    // Unrated (unset-value) events are skipped.
    private File makeCSV(EventDAO dao, File file, boolean writeRatings) throws IOException {
        // TODO Make this not re-copy data unnecessarily
        Object[] row = new Object[writeRatings ? 3 : 2];
        TableWriter table = CSVWriter.open(file, null);
        try {
            ObjectStream<Rating> ratings = dao.streamEvents(Rating.class);
            try {
                for (Rating r: ratings) {
                    if (r.hasValue()) {
                        row[0] = r.getUserId();
                        row[1] = r.getItemId();
                        if (writeRatings) {
                            row[2] = r.getValue();
                        }
                        table.writeRow(row);
                    }
                }
            } finally {
                ratings.close();
            }
        } finally {
            table.close();
        }
        return file;
    }

    // Parse the external algorithm's output file (user, item, prediction rows,
    // split on the algorithm's configured delimiter) into per-user vectors.
    private Long2ObjectMap<SparseVector> readPredictions(File predFile) throws FileNotFoundException, RecommenderBuildException {
        Long2ObjectMap<Long2DoubleMap> data = new Long2ObjectOpenHashMap<Long2DoubleMap>();
        StrTokenizer tok = new StrTokenizer((String) null, algorithm.getOutputDelimiter());
        ObjectStream<List<String>> lines = LineStream.openFile(predFile)
                                                     .tokenize(tok);
        try {
            int n = 0;
            for (List<String> row: lines) {
                n++;
                // Skip blank lines; && binds tighter than ||, so this is
                // "empty row, or a single empty field".
                if (row.size() == 0 || row.size() == 1 && row.get(0).equals("")) {
                    continue;
                }
                if (row.size() < 3) {
                    logger.error("predictions line {}: invalid row {}", n,
                                 StringUtils.join(row, ","));
                    throw new RecommenderBuildException("invalid prediction row");
                }
                long uid = Long.parseLong(row.get(0));
                long iid = Long.parseLong(row.get(1));
                double pred = Double.parseDouble(row.get(2));
                Long2DoubleMap user = data.get(uid);
                if (user == null) {
                    user = new Long2DoubleOpenHashMap();
                    data.put(uid, user);
                }
                user.put(iid, pred);
            }
        } finally {
            lines.close();
        }
        // Freeze the accumulated maps into immutable sparse vectors.
        Long2ObjectMap<SparseVector> vectors = new Long2ObjectOpenHashMap<SparseVector>(data.size());
        for (Long2ObjectMap.Entry<Long2DoubleMap> entry: data.long2ObjectEntrySet()) {
            vectors.put(entry.getLongKey(), ImmutableSparseVector.create(entry.getValue()));
        }
        return vectors;
    }

    /**
     * External algorithm implementation of TestUser.
     * @author <a href="http://www.grouplens.org">GroupLens Research</a>
     */
    class TestUserImpl extends AbstractTestUser {
        private final long userId;

        public TestUserImpl(long uid) {
            userId = uid;
        }

        @Override
        public UserHistory<Event> getTrainHistory() {
            UserHistory<Event> events = userTrainingEvents.getEventsForUser(userId);
            if(events == null){
                return History.forUser(userId); //Creates an empty history for this particular user.
            } else {
                return events;
            }
        }

        @Override
        public UserHistory<Event> getTestHistory() {
            return userTestEvents.getEventsForUser(userId);
        }

        // May return null if the external process emitted nothing for this user.
        @Override
        public SparseVector getPredictions() {
            return userPredictions.get(userId);
        }

        // Recommendation lists are not supported for external algorithms.
        @Override
        public List<ScoredId> getRecommendations(int n, ItemSelector candSel, ItemSelector exclSel) {
            return null;
        }

        @Override
        public Recommender getRecommender() {
            return null;
        }
    }
}
| lgpl-2.1 |
heuermh/biojava | biojava-structure/src/main/java/org/biojava/nbio/structure/DatabasePDBRevRecord.java | 1513 | package org.biojava.nbio.structure;
import org.biojava.nbio.structure.io.cif.CifBean;
/**
* Represents revision records for use by {@link PDBHeader}.
* @author Sebastian Bittrich
* @since 6.0.0
*/
public class DatabasePDBRevRecord implements CifBean {
    private static final long serialVersionUID = 1L;

    // Revision number (stored as a string, as in the mmCIF source data).
    private String revNum;
    // Revision type, e.g. "initial release".
    private String type;
    // Free-text details of the revision.
    private String details;

    public DatabasePDBRevRecord() {
    }

    /**
     * Creates a record from its three components.
     * @param revNum the revision number
     * @param type the revision type
     * @param details the revision details
     */
    public DatabasePDBRevRecord(String revNum, String type, String details) {
        this.revNum = revNum;
        this.type = type;
        this.details = details;
    }

    /**
     * Creates a record from one row of the CIF {@code database_PDB_rev_record}
     * category.
     * @param cif the CIF category to read from
     * @param row the row index
     */
    public DatabasePDBRevRecord(org.rcsb.cif.schema.mm.DatabasePDBRevRecord cif, int row) {
        // Arguments must match the (revNum, type, details) order of the
        // delegate constructor; previously details/revNum/type were swapped.
        this(cif.getRevNum().getStringData(row),
                cif.getType().get(row),
                cif.getDetails().get(row));
    }

    public String getRevNum() {
        return revNum;
    }

    public void setRevNum(String revNum) {
        this.revNum = revNum;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getDetails() {
        return details;
    }

    public void setDetails(String details) {
        this.details = details;
    }

    @Override
    public String toString() {
        return "DatabasePDBRevRecord{" +
                "revNum='" + revNum + '\'' +
                ", type='" + type + '\'' +
                ", details='" + details + '\'' +
                '}';
    }
}
| lgpl-2.1 |
dianhu/Kettle-Research | src-db/org/pentaho/di/core/database/DatabaseFactoryInterface.java | 986 | /*
* Copyright (c) 2010 Pentaho Corporation. All rights reserved.
* This software was developed by Pentaho Corporation and is provided under the terms
* of the GNU Lesser General Public License, Version 2.1. You may not use
* this file except in compliance with the license. If you need a copy of the license,
* please go to http://www.gnu.org/licenses/lgpl-2.1.txt. The Original Code is Pentaho
* Data Integration. The Initial Developer is Pentaho Corporation.
*
* Software distributed under the GNU Lesser Public License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. Please refer to
* the license for the specific language governing your rights and limitations.
*/
package org.pentaho.di.core.database;
import org.pentaho.di.core.exception.KettleDatabaseException;
public interface DatabaseFactoryInterface {
    /**
     * Attempts to connect to the database described by the given metadata and
     * reports the outcome.
     *
     * @param databaseMeta the connection description to test
     * @return a human-readable report of the connection attempt
     * @throws KettleDatabaseException if the test cannot be performed
     */
    public String getConnectionTestReport(DatabaseMeta databaseMeta) throws KettleDatabaseException;
}
| lgpl-2.1 |
uugaa/hibernate-ogm | cassandra/src/main/java/org/hibernate/ogm/datastore/cassandra/logging/impl/Log.java | 1699 | /*
* Hibernate OGM, Domain model persistence for NoSQL datastores
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.ogm.datastore.cassandra.logging.impl;
import static org.jboss.logging.Logger.Level.INFO;
import org.hibernate.HibernateException;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
/**
* Log message abstraction for i18n.
*
* @author Jonathan Halliday
*/
@MessageLogger(projectCode = "OGM")
public interface Log extends org.hibernate.ogm.util.impl.Log {
    // for cassandra backend use uniq ids up from 1601...

    /** Logged when a connection to the Cassandra cluster is being opened. */
    @LogMessage(level = INFO)
    @Message(id = 1601, value = "Connecting to Cassandra at %1$s:%2$d")
    void connectingToCassandra(String host, int port);

    /** Logged when the connection to the Cassandra cluster is being closed. */
    @LogMessage(level = INFO)
    @Message(id = 1602, value = "Closing connection to Cassandra")
    void disconnectingFromCassandra();

    /** Wraps a driver startup failure in a HibernateException. */
    @Message(id = 1603, value = "Unable to initialize Cassandra driver")
    HibernateException unableToInitializeCassandra(@Cause RuntimeException e);

    /** Wraps a CREATE TABLE failure in a HibernateException. */
    @Message(id = 1604, value = "Failed to create table %1$s")
    HibernateException failedToCreateTable(String table, @Cause RuntimeException e);

    /** Wraps a CREATE INDEX failure in a HibernateException. */
    @Message(id = 1605, value = "Failed to create index on table %1$s")
    HibernateException failedToCreateIndex(String table, @Cause RuntimeException e);

    /** Wraps a failed CQL statement execution in a HibernateException. */
    @Message(id = 1606, value = "Failed to execute CQL operation %1$s")
    HibernateException failToExecuteCQL(String cqlStatement, @Cause RuntimeException e);
}
| lgpl-2.1 |
ace20022/libbluray | src/libbluray/bdj/java/org/bluray/ti/TitleImpl.java | 3297 | package org.bluray.ti;
import javax.tv.locator.Locator;
import javax.tv.service.SIException;
import javax.tv.service.SIRequest;
import javax.tv.service.SIRequestor;
import javax.tv.service.ServiceType;
import org.bluray.net.BDLocator;
import org.davic.net.InvalidLocatorException;
import org.videolan.Libbluray;
import org.videolan.TitleInfo;
import org.videolan.bdjo.Bdjo;
public class TitleImpl implements Title {
    // Title number from the disc index (0 = top menu, 65535 = first playback).
    private int titleNum;
    // Raw title information looked up from libbluray.
    private TitleInfo ti;
    // BD-J object for BD-J titles; stays null for HDMV titles.
    private Bdjo bdjo = null;

    /**
     * Looks up the given title in the disc index and, for BD-J titles, loads
     * its BD-J object.
     *
     * @param titleNum the title number to resolve
     * @throws SIException if the title does not exist or its BDJO cannot be loaded
     */
    public TitleImpl(int titleNum) throws SIException {
        this.titleNum = titleNum;
        this.ti = Libbluray.getTitleInfo(titleNum);
        if (ti == null)
            throw new SIException("Title " + titleNum + " does not exist in disc index");
        if (ti.isBdj()) {
            bdjo = Libbluray.getBdjo(ti.getBdjoName());
            if (bdjo == null)
                throw new SIException("title " + titleNum + ": Failed loading " + ti.getBdjoName() + ".bdjo");
        }
    }

    /** Returns the playlists this title may access (empty for HDMV titles). */
    public PlayList[] getPlayLists() {
        if (bdjo == null)
            return new PlayList[0];
        String[] names = bdjo.getAccessiblePlaylists().getPlayLists();
        PlayList[] lists = new PlayList[names.length];
        for (int idx = 0; idx < names.length; idx++) {
            lists[idx] = new PlayListImpl(names[idx], this);
        }
        return lists;
    }

    /** Whether the first accessible playlist starts automatically. */
    public boolean hasAutoPlayList() {
        return bdjo != null && bdjo.getAccessiblePlaylists().isAutostartFirst();
    }

    /** Returns a BD locator of the form bd://&lt;title-number-in-hex&gt;. */
    public Locator getLocator() {
        try {
            return new BDLocator("bd://" + Integer.toString(titleNum, 16));
        } catch (InvalidLocatorException ex) {
            return null;
        }
    }

    /** Human-readable name; well-known title numbers get fixed names. */
    public String getName() {
        switch (titleNum) {
        case 0:
            return "Top Menu";
        case 65534:
            return "Suspended Title";
        case 65535:
            return "First Playback";
        default:
            return "Title " + titleNum;
        }
    }

    /** Maps the libbluray playback type onto the BD-J TitleType constants. */
    public ServiceType getServiceType() {
        int playbackType = ti.getPlaybackType();
        if (playbackType == TitleInfo.HDMV_PLAYBACK_TYPE_MOVIE)
            return TitleType.HDMV_MOVIE;
        if (playbackType == TitleInfo.HDMV_PLAYBACK_TYPE_INTERACTIVE)
            return TitleType.HDMV_INTERACTIVE;
        if (playbackType == TitleInfo.BDJ_PLAYBACK_TYPE_MOVIE)
            return TitleType.BDJ_MOVIE;
        if (playbackType == TitleInfo.BDJ_PLAYBACK_TYPE_INTERACTIVE)
            return TitleType.BDJ_INTERACTIVE;
        return TitleType.UNKNOWN;
    }

    /** Two titles are equal when they carry the same title number. */
    public boolean equals(Object obj) {
        if (obj instanceof TitleImpl) {
            return ((TitleImpl) obj).getTitleNum() == titleNum;
        }
        return false;
    }

    public int hashCode() {
        return titleNum % 1023;
    }

    public boolean hasMultipleInstances() {
        return false;
    }

    public SIRequest retrieveDetails(SIRequestor requestor) {
        //TODO
        org.videolan.Logger.unimplemented(TitleImpl.class.getName(), "retrieveDetails");
        return null;
    }

    public int getTitleNum() {
        return titleNum;
    }

    public TitleInfo getTitleInfo() {
        return ti;
    }
}
| lgpl-2.1 |
modius/railo | railo-java/railo-core/src/railo/runtime/tag/Servlet.java | 1853 | package railo.runtime.tag;
import railo.runtime.exp.ExpressionException;
import railo.runtime.ext.tag.TagImpl;
/**
* Executes a Java servlet on a JRun engine. This tag is used in conjunction with the
* cfserletparam tag, which passes data to the servlet.
*
*
*
**/
public final class Servlet extends TagImpl {
    private boolean debug;
    private String code;
    private boolean writeoutput;
    private double timeout;
    private String jrunproxy;
    /**
     * constructor for the tag class
     * @throws ExpressionException always — the cfservlet tag is deprecated, so
     *         instantiating it unconditionally fails
     **/
    public Servlet() throws ExpressionException {
        throw new ExpressionException("tag cfservlet is deprecated");
    }
    /** set the value debug
     * Boolean specifying whether additional information about the JRun connection status and
     * activity is to be written to the JRun error log
     * @param debug value to set
     **/
    public void setDebug(boolean debug) {
        this.debug=debug;
    }
    /** set the value code
     * The class name of the Java servlet to execute.
     * @param code value to set
     **/
    public void setCode(String code) {
        this.code=code;
    }
    /** set the value writeoutput
     * @param writeoutput value to set
     **/
    public void setWriteoutput(boolean writeoutput) {
        this.writeoutput=writeoutput;
    }
    /** set the value timeout
     * Specifies how many seconds JRun waits for the servlet to complete before timing out.
     * @param timeout value to set
     **/
    public void setTimeout(double timeout) {
        this.timeout=timeout;
    }
    /** set the value jrunproxy
     * @param jrunproxy value to set
     **/
    public void setJrunproxy(String jrunproxy) {
        this.jrunproxy=jrunproxy;
    }
    // Deprecated tag: no body evaluation, page processing continues normally.
    @Override
    public int doStartTag() {
        return SKIP_BODY;
    }
    @Override
    public int doEndTag() {
        return EVAL_PAGE;
    }
    // Reset all attributes so the pooled tag instance can be reused.
    @Override
    public void release() {
        super.release();
        debug=false;
        code="";
        writeoutput=false;
        timeout=0d;
        jrunproxy="";
    }
} | lgpl-2.1 |
litalidev/sipxtapi | sipXportLib/src/test/utl/UtlDListIterator.cpp | 18656 | //
// Copyright (C) 2004-2006 SIPfoundry Inc.
// Licensed by SIPfoundry under the LGPL license.
//
// Copyright (C) 2004-2006 Pingtel Corp. All rights reserved.
// Licensed to SIPfoundry under a Contributor Agreement.
//
// $$
///////////////////////////////////////////////////////////////////////////////
#include <cppunit/extensions/HelperMacros.h>
#include <cppunit/TestCase.h>
#include <string.h>
#include <stdlib.h>
#include <cstdarg>
#include <os/OsDefs.h>
#include <utl/UtlInt.h>
#include <utl/UtlString.h>
#include <utl/UtlDList.h>
#include <utl/UtlDListIterator.h>
#include <sipxunit/TestUtilities.h>
using namespace std ;
// CppUnit fixture exercising UtlDListIterator: advancing, searching,
// positioning at the end, and insertion relative to the iterator point.
class UtlDListIteratorTests : public CppUnit::TestCase
{
    CPPUNIT_TEST_SUITE(UtlDListIteratorTests);
    CPPUNIT_TEST(testAdvancingOperator) ;
    CPPUNIT_TEST(testFindNext) ;
    CPPUNIT_TEST(testLast) ;
    CPPUNIT_TEST(testInsertAfterPoint_EmptyList) ;
    CPPUNIT_TEST(testInsertAfterPoint) ;
    CPPUNIT_TEST_SUITE_END();

private:
    static const int INDEX_NOT_EXIST ;
    static const int commonEntriesCount ;

    // commonList is populated in setUp() with the six Containables below;
    // emptyList stays empty for boundary-condition tests.
    UtlDList commonList ;
    UtlDList emptyList ;

    UtlString commonString1 ;
    UtlString commonString2 ;
    UtlString commonString3 ;
    UtlInt commonInt1 ;
    UtlInt commonInt2 ;
    UtlInt commonInt3 ;

    // *_clone objects are equal by value to their counterparts but are
    // distinct instances, used to distinguish value vs. reference matches.
    UtlString commonString1_clone;
    UtlString commonString2_clone ;
    UtlString commonString3_clone ;
    UtlInt commonInt1_clone ;
    UtlInt commonInt2_clone ;
    UtlInt commonInt3_clone ;

    static const char* longAlphaNumString ;
    static const char* regularString ;

    // Parallel arrays of the list contents (and their clones), in list order.
    UtlContainable** commonContainables;
    UtlContainable** commonContainables_Clone;

    enum IndexOrContains { TEST_INDEX, TEST_FIND, TEST_CONTAINS, TEST_CONTAINS_REF } ;
    enum TestInsertOrAppend {TEST_APPEND, TEST_INSERT} ;
    enum RemoveType {TEST_REMOVE, TEST_REMOVE_REF } ;

public:
    // Build the six-element common list and record each element (and its
    // value-equal clone) in the parallel lookup arrays.
    void setUp()
    {
        commonString1 = UtlString(regularString) ;
        commonString1_clone = UtlString(regularString) ;
        commonString2 = UtlString("") ;
        commonString2_clone = UtlString("") ;
        commonString3 = UtlString(longAlphaNumString) ;
        commonString3_clone = UtlString(longAlphaNumString) ;
        commonInt1 = UtlInt(0) ;
        commonInt1_clone = UtlInt(0) ;
        commonInt2 = UtlInt(INT_MAX) ;
        commonInt2_clone = UtlInt(INT_MAX) ;
        commonInt3 = UtlInt(INT_MIN) ;
        commonInt3_clone = UtlInt(INT_MIN) ;
        commonList.append(&commonString1) ;
        commonContainables[0] = &commonString1 ;
        commonContainables_Clone[0] = &commonString1_clone ;
        commonList.append(&commonInt1) ;
        commonContainables[1] = &commonInt1 ;
        commonContainables_Clone[1] = &commonInt1_clone ;
        commonList.append(&commonInt2) ;
        commonContainables[2] = &commonInt2 ;
        commonContainables_Clone[2] = &commonInt2_clone;
        commonList.append(&commonString2) ;
        commonContainables[3] = &commonString2 ;
        commonContainables_Clone[3] = &commonString2_clone ;
        commonList.append(&commonInt3) ;
        commonContainables[4] = &commonInt3 ;
        commonContainables_Clone[4] = &commonInt3_clone ;
        commonList.append(&commonString3) ;
        commonContainables[5] = &commonString3 ;
        commonContainables_Clone[5] = &commonString3_clone ;
    }

    void tearDown()
    {
    }

    /* Sandbox - please ignore
    */
    void DynaTest()
    {
    }

    // Allocate the lookup arrays once per fixture instance.
    UtlDListIteratorTests()
    {
        commonContainables = new UtlContainable*[commonEntriesCount] ;
        commonContainables_Clone = new UtlContainable*[commonEntriesCount] ;
    }

    ~UtlDListIteratorTests()
    {
        delete[] commonContainables ;
        delete[] commonContainables_Clone ;
    }

    /*!a Test case for the () operator.
    *
    *    The test data for this test is :-
    *      1) The next entry is a UtlString
    *      2) The next entry is a UtlInt
    *      3) The next entry is the last entry
    *      4) All entries have been read
    */
    void testAdvancingOperator()
    {
        const int testCount = 4 ;
        const char* prefix = "Verify the () operator for an iterator when " ;
        const char* Msgs[] = { \
            "the first entry is a UtlString", \
            "the first entry is a UtlInt", \
            "when the list has only one entry", \
            "when the list is empty" \
        } ;
        const char* suffix1 = " :- verify return value" ;
        const char* suffix2 = " :- verify number of entries in the list" ;
        UtlDList testList ;
        testList.append(&commonString1) ;
        testList.append(&commonInt1) ;
        testList.append(&commonString2) ;
        UtlDListIterator iter(testList) ;
        // Expected returns: the three entries in order, then NULL when exhausted.
        UtlContainable* exp[] = { \
            &commonString1 , &commonInt1, &commonString2, NULL \
        } ;
        int expEntries = 3 ;
        for (int i = 0 ; i < testCount ; i++)
        {
            UtlContainable* act = iter() ;
            string msg ;
            TestUtilities::createMessage(3, &msg, prefix, Msgs[i], suffix1) ;
            CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), exp[i], act) ;
            // Advancing must never modify the underlying list.
            TestUtilities::createMessage(3, &msg, prefix, Msgs[i], suffix2);
            CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), expEntries, (int)testList.entries()) ;
        }
        // Test the () operator for an empty list
        UtlDListIterator emptyIter(emptyList) ;
        UtlContainable* act = emptyIter() ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE("Test the () operator for an empty list iterator" , (void*)NULL, (void*)act) ;
    } //testAdvancingOperator()

    /*!a Test case for the findNext() method
    *
    *    The test data for this test case are :-
    *      a) When the match is the first element.
    *      b) When the match is the last element.
    *      c) When the match is a mid element(unique).
    *      d) When the match has two value matches (but a single ref match)
    *      e) When the match has two ref matches.
    *      f) When there is no match at all!
    *      g) When the match is after the current find.
    */
    void testFindNext()
    {
        const int testCount = 7 ;
        const char* prefixFind = "Test the find() method when the match " ;
        const char* Msgs[] = { \
            "is the first element ", \
            "is the last element ", \
            "is a mid element (unique match) ", \
            "has two value matches but a single ref match ", \
            "has two ref matches", \
            "has a value match but no ref match", \
            "has no match at all" \
        } ;
        // insert a clone of the 4th element to the 1st position
        commonList.insertAt(1, (UtlContainable*)commonContainables_Clone[4]) ;
        // The new index for a value match of commonContainables[4] must be 1.
        // insert another copy of the 3rd element to the 2nd position.
        commonList.insertAt(2, (UtlContainable*)commonContainables[3]) ;
        // The new index for commonContainables[3] must be 2) ;
        // what used to be the second element has now moved to 4.
        UtlString noExist("This cannot and should not exist!!!") ;
        const UtlContainable* searchValuesForFind[] = { \
            commonContainables[0], commonContainables[5], commonContainables[2], \
            commonContainables[4], commonContainables[3], \
            commonContainables_Clone[1], &noExist \
        } ;
        // findNext() matches by value, so the first value-equal entry in list
        // order (clone or original) is the one expected back.
        const UtlContainable* expValuesForFind[] = { \
            commonContainables[0], commonContainables[5], commonContainables[2], \
            commonContainables_Clone[4], commonContainables[3], \
            commonContainables[1], NULL \
        } ;
        UtlDListIterator iter(commonList) ;
        for (int i = 0 ; i < testCount ; i++)
        {
            string msg ;
            const UtlContainable* act = iter.findNext(searchValuesForFind[i]) ;
            const UtlContainable* exp = expValuesForFind[i] ;
            TestUtilities::createMessage(2, &msg, prefixFind, Msgs[i]) ;
            CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), exp, act) ;
            iter.reset() ;
        }
        // Now test the case where the iterator is 'past' the index
        iter.reset() ;
        iter() ;
        iter() ;
        iter() ;
        iter() ;
        iter() ;
        UtlContainable* act = iter.findNext(commonContainables[1]) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE("test findNext() when the iterator has moved past the search index", (void*)NULL, (void*)act) ;
    }//testFindNext

    /*!a Test case to test the toLast() and atLast() methods.
    *
    *    The test data for this test case is :-
    *      a) An empty list
    *      b) A non-empty list iterator that is in its initial position
    *      c) A non-empty list iterator that is in its last position.
    *      d) A non-empty list that is in its mid position
    */
    void testLast()
    {
        UtlDList testList ;
        const char* prefix1 = "Test the toLast() method for " ;
        const char* prefix2 = "Test the atLast() method(after calling toLast()) for " ;
        string msg ;
        UtlContainable* uAct ;
        bool isAtLast = false ;
        const char* Msgs[] = { \
            "an empty list iterator ", \
            "a list iterator that is in its zeroth position ", \
            "a non empty list that is already in its last position ", \
            "a non empty list that is in its mid position " \
        } ;
        UtlDListIterator iter(emptyList) ;
        UtlDListIterator iter2(commonList) ;
        // since this test requires a different test setup for each
        // of the test data, tests are done individually rather than using
        // the test array style of testing.
        // Test#1 - Test the methods for an empty list.
        int ti = 0 ;
        iter.toLast() ;
        isAtLast = (TRUE == iter.atLast()) ;
        uAct = iter() ;
        TestUtilities::createMessage(2, &msg, prefix1, Msgs[ti]) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)NULL, (void*)uAct) ;
        TestUtilities::createMessage(2, &msg, prefix2, Msgs[ti]) ;
        // since the list is empty, any position is __PAST__ the last position
        CPPUNIT_ASSERT_MESSAGE(msg.data(), !isAtLast) ;
        // Test#2 - Test the methods for a list that is not empty.
        ti++ ;
        iter2.reset() ;
        iter2.toLast() ;
        isAtLast = (TRUE == iter2.atLast()) ;
        uAct = iter2() ;
        TestUtilities::createMessage(2, &msg, prefix1, Msgs[ti]) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)NULL, (void*)uAct) ;
        TestUtilities::createMessage(2, &msg, prefix2, Msgs[ti]) ;
        CPPUNIT_ASSERT_MESSAGE(msg.data(), isAtLast) ;
        // Test#3 - Test the methods for a list that is not empty when
        // the list is already in its last position.
        ti++ ;
        iter2.reset() ;
        iter2.toLast() ;
        iter2.toLast() ;
        isAtLast = (TRUE == iter2.atLast()) ;
        uAct = iter2() ;
        TestUtilities::createMessage(2, &msg, prefix1, Msgs[ti]) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)NULL, (void*)uAct) ;
        TestUtilities::createMessage(2, &msg, prefix2, Msgs[ti]) ;
        CPPUNIT_ASSERT_MESSAGE(msg.data(), isAtLast) ;
        // Test#4 - Test the methods for a list that is not empty when the
        // list has been iterated to somewhere in its middle position.
        ti++ ;
        iter2.reset() ;
        iter2() ;
        iter2() ;
        iter2.toLast() ;
        isAtLast = (TRUE == iter2.atLast()) ;
        uAct = iter2() ;
        TestUtilities::createMessage(2, &msg, prefix1, Msgs[ti]) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)NULL, (void*)uAct) ;
        TestUtilities::createMessage(2, &msg, prefix2, Msgs[ti]) ;
        CPPUNIT_ASSERT_MESSAGE(msg.data(), isAtLast) ;
    } //testFirst_And_Last

    /*!a Test case for the insertAfterPoint() method when the
    *    the list is empty
    */
    void testInsertAfterPoint_EmptyList()
    {
        const char* prefix = "Test the insertAfterPoint() method for an empty list " ;
        const char* suffix1 = ":- Verify return value" ;
        const char* suffix2 = ":- Verify that the entry has been added" ;
        string msg ;
        UtlDListIterator iter(emptyList) ;
        const UtlContainable* uReturn = iter.insertAfterPoint((UtlContainable*)commonContainables[0]) ;
        TestUtilities::createMessage(2, &msg, prefix, suffix1) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)commonContainables[0], (void*)uReturn) ;
        // After reset, the inserted entry must be the first (and only) element.
        iter.reset() ;
        UtlContainable* uAppended = iter() ;
        TestUtilities::createMessage(2, &msg, prefix, suffix2) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)commonContainables[0], (void*)uAppended) ;
    }

    /*!a Test case for the insertAfterPoint() method.
    *
    *    The test data is :-
    *     a) Insert when the iterator is the starting position
    *     b) Insert when the iterator is at mid position
    *     c) Insert when the iterator is at the last position
    *     d) Insert to an empty Iterator.
    */
    void testInsertAfterPoint()
    {
        const char* prefix = "Test the insertAfterPoint() method when " ;
        const char* Msgs[] = {\
            "the iterator is the starting position " , \
            "the iterator is at mid-position ", \
            "the iterator is at the last position " \
        } ;
        const char* suffix1 = ":- Verify return value" ;
        const char* suffix2 = ":- Verify value is inserted" ;
        const char* suffix3 = ":- Verify that previous value is not lost" ;
        UtlDListIterator iter(commonList) ;
        const UtlContainable* uReturn ;
        UtlContainable* uAppended ;
        UtlContainable* uOrig ;
        string msg ;
        UtlString newColString1("Insert at starting position") ;
        UtlInt newColInt2(101) ;
        UtlString newColString3 ("Insert at last position") ;
        UtlContainable* insertValues[] = { \
            &newColString1, &newColInt2, &newColString3 \
        };
        // The element expected to sit immediately after each insertion point.
        const UtlContainable* oldValues[] = { \
            commonContainables[0], commonContainables[1], commonContainables[5] \
        } ;
        // Since this test requires different steps for the different test data,
        // steps are executed individually rather than the regular technique of
        // iterating through the test-array loop
        //Test#1 - Verify the case when the iterator has been reset
        int ti = 0 ;
        iter.reset() ;
        uReturn = iter.insertAfterPoint(insertValues[ti]) ;
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix1) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE (msg.data(), (void*)insertValues[ti], (void*)uReturn) ;
        // The item is inserted at first position
        // old[0] is now @ pos1. old[1] is now @ pos2
        iter.reset() ;
        uAppended = iter() ;
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix2) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)insertValues[ti], (void*)uAppended) ;
        // Verify that the original item is still retained.
        uOrig = iter() ;
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix2) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)oldValues[ti], (void*)uOrig) ;
        //Test#2 - inserting at mid position
        ti = 1;
        iter.reset() ;
        iter() ; //moves cursor to 0
        iter() ; //moves cursor to 1
        iter() ; //moves cursor to 2
        // old[1] stays at pos2
        // Value is now inserted at pos3
        uReturn = iter.insertAfterPoint(insertValues[ti]) ;
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix1) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE (msg.data(), (void*)insertValues[ti], (void*)uReturn) ;
        iter.reset() ;
        iter() ; // moves cursor to 0
        iter() ; // moves cursor to 1
        // Verify that the original item is still retained.
        uOrig = iter() ;
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix3) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)oldValues[ti], (void*)uOrig) ;
        // The item is inserted just after the position.
        uAppended = iter() ; //moves cursor to pos3 and returns item at pos2
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix2) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)insertValues[ti], (void*)uAppended) ;
        // Test#3 - Now verify when the cursor is at the last position.
        ti = 2 ;
        iter.reset() ;
        iter.toLast() ;
        uReturn = iter.insertAfterPoint(insertValues[ti]) ;
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix1) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)insertValues[ti], (void*)uReturn) ;
        iter.reset() ;
        // now move the cursor all the way to the penultimate position
        for (size_t i = 0 ; i < commonList.entries() - 1; i++)
        {
            uOrig = iter() ;
        }
        // verify original is still retained.
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix3) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE(msg.data(), (void*)oldValues[ti], (void*)uOrig) ;
        uAppended = iter() ;
        TestUtilities::createMessage(3, &msg, prefix, Msgs[ti], suffix2) ;
        CPPUNIT_ASSERT_EQUAL_MESSAGE( msg.data(), (void*)insertValues[ti], (void*)uAppended) ;
    } //testInsertAfterPoint
};
// Out-of-line definitions for the fixture's static constants.
const int UtlDListIteratorTests::INDEX_NOT_EXIST = -1;
const int UtlDListIteratorTests::commonEntriesCount = 6;
// Long test string built from adjacent string literals (concatenated by the
// compiler); the "wzyz" spelling is intentional historical test data.
const char* UtlDListIteratorTests::longAlphaNumString = \
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvwzyz"
                   "abcdefghijklmnopqrstuvw" ;
const char* UtlDListIteratorTests::regularString = "This makes sense" ;
// Register the fixture with the CppUnit test registry.
CPPUNIT_TEST_SUITE_REGISTRATION(UtlDListIteratorTests);
| lgpl-2.1 |
deadcyclo/nuxeo-features | nuxeo-automation/nuxeo-automation-client/src/main/java/org/nuxeo/ecm/automation/client/AdapterFactory.java | 971 | /*
* Copyright (c) 2006-2011 Nuxeo SA (http://nuxeo.com/) and others.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* bstefanescu
*/
package org.nuxeo.ecm.automation.client;
/**
 * A factory for adapters. Adapters can be used to adapt client and session objects.
 * For example you can contribute an adapter on the session to have an API suited
 * for your needs.
 * <p>
 * To register adapters use {@link AutomationClient#registerAdapter(AdapterFactory)}.
 *
 * @author <a href="mailto:bs@nuxeo.com">Bogdan Stefanescu</a>
 */
public interface AdapterFactory<T> {

    /**
     * Adapts the given session and returns the adapter instance.
     *
     * @param session the session to adapt
     * @param clazz the requested adapter type
     * @return an instance of {@code T} adapting the given session
     */
    T getAdapter(Session session, Class<T> clazz);
}
| lgpl-2.1 |
bawn92/kurento-java | kurento-integration-tests/kurento-test/src/test/java/org/kurento/test/stability/webrtc/WebRtcStabilitySwitchRtpH264Test.java | 5165 | /*
* (C) Copyright 2015 Kurento (http://kurento.org/)
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Lesser General Public License
* (LGPL) version 2.1 which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/lgpl-2.1.html
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
*/
package org.kurento.test.stability.webrtc;
import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runners.Parameterized.Parameters;
import org.kurento.client.MediaPipeline;
import org.kurento.client.RtpEndpoint;
import org.kurento.client.WebRtcEndpoint;
import org.kurento.test.base.KurentoClientTest;
import org.kurento.test.base.StabilityTest;
import org.kurento.test.client.BrowserClient;
import org.kurento.test.client.BrowserType;
import org.kurento.test.client.Client;
import org.kurento.test.client.WebRtcChannel;
import org.kurento.test.client.WebRtcMode;
import org.kurento.test.config.BrowserScope;
import org.kurento.test.config.BrowserConfig;
import org.kurento.test.config.TestScenario;
import org.kurento.test.latency.LatencyController;
import org.kurento.test.latency.VideoTagType;
import org.kurento.test.sdp.SdpUtils;
/**
 * <strong>Description</strong>: Stability test for switching a WebRTC connected
 * to RTP performing H264 transcoding.<br/>
 * <strong>Pipeline(s)</strong>:
 * <ul>
 * <li>WebRtcEndpoint -> RtpEndpoint1</li>
 * <li>RtpEndpoint1 -> RtpEndpoint2 (RTP session)</li>
 * <li>RtpEndpoint2 -> WebRtcEndpoint</li>
 * </ul>
 * <strong>Pass criteria</strong>:
 * <ul>
 * <li>Media should be received in the remote video tag</li>
 * <li>Color change should be detected on local and remote video tags</li>
 * <li>Test fail when 3 consecutive latency errors (latency > 3sec) are detected
 * </li>
 * </ul>
 *
 * @author Boni Garcia (bgarcia@gsyc.es)
 * @since 5.1.0
 */
public class WebRtcStabilitySwitchRtpH264Test extends StabilityTest {

    /** Default play time, in minutes, when the system property is not set. */
    private static final int DEFAULT_PLAYTIME = 30; // minutes

    /** Codecs removed from the SDP offer so that H264 is negotiated. */
    private static final String[] REMOVE_CODECS = { "H263-1998", "VP8",
            "MP4V-ES" };

    public WebRtcStabilitySwitchRtpH264Test(TestScenario testScenario) {
        super(testScenario);
    }

    /**
     * Provides the single test scenario: a local Chrome browser fed with a
     * 15-second RGB test video file.
     */
    @Parameters(name = "{index}: {0}")
    public static Collection<Object[]> data() {
        String videoPath = KurentoClientTest.getPathTestFiles()
                + "/video/15sec/rgbHD.y4m";
        TestScenario test = new TestScenario();
        test.addBrowser(
                BrowserConfig.BROWSER,
                new BrowserClient.Builder().client(Client.WEBRTC)
                        .browserType(BrowserType.CHROME)
                        .scope(BrowserScope.LOCAL).video(videoPath).build());
        return Arrays.asList(new Object[][] { { test } });
    }

    @Test
    public void testWebRtcStabilitySwitchRtpH264() throws Exception {
        // Play time (minutes) is configurable via a system property.
        final int playTime = Integer.parseInt(System.getProperty(
                "test.webrtc.stability.switch.webrtc2rtp.playtime",
                String.valueOf(DEFAULT_PLAYTIME)));

        // Media Pipeline: WebRTC -> RTP1 -(RTP session)-> RTP2 -> WebRTC
        MediaPipeline mp = kurentoClient.createMediaPipeline();
        WebRtcEndpoint webRtcEndpoint = new WebRtcEndpoint.Builder(mp).build();
        RtpEndpoint rtpEndpoint1 = new RtpEndpoint.Builder(mp).build();
        RtpEndpoint rtpEndpoint2 = new RtpEndpoint.Builder(mp).build();
        webRtcEndpoint.connect(rtpEndpoint1);
        rtpEndpoint2.connect(webRtcEndpoint);

        // RTP session (rtpEndpoint1 --> rtpEndpoint2). The SDP offer is
        // mangled to strip every codec except H264, forcing transcoding.
        String sdpOffer = rtpEndpoint1.generateOffer();
        log.info("SDP offer in rtpEndpoint1\n{}", sdpOffer);

        // SDP mangling
        sdpOffer = SdpUtils.mangleSdp(sdpOffer, REMOVE_CODECS);
        log.info("SDP offer in rtpEndpoint1 after mangling\n{}", sdpOffer);

        String sdpAnswer1 = rtpEndpoint2.processOffer(sdpOffer);
        log.info("SDP answer in rtpEndpoint2\n{}", sdpAnswer1);
        String sdpAnswer2 = rtpEndpoint1.processAnswer(sdpAnswer1);
        log.info("SDP answer in rtpEndpoint1\n{}", sdpAnswer2);

        // Latency controller
        LatencyController cs = new LatencyController();

        // WebRTC negotiation in the browser
        getBrowser().subscribeEvents("playing");
        getBrowser().initWebRtc(webRtcEndpoint, WebRtcChannel.VIDEO_ONLY,
                WebRtcMode.SEND_RCV);

        // Assertion: wait for playing event in browser
        Assert.assertTrue("Not received media (timeout waiting playing event)",
                getBrowser().waitForEvent("playing"));

        // Latency assessment running in background for the configured time
        getBrowser().activateLatencyControl(VideoTagType.LOCAL.getId(),
                VideoTagType.REMOTE.getId());
        cs.checkLocalLatencyInBackground(playTime, TimeUnit.MINUTES,
                getBrowser());

        // Connect-disconnect each second for the configured play time.
        // BUGFIX: this loop previously iterated DEFAULT_PLAYTIME * 60
        // regardless of playTime, silently ignoring the
        // "test.webrtc.stability.switch.webrtc2rtp.playtime" property used
        // by the latency check above.
        for (int i = 0; i < playTime * 60; i++) {
            Thread.sleep(TimeUnit.SECONDS.toMillis(1));
            rtpEndpoint2.disconnect(webRtcEndpoint);
            rtpEndpoint2.connect(webRtcEndpoint);
        }

        // Release Media Pipeline
        mp.release();

        // Draw latency results (PNG chart and CSV file)
        cs.drawChart(getDefaultOutputFile(".png"), 500, 270);
        cs.writeCsv(getDefaultOutputFile(".csv"));
        cs.logLatencyErrorrs();
    }
}
| lgpl-2.1 |
timvideos/flumotion | flumotion/component/bouncers/base.py | 834 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
# Fully-qualified socket names used by the bouncer plug machinery.

# Socket implemented by bouncer algorithm plugs.
BOUNCER_ALGORITHM_SOCKET = (
    'flumotion.component.bouncers.algorithms.BouncerAlgorithm')

# Socket implemented by the bouncer plug itself.
BOUNCER_SOCKET = 'flumotion.component.bouncers.plug.BouncerPlug'
| lgpl-2.1 |
nate250/jrs-rest-java-client | src/test/java/com/jaspersoft/jasperserver/jaxrs/client/apiadapters/permissions/SinglePermissionRecipientRequestAdapterTest.java | 11451 | package com.jaspersoft.jasperserver.jaxrs.client.apiadapters.permissions;
import com.jaspersoft.jasperserver.dto.permissions.RepositoryPermission;
import com.jaspersoft.jasperserver.jaxrs.client.core.Callback;
import com.jaspersoft.jasperserver.jaxrs.client.core.JerseyRequest;
import com.jaspersoft.jasperserver.jaxrs.client.core.RequestExecution;
import com.jaspersoft.jasperserver.jaxrs.client.core.SessionStorage;
import com.jaspersoft.jasperserver.jaxrs.client.core.operationresult.OperationResult;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.testng.PowerMockTestCase;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import java.util.concurrent.atomic.AtomicInteger;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.MockitoAnnotations.initMocks;
import static org.powermock.api.mockito.PowerMockito.doReturn;
import static org.powermock.api.mockito.PowerMockito.mockStatic;
import static org.powermock.api.mockito.PowerMockito.spy;
import static org.powermock.api.mockito.PowerMockito.verifyPrivate;
import static org.powermock.api.mockito.PowerMockito.verifyStatic;
import static org.powermock.api.mockito.PowerMockito.when;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertSame;
/**
* Unit tests for {@link SinglePermissionRecipientRequestAdapter}
*/
@PrepareForTest({SinglePermissionRecipientRequestAdapter.class, JerseyRequest.class})
public class SinglePermissionRecipientRequestAdapterTest extends PowerMockTestCase {
@Mock
private SessionStorage storageMock;
@Mock
private JerseyRequest<RepositoryPermission> requestMock;
@Mock
private OperationResult<RepositoryPermission> resultMock;
@Mock
private RepositoryPermission permissionMock;
@BeforeMethod
public void after() {
initMocks(this);
}
@Test
public void should_return_a_proper_op_result_with_repo_permission() throws Exception {
// Given
SinglePermissionRecipientRequestAdapter adapterSpy = spy(new SinglePermissionRecipientRequestAdapter(storageMock, "resourceUri", "recipient"));
doReturn(requestMock).when(adapterSpy, "getBuilder", RepositoryPermission.class);
doReturn(resultMock).when(requestMock).get();
// When
OperationResult<RepositoryPermission> retrieved = adapterSpy.get();
// Then
verifyPrivate(adapterSpy, times(1)).invoke("getBuilder", eq(RepositoryPermission.class));
verify(requestMock, times(1)).get();
assertNotNull(retrieved);
}
@Test
public void should_create_or_update_RepositoryPermission_and_return_result_of_operation() throws Exception {
// Given
SinglePermissionRecipientRequestAdapter adapterSpy = spy(new SinglePermissionRecipientRequestAdapter(storageMock, "resourceUri", "recipient"));
doReturn(requestMock).when(adapterSpy, "getBuilder", RepositoryPermission.class);
doReturn(resultMock).when(requestMock).put(permissionMock);
// When
OperationResult<RepositoryPermission> retrieved = adapterSpy.createOrUpdate(permissionMock);
// Then
verifyPrivate(adapterSpy, times(1)).invoke("getBuilder", eq(RepositoryPermission.class));
verify(requestMock, times(1)).put(permissionMock);
assertNotNull(retrieved);
assertSame(retrieved, resultMock);
}
@Test
public void should_delete_RepositoryPermission_and_return_result() throws Exception {
// Given
OperationResult opResultMock = PowerMockito.mock(OperationResult.class);
SinglePermissionRecipientRequestAdapter adapterSpy = spy(new SinglePermissionRecipientRequestAdapter(storageMock, "resourceUri", "recipient"));
doReturn(requestMock).when(adapterSpy, "getBuilder", Object.class);
doReturn(opResultMock).when(requestMock).delete();
// When
OperationResult<RepositoryPermission> retrieved = adapterSpy.delete();
// Then
verifyPrivate(adapterSpy, times(1)).invoke("getBuilder", eq(Object.class));
verify(requestMock, times(1)).delete();
assertNotNull(retrieved);
assertSame(retrieved, opResultMock);
}
@Test
public void should_invoke_private_method_only_once() throws Exception {
// Given
mockStatic(JerseyRequest.class);
when(JerseyRequest.buildRequest(eq(storageMock), eq(RepositoryPermission.class), eq(new String[]{"/permissions", "resourceUri"}))).thenReturn(requestMock);
when(requestMock.get()).thenReturn(resultMock);
SinglePermissionRecipientRequestAdapter spy = spy(new SinglePermissionRecipientRequestAdapter(storageMock, "resourceUri", "recipient"));
// When
OperationResult<RepositoryPermission> retrieved = spy.get();
// Then
verifyStatic(times(1));
JerseyRequest.buildRequest(eq(storageMock), eq(RepositoryPermission.class), eq(new String[]{"/permissions", "resourceUri"}));
// Verify that private print is called only once.
verifyPrivate(spy, times(1)).invoke("getBuilder", RepositoryPermission.class);
// Verify that addMatrixParam print is called with the specified parameters.
verify(requestMock).addMatrixParam(eq("recipient"), eq("recipient"));
assertSame(retrieved, resultMock);
}
@Test(expectedExceptions = NullPointerException.class)
public void should_throw_NPE_exception_when_session_is_null() throws Exception {
// Given
final SessionStorage NULL_STORAGE = null;
SinglePermissionRecipientRequestAdapter spy = spy(new SinglePermissionRecipientRequestAdapter(NULL_STORAGE, "resourceUri", "recipient"));
// When
spy.get();
// Then expect NPE
}
@Test
public void should_retrieve_permission_asynchronously() throws InterruptedException {
/* Given */
PowerMockito.mockStatic(JerseyRequest.class);
PowerMockito.when(JerseyRequest.buildRequest(eq(storageMock), eq(RepositoryPermission.class), eq(new String[]{"/permissions", "resourceUri"}))).thenReturn(requestMock);
PowerMockito.doReturn(resultMock).when(requestMock).get();
SinglePermissionRecipientRequestAdapter adapterSpy = PowerMockito.spy(new SinglePermissionRecipientRequestAdapter(storageMock, "resourceUri", "recipient"));
final AtomicInteger newThreadId = new AtomicInteger();
final int currentThreadId = (int) Thread.currentThread().getId();
final Callback<OperationResult<RepositoryPermission>, Void> callback = PowerMockito.spy(new Callback<OperationResult<RepositoryPermission>, Void>() {
@Override
public Void execute(OperationResult<RepositoryPermission> data) {
newThreadId.set((int) Thread.currentThread().getId());
synchronized (this) {
this.notify();
}
return null;
}
});
PowerMockito.doReturn(null).when(callback).execute(resultMock);
/* When */
RequestExecution retrieved = adapterSpy.asyncGet(callback);
synchronized (callback) {
callback.wait(1000);
}
/* Then */
Mockito.verify(requestMock).get();
Mockito.verify(callback).execute(resultMock);
Assert.assertNotNull(retrieved);
Assert.assertNotSame(currentThreadId, newThreadId.get());
}
@Test
public void should_delete_permission_asynchronously() throws InterruptedException {
/* Given */
JerseyRequest<Object> requestMock = (JerseyRequest<Object>) PowerMockito.mock(JerseyRequest.class);
PowerMockito.mockStatic(JerseyRequest.class);
PowerMockito.when(JerseyRequest.buildRequest(eq(storageMock), eq(Object.class), eq(new String[]{"/permissions", "resourceUri"}))).thenReturn(requestMock);
PowerMockito.doReturn(resultMock).when(requestMock).delete();
SinglePermissionRecipientRequestAdapter adapterSpy = PowerMockito.spy(new SinglePermissionRecipientRequestAdapter(storageMock, "resourceUri", "recipient"));
final AtomicInteger newThreadId = new AtomicInteger();
final int currentThreadId = (int) Thread.currentThread().getId();
final Callback<OperationResult, Void> callback = PowerMockito.spy(new Callback<OperationResult, Void>() {
@Override
public Void execute(OperationResult data) {
newThreadId.set((int) Thread.currentThread().getId());
synchronized (this) {
this.notify();
}
return null;
}
});
PowerMockito.doReturn(null).when(callback).execute(resultMock);
/* When */
RequestExecution retrieved = adapterSpy.asyncDelete(callback);
synchronized (callback) {
callback.wait(1000);
}
/* Then */
Mockito.verify(requestMock).delete();
Mockito.verify(callback).execute(resultMock);
Assert.assertNotNull(retrieved);
Assert.assertNotSame(currentThreadId, newThreadId.get());
}
@Test
public void should_create_resource_asynchronously() throws InterruptedException {
PowerMockito.mockStatic(JerseyRequest.class);
PowerMockito.when(JerseyRequest.buildRequest(eq(storageMock), eq(RepositoryPermission.class), eq(new String[]{"/permissions", "resourceUri"}))).thenReturn(requestMock);
PowerMockito.doReturn(resultMock).when(requestMock).put(permissionMock);
SinglePermissionRecipientRequestAdapter adapterSpy = PowerMockito.spy(new SinglePermissionRecipientRequestAdapter(storageMock, "resourceUri", "recipient"));
final AtomicInteger newThreadId = new AtomicInteger();
final int currentThreadId = (int) Thread.currentThread().getId();
final Callback<OperationResult<RepositoryPermission>, Void> callback = PowerMockito.spy(new Callback<OperationResult<RepositoryPermission>, Void>() {
@Override
public Void execute(OperationResult<RepositoryPermission> data) {
newThreadId.set((int) Thread.currentThread().getId());
synchronized (this) {
this.notify();
}
return null;
}
});
PowerMockito.doReturn(null).when(callback).execute(resultMock);
/* When */
RequestExecution retrieved = adapterSpy.asyncCreateOrUpdate(permissionMock, callback);
/* Wait */
synchronized (callback) {
callback.wait(1000);
}
/* Then */
Assert.assertNotNull(retrieved);
Assert.assertNotSame(currentThreadId, newThreadId.get());
Mockito.verify(requestMock).put(permissionMock);
Mockito.verify(requestMock).addMatrixParam(anyString(), anyString());
Mockito.verify(callback).execute(resultMock);
}
@AfterMethod
public void before() {
reset(requestMock, resultMock, storageMock, permissionMock);
}
} | lgpl-3.0 |
eenbp/OpenNaaS-0.14-Marketplace | utils/old-cim/NetworkPortCapabilities.java | 7031 | /**
* This file was auto-generated by mofcomp -j version 1.0.0 on Wed Jan 12
* 09:21:06 CET 2011.
*/
package org.opennaas.extensions.router.model;
import java.io.*;
import java.lang.Exception;
/**
 * Bean wrapper (auto-generated by mofcomp) for the CIM class
 * {@code NetworkPortCapabilities}, which describes the capabilities
 * supported for properties that are configurable in {@code NetworkPort}.
 */
public class NetworkPortCapabilities extends LogicalPortCapabilities
        implements Serializable {

    /**
     * Creates an empty capabilities bean; all properties start with their
     * Java default values.
     */
    public NetworkPortCapabilities() {
    };

    // Whether NetworkPort.Speed can be configured.
    private boolean speedConfigurable;

    /**
     * Returns whether the port speed can be configured.
     *
     * @return current {@code speedConfigurable} value
     */
    public boolean isSpeedConfigurable() {
        return this.speedConfigurable;
    } // getSpeedConfigurable

    /**
     * Sets whether the port speed can be configured.
     *
     * @param speedConfigurable new {@code speedConfigurable} value
     */
    public void setSpeedConfigurable(boolean speedConfigurable) {
        this.speedConfigurable = speedConfigurable;
    } // setSpeedConfigurable

    // Port speeds configurable on NetworkPort.Speed.
    private long[] portSpeedsSupported;

    /**
     * Returns the list of supported port speeds that can be configured on
     * {@code NetworkPort.Speed}.
     *
     * @return current {@code portSpeedsSupported} value
     */
    public long[] getPortSpeedsSupported() {
        return this.portSpeedsSupported;
    } // getPortSpeedsSupported

    /**
     * Sets the list of supported port speeds that can be configured on
     * {@code NetworkPort.Speed}.
     *
     * @param portSpeedsSupported new {@code portSpeedsSupported} value
     */
    public void setPortSpeedsSupported(long[] portSpeedsSupported) {
        this.portSpeedsSupported = portSpeedsSupported;
    } // setPortSpeedsSupported

    // Whether the port can be configured to connect to specific networks.
    private boolean networkIDsConfigurable;

    /**
     * Returns whether the NetworkPort can be configured to connect to
     * specific networks.
     *
     * @return current {@code networkIDsConfigurable} value
     */
    public boolean isNetworkIDsConfigurable() {
        return this.networkIDsConfigurable;
    } // getNetworkIDsConfigurable

    /**
     * Sets whether the NetworkPort can be configured to connect to specific
     * networks.
     *
     * @param networkIDsConfigurable new {@code networkIDsConfigurable} value
     */
    public void setNetworkIDsConfigurable(boolean networkIDsConfigurable) {
        this.networkIDsConfigurable = networkIDsConfigurable;
    } // setNetworkIDsConfigurable

    /**
     * ValueMap/Values constants for the {@code NetworkIDsFormat} qualified
     * property.
     */
    public enum NetworkIDsFormat {
        VIRTUAL_FABRIC_ID,
        DMTF_RESERVED,
        VENDOR_RESERVED
    }

    private NetworkIDsFormat networkIDsFormat;

    /**
     * Returns the format expected to populate the NetworkIds for the
     * associated NetworkPortSettings.
     *
     * @return current {@code networkIDsFormat} value
     */
    public NetworkIDsFormat getNetworkIDsFormat() {
        return this.networkIDsFormat;
    } // getNetworkIDsFormat

    /**
     * Sets the format expected to populate the NetworkIds for the associated
     * NetworkPortSettings.
     *
     * @param networkIDsFormat new {@code networkIDsFormat} value
     */
    public void setNetworkIDsFormat(NetworkIDsFormat networkIDsFormat) {
        this.networkIDsFormat = networkIDsFormat;
    } // setNetworkIDsFormat

    /**
     * ValueMap/Values constants for the {@code LinkTechnologiesSupported}
     * qualified property.
     */
    public enum LinkTechnologiesSupported {
        ETHERNET,
        IB,
        FC,
        FDDI,
        ATM,
        TOKEN_RING,
        FRAME_RELAY,
        INFRARED,
        BLUETOOTH,
        WIRELESS_LAN,
        DMTF_RESERVED
    }

    private LinkTechnologiesSupported linkTechnologiesSupported;

    /**
     * Returns the link technologies supported by the NetworkPort.
     *
     * @return current {@code linkTechnologiesSupported} value
     */
    public LinkTechnologiesSupported getLinkTechnologiesSupported() {
        return this.linkTechnologiesSupported;
    } // getLinkTechnologiesSupported

    /**
     * Sets the link technologies supported by the NetworkPort.
     *
     * @param linkTechnologiesSupported new {@code linkTechnologiesSupported} value
     */
    public void setLinkTechnologiesSupported(LinkTechnologiesSupported
            linkTechnologiesSupported) {
        this.linkTechnologiesSupported = linkTechnologiesSupported;
    } // setLinkTechnologiesSupported
} // Class NetworkPortCapabilities
| lgpl-3.0 |
David-Desmaisons/MVVM.CEF.Glue | Tests/JavascriptFramework/Vuejs/Vue.Navigation.ChromiumFx.Tests/DoubleNavigation_Vue_Cfx_Tests.cs | 464 | using Tests.Universal.NavigationTests;
using Vue.Navigation.ChromiumFx.Tests.Infra;
using Xunit;
using Xunit.Abstractions;
namespace Vue.Navigation.ChromiumFx.Tests.Tests
{
/// <summary>
/// Runs the shared double-navigation test suite against the Vue.js
/// framework hosted in a ChromiumFx (CEF) window.
/// </summary>
[Collection("Cfx Window Integrated")]
public class DoubleNavigation_Vue_Cfx_Tests : DoubleNavigationTests
{
    /// <summary>
    /// Forwards the shared window context and the xUnit output helper to
    /// the base test suite.
    /// </summary>
    public DoubleNavigation_Vue_Cfx_Tests(CfxVueContext context, ITestOutputHelper testOutputHelper) : base(context, testOutputHelper)
    {
    }
}
}
| lgpl-3.0 |
TrimbleSolutionsCorporation/VSSonarQubeExtension | VSSonarExtensionUi/Model/Helpers/BindingProxy.cs | 2705 | // --------------------------------------------------------------------------------------------------------------------
// <copyright file="BindingProxy.cs" company="Copyright © 2014 Tekla Corporation. Tekla is a Trimble Company">
// Copyright (C) 2014 [Jorge Costa, Jorge.Costa@tekla.com]
// </copyright>
// --------------------------------------------------------------------------------------------------------------------
// This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free
// Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
// --------------------------------------------------------------------------------------------------------------------
namespace VSSonarExtensionUi.Model.Helpers
{
using System.Windows;
/// <summary>
/// The binding proxy.
/// </summary>
/// <summary>
/// A <see cref="Freezable"/> that exposes a single <see cref="Data"/>
/// dependency property, allowing XAML bindings to carry a value (such as a
/// DataContext) into places outside the visual tree.
/// </summary>
public class BindingProxy : Freezable
{
    /// <summary>
    /// Identifies the <see cref="Data"/> dependency property (enables
    /// animation, styling, binding, etc.).
    /// </summary>
    public static readonly DependencyProperty DataProperty =
        DependencyProperty.Register(
            "Data", typeof(object), typeof(BindingProxy), new UIPropertyMetadata(null));

    /// <summary>
    /// Gets or sets the payload exposed to bindings.
    /// </summary>
    public object Data
    {
        get { return GetValue(DataProperty); }
        set { SetValue(DataProperty, value); }
    }

    /// <summary>
    /// Supplies a fresh instance when WPF needs to clone this Freezable.
    /// </summary>
    /// <returns>A new <see cref="BindingProxy"/>.</returns>
    protected override Freezable CreateInstanceCore()
    {
        return new BindingProxy();
    }
}
} | lgpl-3.0 |
whilei/go-ethereum | eth/backend_test.go | 2715 | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"math/big"
"testing"
"github.com/ethereumproject/go-ethereum/common"
"github.com/ethereumproject/go-ethereum/core"
"github.com/ethereumproject/go-ethereum/core/types"
"github.com/ethereumproject/go-ethereum/core/vm"
"github.com/ethereumproject/go-ethereum/ethdb"
)
// TestMipmapUpgrade checks that addMipmapBloomBins back-fills MIPMap bloom
// filters for receipts already stored in the database and records the
// "setting-mipmap-version" schema marker.
func TestMipmapUpgrade(t *testing.T) {
	memDB, _ := ethdb.NewMemDatabase()
	logAddr := common.BytesToAddress([]byte("jeff"))
	genesisBlock := core.WriteGenesisBlockForTesting(memDB)

	// Build a 10-block chain; blocks 1 and 2 each carry one receipt with a
	// log for logAddr, stored directly via WriteReceipts.
	blocks, blockReceipts := core.GenerateChain(core.DefaultConfigMorden.ChainConfig, genesisBlock, memDB, 10, func(i int, gen *core.BlockGen) {
		var generated types.Receipts
		switch i {
		case 1:
			rcpt := types.NewReceipt(nil, new(big.Int))
			rcpt.Logs = vm.Logs{&vm.Log{Address: logAddr}}
			gen.AddUncheckedReceipt(rcpt)
			generated = types.Receipts{rcpt}
		case 2:
			rcpt := types.NewReceipt(nil, new(big.Int))
			rcpt.Logs = vm.Logs{&vm.Log{Address: logAddr}}
			gen.AddUncheckedReceipt(rcpt)
			generated = types.Receipts{rcpt}
		}
		// store the receipts
		if err := core.WriteReceipts(memDB, generated); err != nil {
			t.Fatal(err)
		}
	})

	// Persist the chain, canonical mapping, head pointer and per-block receipts.
	for idx, blk := range blocks {
		core.WriteBlock(memDB, blk)
		if err := core.WriteCanonicalHash(memDB, blk.Hash(), blk.NumberU64()); err != nil {
			t.Fatalf("failed to insert block number: %v", err)
		}
		if err := core.WriteHeadBlockHash(memDB, blk.Hash()); err != nil {
			t.Fatalf("failed to insert block number: %v", err)
		}
		if err := core.WriteBlockReceipts(memDB, blk.Hash(), blockReceipts[idx]); err != nil {
			t.Fatal("error writing block receipts:", err)
		}
	}

	// Run the upgrade under test.
	if err := addMipmapBloomBins(memDB); err != nil {
		t.Fatal(err)
	}

	// The coarsest MIPMap level must now contain a non-empty bloom for block 1.
	bloom := core.GetMipmapBloom(memDB, 1, core.MIPMapLevels[0])
	if (bloom == types.Bloom{}) {
		t.Error("got empty bloom filter")
	}

	// The upgrade must record its version marker.
	if version, _ := memDB.Get([]byte("setting-mipmap-version")); len(version) == 0 {
		t.Error("setting-mipmap-version not written to database")
	}
}
| lgpl-3.0 |
kidaa/Awakening-Core3 | bin/scripts/commands/creatureAreaPoison.lua | 2371 | --Copyright (C) 2007 <SWGEmu>
--This File is part of Core3.
--This program is free software; you can redistribute
--it and/or modify it under the terms of the GNU Lesser
--General Public License as published by the Free Software
--Foundation; either version 2 of the License,
--or (at your option) any later version.
--This program is distributed in the hope that it will be useful,
--but WITHOUT ANY WARRANTY; without even the implied warranty of
--MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
--See the GNU Lesser General Public License for
--more details.
--You should have received a copy of the GNU Lesser General
--Public License along with this program; if not, write to
--the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
--Linking Engine3 statically or dynamically with other modules
--is making a combined work based on Engine3.
--Thus, the terms and conditions of the GNU Lesser General Public License
--cover the whole combination.
--In addition, as a special exception, the copyright holders of Engine3
--give you permission to combine Engine3 program with free software
--programs or libraries that are released under the GNU LGPL and with
--code included in the standard release of Core3 under the GNU LGPL
--license (or modified versions of such code, with unchanged license).
--You may copy and distribute such a system following the terms of the
--GNU LGPL for Engine3 and the licenses of the other code concerned,
--provided that you include the source code of that other code when
--and as the GNU LGPL requires distribution of source code.
--Note that people who make modified versions of Engine3 are not obligated
--to grant this special exception for their modified versions;
--it is their choice whether to do so. The GNU Lesser General Public License
--gives permission to release a modified version without this exception;
--this exception also makes it possible to release a modified version
--which carries forward this exception.
-- Command definition for the creature ability "creatureareapoison":
-- an area attack that applies a poison damage-over-time effect.
CreatureAreaPoisonCommand = {
	name = "creatureareapoison",
	combatSpam = "attack",

	-- Area-of-effect action with a 25 m radius.
	areaAction = true,
	areaRange = 25,

	dotEffects = {
		-- Positional arguments follow the engine's DotEffect(...) signature;
		-- the values are copied verbatim from the original definition.
		-- NOTE(review): HEALTH presumably selects the affected pool and the
		-- listed attributes the resistance checks -- verify against DotEffect.
		DotEffect(
			POISONED_EFFECT,
			{ "resistance_poison", "poison_disease_resist" },
			HEALTH,
			true,
			125,
			50,
			50,
			120
		)
	}
}

AddCommand(CreatureAreaPoisonCommand)
| lgpl-3.0 |
aurelieladier/openturns | python/doc/pyplots/VisualTest_DrawQQplot.py | 257 | import openturns as ot
from openturns.viewer import View
# Fix the RNG seed so the generated figure is reproducible across doc builds.
ot.RandomGenerator.SetSeed(0)
sample_size = 100
distribution = ot.Normal(1)
observations = distribution.getSample(sample_size)
# QQ-plot of the sample against the distribution it was drawn from.
graph = ot.VisualTest_DrawQQplot(observations, distribution)
View(graph, figure_kwargs={'figsize': (4.5, 4.5)})
| lgpl-3.0 |
JerryCao1985/apitest | src/solutions/dynamicstreamingsoln.cpp | 63 | #include "pch.h"
#include "solutions/dynamicstreamingsoln.h"
| unlicense |
persandstrom/home-assistant | homeassistant/components/device_tracker/tesla.py | 1787 | """
Support for the Tesla platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.tesla/
"""
import logging
from homeassistant.components.tesla import DOMAIN as TESLA_DOMAIN
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['tesla']
def setup_scanner(hass, config, see, discovery_info=None):
    """Set up the Tesla device tracker platform."""
    # Instantiating the tracker registers its periodic update callback;
    # the instance itself does not need to be retained here.
    tracked_devices = hass.data[TESLA_DOMAIN]['devices']['devices_tracker']
    TeslaDeviceTracker(hass, config, see, tracked_devices)
    return True
class TeslaDeviceTracker:
    """A class representing a Tesla device."""
    def __init__(self, hass, config, see, tesla_devices):
        """Initialize the Tesla device scanner.

        Runs one update immediately, then registers _update_info to fire
        at seconds 0 and 30 of every minute (UTC).
        """
        # NOTE: config is accepted to match the platform signature but unused.
        self.hass = hass
        self.see = see
        self.devices = tesla_devices
        self._update_info()
        track_utc_time_change(
            self.hass, self._update_info, second=range(0, 60, 30))
    def _update_info(self, now=None):
        """Update the device info.

        now is the scheduled callback time (unused); defaulting it to None
        also allows the direct call made from __init__.
        """
        for device in self.devices:
            # device objects come from the tesla component; update() refreshes
            # state from the remote API -- presumably blocking, confirm upstream
            device.update()
            name = device.name
            _LOGGER.debug("Updating device position: %s", name)
            dev_id = slugify(device.uniq_name)
            location = device.get_location()
            # devices without a reported location are skipped this cycle
            if location:
                lat = location['latitude']
                lon = location['longitude']
                attrs = {
                    'trackr_id': dev_id,
                    'id': dev_id,
                    'name': name
                }
                self.see(
                    dev_id=dev_id, host_name=name,
                    gps=(lat, lon), attributes=attrs
                )
| apache-2.0 |
NightOwl888/lucenenet | src/Lucene.Net.Analysis.Kuromoji/TokenAttributes/BaseFormAttributeImpl.cs | 1870 | using Lucene.Net.Util;
namespace Lucene.Net.Analysis.Ja.TokenAttributes
{
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/// <summary>
/// Attribute for <see cref="Token.GetBaseForm()"/>.
/// </summary>
public class BaseFormAttribute : Attribute, IBaseFormAttribute // LUCENENET specific: Not implementing ICloneable per Microsoft's recommendation
{
private Token token;
public virtual string GetBaseForm()
{
return token?.GetBaseForm();
}
public virtual void SetToken(Token token)
{
this.token = token;
}
public override void Clear()
{
token = null;
}
public override void CopyTo(IAttribute target)
{
BaseFormAttribute t = (BaseFormAttribute)target;
t.SetToken(token);
}
public override void ReflectWith(IAttributeReflector reflector)
{
reflector.Reflect(typeof(BaseFormAttribute), "baseForm", GetBaseForm());
}
}
}
| apache-2.0 |
qiulim/myBatis-framework-4eclipse | src/test/java/org/apache/ibatis/submitted/usesjava8/default_method/DefaultMethodTest.java | 2675 | /**
* Copyright 2009-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ibatis.submitted.usesjava8.default_method;
import static org.junit.Assert.*;
import java.io.Reader;
import java.sql.Connection;
import org.apache.ibatis.io.Resources;
import org.apache.ibatis.jdbc.ScriptRunner;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.session.SqlSessionFactoryBuilder;
import org.apache.ibatis.submitted.usesjava8.default_method.Mapper.SubMapper;
import org.junit.BeforeClass;
import org.junit.Test;
public class DefaultMethodTest {

  private static SqlSessionFactory sqlSessionFactory;

  @BeforeClass
  public static void setUp() throws Exception {
    // create an SqlSessionFactory; try-with-resources guarantees the config
    // reader is closed even if the factory build throws (the old code leaked it)
    try (Reader reader = Resources.getResourceAsReader(
        "org/apache/ibatis/submitted/usesjava8/default_method/mybatis-config.xml")) {
      sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
    }

    // populate the in-memory database; session and script reader are closed on all paths
    try (SqlSession session = sqlSessionFactory.openSession();
        Reader reader = Resources.getResourceAsReader(
            "org/apache/ibatis/submitted/usesjava8/default_method/CreateDB.sql")) {
      Connection conn = session.getConnection();
      ScriptRunner runner = new ScriptRunner(conn);
      runner.setLogWriter(null);
      runner.runScript(reader);
    }
  }

  @Test
  public void shouldInvokeDefaultMethod() {
    // default methods declared on a mapper interface must be invoked directly,
    // not treated as mapped statements
    try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
      Mapper mapper = sqlSession.getMapper(Mapper.class);
      User user = mapper.defaultGetUser(1);
      assertEquals("User1", user.getName());
    }
  }

  @Test
  public void shouldInvokeDefaultMethodOfSubclass() {
    // the same dispatch must work when the default method lives on a sub-interface
    try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
      SubMapper mapper = sqlSession.getMapper(SubMapper.class);
      User user = mapper.defaultGetUser("User1", 1);
      assertEquals("User1", user.getName());
    }
  }
}
| apache-2.0 |
google-code-export/elmah-loganalyzer | src/ElmahLogAnalyzer.Core/Domain/ICsvParser.cs | 201 | using System;
using System.Collections.Generic;
namespace ElmahLogAnalyzer.Core.Domain
{
	public interface ICsvParser
	{
		/// <summary>
		/// Parses raw CSV text into (URI, timestamp) pairs, one per parsed row.
		/// NOTE(review): column semantics are defined by the implementations --
		/// presumably a URL column paired with a date column; confirm there.
		/// </summary>
		IEnumerable<KeyValuePair<Uri, DateTime>> Parse(string content);
	}
} | apache-2.0 |
pickettd/code-dot-org | apps/src/craft/game/CommandQueue/CommandQueue.js | 2421 | import BaseCommand from "./BaseCommand";
import CommandState from "./CommandState.js";
export default class CommandQueue {
  /**
   * Runs queued commands one at a time, in FIFO order.
   * @param {Object} gameController controller providing `game` and `DEBUG`
   */
  constructor(gameController) {
    this.gameController = gameController;
    this.game = gameController.game;
    this.reset();
  }

  /**
   * Queue a command. While a "while command" queue is active, commands are
   * routed into that nested queue instead of this one.
   */
  addCommand(command) {
    const nested = this.whileCommandQueue;
    if (nested) {
      nested.addCommand(command);
    } else {
      this.commandList_.push(command);
    }
  }

  setWhileCommandInsertState(queue) {
    this.whileCommandQueue = queue;
  }

  /** Start processing queued commands on subsequent ticks. */
  begin() {
    this.state = CommandState.WORKING;
    if (this.gameController.DEBUG) {
      console.log("Debug Queue: BEGIN");
    }
  }

  /** Drop all queued commands and return to the NOT_STARTED state. */
  reset() {
    this.state = CommandState.NOT_STARTED;
    this.currentCommand = null;
    this.commandList_ = [];
    if (this.whileCommandQueue) {
      this.whileCommandQueue.reset();
    }
    this.whileCommandQueue = null;
  }

  /** Advance the current command by one tick, promoting the next as needed. */
  tick() {
    if (this.state !== CommandState.WORKING) {
      return;
    }
    if (!this.currentCommand) {
      if (this.commandList_.length === 0) {
        // nothing left to run: the whole queue has succeeded
        this.state = CommandState.SUCCESS;
        return;
      }
      this.currentCommand = this.commandList_.shift();
    }
    const command = this.currentCommand;
    if (command.isStarted()) {
      command.tick();
    } else {
      command.begin();
    }
    // promote the next command, or fail the queue, based on this tick's outcome
    if (command.isSucceeded()) {
      this.currentCommand = null;
    } else if (command.isFailed()) {
      this.state = CommandState.FAILURE;
    }
  }

  getLength() {
    return this.commandList_ ? this.commandList_.length : 0;
  }

  /**
   * Whether the queue has started working.
   * @returns {boolean}
   */
  isStarted() {
    return this.state !== CommandState.NOT_STARTED;
  }

  /**
   * Whether the queue has succeeded or failed, and is
   * finished with its work.
   * @returns {boolean}
   */
  isFinished() {
    return this.isSucceeded() || this.isFailed();
  }

  /**
   * Whether the queue has finished its work and reported success.
   * @returns {boolean}
   */
  isSucceeded() {
    return this.state === CommandState.SUCCESS;
  }

  /**
   * Whether the queue has finished its work and reported failure.
   * @returns {boolean}
   */
  isFailed() {
    return this.state === CommandState.FAILURE;
  }
}
| apache-2.0 |
lihongqiang/kettle-4.4.0-stable | src-ui/org/pentaho/di/ui/job/entries/createfile/JobEntryCreateFileDialog.java | 12137 | /*******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2012 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.ui.job.entries.createfile;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.ModifyEvent;
import org.eclipse.swt.events.ModifyListener;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.events.ShellAdapter;
import org.eclipse.swt.events.ShellEvent;
import org.eclipse.swt.layout.FormAttachment;
import org.eclipse.swt.layout.FormData;
import org.eclipse.swt.layout.FormLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.FileDialog;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.MessageBox;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.swt.widgets.Text;
import org.pentaho.di.core.Const;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.job.JobMeta;
import org.pentaho.di.job.entries.createfile.JobEntryCreateFile;
import org.pentaho.di.job.entry.JobEntryDialogInterface;
import org.pentaho.di.job.entry.JobEntryInterface;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.ui.core.gui.WindowProperty;
import org.pentaho.di.ui.core.widget.TextVar;
import org.pentaho.di.ui.job.dialog.JobDialog;
import org.pentaho.di.ui.job.entry.JobEntryDialog;
import org.pentaho.di.ui.trans.step.BaseStepDialog;
/**
* This dialog allows you to edit the Create File job entry settings.
*
* @author Sven Boden
* @since 28-01-2007
*/
public class JobEntryCreateFileDialog extends JobEntryDialog implements JobEntryDialogInterface
{
private static Class<?> PKG = JobEntryCreateFile.class; // for i18n purposes, needed by Translator2!! $NON-NLS-1$
private static final String[] FILETYPES = new String[] {
BaseMessages.getString(PKG, "JobCreateFile.Filetype.All") };
private Label wlName;
private Text wName;
private FormData fdlName, fdName;
private Label wlFilename;
private Button wbFilename;
private TextVar wFilename;
private FormData fdlFilename, fdbFilename, fdFilename;
private Label wlAbortExists;
private Button wAbortExists;
private FormData fdlAbortExists, fdAbortExists;
private Label wlAddFilenameToResult;
private Button wAddFilenameToResult;
private FormData fdlAddFilenameToResult, fdAddFilenameToResult;
private Button wOK, wCancel;
private Listener lsOK, lsCancel;
private JobEntryCreateFile jobEntry;
private Shell shell;
private SelectionAdapter lsDef;
private boolean changed;
    /**
     * Creates the dialog for the given "Create File" job entry.
     * Falls back to a localized default name when the entry has none yet.
     */
    public JobEntryCreateFileDialog(Shell parent, JobEntryInterface jobEntryInt, Repository rep, JobMeta jobMeta)
    {
        super(parent, jobEntryInt, rep, jobMeta);
        jobEntry = (JobEntryCreateFile) jobEntryInt;
        if (this.jobEntry.getName() == null)
            this.jobEntry.setName(BaseMessages.getString(PKG, "JobCreateFile.Name.Default"));
    }
    /**
     * Builds and shows the dialog, then runs the SWT event loop until it is
     * closed. Returns the (possibly updated) job entry, or the entry nulled
     * by cancel() when the user aborted.
     */
    public JobEntryInterface open()
    {
        Shell parent = getParent();
        Display display = parent.getDisplay();
        shell = new Shell(parent, props.getJobsDialogStyle());
        props.setLook(shell);
        JobDialog.setShellImage(shell, jobEntry);
        // shared listener: any edit marks the job entry as changed
        ModifyListener lsMod = new ModifyListener()
        {
            public void modifyText(ModifyEvent e)
            {
                jobEntry.setChanged();
            }
        };
        // remember the original changed-flag so cancel() can restore it
        changed = jobEntry.hasChanged();
        FormLayout formLayout = new FormLayout ();
        formLayout.marginWidth = Const.FORM_MARGIN;
        formLayout.marginHeight = Const.FORM_MARGIN;
        shell.setLayout(formLayout);
        shell.setText(BaseMessages.getString(PKG, "JobCreateFile.Title"));
        int middle = props.getMiddlePct();
        int margin = Const.MARGIN;
        // Name line
        wlName=new Label(shell, SWT.RIGHT);
        wlName.setText(BaseMessages.getString(PKG, "JobCreateFile.Name.Label"));
        props.setLook(wlName);
        fdlName=new FormData();
        fdlName.left = new FormAttachment(0, 0);
        fdlName.right= new FormAttachment(middle, -margin);
        fdlName.top = new FormAttachment(0, margin);
        wlName.setLayoutData(fdlName);
        wName=new Text(shell, SWT.SINGLE | SWT.LEFT | SWT.BORDER);
        props.setLook(wName);
        wName.addModifyListener(lsMod);
        fdName=new FormData();
        fdName.left = new FormAttachment(middle, 0);
        fdName.top = new FormAttachment(0, margin);
        fdName.right= new FormAttachment(100, 0);
        wName.setLayoutData(fdName);
        // Filename line
        wlFilename=new Label(shell, SWT.RIGHT);
        wlFilename.setText(BaseMessages.getString(PKG, "JobCreateFile.Filename.Label"));
        props.setLook(wlFilename);
        fdlFilename=new FormData();
        fdlFilename.left = new FormAttachment(0, 0);
        fdlFilename.top = new FormAttachment(wName, margin);
        fdlFilename.right= new FormAttachment(middle, -margin);
        wlFilename.setLayoutData(fdlFilename);
        wbFilename=new Button(shell, SWT.PUSH| SWT.CENTER);
        props.setLook(wbFilename);
        wbFilename.setText(BaseMessages.getString(PKG, "System.Button.Browse"));
        fdbFilename=new FormData();
        fdbFilename.right= new FormAttachment(100, 0);
        fdbFilename.top = new FormAttachment(wName, 0);
        wbFilename.setLayoutData(fdbFilename);
        wFilename=new TextVar(jobMeta, shell, SWT.SINGLE | SWT.LEFT | SWT.BORDER);
        props.setLook(wFilename);
        wFilename.addModifyListener(lsMod);
        fdFilename=new FormData();
        fdFilename.left = new FormAttachment(middle, 0);
        fdFilename.top = new FormAttachment(wName, margin);
        fdFilename.right= new FormAttachment(wbFilename, -margin);
        wFilename.setLayoutData(fdFilename);
        // Whenever something changes, set the tooltip to the expanded version:
        wFilename.addModifyListener(new ModifyListener()
            {
                public void modifyText(ModifyEvent e)
                {
                    wFilename.setToolTipText(jobMeta.environmentSubstitute( wFilename.getText() ) );
                }
            }
        );
        // Browse button opens a save-file dialog seeded with the current (expanded) filename
        wbFilename.addSelectionListener
        (
            new SelectionAdapter()
            {
                public void widgetSelected(SelectionEvent e)
                {
                    FileDialog dialog = new FileDialog(shell, SWT.SAVE);
                    dialog.setFilterExtensions(new String[] {"*"});
                    if (wFilename.getText()!=null)
                    {
                        dialog.setFileName(jobMeta.environmentSubstitute(wFilename.getText()) );
                    }
                    dialog.setFilterNames(FILETYPES);
                    if (dialog.open()!=null)
                    {
                        wFilename.setText(dialog.getFilterPath()+Const.FILE_SEPARATOR+dialog.getFileName());
                    }
                }
            }
        );
        // "Fail if file exists" checkbox
        wlAbortExists = new Label(shell, SWT.RIGHT);
        wlAbortExists.setText(BaseMessages.getString(PKG, "JobCreateFile.FailIfExists.Label"));
        props.setLook(wlAbortExists);
        fdlAbortExists = new FormData();
        fdlAbortExists.left = new FormAttachment(0, 0);
        fdlAbortExists.top = new FormAttachment(wFilename, margin);
        fdlAbortExists.right = new FormAttachment(middle, -margin);
        wlAbortExists.setLayoutData(fdlAbortExists);
        wAbortExists = new Button(shell, SWT.CHECK);
        props.setLook(wAbortExists);
        wAbortExists.setToolTipText(BaseMessages.getString(PKG, "JobCreateFile.FailIfExists.Tooltip"));
        fdAbortExists = new FormData();
        fdAbortExists.left = new FormAttachment(middle, 0);
        fdAbortExists.top = new FormAttachment(wFilename, margin);
        fdAbortExists.right = new FormAttachment(100, 0);
        wAbortExists.setLayoutData(fdAbortExists);
        wAbortExists.addSelectionListener(new SelectionAdapter()
        {
            public void widgetSelected(SelectionEvent e)
            {
                jobEntry.setChanged();
            }
        });
        // Add filenames to result filenames...
        wlAddFilenameToResult = new Label(shell, SWT.RIGHT);
        wlAddFilenameToResult.setText(BaseMessages.getString(PKG, "JobCreateFile.AddFilenameToResult.Label"));
        props.setLook(wlAddFilenameToResult);
        fdlAddFilenameToResult = new FormData();
        fdlAddFilenameToResult.left = new FormAttachment(0, 0);
        fdlAddFilenameToResult.top = new FormAttachment(wAbortExists, margin);
        fdlAddFilenameToResult.right = new FormAttachment(middle, -margin);
        wlAddFilenameToResult.setLayoutData(fdlAddFilenameToResult);
        wAddFilenameToResult = new Button(shell, SWT.CHECK);
        wAddFilenameToResult.setToolTipText(BaseMessages.getString(PKG, "JobCreateFile.AddFilenameToResult.Tooltip"));
        props.setLook(wAddFilenameToResult);
        fdAddFilenameToResult = new FormData();
        fdAddFilenameToResult.left = new FormAttachment(middle, 0);
        fdAddFilenameToResult.top = new FormAttachment(wAbortExists, margin);
        fdAddFilenameToResult.right = new FormAttachment(100, 0);
        wAddFilenameToResult.setLayoutData(fdAddFilenameToResult);
        // OK / Cancel buttons
        wOK = new Button(shell, SWT.PUSH);
        wOK.setText(BaseMessages.getString(PKG, "System.Button.OK"));
        wCancel = new Button(shell, SWT.PUSH);
        wCancel.setText(BaseMessages.getString(PKG, "System.Button.Cancel"));
        BaseStepDialog.positionBottomButtons(shell, new Button[] { wOK, wCancel }, margin, wAddFilenameToResult);
        // Add listeners
        lsCancel = new Listener() { public void handleEvent(Event e) { cancel(); } };
        lsOK = new Listener() { public void handleEvent(Event e) { ok(); } };
        wCancel.addListener(SWT.Selection, lsCancel);
        wOK.addListener    (SWT.Selection, lsOK    );
        // pressing Enter in a text field acts as OK
        lsDef=new SelectionAdapter() { public void widgetDefaultSelected(SelectionEvent e) { ok(); } };
        wName.addSelectionListener( lsDef );
        wFilename.addSelectionListener( lsDef );
        // Detect X or ALT-F4 or something that kills this window...
        shell.addShellListener( new ShellAdapter() { public void shellClosed(ShellEvent e) { cancel(); } } );
        getData();
        BaseStepDialog.setSize(shell);
        shell.open();
        // modal event loop: run until the dialog is disposed by ok()/cancel()
        while (!shell.isDisposed())
        {
            if (!display.readAndDispatch()) display.sleep();
        }
        return jobEntry;
    }
public void dispose()
{
WindowProperty winprop = new WindowProperty(shell);
props.setScreen(winprop);
shell.dispose();
}
/**
* Copy information from the meta-data input to the dialog fields.
*/
public void getData()
{
if (jobEntry.getName() != null) wName.setText( jobEntry.getName() );
wName.selectAll();
if (jobEntry.getFilename()!= null) wFilename.setText( jobEntry.getFilename() );
wAbortExists.setSelection(jobEntry.isFailIfFileExists());
wAddFilenameToResult.setSelection(jobEntry.isAddFilenameToResult());
}
    /**
     * Aborts the dialog: restores the entry's original changed-flag and
     * signals "no result" to the caller by nulling the returned entry.
     */
    private void cancel()
    {
        jobEntry.setChanged(changed);
        jobEntry=null;
        dispose();
    }
private void ok()
{
if(Const.isEmpty(wName.getText()))
{
MessageBox mb = new MessageBox(shell, SWT.OK | SWT.ICON_ERROR );
mb.setText(BaseMessages.getString(PKG, "System.StepJobEntryNameMissing.Title"));
mb.setMessage(BaseMessages.getString(PKG, "System.JobEntryNameMissing.Msg"));
mb.open();
return;
}
jobEntry.setName(wName.getText());
jobEntry.setFilename(wFilename.getText());
jobEntry.setFailIfFileExists(wAbortExists.getSelection());
jobEntry.setAddFilenameToResult(wAddFilenameToResult.getSelection());
dispose();
}
    /**
     * This entry's success/failure outcome can be used to route the job flow.
     */
    public boolean evaluates()
    {
        return true;
    }
    /**
     * Outgoing hops depend on the result, so the entry is not unconditional.
     */
    public boolean isUnconditional()
    {
        return false;
    }
} | apache-2.0 |
bmwshop/brooklyn | utils/common/src/main/java/brooklyn/util/text/Strings.java | 27063 | /*
* Copyright (c) 2009-2013 Cloudsoft Corporation Ltd.
*/
package brooklyn.util.text;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.Collections;
import java.util.Map;
import java.util.StringTokenizer;
import javax.annotation.Nullable;
import brooklyn.util.collections.MutableMap;
import brooklyn.util.time.Time;
import com.google.common.base.CharMatcher;
import com.google.common.base.Functions;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.Ordering;
public class Strings {
/**
* Checks if the given string is null or is an empty string.
* Useful for pre-String.isEmpty. And useful for StringBuilder etc.
*
* @param s the String to check
* @return true if empty or null, false otherwise.
*
* @see #isNonEmpty(CharSequence)
* @see #isBlank(CharSequence)
* @see #isNonBlank(CharSequence)
*/
public static boolean isEmpty(CharSequence s) {
// Note guava has com.google.common.base.Strings.isNullOrEmpty(String),
// but that is just for String rather than CharSequence
return s == null || s.length()==0;
}
/**
* Checks if the given string is empty or only consists of whitespace.
*
* @param s the String to check
* @return true if blank, empty or null, false otherwise.
*
* @see #isEmpty(CharSequence)
* @see #isNonEmpty(CharSequence)
* @see #isNonBlank(CharSequence)
*/
public static boolean isBlank(CharSequence s) {
return isEmpty(s) || CharMatcher.WHITESPACE.matchesAllOf(s);
}
    /**
     * The inverse of {@link #isEmpty(CharSequence)}: non-null with at least one character.
     *
     * @param s the String to check
     * @return true if non empty, false otherwise.
     *
     * @see #isEmpty(CharSequence)
     * @see #isBlank(CharSequence)
     * @see #isNonBlank(CharSequence)
     */
    public static boolean isNonEmpty(CharSequence s) {
        return !isEmpty(s);
    }
    /**
     * The inverse of {@link #isBlank(CharSequence)}: contains at least one non-whitespace character.
     *
     * @param s the String to check
     * @return true if non blank, false otherwise.
     *
     * @see #isEmpty(CharSequence)
     * @see #isNonEmpty(CharSequence)
     * @see #isBlank(CharSequence)
     */
    public static boolean isNonBlank(CharSequence s) {
        return !isBlank(s);
    }
    /** throws IllegalArgumentException if the string IS null or empty; cf. guava Preconditions.checkXxxx */
    public static void checkNonEmpty(CharSequence s) {
        if (s==null) throw new IllegalArgumentException("String must not be null");
        if (s.length()==0) throw new IllegalArgumentException("String must not be empty");
    }
    /** throws IllegalArgumentException with the given message if the string is null or empty; cf. guava Preconditions.checkXxxx */
    public static void checkNonEmpty(CharSequence s, String message) {
        if (isEmpty(s)) throw new IllegalArgumentException(message);
    }
/** removes the first suffix in the list which is present at the end of string
* and returns that string; ignores subsequent suffixes if a matching one is found;
* returns the original string if no suffixes are at the end
*/
public static String removeFromEnd(String string, String ...suffixes) {
if (isEmpty(string)) return string;
for (String suffix : suffixes)
if (suffix!=null && string.endsWith(suffix)) return string.substring(0, string.length() - suffix.length());
return string;
}
/** as removeFromEnd, but repeats until all such suffixes are gone */
public static String removeAllFromEnd(String string, String ...suffixes) {
boolean anotherLoopNeeded = true;
while (anotherLoopNeeded) {
if (isEmpty(string)) return string;
anotherLoopNeeded = false;
for (String suffix : suffixes)
if (string.endsWith(suffix)) {
string = string.substring(0, string.length() - suffix.length());
anotherLoopNeeded = true;
break;
}
}
return string;
}
/** removes the first prefix in the list which is present at the start of string
* and returns that string; ignores subsequent prefixes if a matching one is found;
* returns the original string if no prefixes match
*/
public static String removeFromStart(String string, String ...prefixes) {
if (isEmpty(string)) return string;
for (String prefix : prefixes)
if (string.startsWith(prefix)) return string.substring(prefix.length());
return string;
}
/** as removeFromStart, but repeats until all such suffixes are gone */
public static String removeAllFromStart(String string, String ...prefixes) {
boolean anotherLoopNeeded = true;
while (anotherLoopNeeded) {
if (isEmpty(string)) return string;
anotherLoopNeeded = false;
for (String prefix : prefixes)
if (string.startsWith(prefix)) {
string = string.substring(prefix.length());
anotherLoopNeeded = true;
break;
}
}
return string;
}
/** convenience for {@link com.google.common.base.Joiner} */
public static String join(Iterable<? extends Object> list, String seperator) {
boolean app = false;
StringBuilder out = new StringBuilder();
for (Object s: list) {
if (app) out.append(seperator);
out.append(s);
app = true;
}
return out.toString();
}
/** convenience for {@link com.google.common.base.Joiner} */
public static String join(Object[] list, String seperator) {
boolean app = false;
StringBuilder out = new StringBuilder();
for (Object s: list) {
if (app) out.append(seperator);
out.append(s);
app = true;
}
return out.toString();
}
/** replaces all key->value entries from the replacement map in source (non-regex) */
@SuppressWarnings("rawtypes")
public static String replaceAll(String source, Map replacements) {
for (Object rr: replacements.entrySet()) {
Map.Entry r = (Map.Entry)rr;
source = replaceAllNonRegex(source, ""+r.getKey(), ""+r.getValue());
}
return source;
}
/** NON-REGEX replaceAll -
* replaces all instances in source, of the given pattern, with the given replacement
* (not interpreting any arguments as regular expressions)
*/
public static String replaceAll(String source, String pattern, String replacement) {
if (source==null) return source;
StringBuilder result = new StringBuilder(source.length());
for (int i=0; i<source.length(); ) {
if (source.substring(i).startsWith(pattern)) {
result.append(replacement);
i += pattern.length();
} else {
result.append(source.charAt(i));
i++;
}
}
return result.toString();
}
    /** NON-REGEX replacement -- explicit method name for readability, doing same as Strings.replaceAll */
    public static String replaceAllNonRegex(String source, String pattern, String replacement) {
        return replaceAll(source, pattern, replacement);
    }
    /** REGEX replacement -- explicit method name for readability, doing same as String.replaceAll */
    public static String replaceAllRegex(String source, String pattern, String replacement) {
        return source.replaceAll(pattern, replacement);
    }
    /** Valid non alphanumeric characters for filenames. */
    public static final String VALID_NON_ALPHANUM_FILE_CHARS = "-_.";
    /**
     * Returns a valid filename based on the input.
     *
     * A valid filename starts with the first alphanumeric character, then include
     * all alphanumeric characters plus those in {@link #VALID_NON_ALPHANUM_FILE_CHARS},
     * with any runs of invalid characters being replaced by {@literal _}.
     *
     * @throws NullPointerException if the input string is null.
     * @throws IllegalArgumentException if the input string is blank.
     */
    public static String makeValidFilename(String s) {
        Preconditions.checkNotNull(s, "Cannot make valid filename from null string");
        Preconditions.checkArgument(isNonBlank(s), "Cannot make valid filename from blank string");
        // negate() selects the INVALID characters; trimAndCollapseFrom strips them
        // from both ends and collapses interior runs to a single '_'
        return CharMatcher.anyOf(VALID_NON_ALPHANUM_FILE_CHARS).or(CharMatcher.JAVA_LETTER_OR_DIGIT)
                .negate()
                .trimAndCollapseFrom(s, '_');
    }
    /**
     * A {@link CharMatcher} that matches valid Java identifier characters.
     *
     * @see Character#isJavaIdentifierPart(char)
     */
    public static final CharMatcher IS_JAVA_IDENTIFIER_PART = CharMatcher.forPredicate(new Predicate<Character>() {
        @Override
        public boolean apply(@Nullable Character input) {
            // null is never a valid identifier character
            return input != null && Character.isJavaIdentifierPart(input);
        }
    });
    /**
     * Returns a valid Java identifier name based on the input.
     *
     * Removes certain characters (like apostrophe), replaces one or more invalid
     * characters with {@literal _}, and prepends {@literal _} if the first character
     * is only valid as an identifier part (not start).
     * <p>
     * The result is usually unique to s, though this isn't guaranteed, for example if
     * all characters are invalid. For a unique identifier use {@link #makeValidUniqueJavaName(String)}.
     *
     * @see #makeValidUniqueJavaName(String)
     */
    public static String makeValidJavaName(String s) {
        if (s==null) return "__null";
        if (s.length()==0) return "__empty";
        String name = IS_JAVA_IDENTIFIER_PART.negate().collapseFrom(CharMatcher.is('\'').removeFrom(s), '_');
        // note: the start-character test inspects the ORIGINAL first character of s,
        // not the first character of the collapsed name
        if (!Character.isJavaIdentifierStart(s.charAt(0))) return "_" + name;
        return name;
    }
    /**
     * Returns a unique valid java identifier name based on the input.
     *
     * Translated as per {@link #makeValidJavaName(String)} but with {@link String#hashCode()}
     * appended where necessary to guarantee uniqueness.
     *
     * @see #makeValidJavaName(String)
     */
    public static String makeValidUniqueJavaName(String s) {
        String name = makeValidJavaName(s);
        // NOTE(review): the hashCode suffix is skipped when s is all-valid OR contains
        // no apostrophe, so collapsed runs of other invalid characters do NOT get a
        // disambiguating suffix -- uniqueness only holds for apostrophe-containing input.
        if (isEmpty(s) || IS_JAVA_IDENTIFIER_PART.matchesAllOf(s) || CharMatcher.is('\'').matchesNoneOf(s)) {
            return name;
        } else {
            return name + "_" + s.hashCode();
        }
    }
    /** @see Identifiers#makeRandomId(int) */
    public static String makeRandomId(int l) {
        return Identifiers.makeRandomId(l);
    }
    /** pads the string with 0's at the left up to len; no padding if i longer than len */
    public static String makeZeroPaddedString(int i, int len) {
        return makePaddedString(""+i, len, "0", "");
    }
/** pads the string with "pad" at the left up to len; no padding if base longer than len */
public static String makePaddedString(String base, int len, String left_pad, String right_pad) {
String s = ""+(base==null ? "" : base);
while (s.length()<len) s=left_pad+s+right_pad;
return s;
}
public static void trimAll(String[] s) {
for (int i=0; i<s.length; i++)
s[i] = (s[i]==null ? "" : s[i].trim());
}
    /** creates a string from a real number, with specified accuracy (more iff it comes for free, ie integer-part);
     * switches to E notation if needed to fit within maxlen; can be padded left up too (not useful)
     * @param x number to use
     * @param maxlen maximum length for the numeric string, if possible (-1 to suppress)
     * @param prec number of digits accuracy desired (more kept for integers)
     * @param leftPadLen will add spaces at left if necessary to make string this long (-1 to suppress) [probably not useful]
     * @return such a string
     */
    public static String makeRealString(double x, int maxlen, int prec, int leftPadLen) {
        // delegates with a tiny skip-decimal threshold (near-integers drop their
        // decimal part) and with E notation enabled for numbers near zero
        return makeRealString(x, maxlen, prec, leftPadLen, 0.00000000001, true);
    }
/** creates a string from a real number, with specified accuracy (more iff it comes for free, ie integer-part);
* switches to E notation if needed to fit within maxlen; can be padded left up too (not useful)
* @param x number to use
* @param maxlen maximum length for the numeric string, if possible (-1 to suppress)
* @param prec number of digits accuracy desired (more kept for integers)
* @param leftPadLen will add spaces at left if necessary to make string this long (-1 to suppress) [probably not usef]
* @param skipDecimalThreshhold if positive it will not add a decimal part if the fractional part is less than this threshhold
* (but for a value 3.00001 it would show zeroes, e.g. with 3 precision and positive threshhold <= 0.00001 it would show 3.00);
* if zero or negative then decimal digits are always shown
* @param useEForSmallNumbers whether to use E notation for numbers near zero
* @return such a string
*/
    public static String makeRealString(double x, int maxlen, int prec, int leftPadLen, double skipDecimalThreshhold, boolean useEForSmallNumbers) {
        // DecimalFormat configured per call; grouping disabled so no thousands separators
        NumberFormat df = DecimalFormat.getInstance();
        //df.setMaximumFractionDigits(maxlen);
        df.setMinimumFractionDigits(0);
        //df.setMaximumIntegerDigits(prec);
        df.setMinimumIntegerDigits(1);
        df.setGroupingUsed(false);
        String s;
        if (x==0) {
            // zero is special-cased: "0" when decimals may be skipped, else "0.00..." to prec digits
            if (skipDecimalThreshhold>0 || prec<=1) s="0";
            else {
                s="0.0";
                while (s.length()<prec+1) s+="0";
            }
        } else {
//            long bits= Double.doubleToLongBits(x);
//            int s = ((bits >> 63) == 0) ? 1 : -1;
//            int e = (int)((bits >> 52) & 0x7ffL);
//            long m = (e == 0) ?
//            (bits & 0xfffffffffffffL) << 1 :
//            (bits & 0xfffffffffffffL) | 0x10000000000000L;
//            //s*m*2^(e-1075);
            // NOTE(review): for negative x, Math.log10 returns NaN and the int cast
            // yields 0, so precision handling for negatives may be off -- confirm intended
            int log = (int)Math.floor(Math.log10(x));
            int numFractionDigits = (log>=prec ? 0 : prec-log-1);
            if (numFractionDigits>0) { //need decimal digits
                if (skipDecimalThreshhold>0) {
                    // shrink the fraction length while x rounds to within the threshold
                    int checkFractionDigits = 0;
                    double multiplier = 1;
                    while (checkFractionDigits < numFractionDigits) {
                        if (Math.abs(x - Math.rint(x*multiplier)/multiplier)<skipDecimalThreshhold)
                            break;
                        checkFractionDigits++;
                        multiplier*=10;
                    }
                    numFractionDigits = checkFractionDigits;
                }
                df.setMinimumFractionDigits(numFractionDigits);
                df.setMaximumFractionDigits(numFractionDigits);
            } else {
                //x = Math.rint(x);
                df.setMaximumFractionDigits(0);
            }
            s = df.format(x);
            if (maxlen>0 && s.length()>maxlen) {
                //too long: fall back to E notation built from the significand
                double signif = x/Math.pow(10,log);
                if (s.indexOf('.')>=0) {
                    //have a decimal point; either we are very small 0.000001
                    //or prec is larger than maxlen
                    if (Math.abs(x)<1 && useEForSmallNumbers) {
                        //very small-- use alternate notation
                        s = makeRealString(signif, -1, prec, -1) + "E"+log;
                    } else {
                        //leave it alone, user error or E not wanted
                    }
                } else {
                    //no decimal point, integer part is too large, use alt notation
                    s = makeRealString(signif, -1, prec, -1) + "E"+log;
                }
            }
        }
        // optional left padding with spaces up to leftPadLen
        if (leftPadLen>s.length())
            return makePaddedString(s, leftPadLen, " ", "");
        else
            return s;
    }
/** creates a string from a real number, with specified accuracy (more iff it comes for free, ie integer-part);
 * values within 1e-10 of zero are treated as exactly zero;
 * switches to E notation if needed to fit within maxlen; can be padded left up too (not useful)
 * @param x number to use
 * @param maxlen maximum length for the numeric string, if possible (-1 to suppress)
 * @param prec number of digits accuracy desired (more kept for integers)
 * @param leftPadLen will add spaces at left if necessary to make string this long (-1 to suppress) [probably not useful]
 * @return such a string
 */
public static String makeRealStringNearZero(double x, int maxlen, int prec, int leftPadLen) {
    // collapse tiny values to exact zero before formatting
    if (Math.abs(x)<0.0000000001) x=0;
    NumberFormat df = DecimalFormat.getInstance();
    df.setMinimumFractionDigits(0);
    df.setMinimumIntegerDigits(1);
    df.setGroupingUsed(false);
    String s;
    if (x==0) {
        // zero is special-cased: bare "0", or "0.00..." padded out to the requested precision
        if (prec<=1) s="0";
        else {
            s="0.0";
            while (s.length()<prec+1) s+="0";
        }
    } else {
        // order of magnitude of the ABSOLUTE value: Math.log10 of a negative number
        // is NaN, and (int)Math.floor(NaN) silently became 0, which produced the
        // wrong number of fraction digits for negative inputs
        int log = (int)Math.floor(Math.log10(Math.abs(x)));
        int scale = (log>=prec ? 0 : prec-log-1);
        if (scale>0) { //need decimal digits
            // pre-round to the chosen scale so format() sees the rounded value
            double scale10 = Math.pow(10, scale);
            x = Math.rint(x*scale10)/scale10;
            df.setMinimumFractionDigits(scale);
            df.setMaximumFractionDigits(scale);
        } else {
            df.setMaximumFractionDigits(0);
        }
        s = df.format(x);
        if (maxlen>0 && s.length()>maxlen) {
            // too long: fall back to E notation where appropriate
            double signif = x/Math.pow(10,log);
            if (s.indexOf('.')>=0) {
                // has a decimal point; either we are very small (0.000001)
                // or prec is larger than maxlen
                if (Math.abs(x)<1) {
                    // very small -- use alternate notation
                    s = makeRealString(signif, -1, prec, -1) + "E"+log;
                } else {
                    // leave it alone, user error
                }
            } else {
                // no decimal point, integer part is too large: use alternate notation
                s = makeRealString(signif, -1, prec, -1) + "E"+log;
            }
        }
    }
    if (leftPadLen>s.length())
        return makePaddedString(s, leftPadLen, " ", "");
    else
        return s;
}
/** returns the first word (whitespace delimited text), or null if there is none (input null or all whitespace) */
public static String getFirstWord(String s) {
    if (s == null) return null;
    int wordStart = 0;
    // skip leading whitespace
    while (wordStart < s.length() && Character.isWhitespace(s.charAt(wordStart))) {
        wordStart++;
    }
    // nothing but whitespace
    if (wordStart == s.length()) return null;
    // consume the word itself
    int wordEnd = wordStart;
    while (wordEnd < s.length() && !Character.isWhitespace(s.charAt(wordEnd))) {
        wordEnd++;
    }
    return s.substring(wordStart, wordEnd);
}
/** returns the last word (whitespace delimited text), or null if there is none (input null or all whitespace) */
public static String getLastWord(String s) {
    if (s == null) return null;
    int wordEnd = s.length();
    // skip trailing whitespace
    while (wordEnd > 0 && Character.isWhitespace(s.charAt(wordEnd - 1))) {
        wordEnd--;
    }
    // nothing but whitespace
    if (wordEnd == 0) return null;
    // consume the word backwards
    int wordStart = wordEnd;
    while (wordStart > 0 && !Character.isWhitespace(s.charAt(wordStart - 1))) {
        wordStart--;
    }
    return s.substring(wordStart, wordEnd);
}
/** returns the first word after the given phrase, or null if no such phrase;
 * if the character immediately after the phrase is not whitespace, the non-whitespace
 * sequence starting with that character will be returned */
public static String getFirstWordAfter(String context, String phrase) {
    if (context == null || phrase == null) return null;
    int at = context.indexOf(phrase);
    if (at < 0) return null;
    String remainder = context.substring(at + phrase.length());
    return getFirstWord(remainder);
}
/** @deprecated use {@link Time#makeTimeStringRounded(long)} */
@Deprecated
public static String makeTimeString(long utcMillis) {
    // thin delegation kept only for backwards compatibility
    return Time.makeTimeStringRounded(utcMillis);
}
/** returns e.g. { "prefix01", ..., "prefix96" };
 * see more functional NumericRangeGlobExpander for "prefix{01-96}"
 */
public static String[] makeArray(String prefix, int count) {
    String[] result = new String[count];
    // width of the largest index, so every entry is padded to equal length
    int len = (""+count).length();
    for (int i=1; i<=count; i++)
        // presumably makePaddedString left-pads ""+i with '0' to len chars -- TODO confirm
        result[i-1] = prefix + makePaddedString("", len, "0", ""+i);
    return result;
}
/** concatenates the given arrays into one, skipping null arrays entirely */
public static String[] combineArrays(String[] ...arrays) {
    // first pass: total size
    int total = 0;
    for (String[] array : arrays) {
        total += (array == null ? 0 : array.length);
    }
    // second pass: bulk-copy each non-null array into place
    String[] combined = new String[total];
    int next = 0;
    for (String[] array : arrays) {
        if (array == null) continue;
        System.arraycopy(array, 0, combined, next, array.length);
        next += array.length;
    }
    return combined;
}
/** upper-cases the first character and lower-cases the rest; null and empty inputs returned unchanged */
public static String toInitialCapOnly(String value) {
    if (value == null || value.length() == 0) return value;
    String head = value.substring(0, 1);
    String tail = value.substring(1);
    return head.toUpperCase() + tail.toLowerCase();
}
/** returns the input with its characters in reverse order */
public static String reverse(String name) {
    StringBuilder flipped = new StringBuilder(name);
    flipped.reverse();
    return flipped.toString();
}
/** true iff the string equals its own lower-cased form (so digits/punctuation count as lower case) */
public static boolean isLowerCase(String s) {
    String lowered = s.toLowerCase();
    return lowered.equals(s);
}
/** returns the character c repeated length times */
public static String makeRepeated(char c, int length) {
    // same as the StringBuilder loop, but fills a buffer in one shot;
    // negative length throws NegativeArraySizeException either way
    char[] buf = new char[length];
    Arrays.fill(buf, c);
    return new String(buf);
}
/** null-safe {@link String#trim()}: returns null for null input */
public static String trim(String s) {
    return s == null ? null : s.trim();
}
/** removes trailing whitespace only (the characters {@link String#trim()} would strip), keeping any leading whitespace */
public static String trimEnd(String s) {
    if (s == null) return null;
    int end = s.length();
    // String.trim() strips all chars <= '\u0020'; mirror that from the right only
    while (end > 0 && s.charAt(end - 1) <= ' ') {
        end--;
    }
    return s.substring(0, end);
}
/** returns up to maxlen characters from the start of s (s itself if shorter); null-safe */
public static String maxlen(String s, int maxlen) {
    if (s == null) return null;
    if (s.length() <= maxlen) return s;
    return s.substring(0, maxlen);
}
/** returns toString of the object if it is not null, otherwise null */
public static String toString(Object o) {
    return (o == null) ? null : o.toString();
}
/** case-insensitive substring test; scans char-by-char instead of allocating
 * lower-cased copies of the inputs. Null input is never a match; an empty (or
 * null, per isEmpty) fragment matches everything. */
public static boolean containsLiteralIgnoreCase(CharSequence input, CharSequence fragment) {
    if (input==null) return false;
    if (isEmpty(fragment)) return true;
    int lastValidStartPos = input.length()-fragment.length();
    // precompute both cases of the first fragment char for a cheap fast-path check
    char f0u = Character.toUpperCase(fragment.charAt(0));
    char f0l = Character.toLowerCase(fragment.charAt(0));
    i: for (int i=0; i<=lastValidStartPos; i++) {
        char ii = input.charAt(i);
        if (ii==f0l || ii==f0u) {
            // first char matched; verify the remainder, resuming the outer scan on mismatch
            for (int j=1; j<fragment.length(); j++) {
                if (Character.toLowerCase(input.charAt(i+j))!=Character.toLowerCase(fragment.charAt(j)))
                    continue i;
            }
            return true;
        }
    }
    return false;
}
/** exact (case-sensitive) substring test on CharSequences, without converting
 * to String. Null input is never a match; an empty (or null, per isEmpty)
 * fragment matches everything. */
public static boolean containsLiteral(CharSequence input, CharSequence fragment) {
    if (input==null) return false;
    if (isEmpty(fragment)) return true;
    int lastValidStartPos = input.length()-fragment.length();
    char f0 = fragment.charAt(0);
    i: for (int i=0; i<=lastValidStartPos; i++) {
        char ii = input.charAt(i);
        if (ii==f0) {
            // first char matched; verify the remainder, resuming the outer scan on mismatch
            for (int j=1; j<fragment.length(); j++) {
                if (input.charAt(i+j)!=fragment.charAt(j))
                    continue i;
            }
            return true;
        }
    }
    return false;
}
/** Returns a size string using metric suffixes from {@link ByteSizeStrings#metric()}, e.g. 23.5MB */
public static String makeSizeString(long sizeInBytes) {
    return ByteSizeStrings.metric().makeSizeString(sizeInBytes);
}

/** Returns a size string using ISO suffixes from {@link ByteSizeStrings#iso()}, e.g. 23.5MiB */
public static String makeISOSizeString(long sizeInBytes) {
    return ByteSizeStrings.iso().makeSizeString(sizeInBytes);
}

/** Returns a size string using Java suffixes from {@link ByteSizeStrings#java()}, e.g. 23m */
public static String makeJavaSizeString(long sizeInBytes) {
    return ByteSizeStrings.java().makeSizeString(sizeInBytes);
}
/** returns a configurable shortener */
public static StringShortener shortener() {
    return new StringShortener();
}

/** returns a Supplier whose get() invokes toString on the given object each time it is called */
public static Supplier<String> toStringSupplier(Object src) {
    return Suppliers.compose(Functions.toStringFunction(), Suppliers.ofInstance(src));
}

/** wraps a call to {@link String#format(String, Object...)} in a toString, i.e. using %s syntax,
 * useful for places where we want deferred evaluation
 * (e.g. as message to {@link Preconditions} to skip concatenation when not needed) */
public static FormattedString format(String pattern, Object... args) {
    return new FormattedString(pattern, args);
}
/** returns "s" if the argument is not 1, empty string otherwise; useful when constructing plurals */
public static String s(int count) {
    return count==1 ? "" : "s";
}

/** converts a map of any objects to a map of strings, invoking toString where needed;
 * note that null keys and null values are rendered as the string "null"
 * (String.valueOf semantics), not preserved as nulls */
public static Map<String, String> toStringMap(Map<?,?> map) {
    // NOTE(review): throws NullPointerException if map itself is null -- confirm callers never pass null
    Map<String,String> result = MutableMap.<String, String>of();
    for (Map.Entry<?,?> e: map.entrySet()) {
        result.put(String.valueOf(e.getKey()), String.valueOf(e.getValue()));
    }
    return result;
}
/** returns base repeated count times (empty string for count <= 0); null base yields null */
public static String repeat(String base, int count) {
    if (base == null) return null;
    StringBuilder repeated = new StringBuilder();
    int remaining = count;
    while (remaining > 0) {
        repeated.append(base);
        remaining--;
    }
    return repeated.toString();
}
/** returns comparator which compares based on length, with shorter ones first (and null before that);
 * in event of a tie, it uses the toString order */
public static Ordering<String> lengthComparator() {
    // length ordering first, natural string ordering as tiebreak;
    // nullsFirst() wraps the whole compound comparator so null sorts before everything
    return Ordering.<Integer>natural().onResultOf(StringFunctions.length()).compound(Ordering.<String>natural()).nullsFirst();
}
/** returns the text before the first newline in s, the whole string if it contains
 * no newline, or null for null input (null-safe, consistent with trim/maxlen/getFirstWord) */
public static String getFirstLine(String s) {
    if (s == null) return null;
    int idx = s.indexOf('\n');
    if (idx == -1) return s;
    return s.substring(0, idx);
}
/** looks for first section of text following the prefix and, if present, before the suffix;
 * null if the prefix is not present in the string, and everything after the prefix if suffix is not present;
 * if either prefix or suffix is null, it is treated as the start/end of the string */
public static String getFragmentBetween(String input, String prefix, String suffix) {
    if (input == null) return null;
    String fragment = input;
    // cut everything up to and including the prefix (required if given)
    if (prefix != null) {
        int at = fragment.indexOf(prefix);
        if (at == -1) return null;
        fragment = fragment.substring(at + prefix.length());
    }
    // cut everything from the suffix onwards (optional even if given)
    if (suffix != null) {
        int at = fragment.indexOf(suffix);
        if (at >= 0) fragment = fragment.substring(0, at);
    }
    return fragment;
}
/** counts whitespace-delimited words in phrase, 0 for null input;
 * when respectQuotes, tokenizes via QuotedStringTokenizer so quoted runs count as one word */
public static int getWordCount(String phrase, boolean respectQuotes) {
    if (phrase==null) return 0;
    phrase = phrase.trim();
    if (respectQuotes)
        // presumably QuotedStringTokenizer keeps quoted sections together as single tokens -- TODO confirm
        return new QuotedStringTokenizer(phrase).remainderAsList().size();
    else
        return Collections.list(new StringTokenizer(phrase)).size();
}
}
| apache-2.0 |
jiulongzaitian/kubernetes | pkg/printers/internalversion/describe.go | 141242 | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import (
"bytes"
"crypto/x509"
"fmt"
"io"
"net"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/golang/glog"
"github.com/fatih/camelcase"
versionedextension "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/dynamic"
externalclient "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/events"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/ref"
resourcehelper "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/certificates"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/apis/core/helper/qos"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/apis/storage"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/printers"
"k8s.io/kubernetes/pkg/registry/rbac/validation"
"k8s.io/kubernetes/pkg/util/slice"
)
// Indentation levels for PrefixWriter.Write; each level contributes two
// spaces of prefix (see prefixWriter.Write).
const (
    LEVEL_0 = iota
    LEVEL_1
    LEVEL_2
    LEVEL_3
)
// PrefixWriter can write text at various indentation levels.
type PrefixWriter interface {
    // Write writes text with the specified indentation level,
    // formatting like fmt.Fprintf.
    Write(level int, format string, a ...interface{})
    // WriteLine writes an entire line with no indentation level.
    WriteLine(a ...interface{})
    // Flush flushes the underlying writer, if it supports flushing.
    Flush()
}
// prefixWriter implements PrefixWriter on top of a plain io.Writer.
type prefixWriter struct {
    out io.Writer
}

// Compile-time assertion that prefixWriter satisfies PrefixWriter.
var _ PrefixWriter = &prefixWriter{}

// NewPrefixWriter creates a new PrefixWriter writing to out.
func NewPrefixWriter(out io.Writer) PrefixWriter {
    return &prefixWriter{out: out}
}
// Write writes formatted text at the given indentation level; each level
// contributes two leading spaces.
func (pw *prefixWriter) Write(level int, format string, a ...interface{}) {
    prefix := ""
    if level > 0 {
        // strings.Repeat replaces the previous manual concatenation loop;
        // guarded because Repeat panics on a negative count, whereas the old
        // loop simply produced no prefix.
        prefix = strings.Repeat("  ", level)
    }
    fmt.Fprintf(pw.out, prefix+format, a...)
}
// WriteLine writes an entire line with no indentation, like fmt.Fprintln.
func (pw *prefixWriter) WriteLine(a ...interface{}) {
    fmt.Fprintln(pw.out, a...)
}

// Flush flushes the underlying writer if it implements the (package-local)
// flusher interface; otherwise it is a no-op.
func (pw *prefixWriter) Flush() {
    if f, ok := pw.out.(flusher); ok {
        f.Flush()
    }
}
// describerMap returns the mapping from each supported GroupKind to the
// Describer that renders it. Some describers need only the internal client c;
// Deployment and CronJob also take the external client.
func describerMap(c clientset.Interface, externalclient externalclient.Interface) map[schema.GroupKind]printers.Describer {
    m := map[schema.GroupKind]printers.Describer{
        api.Kind("Pod"):                   &PodDescriber{c},
        api.Kind("ReplicationController"): &ReplicationControllerDescriber{c},
        api.Kind("Secret"):                &SecretDescriber{c},
        api.Kind("Service"):               &ServiceDescriber{c},
        api.Kind("ServiceAccount"):        &ServiceAccountDescriber{c},
        api.Kind("Node"):                  &NodeDescriber{c},
        api.Kind("LimitRange"):            &LimitRangeDescriber{c},
        api.Kind("ResourceQuota"):         &ResourceQuotaDescriber{c},
        api.Kind("PersistentVolume"):      &PersistentVolumeDescriber{c},
        api.Kind("PersistentVolumeClaim"): &PersistentVolumeClaimDescriber{c},
        api.Kind("Namespace"):             &NamespaceDescriber{c},
        api.Kind("Endpoints"):             &EndpointsDescriber{c},
        api.Kind("ConfigMap"):             &ConfigMapDescriber{c},
        api.Kind("PriorityClass"):         &PriorityClassDescriber{c},
        extensions.Kind("ReplicaSet"):     &ReplicaSetDescriber{c},
        extensions.Kind("NetworkPolicy"):  &NetworkPolicyDescriber{c},
        extensions.Kind("PodSecurityPolicy"): &PodSecurityPolicyDescriber{c},
        autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c},
        extensions.Kind("DaemonSet"):  &DaemonSetDescriber{c},
        extensions.Kind("Deployment"): &DeploymentDescriber{c, externalclient},
        extensions.Kind("Ingress"):    &IngressDescriber{c},
        batch.Kind("Job"):             &JobDescriber{c},
        batch.Kind("CronJob"):         &CronJobDescriber{c, externalclient},
        // several kinds exist under more than one group (apps vs extensions);
        // each group gets its own entry pointing at the same describer type
        apps.Kind("StatefulSet"): &StatefulSetDescriber{c},
        apps.Kind("Deployment"):  &DeploymentDescriber{c, externalclient},
        apps.Kind("DaemonSet"):   &DaemonSetDescriber{c},
        apps.Kind("ReplicaSet"):  &ReplicaSetDescriber{c},
        certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c},
        storage.Kind("StorageClass"):                   &StorageClassDescriber{c},
        policy.Kind("PodDisruptionBudget"):             &PodDisruptionBudgetDescriber{c},
        rbac.Kind("Role"):                              &RoleDescriber{c},
        rbac.Kind("ClusterRole"):                       &ClusterRoleDescriber{c},
        rbac.Kind("RoleBinding"):                       &RoleBindingDescriber{c},
        rbac.Kind("ClusterRoleBinding"):                &ClusterRoleBindingDescriber{c},
        networking.Kind("NetworkPolicy"):               &NetworkPolicyDescriber{c},
        scheduling.Kind("PriorityClass"):               &PriorityClassDescriber{c},
    }
    return m
}
// DescribableResources lists all resource types we can describe.
func DescribableResources() []string {
    known := describerMap(nil, nil)
    keys := make([]string, 0, len(known))
    for kind := range known {
        keys = append(keys, strings.ToLower(kind.Kind))
    }
    return keys
}
// DescriberFor returns the default describe functions for each of the standard
// Kubernetes types; the bool reports whether the kind is supported.
func DescriberFor(kind schema.GroupKind, c clientset.Interface, externalclient externalclient.Interface) (printers.Describer, bool) {
    f, ok := describerMap(c, externalclient)[kind]
    return f, ok
}

// GenericDescriberFor returns a generic describer for the specified mapping
// that uses only information available from runtime.Unstructured
func GenericDescriberFor(mapping *meta.RESTMapping, dynamic dynamic.Interface, events coreclient.EventsGetter) printers.Describer {
    return &genericDescriber{mapping, dynamic, events}
}
// genericDescriber describes arbitrary resources through the dynamic client,
// using only unstructured object content plus events.
type genericDescriber struct {
    mapping *meta.RESTMapping
    dynamic dynamic.Interface
    events  coreclient.EventsGetter
}
// Describe fetches the object via the dynamic client and renders its name,
// namespace, labels, annotations, the remaining unstructured fields, and
// (optionally) related events.
func (g *genericDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (output string, err error) {
    apiResource := &metav1.APIResource{
        Name:       g.mapping.Resource,
        Namespaced: g.mapping.Scope.Name() == meta.RESTScopeNameNamespace,
        Kind:       g.mapping.GroupVersionKind.Kind,
    }
    obj, err := g.dynamic.Resource(apiResource, namespace).Get(name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    var events *api.EventList
    if describerSettings.ShowEvents {
        // best-effort: a failed event search just omits the events section
        events, _ = g.events.Events(namespace).Search(legacyscheme.Scheme, obj)
    }
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", obj.GetName())
        w.Write(LEVEL_0, "Namespace:\t%s\n", obj.GetNamespace())
        printLabelsMultiline(w, "Labels", obj.GetLabels())
        printAnnotationsMultiline(w, "Annotations", obj.GetAnnotations())
        // metadata fields already printed above are excluded from the generic dump
        printUnstructuredContent(w, LEVEL_0, obj.UnstructuredContent(), "", ".metadata.name", ".metadata.namespace", ".metadata.labels", ".metadata.annotations")
        if events != nil {
            DescribeEvents(events, w)
        }
        return nil
    })
}
// printUnstructuredContent recursively pretty-prints unstructured object
// content at the given indentation level. skipPrefix is the dotted path to
// this map; any field whose full dotted path appears in skip is omitted
// (used to suppress metadata printed elsewhere). Fields print in sorted order.
func printUnstructuredContent(w PrefixWriter, level int, content map[string]interface{}, skipPrefix string, skip ...string) {
    fields := []string{}
    for field := range content {
        fields = append(fields, field)
    }
    sort.Strings(fields)
    for _, field := range fields {
        // The skip-expression computation and skip check were previously
        // duplicated in every switch case; hoisting them here removes the
        // triplication without changing behavior.
        skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field)
        if slice.ContainsString(skip, skipExpr, nil) {
            continue
        }
        switch typedValue := content[field].(type) {
        case map[string]interface{}:
            // nested object: recurse one level deeper
            w.Write(level, "%s:\n", smartLabelFor(field))
            printUnstructuredContent(w, level+1, typedValue, skipExpr, skip...)
        case []interface{}:
            // list: objects recurse, scalars print inline
            w.Write(level, "%s:\n", smartLabelFor(field))
            for _, child := range typedValue {
                switch typedChild := child.(type) {
                case map[string]interface{}:
                    printUnstructuredContent(w, level+1, typedChild, skipExpr, skip...)
                default:
                    w.Write(level+1, "%v\n", typedChild)
                }
            }
        default:
            // scalar leaf value
            w.Write(level, "%s:\t%v\n", smartLabelFor(field), typedValue)
        }
    }
}
// smartLabelFor converts a camelCase field name into a space-separated,
// title-cased label, upper-casing a small set of well-known acronyms
// (e.g. "apiVersion" -> "API Version").
func smartLabelFor(field string) string {
    commonAcronyms := []string{"API", "URL", "UID", "OSB", "GUID"}
    splitted := camelcase.Split(field)
    for i := 0; i < len(splitted); i++ {
        part := splitted[i]
        if slice.ContainsString(commonAcronyms, strings.ToUpper(part), nil) {
            part = strings.ToUpper(part)
        } else {
            part = strings.Title(part)
        }
        splitted[i] = part
    }
    return strings.Join(splitted, " ")
}
// DefaultObjectDescriber can describe the default Kubernetes objects.
var DefaultObjectDescriber printers.ObjectDescriber

func init() {
    d := &Describers{}
    // Registration fails only on programmer error (bad describe-function
    // signatures), so a failure here is fatal.
    err := d.Add(
        describeLimitRange,
        describeQuota,
        describePod,
        describeService,
        describeReplicationController,
        describeDaemonSet,
        describeNode,
        describeNamespace,
    )
    if err != nil {
        glog.Fatalf("Cannot register describers: %v", err)
    }
    DefaultObjectDescriber = d
}
// NamespaceDescriber generates information about a namespace
type NamespaceDescriber struct {
    clientset.Interface
}

// Describe fetches the namespace plus its resource quotas and limit ranges
// and renders them; the quota and limit-range sections are silently omitted
// when the server does not support those resources.
func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
    ns, err := d.Core().Namespaces().Get(name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    resourceQuotaList, err := d.Core().ResourceQuotas(name).List(metav1.ListOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            // Server does not support resource quotas.
            // Not an error, will not show resource quotas information.
            resourceQuotaList = nil
        } else {
            return "", err
        }
    }
    limitRangeList, err := d.Core().LimitRanges(name).List(metav1.ListOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            // Server does not support limit ranges.
            // Not an error, will not show limit ranges information.
            limitRangeList = nil
        } else {
            return "", err
        }
    }
    return describeNamespace(ns, resourceQuotaList, limitRangeList)
}
// describeNamespace renders a namespace together with optional resource quota
// and limit range lists; a nil list simply omits that section.
func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.ResourceQuotaList, limitRangeList *api.LimitRangeList) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", namespace.Name)
        printLabelsMultiline(w, "Labels", namespace.Labels)
        printAnnotationsMultiline(w, "Annotations", namespace.Annotations)
        w.Write(LEVEL_0, "Status:\t%s\n", string(namespace.Status.Phase))
        if resourceQuotaList != nil {
            w.Write(LEVEL_0, "\n")
            DescribeResourceQuotas(resourceQuotaList, w)
        }
        if limitRangeList != nil {
            w.Write(LEVEL_0, "\n")
            DescribeLimitRanges(limitRangeList, w)
        }
        return nil
    })
}
// describeLimitRangeSpec writes one table row per resource name mentioned in
// any of a limit item's Min/Max/Default/DefaultRequest/MaxLimitRequestRatio
// maps, printing "-" for unset values. prefix is prepended to each row (used
// for indentation when embedded in namespace output).
func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w PrefixWriter) {
    for i := range spec.Limits {
        item := spec.Limits[i]
        maxResources := item.Max
        minResources := item.Min
        defaultLimitResources := item.Default
        defaultRequestResources := item.DefaultRequest
        ratio := item.MaxLimitRequestRatio
        // union of all resource names mentioned by this limit item
        set := map[api.ResourceName]bool{}
        for k := range maxResources {
            set[k] = true
        }
        for k := range minResources {
            set[k] = true
        }
        for k := range defaultLimitResources {
            set[k] = true
        }
        for k := range defaultRequestResources {
            set[k] = true
        }
        for k := range ratio {
            set[k] = true
        }
        for k := range set {
            // if no value is set, we output -
            maxValue := "-"
            minValue := "-"
            defaultLimitValue := "-"
            defaultRequestValue := "-"
            ratioValue := "-"
            maxQuantity, maxQuantityFound := maxResources[k]
            if maxQuantityFound {
                maxValue = maxQuantity.String()
            }
            minQuantity, minQuantityFound := minResources[k]
            if minQuantityFound {
                minValue = minQuantity.String()
            }
            defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k]
            if defaultLimitQuantityFound {
                defaultLimitValue = defaultLimitQuantity.String()
            }
            defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k]
            if defaultRequestQuantityFound {
                defaultRequestValue = defaultRequestQuantity.String()
            }
            ratioQuantity, ratioQuantityFound := ratio[k]
            if ratioQuantityFound {
                ratioValue = ratioQuantity.String()
            }
            msg := "%s%s\t%v\t%v\t%v\t%v\t%v\t%v\n"
            w.Write(LEVEL_0, msg, prefix, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue)
        }
    }
}
// DescribeLimitRanges merges a set of limit range items into a single tabular description
func DescribeLimitRanges(limitRanges *api.LimitRangeList, w PrefixWriter) {
    if len(limitRanges.Items) == 0 {
        w.Write(LEVEL_0, "No resource limits.\n")
        return
    }
    // shared header, then one row group per limit range (indented two spaces)
    w.Write(LEVEL_0, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n")
    w.Write(LEVEL_0, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n")
    for _, limitRange := range limitRanges.Items {
        describeLimitRangeSpec(limitRange.Spec, " ", w)
    }
}
// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas,
// sorted by name; each quota lists its scopes (with help text) and a used/hard table sorted by resource.
func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w PrefixWriter) {
    if len(quotas.Items) == 0 {
        w.Write(LEVEL_0, "No resource quota.\n")
        return
    }
    sort.Sort(SortableResourceQuotas(quotas.Items))
    w.Write(LEVEL_0, "Resource Quotas")
    for _, q := range quotas.Items {
        w.Write(LEVEL_0, "\n Name:\t%s\n", q.Name)
        if len(q.Spec.Scopes) > 0 {
            scopes := make([]string, 0, len(q.Spec.Scopes))
            for _, scope := range q.Spec.Scopes {
                scopes = append(scopes, string(scope))
            }
            sort.Strings(scopes)
            w.Write(LEVEL_0, " Scopes:\t%s\n", strings.Join(scopes, ", "))
            for _, scope := range scopes {
                helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope))
                if len(helpText) > 0 {
                    w.Write(LEVEL_0, " * %s\n", helpText)
                }
            }
        }
        w.Write(LEVEL_0, " Resource\tUsed\tHard\n")
        w.Write(LEVEL_0, " --------\t---\t---\n")
        // Hard holds the authoritative set of resource names; Used is indexed by the same keys
        resources := make([]api.ResourceName, 0, len(q.Status.Hard))
        for resource := range q.Status.Hard {
            resources = append(resources, resource)
        }
        sort.Sort(SortableResourceNames(resources))
        for _, resource := range resources {
            hardQuantity := q.Status.Hard[resource]
            usedQuantity := q.Status.Used[resource]
            w.Write(LEVEL_0, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String())
        }
    }
}
// LimitRangeDescriber generates information about a limit range
type LimitRangeDescriber struct {
    clientset.Interface
}

// Describe fetches the named limit range and renders it.
func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
    lr := d.Core().LimitRanges(namespace)
    limitRange, err := lr.Get(name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    return describeLimitRange(limitRange)
}

// describeLimitRange renders a single limit range as a table.
func describeLimitRange(limitRange *api.LimitRange) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", limitRange.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", limitRange.Namespace)
        w.Write(LEVEL_0, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n")
        w.Write(LEVEL_0, "----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n")
        describeLimitRangeSpec(limitRange.Spec, "", w)
        return nil
    })
}
// ResourceQuotaDescriber generates information about a resource quota
type ResourceQuotaDescriber struct {
    clientset.Interface
}

// Describe fetches the named resource quota and renders it.
func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
    rq := d.Core().ResourceQuotas(namespace)
    resourceQuota, err := rq.Get(name, metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    return describeQuota(resourceQuota)
}
// helpTextForResourceQuotaScope returns a human-readable explanation of the
// given quota scope, or the empty string for scopes with no help text.
func helpTextForResourceQuotaScope(scope api.ResourceQuotaScope) string {
    // map lookup on an unknown scope yields "", matching the old switch default
    helpText := map[api.ResourceQuotaScope]string{
        api.ResourceQuotaScopeTerminating:    "Matches all pods that have an active deadline. These pods have a limited lifespan on a node before being actively terminated by the system.",
        api.ResourceQuotaScopeNotTerminating: "Matches all pods that do not have an active deadline. These pods usually include long running pods whose container command is not expected to terminate.",
        api.ResourceQuotaScopeBestEffort:     "Matches all pods that do not have resource requirements set. These pods have a best effort quality of service.",
        api.ResourceQuotaScopeNotBestEffort:  "Matches all pods that have at least one resource requirement set. These pods have a burstable or guaranteed quality of service.",
    }
    return helpText[scope]
}
// describeQuota renders a single resource quota: name, namespace, scopes
// (with help text), and a used/hard table sorted by resource name.
func describeQuota(resourceQuota *api.ResourceQuota) (string, error) {
    return tabbedString(func(out io.Writer) error {
        w := NewPrefixWriter(out)
        w.Write(LEVEL_0, "Name:\t%s\n", resourceQuota.Name)
        w.Write(LEVEL_0, "Namespace:\t%s\n", resourceQuota.Namespace)
        if len(resourceQuota.Spec.Scopes) > 0 {
            scopes := make([]string, 0, len(resourceQuota.Spec.Scopes))
            for _, scope := range resourceQuota.Spec.Scopes {
                scopes = append(scopes, string(scope))
            }
            sort.Strings(scopes)
            w.Write(LEVEL_0, "Scopes:\t%s\n", strings.Join(scopes, ", "))
            for _, scope := range scopes {
                helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope))
                if len(helpText) > 0 {
                    w.Write(LEVEL_0, " * %s\n", helpText)
                }
            }
        }
        w.Write(LEVEL_0, "Resource\tUsed\tHard\n")
        w.Write(LEVEL_0, "--------\t----\t----\n")
        // Hard holds the authoritative set of resource names; Used is indexed by the same keys
        resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard))
        for resource := range resourceQuota.Status.Hard {
            resources = append(resources, resource)
        }
        sort.Sort(SortableResourceNames(resources))
        msg := "%v\t%v\t%v\n"
        for i := range resources {
            resource := resources[i]
            hardQuantity := resourceQuota.Status.Hard[resource]
            usedQuantity := resourceQuota.Status.Used[resource]
            w.Write(LEVEL_0, msg, resource, usedQuantity.String(), hardQuantity.String())
        }
        return nil
    })
}
// PodDescriber generates information about a pod and the replication controllers that
// create it.
type PodDescriber struct {
    clientset.Interface
}
// Describe fetches the pod and renders it. If the pod cannot be fetched but
// events for it exist (and events were requested), those events are rendered
// along with the fetch error instead of failing outright.
func (d *PodDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
    pod, err := d.Core().Pods(namespace).Get(name, metav1.GetOptions{})
    if err != nil {
        if describerSettings.ShowEvents {
            eventsInterface := d.Core().Events(namespace)
            selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil)
            options := metav1.ListOptions{FieldSelector: selector.String()}
            events, err2 := eventsInterface.List(options)
            // ShowEvents is guaranteed true inside this branch; the previous
            // code redundantly re-tested it here.
            if err2 == nil && len(events.Items) > 0 {
                return tabbedString(func(out io.Writer) error {
                    w := NewPrefixWriter(out)
                    w.Write(LEVEL_0, "Pod '%v': error '%v', but found events.\n", name, err)
                    DescribeEvents(events, w)
                    return nil
                })
            }
        }
        return "", err
    }
    var events *api.EventList
    if describerSettings.ShowEvents {
        if ref, err := ref.GetReference(legacyscheme.Scheme, pod); err != nil {
            glog.Errorf("Unable to construct reference to '%#v': %v", pod, err)
        } else {
            // Kind is cleared before searching; presumably events are recorded
            // against references without a kind -- TODO confirm
            ref.Kind = ""
            events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ref)
        }
    }
    return describePod(pod, events)
}
// describePod renders the detailed, tab-aligned description of a pod.
// events may be nil, in which case the events section is omitted.
func describePod(pod *api.Pod, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pod.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pod.Namespace)
		if pod.Spec.Priority != nil {
			w.Write(LEVEL_0, "Priority:\t%d\n", *pod.Spec.Priority)
			w.Write(LEVEL_0, "PriorityClassName:\t%s\n", stringOrNone(pod.Spec.PriorityClassName))
		}
		if pod.Spec.NodeName == "" {
			w.Write(LEVEL_0, "Node:\t<none>\n")
		} else {
			w.Write(LEVEL_0, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP)
		}
		if pod.Status.StartTime != nil {
			w.Write(LEVEL_0, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z))
		}
		printLabelsMultiline(w, "Labels", pod.Labels)
		printAnnotationsMultiline(w, "Annotations", pod.Annotations)
		// A set DeletionTimestamp means the pod is terminating; show the
		// remaining grace period instead of the phase.
		if pod.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestamp(*pod.DeletionTimestamp))
			w.Write(LEVEL_0, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds)
		} else {
			w.Write(LEVEL_0, "Status:\t%s\n", string(pod.Status.Phase))
		}
		if len(pod.Status.Reason) > 0 {
			w.Write(LEVEL_0, "Reason:\t%s\n", pod.Status.Reason)
		}
		if len(pod.Status.Message) > 0 {
			w.Write(LEVEL_0, "Message:\t%s\n", pod.Status.Message)
		}
		w.Write(LEVEL_0, "IP:\t%s\n", pod.Status.PodIP)
		if controlledBy := printController(pod); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		if len(pod.Status.NominatedNodeName) > 0 {
			w.Write(LEVEL_0, "NominatedNodeName:\t%s\n", pod.Status.NominatedNodeName)
		}
		// Init containers are listed before regular containers.
		if len(pod.Spec.InitContainers) > 0 {
			describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), w, "")
		}
		describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), w, "")
		if len(pod.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\n")
			for _, c := range pod.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \n",
					c.Type,
					c.Status)
			}
		}
		describeVolumes(pod.Spec.Volumes, w, "")
		// Prefer the QoS class recorded in status; recompute as a fallback.
		if pod.Status.QOSClass != "" {
			w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass)
		} else {
			w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod))
		}
		printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector)
		printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printController returns "Kind/Name" of the object's managing controller,
// or the empty string when no controller owns it.
func printController(controllee metav1.Object) string {
	controllerRef := metav1.GetControllerOf(controllee)
	if controllerRef == nil {
		return ""
	}
	return fmt.Sprintf("%s/%s", controllerRef.Kind, controllerRef.Name)
}
// describeVolumes writes one entry per pod volume, dispatching to the
// type-specific print helper for whichever single source field is set.
// space is an optional prefix used when the section is nested.
func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) {
	// len() of a nil slice is 0, so the former `volumes == nil ||` was redundant.
	if len(volumes) == 0 {
		w.Write(LEVEL_0, "%sVolumes:\t<none>\n", space)
		return
	}
	w.Write(LEVEL_0, "%sVolumes:\n", space)
	for _, volume := range volumes {
		nameIndent := ""
		if len(space) > 0 {
			nameIndent = " "
		}
		w.Write(LEVEL_1, "%s%v:\n", nameIndent, volume.Name)
		// Exactly one of the VolumeSource fields is expected to be non-nil.
		switch {
		case volume.VolumeSource.HostPath != nil:
			printHostPathVolumeSource(volume.VolumeSource.HostPath, w)
		case volume.VolumeSource.EmptyDir != nil:
			printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, w)
		case volume.VolumeSource.GCEPersistentDisk != nil:
			printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, w)
		case volume.VolumeSource.AWSElasticBlockStore != nil:
			printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, w)
		case volume.VolumeSource.GitRepo != nil:
			printGitRepoVolumeSource(volume.VolumeSource.GitRepo, w)
		case volume.VolumeSource.Secret != nil:
			printSecretVolumeSource(volume.VolumeSource.Secret, w)
		case volume.VolumeSource.ConfigMap != nil:
			printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, w)
		case volume.VolumeSource.NFS != nil:
			printNFSVolumeSource(volume.VolumeSource.NFS, w)
		case volume.VolumeSource.ISCSI != nil:
			printISCSIVolumeSource(volume.VolumeSource.ISCSI, w)
		case volume.VolumeSource.Glusterfs != nil:
			printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, w)
		case volume.VolumeSource.PersistentVolumeClaim != nil:
			printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, w)
		case volume.VolumeSource.RBD != nil:
			printRBDVolumeSource(volume.VolumeSource.RBD, w)
		case volume.VolumeSource.Quobyte != nil:
			printQuobyteVolumeSource(volume.VolumeSource.Quobyte, w)
		case volume.VolumeSource.DownwardAPI != nil:
			printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, w)
		case volume.VolumeSource.AzureDisk != nil:
			printAzureDiskVolumeSource(volume.VolumeSource.AzureDisk, w)
		case volume.VolumeSource.VsphereVolume != nil:
			printVsphereVolumeSource(volume.VolumeSource.VsphereVolume, w)
		case volume.VolumeSource.Cinder != nil:
			printCinderVolumeSource(volume.VolumeSource.Cinder, w)
		case volume.VolumeSource.PhotonPersistentDisk != nil:
			printPhotonPersistentDiskVolumeSource(volume.VolumeSource.PhotonPersistentDisk, w)
		case volume.VolumeSource.PortworxVolume != nil:
			printPortworxVolumeSource(volume.VolumeSource.PortworxVolume, w)
		case volume.VolumeSource.ScaleIO != nil:
			printScaleIOVolumeSource(volume.VolumeSource.ScaleIO, w)
		case volume.VolumeSource.CephFS != nil:
			printCephFSVolumeSource(volume.VolumeSource.CephFS, w)
		case volume.VolumeSource.StorageOS != nil:
			printStorageOSVolumeSource(volume.VolumeSource.StorageOS, w)
		case volume.VolumeSource.FC != nil:
			printFCVolumeSource(volume.VolumeSource.FC, w)
		case volume.VolumeSource.AzureFile != nil:
			printAzureFileVolumeSource(volume.VolumeSource.AzureFile, w)
		case volume.VolumeSource.FlexVolume != nil:
			printFlexVolumeSource(volume.VolumeSource.FlexVolume, w)
		case volume.VolumeSource.Flocker != nil:
			printFlockerVolumeSource(volume.VolumeSource.Flocker, w)
		default:
			w.Write(LEVEL_1, "<unknown>\n")
		}
	}
}
// printHostPathVolumeSource writes the HostPath volume details at LEVEL_2.
func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, w PrefixWriter) {
	// Type is an optional pointer; show "<none>" when it was never set.
	hostPathType := "<none>"
	if hostPath.Type != nil {
		hostPathType = string(*hostPath.Type)
	}
	// A single Write keeps the hand-indented continuation lines aligned.
	const tmpl = "Type:\tHostPath (bare host directory volume)\n" +
		"    Path:\t%v\n" +
		"    HostPathType:\t%v\n"
	w.Write(LEVEL_2, tmpl, hostPath.Path, hostPathType)
}
// printEmptyDirVolumeSource writes the EmptyDir volume details at LEVEL_2.
func printEmptyDirVolumeSource(emptyDir *api.EmptyDirVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n" +
		"    Medium:\t%v\n"
	w.Write(LEVEL_2, tmpl, emptyDir.Medium)
}
// printGCEPersistentDiskVolumeSource writes the GCE PD volume details at LEVEL_2.
func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n" +
		"    PDName:\t%v\n" +
		"    FSType:\t%v\n" +
		"    Partition:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly)
}
// printAWSElasticBlockStoreVolumeSource writes the AWS EBS volume details at LEVEL_2.
func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n" +
		"    VolumeID:\t%v\n" +
		"    FSType:\t%v\n" +
		"    Partition:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly)
}
// printGitRepoVolumeSource writes the GitRepo volume details at LEVEL_2.
func printGitRepoVolumeSource(git *api.GitRepoVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n" +
		"    Repository:\t%v\n" +
		"    Revision:\t%v\n"
	w.Write(LEVEL_2, tmpl, git.Repository, git.Revision)
}
// printSecretVolumeSource writes the Secret volume details at LEVEL_2.
func printSecretVolumeSource(secret *api.SecretVolumeSource, w PrefixWriter) {
	// Optional is a *bool; treat nil as "not optional".
	isOptional := secret.Optional != nil && *secret.Optional
	const tmpl = "Type:\tSecret (a volume populated by a Secret)\n" +
		"    SecretName:\t%v\n" +
		"    Optional:\t%v\n"
	w.Write(LEVEL_2, tmpl, secret.SecretName, isOptional)
}
// printConfigMapVolumeSource writes the ConfigMap volume details at LEVEL_2.
func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, w PrefixWriter) {
	// Optional is a *bool; treat nil as "not optional".
	isOptional := configMap.Optional != nil && *configMap.Optional
	const tmpl = "Type:\tConfigMap (a volume populated by a ConfigMap)\n" +
		"    Name:\t%v\n" +
		"    Optional:\t%v\n"
	w.Write(LEVEL_2, tmpl, configMap.Name, isOptional)
}
// printNFSVolumeSource writes the NFS volume details at LEVEL_2.
func printNFSVolumeSource(nfs *api.NFSVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n" +
		"    Server:\t%v\n" +
		"    Path:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, nfs.Server, nfs.Path, nfs.ReadOnly)
}
// printQuobyteVolumeSource writes the Quobyte volume details at LEVEL_2.
func printQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n" +
		"    Registry:\t%v\n" +
		"    Volume:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, quobyte.Registry, quobyte.Volume, quobyte.ReadOnly)
}
// printPortworxVolumeSource writes the Portworx volume details at LEVEL_2.
func printPortworxVolumeSource(pwxVolume *api.PortworxVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tPortworxVolume (a Portworx Volume resource)\n" +
		"    VolumeID:\t%v\n"
	w.Write(LEVEL_2, tmpl, pwxVolume.VolumeID)
}
// printISCSIVolumeSource writes the iSCSI volume details at LEVEL_2.
func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, w PrefixWriter) {
	// InitiatorName is optional; show "<none>" when unset.
	initiator := "<none>"
	if iscsi.InitiatorName != nil {
		initiator = *iscsi.InitiatorName
	}
	// Fixed: the ISCSIInterface label was missing the trailing colon that
	// every other field label carries.
	w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+
		"    TargetPortal:\t%v\n"+
		"    IQN:\t%v\n"+
		"    Lun:\t%v\n"+
		"    ISCSIInterface:\t%v\n"+
		"    FSType:\t%v\n"+
		"    ReadOnly:\t%v\n"+
		"    Portals:\t%v\n"+
		"    DiscoveryCHAPAuth:\t%v\n"+
		"    SessionCHAPAuth:\t%v\n"+
		"    SecretRef:\t%v\n"+
		"    InitiatorName:\t%v\n",
		iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator)
}
// printISCSIPersistentVolumeSource writes the iSCSI PV details at LEVEL_2.
func printISCSIPersistentVolumeSource(iscsi *api.ISCSIPersistentVolumeSource, w PrefixWriter) {
	// InitiatorName is optional; show "<none>" when unset.
	initiatorName := "<none>"
	if iscsi.InitiatorName != nil {
		initiatorName = *iscsi.InitiatorName
	}
	// Fixed: the ISCSIInterface label was missing the trailing colon that
	// every other field label carries.
	w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+
		"    TargetPortal:\t%v\n"+
		"    IQN:\t%v\n"+
		"    Lun:\t%v\n"+
		"    ISCSIInterface:\t%v\n"+
		"    FSType:\t%v\n"+
		"    ReadOnly:\t%v\n"+
		"    Portals:\t%v\n"+
		"    DiscoveryCHAPAuth:\t%v\n"+
		"    SessionCHAPAuth:\t%v\n"+
		"    SecretRef:\t%v\n"+
		"    InitiatorName:\t%v\n",
		iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName)
}
// printGlusterfsVolumeSource writes the Glusterfs volume details at LEVEL_2.
func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n" +
		"    EndpointsName:\t%v\n" +
		"    Path:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly)
}
// printPersistentVolumeClaimVolumeSource writes the PVC reference details at LEVEL_2.
func printPersistentVolumeClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n" +
		"    ClaimName:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, claim.ClaimName, claim.ReadOnly)
}
// printRBDVolumeSource writes the RBD volume details at LEVEL_2.
func printRBDVolumeSource(rbd *api.RBDVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n" +
		"    CephMonitors:\t%v\n" +
		"    RBDImage:\t%v\n" +
		"    FSType:\t%v\n" +
		"    RBDPool:\t%v\n" +
		"    RadosUser:\t%v\n" +
		"    Keyring:\t%v\n" +
		"    SecretRef:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}
// printRBDPersistentVolumeSource writes the RBD PV details at LEVEL_2.
func printRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n" +
		"    CephMonitors:\t%v\n" +
		"    RBDImage:\t%v\n" +
		"    FSType:\t%v\n" +
		"    RBDPool:\t%v\n" +
		"    RadosUser:\t%v\n" +
		"    Keyring:\t%v\n" +
		"    SecretRef:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly)
}
// printDownwardAPIVolumeSource writes the DownwardAPI volume items at LEVEL_2/LEVEL_3.
func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, w PrefixWriter) {
	w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n    Items:\n")
	// Each item maps either a field reference or a resource reference to a file path.
	for _, item := range d.Items {
		if fr := item.FieldRef; fr != nil {
			w.Write(LEVEL_3, "%v -> %v\n", fr.FieldPath, item.Path)
		}
		if rr := item.ResourceFieldRef; rr != nil {
			w.Write(LEVEL_3, "%v -> %v\n", rr.Resource, item.Path)
		}
	}
}
// printAzureDiskVolumeSource writes the AzureDisk volume details at LEVEL_2.
// Kind, FSType, CachingMode and ReadOnly are pointer fields that are normally
// populated by API defaulting; the previous code dereferenced them
// unconditionally and would panic on an object that skipped defaulting.
func printAzureDiskVolumeSource(d *api.AzureDiskVolumeSource, w PrefixWriter) {
	kind := "<unset>"
	if d.Kind != nil {
		kind = string(*d.Kind)
	}
	fsType := "<unset>"
	if d.FSType != nil {
		fsType = *d.FSType
	}
	cachingMode := "<unset>"
	if d.CachingMode != nil {
		cachingMode = string(*d.CachingMode)
	}
	readOnly := false
	if d.ReadOnly != nil {
		readOnly = *d.ReadOnly
	}
	w.Write(LEVEL_2, "Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+
		"    DiskName:\t%v\n"+
		"    DiskURI:\t%v\n"+
		"    Kind: \t%v\n"+
		"    FSType:\t%v\n"+
		"    CachingMode:\t%v\n"+
		"    ReadOnly:\t%v\n",
		d.DiskName, d.DataDiskURI, kind, fsType, cachingMode, readOnly)
}
// printVsphereVolumeSource writes the vSphere volume details at LEVEL_2.
func printVsphereVolumeSource(vsphere *api.VsphereVirtualDiskVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n" +
		"    VolumePath:\t%v\n" +
		"    FSType:\t%v\n" +
		"    StoragePolicyName:\t%v\n"
	w.Write(LEVEL_2, tmpl, vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName)
}
// printPhotonPersistentDiskVolumeSource writes the Photon PD details at LEVEL_2.
func printPhotonPersistentDiskVolumeSource(photon *api.PhotonPersistentDiskVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n" +
		"    PdID:\t%v\n" +
		"    FSType:\t%v\n"
	w.Write(LEVEL_2, tmpl, photon.PdID, photon.FSType)
}
// printCinderVolumeSource writes the Cinder volume details at LEVEL_2.
func printCinderVolumeSource(cinder *api.CinderVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tCinder (a Persistent Disk resource in OpenStack)\n" +
		"    VolumeID:\t%v\n" +
		"    FSType:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, cinder.VolumeID, cinder.FSType, cinder.ReadOnly)
}
// printScaleIOVolumeSource writes the ScaleIO volume details at LEVEL_2.
func printScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n" +
		"    Gateway:\t%v\n" +
		"    System:\t%v\n" +
		"    Protection Domain:\t%v\n" +
		"    Storage Pool:\t%v\n" +
		"    Storage Mode:\t%v\n" +
		"    VolumeName:\t%v\n" +
		"    FSType:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly)
}
// printScaleIOPersistentVolumeSource writes the ScaleIO PV details at LEVEL_2.
func printScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, w PrefixWriter) {
	// SecretRef is optional; leave name/namespace empty when absent.
	var secretNS, secretName string
	if ref := sio.SecretRef; ref != nil {
		secretName = ref.Name
		secretNS = ref.Namespace
	}
	const tmpl = "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n" +
		"    Gateway:\t%v\n" +
		"    System:\t%v\n" +
		"    Protection Domain:\t%v\n" +
		"    Storage Pool:\t%v\n" +
		"    Storage Mode:\t%v\n" +
		"    VolumeName:\t%v\n" +
		"    SecretName:\t%v\n" +
		"    SecretNamespace:\t%v\n" +
		"    FSType:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNS, sio.FSType, sio.ReadOnly)
}
// printLocalVolumeSource writes the local volume details at LEVEL_2.
func printLocalVolumeSource(ls *api.LocalVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n" +
		"    Path:\t%v\n"
	w.Write(LEVEL_2, tmpl, ls.Path)
}
// printCephFSVolumeSource writes the CephFS volume details at LEVEL_2.
func printCephFSVolumeSource(cephfs *api.CephFSVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n" +
		"    Monitors:\t%v\n" +
		"    Path:\t%v\n" +
		"    User:\t%v\n" +
		"    SecretFile:\t%v\n" +
		"    SecretRef:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly)
}
// printCephFSPersistentVolumeSource writes the CephFS PV details at LEVEL_2.
func printCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n" +
		"    Monitors:\t%v\n" +
		"    Path:\t%v\n" +
		"    User:\t%v\n" +
		"    SecretFile:\t%v\n" +
		"    SecretRef:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly)
}
// printStorageOSVolumeSource writes the StorageOS volume details at LEVEL_2.
func printStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n" +
		"    VolumeName:\t%v\n" +
		"    VolumeNamespace:\t%v\n" +
		"    FSType:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly)
}
// printStorageOSPersistentVolumeSource writes the StorageOS PV details at LEVEL_2.
func printStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n" +
		"    VolumeName:\t%v\n" +
		"    VolumeNamespace:\t%v\n" +
		"    FSType:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly)
}
// printFCVolumeSource writes the Fibre Channel volume details at LEVEL_2.
func printFCVolumeSource(fc *api.FCVolumeSource, w PrefixWriter) {
	// Lun is an optional *int32; show "<none>" when it was never set.
	lun := "<none>"
	if fc.Lun != nil {
		lun = strconv.Itoa(int(*fc.Lun))
	}
	const tmpl = "Type:\tFC (a Fibre Channel disk)\n" +
		"    TargetWWNs:\t%v\n" +
		"    LUN:\t%v\n" +
		"    FSType:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, strings.Join(fc.TargetWWNs, ", "), lun, fc.FSType, fc.ReadOnly)
}
// printAzureFileVolumeSource writes the AzureFile volume details at LEVEL_2.
func printAzureFileVolumeSource(azureFile *api.AzureFileVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n" +
		"    SecretName:\t%v\n" +
		"    ShareName:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, azureFile.SecretName, azureFile.ShareName, azureFile.ReadOnly)
}
// printAzureFilePersistentVolumeSource writes the AzureFile PV details at LEVEL_2.
func printAzureFilePersistentVolumeSource(azureFile *api.AzureFilePersistentVolumeSource, w PrefixWriter) {
	// SecretNamespace is optional; render the empty string when unset.
	ns := ""
	if azureFile.SecretNamespace != nil {
		ns = *azureFile.SecretNamespace
	}
	const tmpl = "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n" +
		"    SecretName:\t%v\n" +
		"    SecretNamespace:\t%v\n" +
		"    ShareName:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, azureFile.SecretName, ns, azureFile.ShareName, azureFile.ReadOnly)
}
// printFlexPersistentVolumeSource writes the FlexVolume PV details at LEVEL_2.
func printFlexPersistentVolumeSource(flex *api.FlexPersistentVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n" +
		"    Driver:\t%v\n" +
		"    FSType:\t%v\n" +
		"    SecretRef:\t%v\n" +
		"    ReadOnly:\t%v\n" +
		"    Options:\t%v\n"
	w.Write(LEVEL_2, tmpl, flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options)
}
// printFlexVolumeSource writes the FlexVolume details at LEVEL_2.
func printFlexVolumeSource(flex *api.FlexVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n" +
		"    Driver:\t%v\n" +
		"    FSType:\t%v\n" +
		"    SecretRef:\t%v\n" +
		"    ReadOnly:\t%v\n" +
		"    Options:\t%v\n"
	w.Write(LEVEL_2, tmpl, flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options)
}
// printFlockerVolumeSource writes the Flocker volume details at LEVEL_2.
func printFlockerVolumeSource(flocker *api.FlockerVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tFlocker (a Flocker volume mounted by the Flocker agent)\n" +
		"    DatasetName:\t%v\n" +
		"    DatasetUUID:\t%v\n"
	w.Write(LEVEL_2, tmpl, flocker.DatasetName, flocker.DatasetUUID)
}
// printCSIPersistentVolumeSource writes the CSI PV details at LEVEL_2.
func printCSIPersistentVolumeSource(csi *api.CSIPersistentVolumeSource, w PrefixWriter) {
	const tmpl = "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n" +
		"    Driver:\t%v\n" +
		"    VolumeHandle:\t%v\n" +
		"    ReadOnly:\t%v\n"
	w.Write(LEVEL_2, tmpl, csi.Driver, csi.VolumeHandle, csi.ReadOnly)
}
// PersistentVolumeDescriber generates information about a persistent volume.
type PersistentVolumeDescriber struct {
	// Embedded client used to fetch the PV and its related events.
	clientset.Interface
}
// Describe returns a human-readable description of the named persistent
// volume, including its events when describerSettings.ShowEvents is set.
func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	pv, err := d.Core().PersistentVolumes().Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are non-fatal; describe without events.
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, pv)
	}
	return describePersistentVolume(pv, events)
}
// printVolumeNodeAffinity writes the PV's node-affinity constraints, or
// "<none>" when no required terms are configured.
func printVolumeNodeAffinity(w PrefixWriter, affinity *api.VolumeNodeAffinity) {
	w.Write(LEVEL_0, "Node Affinity:\t")
	if affinity == nil || affinity.Required == nil {
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")
	// affinity.Required is guaranteed non-nil past the guard above, so the
	// former `if affinity.Required != nil` re-check was dead and is removed.
	w.Write(LEVEL_1, "Required Terms:\t")
	if len(affinity.Required.NodeSelectorTerms) == 0 {
		w.WriteLine("<none>")
		return
	}
	w.WriteLine("")
	for i, term := range affinity.Required.NodeSelectorTerms {
		printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions)
	}
}
// printNodeSelectorTermsMultilineWithIndent prints node-selector requirements
// one per line ("key operator [values]"), aligned under title with the given
// inner indent. (The previous comment was a stale copy-paste describing
// printLabelsMultiline.)
func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []api.NodeSelectorRequirement) {
	w.Write(indentLevel, "%s:%s", title, innerIndent)
	if len(reqs) == 0 {
		w.WriteLine("<none>")
		return
	}
	for i, req := range reqs {
		// Re-emit the indent for every requirement after the first so they
		// line up under the title.
		if i != 0 {
			w.Write(indentLevel, "%s", innerIndent)
		}
		exprStr := fmt.Sprintf("%s %s", req.Key, strings.ToLower(string(req.Operator)))
		if len(req.Values) > 0 {
			exprStr = fmt.Sprintf("%s [%s]", exprStr, strings.Join(req.Values, ", "))
		}
		w.Write(LEVEL_0, "%s\n", exprStr)
	}
}
// describePersistentVolume renders the detailed, tab-aligned description of a
// persistent volume. events may be nil, in which case that section is omitted.
func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pv.Name)
		printLabelsMultiline(w, "Labels", pv.Labels)
		printAnnotationsMultiline(w, "Annotations", pv.Annotations)
		w.Write(LEVEL_0, "Finalizers:\t%v\n", pv.ObjectMeta.Finalizers)
		w.Write(LEVEL_0, "StorageClass:\t%s\n", helper.GetPersistentVolumeClass(pv))
		// A set DeletionTimestamp means the PV is being torn down.
		if pv.ObjectMeta.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestamp(*pv.ObjectMeta.DeletionTimestamp))
		} else {
			w.Write(LEVEL_0, "Status:\t%v\n", pv.Status.Phase)
		}
		if pv.Spec.ClaimRef != nil {
			w.Write(LEVEL_0, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name)
		} else {
			w.Write(LEVEL_0, "Claim:\t%s\n", "")
		}
		w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy)
		w.Write(LEVEL_0, "Access Modes:\t%s\n", helper.GetAccessModesAsString(pv.Spec.AccessModes))
		if pv.Spec.VolumeMode != nil {
			w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode)
		}
		storage := pv.Spec.Capacity[api.ResourceStorage]
		w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String())
		printVolumeNodeAffinity(w, pv.Spec.NodeAffinity)
		w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message)
		w.Write(LEVEL_0, "Source:\n")
		// Exactly one PersistentVolumeSource field is expected to be non-nil.
		switch {
		case pv.Spec.HostPath != nil:
			printHostPathVolumeSource(pv.Spec.HostPath, w)
		case pv.Spec.GCEPersistentDisk != nil:
			printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, w)
		case pv.Spec.AWSElasticBlockStore != nil:
			printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, w)
		case pv.Spec.NFS != nil:
			printNFSVolumeSource(pv.Spec.NFS, w)
		case pv.Spec.ISCSI != nil:
			printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w)
		case pv.Spec.Glusterfs != nil:
			printGlusterfsVolumeSource(pv.Spec.Glusterfs, w)
		case pv.Spec.RBD != nil:
			printRBDPersistentVolumeSource(pv.Spec.RBD, w)
		case pv.Spec.Quobyte != nil:
			printQuobyteVolumeSource(pv.Spec.Quobyte, w)
		case pv.Spec.VsphereVolume != nil:
			printVsphereVolumeSource(pv.Spec.VsphereVolume, w)
		case pv.Spec.Cinder != nil:
			printCinderVolumeSource(pv.Spec.Cinder, w)
		case pv.Spec.AzureDisk != nil:
			printAzureDiskVolumeSource(pv.Spec.AzureDisk, w)
		case pv.Spec.PhotonPersistentDisk != nil:
			printPhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, w)
		case pv.Spec.PortworxVolume != nil:
			printPortworxVolumeSource(pv.Spec.PortworxVolume, w)
		case pv.Spec.ScaleIO != nil:
			printScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, w)
		case pv.Spec.Local != nil:
			printLocalVolumeSource(pv.Spec.Local, w)
		case pv.Spec.CephFS != nil:
			printCephFSPersistentVolumeSource(pv.Spec.CephFS, w)
		case pv.Spec.StorageOS != nil:
			printStorageOSPersistentVolumeSource(pv.Spec.StorageOS, w)
		case pv.Spec.FC != nil:
			printFCVolumeSource(pv.Spec.FC, w)
		case pv.Spec.AzureFile != nil:
			printAzureFilePersistentVolumeSource(pv.Spec.AzureFile, w)
		case pv.Spec.FlexVolume != nil:
			printFlexPersistentVolumeSource(pv.Spec.FlexVolume, w)
		case pv.Spec.Flocker != nil:
			printFlockerVolumeSource(pv.Spec.Flocker, w)
		case pv.Spec.CSI != nil:
			printCSIPersistentVolumeSource(pv.Spec.CSI, w)
		default:
			w.Write(LEVEL_1, "<unknown>\n")
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// PersistentVolumeClaimDescriber generates information about a persistent volume claim.
type PersistentVolumeClaimDescriber struct {
	// Embedded client used to fetch the PVC and its related events.
	clientset.Interface
}
// Describe returns a human-readable description of the named persistent
// volume claim, including its events when describerSettings.ShowEvents is set.
func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	c := d.Core().PersistentVolumeClaims(namespace)
	pvc, err := c.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Honor ShowEvents like the other describers instead of unconditionally
	// fetching events; describePersistentVolumeClaim handles a nil list.
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, pvc)
	}
	return describePersistentVolumeClaim(pvc, events)
}
// describePersistentVolumeClaim renders the detailed, tab-aligned description
// of a PVC. events may be nil, in which case that section is omitted.
func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace)
		w.Write(LEVEL_0, "StorageClass:\t%s\n", helper.GetPersistentVolumeClaimClass(pvc))
		// A set DeletionTimestamp means the claim is being torn down.
		if pvc.ObjectMeta.DeletionTimestamp != nil {
			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestamp(*pvc.ObjectMeta.DeletionTimestamp))
		} else {
			w.Write(LEVEL_0, "Status:\t%v\n", pvc.Status.Phase)
		}
		w.Write(LEVEL_0, "Volume:\t%s\n", pvc.Spec.VolumeName)
		printLabelsMultiline(w, "Labels", pvc.Labels)
		printAnnotationsMultiline(w, "Annotations", pvc.Annotations)
		w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers)
		storage := pvc.Spec.Resources.Requests[api.ResourceStorage]
		capacity := ""
		accessModes := ""
		// Only a bound claim (VolumeName set) has authoritative status
		// capacity/access modes; otherwise both render empty.
		if pvc.Spec.VolumeName != "" {
			accessModes = helper.GetAccessModesAsString(pvc.Status.AccessModes)
			storage = pvc.Status.Capacity[api.ResourceStorage]
			capacity = storage.String()
		}
		w.Write(LEVEL_0, "Capacity:\t%s\n", capacity)
		w.Write(LEVEL_0, "Access Modes:\t%s\n", accessModes)
		if pvc.Spec.VolumeMode != nil {
			w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pvc.Spec.VolumeMode)
		}
		if len(pvc.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n")
			w.Write(LEVEL_1, "Type\tStatus\tLastProbeTime\tLastTransitionTime\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n")
			for _, c := range pvc.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n",
					c.Type,
					c.Status,
					c.LastProbeTime.Time.Format(time.RFC1123Z),
					c.LastTransitionTime.Time.Format(time.RFC1123Z),
					c.Reason,
					c.Message)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeContainers writes one description section per container under the
// given label heading, pairing each spec with its runtime status (if any).
func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus,
	resolverFn EnvVarResolverFunc, w PrefixWriter, space string) {
	// Index the runtime statuses by container name for O(1) lookup.
	byName := map[string]api.ContainerStatus{}
	for _, cs := range containerStatuses {
		byName[cs.Name] = cs
	}
	describeContainersLabel(containers, label, space, w)
	for _, c := range containers {
		cs, hasStatus := byName[c.Name]
		describeContainerBasicInfo(c, cs, hasStatus, space, w)
		describeContainerCommand(c, w)
		// State only exists once the kubelet has reported a status.
		if hasStatus {
			describeContainerState(cs, w)
		}
		describeContainerResource(c, w)
		describeContainerProbe(c, w)
		if len(c.EnvFrom) > 0 {
			describeContainerEnvFrom(c, resolverFn, w)
		}
		describeContainerEnvVars(c, resolverFn, w)
		describeContainerVolumes(c, w)
	}
}
// describeContainersLabel writes the section heading, appending " <none>"
// when there are no containers to list.
func describeContainersLabel(containers []api.Container, label, space string, w PrefixWriter) {
	suffix := ""
	if len(containers) == 0 {
		suffix = " <none>"
	}
	w.Write(LEVEL_0, "%s%s:%s\n", space, label, suffix)
}
// describeContainerBasicInfo writes name, IDs, image and port information for
// one container. ok reports whether a runtime status is available.
func describeContainerBasicInfo(container api.Container, status api.ContainerStatus, ok bool, space string, w PrefixWriter) {
	// Nested sections (e.g. init containers) indent the name a little more.
	indent := ""
	if len(space) > 0 {
		indent = " "
	}
	w.Write(LEVEL_1, "%s%v:\n", indent, container.Name)
	if ok {
		w.Write(LEVEL_2, "Container ID:\t%s\n", status.ContainerID)
	}
	w.Write(LEVEL_2, "Image:\t%s\n", container.Image)
	if ok {
		w.Write(LEVEL_2, "Image ID:\t%s\n", status.ImageID)
	}
	// A comma in the joined string means multiple ports, so use the plural
	// label; a single (or empty) list uses the singular label with <none>.
	writePorts := func(plural, singular, joined string) {
		if strings.Contains(joined, ",") {
			w.Write(LEVEL_2, "%s:\t%s\n", plural, joined)
		} else {
			w.Write(LEVEL_2, "%s:\t%s\n", singular, stringOrNone(joined))
		}
	}
	writePorts("Ports", "Port", describeContainerPorts(container.Ports))
	writePorts("Host Ports", "Host Port", describeContainerHostPorts(container.Ports))
}
// describeContainerPorts renders container ports as a comma-separated
// "port/protocol" list.
func describeContainerPorts(cPorts []api.ContainerPort) string {
	parts := make([]string, 0, len(cPorts))
	for _, port := range cPorts {
		parts = append(parts, fmt.Sprintf("%d/%s", port.ContainerPort, port.Protocol))
	}
	return strings.Join(parts, ", ")
}
// describeContainerHostPorts renders host ports as a comma-separated
// "port/protocol" list.
func describeContainerHostPorts(cPorts []api.ContainerPort) string {
	parts := make([]string, 0, len(cPorts))
	for _, port := range cPorts {
		parts = append(parts, fmt.Sprintf("%d/%s", port.HostPort, port.Protocol))
	}
	return strings.Join(parts, ", ")
}
// describeContainerCommand writes the container's entrypoint command and
// arguments, one item per line, omitting empty sections.
func describeContainerCommand(container api.Container, w PrefixWriter) {
	writeList := func(header string, items []string) {
		if len(items) == 0 {
			return
		}
		w.Write(LEVEL_2, "%s:\n", header)
		for _, item := range items {
			w.Write(LEVEL_3, "%s\n", item)
		}
	}
	writeList("Command", container.Command)
	writeList("Args", container.Args)
}
// describeContainerResource writes the container's resource limits and
// requests, each resource on its own line in sorted order.
func describeContainerResource(container api.Container, w PrefixWriter) {
	writeSection := func(title string, list api.ResourceList) {
		// Headings are suppressed for empty sections.
		if len(list) > 0 {
			w.Write(LEVEL_2, "%s:\n", title)
		}
		for _, name := range SortedResourceNames(list) {
			quantity := list[name]
			w.Write(LEVEL_3, "%s:\t%s\n", name, quantity.String())
		}
	}
	writeSection("Limits", container.Resources.Limits)
	writeSection("Requests", container.Resources.Requests)
}
// describeContainerState writes the container's current state, its previous
// terminated state when there is one, readiness, and restart count.
func describeContainerState(status api.ContainerStatus, w PrefixWriter) {
	describeStatus("State", status.State, w)
	if terminated := status.LastTerminationState.Terminated; terminated != nil {
		describeStatus("Last State", status.LastTerminationState, w)
	}
	w.Write(LEVEL_2, "Ready:\t%v\n", printBool(status.Ready))
	w.Write(LEVEL_2, "Restart Count:\t%d\n", status.RestartCount)
}
// describeContainerProbe writes one line per configured probe.
func describeContainerProbe(container api.Container, w PrefixWriter) {
	probes := []struct {
		label string
		probe *api.Probe
	}{
		{"Liveness", container.LivenessProbe},
		{"Readiness", container.ReadinessProbe},
	}
	for _, entry := range probes {
		if entry.probe != nil {
			w.Write(LEVEL_2, "%s:\t%s\n", entry.label, DescribeProbe(entry.probe))
		}
	}
}
// describeContainerVolumes writes the container's volume mounts (with their
// ro/rw and subPath flags) followed by its raw block volume devices.
func describeContainerVolumes(container api.Container, w PrefixWriter) {
	// Show volumeMounts
	none := ""
	if len(container.VolumeMounts) == 0 {
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Mounts:%s\n", none)
	sort.Sort(SortableVolumeMounts(container.VolumeMounts))
	for _, mount := range container.VolumeMounts {
		flags := []string{}
		// The previous switch made the SubPath case unreachable: the
		// ReadOnly/!ReadOnly cases together matched every mount, so
		// "path=..." was never emitted. Independent checks let both the
		// access flag and the subPath flag appear.
		if mount.ReadOnly {
			flags = append(flags, "ro")
		} else {
			flags = append(flags, "rw")
		}
		if len(mount.SubPath) > 0 {
			flags = append(flags, fmt.Sprintf("path=%q", mount.SubPath))
		}
		w.Write(LEVEL_3, "%s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ","))
	}
	// Show volumeDevices if exists
	if len(container.VolumeDevices) > 0 {
		w.Write(LEVEL_2, "Devices:%s\n", none)
		sort.Sort(SortableVolumeDevices(container.VolumeDevices))
		for _, device := range container.VolumeDevices {
			w.Write(LEVEL_3, "%s from %s\n", device.DevicePath, device.Name)
		}
	}
}
// describeContainerEnvVars writes one line per environment variable. Literal
// values print directly; ValueFrom references are expanded according to their
// kind (field ref, resource field ref, secret key, or config map key).
func describeContainerEnvVars(container api.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	none := ""
	if len(container.Env) == 0 {
		none = "\t<none>"
	}
	w.Write(LEVEL_2, "Environment:%s\n", none)
	for _, e := range container.Env {
		if e.ValueFrom == nil {
			w.Write(LEVEL_3, "%s:\t%s\n", e.Name, e.Value)
			continue
		}
		switch {
		case e.ValueFrom.FieldRef != nil:
			// resolverFn (when supplied) resolves the downward-API value for
			// display; without it only the reference is shown.
			var valueFrom string
			if resolverFn != nil {
				valueFrom = resolverFn(e)
			}
			w.Write(LEVEL_3, "%s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath)
		case e.ValueFrom.ResourceFieldRef != nil:
			valueFrom, err := resourcehelper.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container)
			// Extraction failures render as an empty value rather than aborting.
			if err != nil {
				valueFrom = ""
			}
			resource := e.ValueFrom.ResourceFieldRef.Resource
			// "0" for a limit means the limit is unset, i.e. bounded only by
			// what the node can allocate.
			if valueFrom == "0" && (resource == "limits.cpu" || resource == "limits.memory") {
				valueFrom = "node allocatable"
			}
			w.Write(LEVEL_3, "%s:\t%s (%s)\n", e.Name, valueFrom, resource)
		case e.ValueFrom.SecretKeyRef != nil:
			optional := e.ValueFrom.SecretKeyRef.Optional != nil && *e.ValueFrom.SecretKeyRef.Optional
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' in secret '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name, optional)
		case e.ValueFrom.ConfigMapKeyRef != nil:
			optional := e.ValueFrom.ConfigMapKeyRef.Optional != nil && *e.ValueFrom.ConfigMapKeyRef.Optional
			w.Write(LEVEL_3, "%s:\t<set to the key '%s' of config map '%s'>\tOptional: %t\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name, optional)
		}
	}
}
// describeContainerEnvFrom prints the container's EnvFrom sources (config maps
// and secrets), including any prefix applied to the injected variable names.
func describeContainerEnvFrom(container api.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
	header := ""
	if len(container.EnvFrom) == 0 {
		header = "\t<none>"
	}
	w.Write(LEVEL_2, "Environment Variables from:%s\n", header)
	for _, src := range container.EnvFrom {
		var from, name string
		var optional bool
		switch {
		case src.ConfigMapRef != nil:
			from = "ConfigMap"
			name = src.ConfigMapRef.Name
			optional = src.ConfigMapRef.Optional != nil && *src.ConfigMapRef.Optional
		case src.SecretRef != nil:
			from = "Secret"
			name = src.SecretRef.Name
			optional = src.SecretRef.Optional != nil && *src.SecretRef.Optional
		}
		if len(src.Prefix) == 0 {
			w.Write(LEVEL_3, "%s\t%s\tOptional: %t\n", name, from, optional)
		} else {
			w.Write(LEVEL_3, "%s\t%s with prefix '%s'\tOptional: %t\n", name, from, src.Prefix, optional)
		}
	}
}
// DescribeProbe is exported for consumers in other API groups that have probes.
// It renders a one-line summary: a handler-specific prefix (exec / http-get /
// tcp-socket) followed by the probe's common timing and threshold attributes.
func DescribeProbe(probe *api.Probe) string {
	attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold)
	switch {
	case probe.Exec != nil:
		return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs)
	case probe.HTTPGet != nil:
		// Named "u" rather than "url" so the local does not shadow the net/url package.
		u := &url.URL{}
		u.Scheme = strings.ToLower(string(probe.HTTPGet.Scheme))
		if len(probe.HTTPGet.Port.String()) > 0 {
			u.Host = net.JoinHostPort(probe.HTTPGet.Host, probe.HTTPGet.Port.String())
		} else {
			u.Host = probe.HTTPGet.Host
		}
		u.Path = probe.HTTPGet.Path
		return fmt.Sprintf("http-get %s %s", u.String(), attrs)
	case probe.TCPSocket != nil:
		return fmt.Sprintf("tcp-socket %s:%s %s", probe.TCPSocket.Host, probe.TCPSocket.Port.String(), attrs)
	}
	// No recognized handler set on the probe.
	return fmt.Sprintf("unknown %s", attrs)
}
// EnvVarResolverFunc resolves an environment variable's ValueFrom reference
// into a displayable string.
type EnvVarResolverFunc func(e api.EnvVar) string

// EnvValueRetriever is exported for use by describers in other packages.
// It returns a resolver that evaluates a field-ref environment variable
// against the given pod; resolution errors yield an empty string.
func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc {
	return func(e api.EnvVar) string {
		internalFieldPath, _, err := legacyscheme.Scheme.ConvertFieldLabel(e.ValueFrom.FieldRef.APIVersion, "Pod", e.ValueFrom.FieldRef.FieldPath, "")
		if err != nil {
			return "" // pod validation should catch this on create
		}
		valueFrom, err := fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
		if err != nil {
			return "" // pod validation should catch this on create
		}
		return valueFrom
	}
}
// describeStatus prints a container state (Running / Waiting / Terminated)
// with its reason, message, exit code, signal, and timestamps as applicable.
// A state with no branch set is reported as Waiting.
func describeStatus(stateName string, state api.ContainerState, w PrefixWriter) {
	if state.Running != nil {
		w.Write(LEVEL_2, "%s:\tRunning\n", stateName)
		w.Write(LEVEL_3, "Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z))
	} else if state.Waiting != nil {
		w.Write(LEVEL_2, "%s:\tWaiting\n", stateName)
		if state.Waiting.Reason != "" {
			w.Write(LEVEL_3, "Reason:\t%s\n", state.Waiting.Reason)
		}
	} else if state.Terminated != nil {
		w.Write(LEVEL_2, "%s:\tTerminated\n", stateName)
		if state.Terminated.Reason != "" {
			w.Write(LEVEL_3, "Reason:\t%s\n", state.Terminated.Reason)
		}
		if state.Terminated.Message != "" {
			w.Write(LEVEL_3, "Message:\t%s\n", state.Terminated.Message)
		}
		w.Write(LEVEL_3, "Exit Code:\t%d\n", state.Terminated.ExitCode)
		if state.Terminated.Signal > 0 {
			w.Write(LEVEL_3, "Signal:\t%d\n", state.Terminated.Signal)
		}
		w.Write(LEVEL_3, "Started:\t%s\n", state.Terminated.StartedAt.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_3, "Finished:\t%s\n", state.Terminated.FinishedAt.Time.Format(time.RFC1123Z))
	} else {
		w.Write(LEVEL_2, "%s:\tWaiting\n", stateName)
	}
}
// describeVolumeClaimTemplates prints the PVC templates of a workload (e.g. a
// StatefulSet): name, storage class, labels/annotations, requested capacity,
// and access modes. Prints "<none>" when no templates exist.
func describeVolumeClaimTemplates(templates []api.PersistentVolumeClaim, w PrefixWriter) {
	if len(templates) == 0 {
		w.Write(LEVEL_0, "Volume Claims:\t<none>\n")
		return
	}
	w.Write(LEVEL_0, "Volume Claims:\n")
	for _, pvc := range templates {
		w.Write(LEVEL_1, "Name:\t%s\n", pvc.Name)
		w.Write(LEVEL_1, "StorageClass:\t%s\n", helper.GetPersistentVolumeClaimClass(&pvc))
		printLabelsMultilineWithIndent(w, "  ", "Labels", "\t", pvc.Labels, sets.NewString())
		printLabelsMultilineWithIndent(w, "  ", "Annotations", "\t", pvc.Annotations, sets.NewString())
		if capacity, ok := pvc.Spec.Resources.Requests[api.ResourceStorage]; ok {
			w.Write(LEVEL_1, "Capacity:\t%s\n", capacity.String())
		} else {
			// No explicit storage request on the template.
			w.Write(LEVEL_1, "Capacity:\t%s\n", "<default>")
		}
		w.Write(LEVEL_1, "Access Modes:\t%s\n", pvc.Spec.AccessModes)
	}
}
// printBoolPtr renders an optional boolean: "<unset>" for nil, otherwise
// the same "True"/"False" text produced by printBool.
func printBoolPtr(value *bool) string {
	if value == nil {
		return "<unset>"
	}
	return printBool(*value)
}
// printBool renders a boolean as the capitalized strings "True" or "False".
func printBool(value bool) string {
	switch {
	case value:
		return "True"
	default:
		return "False"
	}
}
// ReplicationControllerDescriber generates information about a replication controller
// and the pods it has created.
type ReplicationControllerDescriber struct {
	// Embedded clientset gives Describe access to the core API group clients.
	clientset.Interface
}
// Describe fetches the named replication controller, tallies the phases of the
// pods it owns, optionally collects related events, and renders the result.
func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	rc := d.Core().ReplicationControllers(namespace)
	pc := d.Core().Pods(namespace)
	controller, err := rc.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector), controller.UID)
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are ignored; events are best-effort extras.
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, controller)
	}
	return describeReplicationController(controller, events, running, waiting, succeeded, failed)
}
// describeReplicationController renders the textual description of an RC:
// metadata, replica/pod status counts, pod template, conditions, and events.
func describeReplicationController(controller *api.ReplicationController, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", controller.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", controller.Namespace)
		w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector))
		printLabelsMultiline(w, "Labels", controller.Labels)
		printAnnotationsMultiline(w, "Annotations", controller.Annotations)
		w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas)
		w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		DescribePodTemplate(controller.Spec.Template, w)
		if len(controller.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range controller.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// DescribePodTemplate prints a pod template's labels, annotations, service
// account, containers, and volumes. A nil template is reported as "<unset>".
func DescribePodTemplate(template *api.PodTemplateSpec, w PrefixWriter) {
	w.Write(LEVEL_0, "Pod Template:\n")
	if template == nil {
		// Terminate the line: without the "\n" the caller's next Write would be
		// appended to the "<unset>" text on the same line.
		w.Write(LEVEL_1, "<unset>\n")
		return
	}
	printLabelsMultiline(w, "  Labels", template.Labels)
	if len(template.Annotations) > 0 {
		printAnnotationsMultiline(w, "  Annotations", template.Annotations)
	}
	if len(template.Spec.ServiceAccountName) > 0 {
		w.Write(LEVEL_1, "Service Account:\t%s\n", template.Spec.ServiceAccountName)
	}
	if len(template.Spec.InitContainers) > 0 {
		describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, w, "  ")
	}
	describeContainers("Containers", template.Spec.Containers, nil, nil, w, "  ")
	describeVolumes(template.Spec.Volumes, w, "  ")
}
// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created.
type ReplicaSetDescriber struct {
	// Embedded clientset gives Describe access to extensions and core clients.
	clientset.Interface
}
// Describe fetches the named ReplicaSet and renders it. Pod-status lookup
// errors are not fatal here; they are passed through so the description can
// report the failure inline instead of aborting.
func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	rsc := d.Extensions().ReplicaSets(namespace)
	pc := d.Core().Pods(namespace)
	rs, err := rsc.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		return "", err
	}
	// getPodErr is deliberately carried into the output rather than returned.
	running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID)
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, rs)
	}
	return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr)
}
// describeReplicaSet renders the textual description of a ReplicaSet,
// including owner, replica counts, pod status (or the pod-lookup error),
// pod template, conditions, and events.
func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", rs.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", rs.Namespace)
		w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(rs.Spec.Selector))
		printLabelsMultiline(w, "Labels", rs.Labels)
		printAnnotationsMultiline(w, "Annotations", rs.Annotations)
		if controlledBy := printController(rs); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, rs.Spec.Replicas)
		w.Write(LEVEL_0, "Pods Status:\t")
		if getPodErr != nil {
			// The pod count lookup failed; surface that in place of the counts.
			w.Write(LEVEL_0, "error in fetching pods: %s\n", getPodErr)
		} else {
			w.Write(LEVEL_0, "%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		}
		DescribePodTemplate(&rs.Spec.Template, w)
		if len(rs.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n  Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range rs.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// JobDescriber generates information about a job and the pods it has created.
type JobDescriber struct {
	// Embedded clientset gives Describe access to the batch and core clients.
	clientset.Interface
}
// Describe fetches the named Job, optionally collects related events, and
// renders the result.
func (d *JobDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	job, err := d.Batch().Jobs(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are ignored; events are best-effort extras.
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, job)
	}
	return describeJob(job, events)
}
// describeJob renders the textual description of a Job: selector, parallelism,
// completions, timing, pod status counts, pod template, and optional events.
func describeJob(job *batch.Job, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", job.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", job.Namespace)
		selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		printLabelsMultiline(w, "Labels", job.Labels)
		printAnnotationsMultiline(w, "Annotations", job.Annotations)
		if controlledBy := printController(job); len(controlledBy) > 0 {
			w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy)
		}
		// Parallelism is a pointer; guard before dereferencing so a job without
		// the field set does not panic (Completions below gets the same guard).
		if job.Spec.Parallelism != nil {
			w.Write(LEVEL_0, "Parallelism:\t%d\n", *job.Spec.Parallelism)
		} else {
			w.Write(LEVEL_0, "Parallelism:\t<unset>\n")
		}
		if job.Spec.Completions != nil {
			w.Write(LEVEL_0, "Completions:\t%d\n", *job.Spec.Completions)
		} else {
			w.Write(LEVEL_0, "Completions:\t<unset>\n")
		}
		if job.Status.StartTime != nil {
			w.Write(LEVEL_0, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z))
		}
		if job.Spec.ActiveDeadlineSeconds != nil {
			w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds)
		}
		w.Write(LEVEL_0, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed)
		DescribePodTemplate(&job.Spec.Template, w)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// CronJobDescriber generates information about a cron job and the jobs it has created.
type CronJobDescriber struct {
	clientset.Interface
	// external is used to read CronJobs from the batch/v1beta1 API group.
	external externalclient.Interface
}
// Describe fetches the named CronJob via the external (versioned) client,
// converts it to the internal type, and renders it with optional events.
func (d *CronJobDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	cronJob, err := d.external.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, cronJob)
	}
	// The describer below works on the internal representation, so convert.
	internalCronJob := &batch.CronJob{}
	if err := legacyscheme.Scheme.Convert(cronJob, internalCronJob, nil); err != nil {
		return "", err
	}
	return describeCronJob(internalCronJob, events)
}
// describeCronJob renders the textual description of a CronJob: schedule,
// concurrency policy, suspension, deadlines, job template, last schedule time,
// active jobs, and optional events.
func describeCronJob(cronJob *batch.CronJob, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", cronJob.Namespace)
		printLabelsMultiline(w, "Labels", cronJob.Labels)
		printAnnotationsMultiline(w, "Annotations", cronJob.Annotations)
		w.Write(LEVEL_0, "Schedule:\t%s\n", cronJob.Spec.Schedule)
		w.Write(LEVEL_0, "Concurrency Policy:\t%s\n", cronJob.Spec.ConcurrencyPolicy)
		w.Write(LEVEL_0, "Suspend:\t%s\n", printBoolPtr(cronJob.Spec.Suspend))
		if cronJob.Spec.StartingDeadlineSeconds != nil {
			w.Write(LEVEL_0, "Starting Deadline Seconds:\t%ds\n", *cronJob.Spec.StartingDeadlineSeconds)
		} else {
			w.Write(LEVEL_0, "Starting Deadline Seconds:\t<unset>\n")
		}
		describeJobTemplate(cronJob.Spec.JobTemplate, w)
		if cronJob.Status.LastScheduleTime != nil {
			w.Write(LEVEL_0, "Last Schedule Time:\t%s\n", cronJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z))
		} else {
			// The cron job has never fired.
			w.Write(LEVEL_0, "Last Schedule Time:\t<unset>\n")
		}
		printActiveJobs(w, "Active Jobs", cronJob.Status.Active)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeJobTemplate prints the job template embedded in a CronJob: selector,
// parallelism, completions, deadline, and the pod template. Unset optional
// fields are shown as "<unset>".
func describeJobTemplate(jobTemplate batch.JobTemplateSpec, w PrefixWriter) {
	if jobTemplate.Spec.Selector != nil {
		// Selector conversion errors are ignored; a zero selector prints empty.
		selector, _ := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector)
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
	} else {
		w.Write(LEVEL_0, "Selector:\t<unset>\n")
	}
	if jobTemplate.Spec.Parallelism != nil {
		w.Write(LEVEL_0, "Parallelism:\t%d\n", *jobTemplate.Spec.Parallelism)
	} else {
		w.Write(LEVEL_0, "Parallelism:\t<unset>\n")
	}
	if jobTemplate.Spec.Completions != nil {
		w.Write(LEVEL_0, "Completions:\t%d\n", *jobTemplate.Spec.Completions)
	} else {
		w.Write(LEVEL_0, "Completions:\t<unset>\n")
	}
	if jobTemplate.Spec.ActiveDeadlineSeconds != nil {
		w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *jobTemplate.Spec.ActiveDeadlineSeconds)
	}
	DescribePodTemplate(&jobTemplate.Spec.Template, w)
}
// printActiveJobs prints a titled, comma-separated list of job names on one
// line, or "<none>" when the list is empty.
func printActiveJobs(w PrefixWriter, title string, jobs []api.ObjectReference) {
	w.Write(LEVEL_0, "%s:\t", title)
	if len(jobs) == 0 {
		w.WriteLine("<none>")
		return
	}
	names := make([]string, 0, len(jobs))
	for _, job := range jobs {
		names = append(names, job.Name)
	}
	w.Write(LEVEL_0, "%s", strings.Join(names, ", "))
	w.WriteLine("")
}
// DaemonSetDescriber generates information about a daemon set and the pods it has created.
type DaemonSetDescriber struct {
	// Embedded clientset gives Describe access to extensions and core clients.
	clientset.Interface
}
// Describe fetches the named DaemonSet, tallies the phases of the pods it
// owns, optionally collects related events, and renders the result.
func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	dc := d.Extensions().DaemonSets(namespace)
	pc := d.Core().Pods(namespace)
	daemon, err := dc.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
	if err != nil {
		return "", err
	}
	running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID)
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are ignored; events are best-effort extras.
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, daemon)
	}
	return describeDaemonSet(daemon, events, running, waiting, succeeded, failed)
}
// describeDaemonSet renders the textual description of a DaemonSet: selectors,
// scheduling counts from status, pod status tallies, pod template, and events.
func describeDaemonSet(daemon *extensions.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", daemon.Name)
		selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
		if err != nil {
			// this shouldn't happen if LabelSelector passed validation
			return err
		}
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		w.Write(LEVEL_0, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector))
		printLabelsMultiline(w, "Labels", daemon.Labels)
		printAnnotationsMultiline(w, "Annotations", daemon.Annotations)
		w.Write(LEVEL_0, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled)
		w.Write(LEVEL_0, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled)
		w.Write(LEVEL_0, "Number of Nodes Scheduled with Up-to-date Pods: %d\n", daemon.Status.UpdatedNumberScheduled)
		w.Write(LEVEL_0, "Number of Nodes Scheduled with Available Pods: %d\n", daemon.Status.NumberAvailable)
		w.Write(LEVEL_0, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled)
		w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		DescribePodTemplate(&daemon.Spec.Template, w)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// SecretDescriber generates information about a secret
type SecretDescriber struct {
	// Embedded clientset gives Describe access to the core API group clients.
	clientset.Interface
}
// Describe fetches the named secret and renders it. Events are not collected
// for secrets.
func (d *SecretDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	secret, err := d.Core().Secrets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeSecret(secret)
}
// describeSecret renders the textual description of a secret. Data values are
// shown only as byte counts, except the token of a service-account-token
// secret, which is printed verbatim.
func describeSecret(secret *api.Secret) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", secret.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", secret.Namespace)
		printLabelsMultiline(w, "Labels", secret.Labels)
		// The last-applied-config annotation can embed the full secret; hide it.
		skipAnnotations := sets.NewString(api.LastAppliedConfigAnnotation)
		printAnnotationsMultilineWithFilter(w, "Annotations", secret.Annotations, skipAnnotations)
		w.Write(LEVEL_0, "\nType:\t%s\n", secret.Type)
		w.Write(LEVEL_0, "\nData\n====\n")
		for k, v := range secret.Data {
			switch {
			case k == api.ServiceAccountTokenKey && secret.Type == api.SecretTypeServiceAccountToken:
				w.Write(LEVEL_0, "%s:\t%s\n", k, string(v))
			default:
				w.Write(LEVEL_0, "%s:\t%d bytes\n", k, len(v))
			}
		}
		return nil
	})
}
// IngressDescriber generates information about an ingress and its backends.
type IngressDescriber struct {
	// Embedded clientset gives the describer access to extensions and core clients.
	clientset.Interface
}
// Describe fetches the named ingress and delegates rendering to describeIngress.
func (i *IngressDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	ing, err := i.Extensions().Ingresses(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return i.describeIngress(ing, describerSettings)
}
// describeBackend formats the endpoints behind an ingress backend's
// service/port. Lookup errors are deliberately ignored — presumably the typed
// clients return zero-valued objects on failure (TODO confirm), in which case
// the loop below simply finds no matching port name.
func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string {
	endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{})
	service, _ := i.Core().Services(ns).Get(backend.ServiceName, metav1.GetOptions{})
	spName := ""
	// Loop index renamed so it no longer shadows the method receiver "i".
	for portIdx := range service.Spec.Ports {
		sp := &service.Spec.Ports[portIdx]
		switch backend.ServicePort.Type {
		case intstr.String:
			if backend.ServicePort.StrVal == sp.Name {
				spName = sp.Name
			}
		case intstr.Int:
			// IntVal is already int32; the previous int32(...) cast was redundant.
			if backend.ServicePort.IntVal == sp.Port {
				spName = sp.Name
			}
		}
	}
	return formatEndpoints(endpoints, sets.NewString(spName))
}
// describeIngress renders the textual description of an ingress: address,
// default backend, TLS configuration, rules table, annotations, and events.
func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSettings printers.DescriberSettings) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%v\n", ing.Name)
		w.Write(LEVEL_0, "Namespace:\t%v\n", ing.Namespace)
		w.Write(LEVEL_0, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true))
		def := ing.Spec.Backend
		ns := ing.Namespace
		if def == nil {
			// Ingresses that don't specify a default backend inherit the
			// default backend in the kube-system namespace.
			def = &extensions.IngressBackend{
				ServiceName: "default-http-backend",
				ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80},
			}
			ns = metav1.NamespaceSystem
		}
		w.Write(LEVEL_0, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def))
		if len(ing.Spec.TLS) != 0 {
			describeIngressTLS(w, ing.Spec.TLS)
		}
		w.Write(LEVEL_0, "Rules:\n  Host\tPath\tBackends\n")
		w.Write(LEVEL_1, "----\t----\t--------\n")
		// count tracks how many rules actually produced output.
		count := 0
		for _, rules := range ing.Spec.Rules {
			if rules.HTTP == nil {
				continue
			}
			count++
			host := rules.Host
			if len(host) == 0 {
				host = "*"
			}
			w.Write(LEVEL_1, "%s\t\n", host)
			for _, path := range rules.HTTP.Paths {
				w.Write(LEVEL_2, "\t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackend(ns, &path.Backend))
			}
		}
		if count == 0 {
			// No HTTP rules: show a single wildcard row pointing at the default backend.
			w.Write(LEVEL_1, "%s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackend(ns, def))
		}
		describeIngressAnnotations(w, ing.Annotations)
		if describerSettings.ShowEvents {
			events, _ := i.Core().Events(ing.Namespace).Search(legacyscheme.Scheme, ing)
			if events != nil {
				DescribeEvents(events, w)
			}
		}
		return nil
	})
}
// describeIngressTLS prints one line per TLS entry: either the terminating
// secret and its hosts, or an SNI-routing line when no secret is named.
func describeIngressTLS(w PrefixWriter, ingTLS []extensions.IngressTLS) {
	w.Write(LEVEL_0, "TLS:\n")
	for _, entry := range ingTLS {
		hosts := strings.Join(entry.Hosts, ",")
		if entry.SecretName != "" {
			w.Write(LEVEL_1, "%v terminates %v\n", entry.SecretName, hosts)
		} else {
			w.Write(LEVEL_1, "SNI routes %v\n", hosts)
		}
	}
}
// TODO: Move from annotations into Ingress status.
// describeIngressAnnotations prints every annotation as a "key:\tvalue" row.
// Map iteration order is unspecified, so row order may vary between runs.
func describeIngressAnnotations(w PrefixWriter, annotations map[string]string) {
	w.Write(LEVEL_0, "Annotations:\n")
	for key, value := range annotations {
		w.Write(LEVEL_1, "%v:\t%s\n", key, value)
	}
}
// ServiceDescriber generates information about a service.
type ServiceDescriber struct {
	// Embedded clientset gives Describe access to the core API group clients.
	clientset.Interface
}
// Describe fetches the named service together with its same-named endpoints
// object, optionally collects related events, and renders the result.
func (d *ServiceDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	c := d.Core().Services(namespace)
	service, err := c.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Endpoint lookup failures are ignored; describeService tolerates nil.
	endpoints, _ := d.Core().Endpoints(namespace).Get(name, metav1.GetOptions{})
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, service)
	}
	return describeService(service, endpoints, events)
}
// buildIngressString joins load-balancer ingress points into a comma-separated
// list, preferring each entry's IP and falling back to its hostname.
func buildIngressString(ingress []api.LoadBalancerIngress) string {
	parts := make([]string, 0, len(ingress))
	for _, entry := range ingress {
		if entry.IP != "" {
			parts = append(parts, entry.IP)
		} else {
			parts = append(parts, entry.Hostname)
		}
	}
	return strings.Join(parts, ", ")
}
// describeService renders the textual description of a service: type,
// addresses, ports with their endpoints, session affinity, traffic policy,
// and optional events. A nil endpoints object is treated as empty.
func describeService(service *api.Service, endpoints *api.Endpoints, events *api.EventList) (string, error) {
	if endpoints == nil {
		endpoints = &api.Endpoints{}
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", service.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", service.Namespace)
		printLabelsMultiline(w, "Labels", service.Labels)
		printAnnotationsMultiline(w, "Annotations", service.Annotations)
		w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector))
		w.Write(LEVEL_0, "Type:\t%s\n", service.Spec.Type)
		w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.ClusterIP)
		if len(service.Spec.ExternalIPs) > 0 {
			w.Write(LEVEL_0, "External IPs:\t%v\n", strings.Join(service.Spec.ExternalIPs, ","))
		}
		if service.Spec.LoadBalancerIP != "" {
			// Distinct label: the cluster IP above is already printed as "IP",
			// so reusing that label here produced two ambiguous "IP" rows.
			w.Write(LEVEL_0, "LoadBalancer IP:\t%s\n", service.Spec.LoadBalancerIP)
		}
		if service.Spec.ExternalName != "" {
			w.Write(LEVEL_0, "External Name:\t%s\n", service.Spec.ExternalName)
		}
		if len(service.Status.LoadBalancer.Ingress) > 0 {
			list := buildIngressString(service.Status.LoadBalancer.Ingress)
			w.Write(LEVEL_0, "LoadBalancer Ingress:\t%s\n", list)
		}
		for i := range service.Spec.Ports {
			sp := &service.Spec.Ports[i]
			name := sp.Name
			if name == "" {
				name = "<unset>"
			}
			w.Write(LEVEL_0, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol)
			if sp.TargetPort.Type == intstr.Type(intstr.Int) {
				w.Write(LEVEL_0, "TargetPort:\t%d/%s\n", sp.TargetPort.IntVal, sp.Protocol)
			} else {
				w.Write(LEVEL_0, "TargetPort:\t%s/%s\n", sp.TargetPort.StrVal, sp.Protocol)
			}
			if sp.NodePort != 0 {
				w.Write(LEVEL_0, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol)
			}
			w.Write(LEVEL_0, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name)))
		}
		w.Write(LEVEL_0, "Session Affinity:\t%s\n", service.Spec.SessionAffinity)
		if service.Spec.ExternalTrafficPolicy != "" {
			w.Write(LEVEL_0, "External Traffic Policy:\t%s\n", service.Spec.ExternalTrafficPolicy)
		}
		if service.Spec.HealthCheckNodePort != 0 {
			w.Write(LEVEL_0, "HealthCheck NodePort:\t%d\n", service.Spec.HealthCheckNodePort)
		}
		if len(service.Spec.LoadBalancerSourceRanges) > 0 {
			w.Write(LEVEL_0, "LoadBalancer Source Ranges:\t%v\n", strings.Join(service.Spec.LoadBalancerSourceRanges, ","))
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// EndpointsDescriber generates information about an Endpoint.
type EndpointsDescriber struct {
	// Embedded clientset gives Describe access to the core API group clients.
	clientset.Interface
}
// Describe fetches the named endpoints object, optionally collects related
// events, and renders the result.
func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	c := d.Core().Endpoints(namespace)
	ep, err := c.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		// Event lookup failures are ignored; events are best-effort extras.
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ep)
	}
	return describeEndpoints(ep, events)
}
// describeEndpoints renders the textual description of an endpoints object:
// per-subset ready and not-ready addresses plus the port table, then events.
func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", ep.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", ep.Namespace)
		printLabelsMultiline(w, "Labels", ep.Labels)
		printAnnotationsMultiline(w, "Annotations", ep.Annotations)
		w.Write(LEVEL_0, "Subsets:\n")
		for i := range ep.Subsets {
			subset := &ep.Subsets[i]
			// Ready addresses, comma-joined; "<none>" when empty.
			addresses := make([]string, 0, len(subset.Addresses))
			for _, addr := range subset.Addresses {
				addresses = append(addresses, addr.IP)
			}
			addressesString := strings.Join(addresses, ",")
			if len(addressesString) == 0 {
				addressesString = "<none>"
			}
			w.Write(LEVEL_1, "Addresses:\t%s\n", addressesString)
			// Not-ready addresses, same formatting.
			notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses))
			for _, addr := range subset.NotReadyAddresses {
				notReadyAddresses = append(notReadyAddresses, addr.IP)
			}
			notReadyAddressesString := strings.Join(notReadyAddresses, ",")
			if len(notReadyAddressesString) == 0 {
				notReadyAddressesString = "<none>"
			}
			w.Write(LEVEL_1, "NotReadyAddresses:\t%s\n", notReadyAddressesString)
			if len(subset.Ports) > 0 {
				w.Write(LEVEL_1, "Ports:\n")
				w.Write(LEVEL_2, "Name\tPort\tProtocol\n")
				w.Write(LEVEL_2, "----\t----\t--------\n")
				for _, port := range subset.Ports {
					name := port.Name
					if len(name) == 0 {
						name = "<unset>"
					}
					w.Write(LEVEL_2, "%s\t%d\t%s\n", name, port.Port, port.Protocol)
				}
			}
			w.Write(LEVEL_0, "\n")
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// ServiceAccountDescriber generates information about a service account.
type ServiceAccountDescriber struct {
	// Embedded clientset gives Describe access to the core API group clients.
	clientset.Interface
}
// Describe fetches the named service account, collects the token secrets
// bound to it, determines which referenced secrets no longer exist, and
// renders the result with optional events.
func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	c := d.Core().ServiceAccounts(namespace)
	serviceAccount, err := c.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	tokens := []api.Secret{}
	// missingSecrets is the set of all secrets present in the
	// serviceAccount but not present in the set of existing secrets.
	missingSecrets := sets.NewString()
	secrets, err := d.Core().Secrets(namespace).List(metav1.ListOptions{})
	// errors are tolerated here in order to describe the serviceAccount with all
	// of the secrets that it references, even if those secrets cannot be fetched.
	if err == nil {
		// existingSecrets is the set of all secrets remaining on a
		// service account that are not present in the "tokens" slice.
		existingSecrets := sets.NewString()
		for _, s := range secrets.Items {
			if s.Type == api.SecretTypeServiceAccountToken {
				// Distinct names so the "name" function parameter is not shadowed
				// inside this loop (the old code redeclared "name" here).
				saName := s.Annotations[api.ServiceAccountNameKey]
				saUID := s.Annotations[api.ServiceAccountUIDKey]
				if saName == serviceAccount.Name && saUID == string(serviceAccount.UID) {
					tokens = append(tokens, s)
				}
			}
			existingSecrets.Insert(s.Name)
		}
		for _, s := range serviceAccount.Secrets {
			if !existingSecrets.Has(s.Name) {
				missingSecrets.Insert(s.Name)
			}
		}
		for _, s := range serviceAccount.ImagePullSecrets {
			if !existingSecrets.Has(s.Name) {
				missingSecrets.Insert(s.Name)
			}
		}
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, serviceAccount)
	}
	return describeServiceAccount(serviceAccount, tokens, missingSecrets, events)
}
// describeServiceAccount renders the textual description of a service account:
// its image pull secrets, mountable secrets, and tokens, flagging any secret
// name in missingSecrets as "(not found)".
func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", serviceAccount.Namespace)
		printLabelsMultiline(w, "Labels", serviceAccount.Labels)
		printAnnotationsMultiline(w, "Annotations", serviceAccount.Annotations)
		var (
			// Headers are padded to equal width so the tab stops line up; the
			// empty header indents continuation rows under the first.
			emptyHeader = "                   "
			pullHeader  = "Image pull secrets:"
			mountHeader = "Mountable secrets: "
			tokenHeader = "Tokens:            "

			pullSecretNames  = []string{}
			mountSecretNames = []string{}
			tokenSecretNames = []string{}
		)
		for _, s := range serviceAccount.ImagePullSecrets {
			pullSecretNames = append(pullSecretNames, s.Name)
		}
		for _, s := range serviceAccount.Secrets {
			mountSecretNames = append(mountSecretNames, s.Name)
		}
		for _, s := range tokens {
			tokenSecretNames = append(tokenSecretNames, s.Name)
		}
		types := map[string][]string{
			pullHeader:  pullSecretNames,
			mountHeader: mountSecretNames,
			tokenHeader: tokenSecretNames,
		}
		// Iterate headers in sorted order for deterministic output.
		for _, header := range sets.StringKeySet(types).List() {
			names := types[header]
			if len(names) == 0 {
				w.Write(LEVEL_0, "%s\t<none>\n", header)
			} else {
				prefix := header
				for _, name := range names {
					if missingSecrets.Has(name) {
						w.Write(LEVEL_0, "%s\t%s (not found)\n", prefix, name)
					} else {
						w.Write(LEVEL_0, "%s\t%s\n", prefix, name)
					}
					// Only the first row of each section carries the header.
					prefix = emptyHeader
				}
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// RoleDescriber generates information about an RBAC Role.
type RoleDescriber struct {
	// Embedded clientset gives Describe access to the RBAC API group clients.
	clientset.Interface
}
// Describe fetches the named Role, normalizes its policy rules (break apart,
// compact, then sort for stable output), and renders them as a table.
func (d *RoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	role, err := d.Rbac().Roles(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Expand each compound rule into single-verb/resource rules...
	breakdownRules := []rbac.PolicyRule{}
	for _, rule := range role.Rules {
		breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...)
	}
	// ...then re-merge duplicates into the most compact equivalent set.
	compactRules, err := validation.CompactRules(breakdownRules)
	if err != nil {
		return "", err
	}
	sort.Stable(rbac.SortableRuleSlice(compactRules))
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", role.Name)
		printLabelsMultiline(w, "Labels", role.Labels)
		printAnnotationsMultiline(w, "Annotations", role.Annotations)
		w.Write(LEVEL_0, "PolicyRule:\n")
		w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n")
		w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n")
		for _, r := range compactRules {
			w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", combineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs)
		}
		return nil
	})
}
// ClusterRoleDescriber generates information about an RBAC ClusterRole.
type ClusterRoleDescriber struct {
	// Embedded clientset gives Describe access to the RBAC API group clients.
	clientset.Interface
}
// Describe fetches the named ClusterRole, normalizes its policy rules (break
// apart, compact, then sort for stable output), and renders them as a table.
// The namespace argument is unused since cluster roles are not namespaced.
func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	role, err := d.Rbac().ClusterRoles().Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Expand each compound rule into single-verb/resource rules...
	breakdownRules := []rbac.PolicyRule{}
	for _, rule := range role.Rules {
		breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...)
	}
	// ...then re-merge duplicates into the most compact equivalent set.
	compactRules, err := validation.CompactRules(breakdownRules)
	if err != nil {
		return "", err
	}
	sort.Stable(rbac.SortableRuleSlice(compactRules))
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", role.Name)
		printLabelsMultiline(w, "Labels", role.Labels)
		printAnnotationsMultiline(w, "Annotations", role.Annotations)
		w.Write(LEVEL_0, "PolicyRule:\n")
		w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n")
		w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n")
		for _, r := range compactRules {
			w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", combineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs)
		}
		return nil
	})
}
// combineResourceGroup renders the first resource and first API group as
// "resource.group", re-attaching any "/subresource" suffix at the end
// (e.g. "pods/status" + "": "pods/status"; "deployments" + "apps":
// "deployments.apps"). Returns "" when no resources are given.
func combineResourceGroup(resource, group []string) string {
	if len(resource) == 0 {
		return ""
	}
	segments := strings.SplitN(resource[0], "/", 2)
	result := segments[0]
	if len(group) > 0 && group[0] != "" {
		result += "." + group[0]
	}
	if len(segments) == 2 {
		result += "/" + segments[1]
	}
	return result
}
// RoleBindingDescriber generates information about a role binding.
type RoleBindingDescriber struct {
	clientset.Interface
}

// Describe renders a human-readable summary of the named RoleBinding in the
// given namespace: the role it references and a table of bound subjects.
func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	binding, err := d.Rbac().RoleBindings(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", binding.Name)
		printLabelsMultiline(w, "Labels", binding.Labels)
		printAnnotationsMultiline(w, "Annotations", binding.Annotations)
		w.Write(LEVEL_0, "Role:\n")
		w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind)
		w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name)
		w.Write(LEVEL_0, "Subjects:\n")
		w.Write(LEVEL_1, "Kind\tName\tNamespace\n")
		w.Write(LEVEL_1, "----\t----\t---------\n")
		for _, s := range binding.Subjects {
			w.Write(LEVEL_1, "%s\t%s\t%s\n", s.Kind, s.Name, s.Namespace)
		}
		return nil
	})
}
// ClusterRoleBindingDescriber generates information about a cluster role binding.
type ClusterRoleBindingDescriber struct {
	clientset.Interface
}

// Describe renders a human-readable summary of the named ClusterRoleBinding:
// the role it references and a table of bound subjects. The namespace
// argument is ignored because cluster role bindings are not namespaced.
func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	binding, err := d.Rbac().ClusterRoleBindings().Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", binding.Name)
		printLabelsMultiline(w, "Labels", binding.Labels)
		printAnnotationsMultiline(w, "Annotations", binding.Annotations)
		w.Write(LEVEL_0, "Role:\n")
		w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind)
		w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name)
		w.Write(LEVEL_0, "Subjects:\n")
		w.Write(LEVEL_1, "Kind\tName\tNamespace\n")
		w.Write(LEVEL_1, "----\t----\t---------\n")
		for _, s := range binding.Subjects {
			w.Write(LEVEL_1, "%s\t%s\t%s\n", s.Kind, s.Name, s.Namespace)
		}
		return nil
	})
}
// NodeDescriber generates information about a node.
type NodeDescriber struct {
	clientset.Interface
}

// Describe fetches the node, the non-terminated pods scheduled to it, and
// (optionally) its events, then renders everything via describeNode.
func (d *NodeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	mc := d.Core().Nodes()
	node, err := mc.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Select pods bound to this node whose phase is neither Succeeded nor
	// Failed, i.e. pods still consuming node resources.
	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed))
	if err != nil {
		return "", err
	}
	// in a policy aware setting, users may have access to a node, but not all pods
	// in that case, we note that the user does not have access to the pods
	canViewPods := true
	nodeNonTerminatedPodsList, err := d.Core().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()})
	if err != nil {
		if !errors.IsForbidden(err) {
			return "", err
		}
		canViewPods = false
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		if ref, err := ref.GetReference(legacyscheme.Scheme, node); err != nil {
			glog.Errorf("Unable to construct reference to '%#v': %v", node, err)
		} else {
			// TODO: We haven't decided the namespace for Node object yet.
			ref.UID = types.UID(ref.Name)
			// Event search failures are deliberately ignored; events are a
			// best-effort addition to the description.
			events, _ = d.Core().Events("").Search(legacyscheme.Scheme, ref)
		}
	}
	return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods)
}
// describeNode renders the full node description: identity and roles, labels,
// taints, conditions, addresses, capacity/allocatable resources, system info,
// the non-terminated pods scheduled to the node (when the caller is allowed
// to see them), and any supplied events.
func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", node.Name)
		if roles := findNodeRoles(node); len(roles) > 0 {
			w.Write(LEVEL_0, "Roles:\t%s\n", strings.Join(roles, ","))
		} else {
			w.Write(LEVEL_0, "Roles:\t%s\n", "<none>")
		}
		printLabelsMultiline(w, "Labels", node.Labels)
		printAnnotationsMultiline(w, "Annotations", node.Annotations)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z))
		printNodeTaintsMultiline(w, "Taints", node.Spec.Taints)
		w.Write(LEVEL_0, "Unschedulable:\t%v\n", node.Spec.Unschedulable)
		if len(node.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n")
			for _, c := range node.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n",
					c.Type,
					c.Status,
					c.LastHeartbeatTime.Time.Format(time.RFC1123Z),
					c.LastTransitionTime.Time.Format(time.RFC1123Z),
					c.Reason,
					c.Message)
			}
		}
		w.Write(LEVEL_0, "Addresses:\n")
		for _, address := range node.Status.Addresses {
			w.Write(LEVEL_1, "%s:\t%s\n", address.Type, address.Address)
		}
		// printResourceList prints a resource map with names in sorted order
		// so output is deterministic.
		printResourceList := func(resourceList api.ResourceList) {
			resources := make([]api.ResourceName, 0, len(resourceList))
			for resource := range resourceList {
				resources = append(resources, resource)
			}
			sort.Sort(SortableResourceNames(resources))
			for _, resource := range resources {
				value := resourceList[resource]
				w.Write(LEVEL_0, " %s:\t%s\n", resource, value.String())
			}
		}
		if len(node.Status.Capacity) > 0 {
			w.Write(LEVEL_0, "Capacity:\n")
			printResourceList(node.Status.Capacity)
		}
		if len(node.Status.Allocatable) > 0 {
			w.Write(LEVEL_0, "Allocatable:\n")
			printResourceList(node.Status.Allocatable)
		}
		w.Write(LEVEL_0, "System Info:\n")
		w.Write(LEVEL_0, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID)
		w.Write(LEVEL_0, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID)
		w.Write(LEVEL_0, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID)
		w.Write(LEVEL_0, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion)
		w.Write(LEVEL_0, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage)
		w.Write(LEVEL_0, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem)
		w.Write(LEVEL_0, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture)
		w.Write(LEVEL_0, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion)
		w.Write(LEVEL_0, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion)
		w.Write(LEVEL_0, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion)
		if len(node.Spec.PodCIDR) > 0 {
			w.Write(LEVEL_0, "PodCIDR:\t%s\n", node.Spec.PodCIDR)
		}
		if len(node.Spec.ProviderID) > 0 {
			w.Write(LEVEL_0, "ProviderID:\t%s\n", node.Spec.ProviderID)
		}
		// canViewPods is false when listing pods was forbidden; in that case
		// say so instead of printing an empty table.
		if canViewPods && nodeNonTerminatedPodsList != nil {
			describeNodeResource(nodeNonTerminatedPodsList, node, w)
		} else {
			w.Write(LEVEL_0, "Pods:\tnot authorized\n")
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// StatefulSetDescriber generates information about a stateful set.
type StatefulSetDescriber struct {
	client clientset.Interface
}

// Describe fetches the named StatefulSet, counts its owned pods by phase,
// optionally gathers its events, and renders them via describeStatefulSet.
func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	ps, err := p.client.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	pc := p.client.Core().Pods(namespace)
	selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector)
	if err != nil {
		return "", err
	}
	// Only pods actually owned by this StatefulSet (by UID) are counted.
	running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, ps.UID)
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, ps)
	}
	return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed)
}
// describeStatefulSet writes a tabbed, human-readable description of a
// stateful set: metadata, selector, replica and pod-status counts, the pod
// template, volume claim templates, and (when supplied) events.
func describeStatefulSet(ps *apps.StatefulSet, selector labels.Selector, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
	return tabbedString(func(streamOut io.Writer) error {
		pw := NewPrefixWriter(streamOut)
		pw.Write(LEVEL_0, "Name:\t%s\n", ps.ObjectMeta.Name)
		pw.Write(LEVEL_0, "Namespace:\t%s\n", ps.ObjectMeta.Namespace)
		pw.Write(LEVEL_0, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z))
		pw.Write(LEVEL_0, "Selector:\t%s\n", selector)
		printLabelsMultiline(pw, "Labels", ps.Labels)
		printAnnotationsMultiline(pw, "Annotations", ps.Annotations)
		pw.Write(LEVEL_0, "Replicas:\t%d desired | %d total\n", ps.Spec.Replicas, ps.Status.Replicas)
		pw.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
		DescribePodTemplate(&ps.Spec.Template, pw)
		describeVolumeClaimTemplates(ps.Spec.VolumeClaimTemplates, pw)
		if events != nil {
			DescribeEvents(events, pw)
		}
		return nil
	})
}
// CertificateSigningRequestDescriber generates information about a
// certificate signing request.
type CertificateSigningRequestDescriber struct {
	client clientset.Interface
}

// Describe fetches the named CSR (cluster-scoped), parses its embedded x509
// request, derives its approval status, optionally gathers events, and
// renders everything via describeCertificateSigningRequest.
func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	csr, err := p.client.Certificates().CertificateSigningRequests().Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	cr, err := certificates.ParseCSR(csr)
	if err != nil {
		return "", fmt.Errorf("Error parsing CSR: %v", err)
	}
	status, err := extractCSRStatus(csr)
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, csr)
	}
	return describeCertificateSigningRequest(csr, cr, status, events)
}
// describeCertificateSigningRequest renders a CSR: its metadata, requesting
// user, approval status, and the x509 subject / subject-alternative-name
// fields parsed from the request itself.
func describeCertificateSigningRequest(csr *certificates.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *api.EventList) (string, error) {
	// printListHelper prints "prefix name: v1\n prefix v2..." and nothing at
	// all when the value list is empty.
	printListHelper := func(w PrefixWriter, prefix, name string, values []string) {
		if len(values) == 0 {
			return
		}
		w.Write(LEVEL_0, prefix+name+":\t")
		w.Write(LEVEL_0, strings.Join(values, "\n"+prefix+"\t"))
		w.Write(LEVEL_0, "\n")
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", csr.Name)
		w.Write(LEVEL_0, "Labels:\t%s\n", labels.FormatLabels(csr.Labels))
		w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(csr.Annotations))
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csr.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Requesting User:\t%s\n", csr.Spec.Username)
		w.Write(LEVEL_0, "Status:\t%s\n", status)
		w.Write(LEVEL_0, "Subject:\n")
		w.Write(LEVEL_0, "\tCommon Name:\t%s\n", cr.Subject.CommonName)
		w.Write(LEVEL_0, "\tSerial Number:\t%s\n", cr.Subject.SerialNumber)
		printListHelper(w, "\t", "Organization", cr.Subject.Organization)
		printListHelper(w, "\t", "Organizational Unit", cr.Subject.OrganizationalUnit)
		printListHelper(w, "\t", "Country", cr.Subject.Country)
		printListHelper(w, "\t", "Locality", cr.Subject.Locality)
		printListHelper(w, "\t", "Province", cr.Subject.Province)
		printListHelper(w, "\t", "StreetAddress", cr.Subject.StreetAddress)
		printListHelper(w, "\t", "PostalCode", cr.Subject.PostalCode)
		// Only emit the SAN section when at least one SAN of any kind exists.
		if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses) > 0 {
			w.Write(LEVEL_0, "Subject Alternative Names:\n")
			printListHelper(w, "\t", "DNS Names", cr.DNSNames)
			printListHelper(w, "\t", "Email Addresses", cr.EmailAddresses)
			var ipaddrs []string
			for _, ipaddr := range cr.IPAddresses {
				ipaddrs = append(ipaddrs, ipaddr.String())
			}
			printListHelper(w, "\t", "IP Addresses", ipaddrs)
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler.
type HorizontalPodAutoscalerDescriber struct {
	client clientset.Interface
}

// Describe fetches the named HPA, optionally gathers its events, and renders
// them via describeHorizontalPodAutoscaler.
func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = d.client.Core().Events(namespace).Search(legacyscheme.Scheme, hpa)
	}
	return describeHorizontalPodAutoscaler(hpa, events, d)
}
// describeHorizontalPodAutoscaler renders an HPA: its scale-target reference,
// one "current / target" line per configured metric (external, pods, object,
// or resource), replica bounds, scale status, conditions, and events.
// The describer argument is currently unused by this function.
func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, events *api.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace)
		printLabelsMultiline(w, "Labels", hpa.Labels)
		printAnnotationsMultiline(w, "Annotations", hpa.Annotations)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z))
		w.Write(LEVEL_0, "Reference:\t%s/%s\n",
			hpa.Spec.ScaleTargetRef.Kind,
			hpa.Spec.ScaleTargetRef.Name)
		w.Write(LEVEL_0, "Metrics:\t( current / target )\n")
		// Spec metrics and status CurrentMetrics are matched by index; every
		// status access is guarded so a missing/shorter status list prints
		// "<unknown>" instead of panicking.
		for i, metric := range hpa.Spec.Metrics {
			switch metric.Type {
			case autoscaling.ExternalMetricSourceType:
				if metric.External.TargetAverageValue != nil {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil &&
						hpa.Status.CurrentMetrics[i].External.CurrentAverageValue != nil {
						current = hpa.Status.CurrentMetrics[i].External.CurrentAverageValue.String()
					}
					w.Write(LEVEL_1, "%q (target average value):\t%s / %s\n", metric.External.MetricName, current, metric.External.TargetAverageValue.String())
				} else {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil {
						current = hpa.Status.CurrentMetrics[i].External.CurrentValue.String()
					}
					w.Write(LEVEL_1, "%q (target value):\t%s / %s\n", metric.External.MetricName, current, metric.External.TargetValue.String())
				}
			case autoscaling.PodsMetricSourceType:
				current := "<unknown>"
				if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Pods != nil {
					current = hpa.Status.CurrentMetrics[i].Pods.CurrentAverageValue.String()
				}
				w.Write(LEVEL_1, "%q on pods:\t%s / %s\n", metric.Pods.MetricName, current, metric.Pods.TargetAverageValue.String())
			case autoscaling.ObjectMetricSourceType:
				current := "<unknown>"
				if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil {
					current = hpa.Status.CurrentMetrics[i].Object.CurrentValue.String()
				}
				w.Write(LEVEL_1, "%q on %s/%s:\t%s / %s\n", metric.Object.MetricName, metric.Object.Target.Kind, metric.Object.Target.Name, current, metric.Object.TargetValue.String())
			case autoscaling.ResourceMetricSourceType:
				w.Write(LEVEL_1, "resource %s on pods", string(metric.Resource.Name))
				if metric.Resource.TargetAverageValue != nil {
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil {
						current = hpa.Status.CurrentMetrics[i].Resource.CurrentAverageValue.String()
					}
					w.Write(LEVEL_0, ":\t%s / %s\n", current, metric.Resource.TargetAverageValue.String())
				} else {
					// Utilization-based resource metric: report percentage
					// (with absolute value) and "<auto>" when no target set.
					current := "<unknown>"
					if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil && hpa.Status.CurrentMetrics[i].Resource.CurrentAverageUtilization != nil {
						current = fmt.Sprintf("%d%% (%s)", *hpa.Status.CurrentMetrics[i].Resource.CurrentAverageUtilization, hpa.Status.CurrentMetrics[i].Resource.CurrentAverageValue.String())
					}
					target := "<auto>"
					if metric.Resource.TargetAverageUtilization != nil {
						target = fmt.Sprintf("%d%%", *metric.Resource.TargetAverageUtilization)
					}
					w.Write(LEVEL_1, "(as a percentage of request):\t%s / %s\n", current, target)
				}
			default:
				w.Write(LEVEL_1, "<unknown metric type %q>", string(metric.Type))
			}
		}
		minReplicas := "<unset>"
		if hpa.Spec.MinReplicas != nil {
			minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas)
		}
		w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas)
		w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas)
		w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind)
		w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
		if len(hpa.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n")
			w.Write(LEVEL_1, "Type\tStatus\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t------\t-------\n")
			for _, c := range hpa.Status.Conditions {
				w.Write(LEVEL_1, "%v\t%v\t%v\t%v\n", c.Type, c.Status, c.Reason, c.Message)
			}
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// describeNodeResource prints the node's non-terminated pods with their
// per-pod CPU/memory requests and limits (absolute value plus percentage of
// the node's allocatable resources), followed by resource totals and any
// extended resources.
func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, w PrefixWriter) {
	w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items))
	w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
	w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\n")
	// Percentages are computed against allocatable when the node reports it,
	// otherwise against raw capacity.
	allocatable := node.Status.Capacity
	if len(node.Status.Allocatable) > 0 {
		allocatable = node.Status.Allocatable
	}
	for _, pod := range nodeNonTerminatedPodsList.Items {
		req, limit := resourcehelper.PodRequestsAndLimits(&pod)
		cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory]
		// Guard against zero allocatable CPU/memory: a zero divisor would
		// yield NaN/Inf, and converting that to int64 is undefined in Go.
		// This mirrors the guards already used for the totals below.
		fractionCpuReq, fractionCpuLimit := float64(0), float64(0)
		if allocatable.Cpu().MilliValue() != 0 {
			fractionCpuReq = float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
			fractionCpuLimit = float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
		}
		fractionMemoryReq, fractionMemoryLimit := float64(0), float64(0)
		if allocatable.Memory().Value() != 0 {
			fractionMemoryReq = float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100
			fractionMemoryLimit = float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100
		}
		w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name,
			cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit),
			memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit))
	}
	w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n")
	w.Write(LEVEL_1, "Resource\tRequests\tLimits\n")
	w.Write(LEVEL_1, "--------\t--------\t------\n")
	reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList)
	cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory]
	fractionCpuReqs := float64(0)
	fractionCpuLimits := float64(0)
	if allocatable.Cpu().MilliValue() != 0 {
		fractionCpuReqs = float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
		fractionCpuLimits = float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
	}
	fractionMemoryReqs := float64(0)
	fractionMemoryLimits := float64(0)
	if allocatable.Memory().Value() != 0 {
		fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100
		fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100
	}
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		api.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits))
	w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n",
		api.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits))
	// Extended (non-standard) resources are listed after CPU/memory, sorted
	// by name for deterministic output.
	extResources := make([]string, 0, len(allocatable))
	for resource := range allocatable {
		if !helper.IsStandardContainerResourceName(string(resource)) && resource != api.ResourcePods {
			extResources = append(extResources, string(resource))
		}
	}
	sort.Strings(extResources)
	for _, ext := range extResources {
		extRequests, extLimits := reqs[api.ResourceName(ext)], limits[api.ResourceName(ext)]
		w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String())
	}
}
// getPodsTotalRequestsAndLimits sums the resource requests and limits of
// every pod in the list, keyed by resource name.
func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity) {
	reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{}
	// accumulate folds one pod's quantities into a running total map.
	accumulate := func(total map[api.ResourceName]resource.Quantity, add map[api.ResourceName]resource.Quantity) {
		for name, quantity := range add {
			if existing, found := total[name]; found {
				existing.Add(quantity)
				total[name] = existing
			} else {
				total[name] = *quantity.Copy()
			}
		}
	}
	for _, pod := range podList.Items {
		podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&pod)
		accumulate(reqs, podReqs)
		accumulate(limits, podLimits)
	}
	return
}
// DescribeEvents writes an event table (type, reason, age, source, message)
// in sorted order, or "Events: <none>" when the list is empty.
func DescribeEvents(el *api.EventList, w PrefixWriter) {
	if len(el.Items) == 0 {
		w.Write(LEVEL_0, "Events:\t<none>\n")
		return
	}
	// Flush pending tabbed output before switching to the event table layout.
	w.Flush()
	sort.Sort(events.SortableEvents(el.Items))
	w.Write(LEVEL_0, "Events:\n Type\tReason\tAge\tFrom\tMessage\n")
	w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n")
	for _, event := range el.Items {
		// Single events show their first timestamp; repeated events show the
		// most recent occurrence plus a count over the observed window.
		interval := translateTimestamp(event.FirstTimestamp)
		if event.Count > 1 {
			interval = fmt.Sprintf("%s (x%d over %s)", translateTimestamp(event.LastTimestamp), event.Count, translateTimestamp(event.FirstTimestamp))
		}
		w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n",
			event.Type,
			event.Reason,
			interval,
			formatEventSource(event.Source),
			strings.TrimSpace(event.Message),
		)
	}
}
// DeploymentDescriber generates information about a deployment.
type DeploymentDescriber struct {
	clientset.Interface
	external externalclient.Interface
}

// Describe fetches the named Deployment through the external (versioned)
// client, converts it to the internal type for template printing, optionally
// gathers events, and renders everything via describeDeployment.
func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	d, err := dd.external.ExtensionsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
	if err != nil {
		return "", err
	}
	// DescribePodTemplate needs the internal representation, so convert the
	// versioned object through the legacy scheme.
	internalDeployment := &extensions.Deployment{}
	if err := legacyscheme.Scheme.Convert(d, internalDeployment, extensions.SchemeGroupVersion); err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = dd.Core().Events(namespace).Search(legacyscheme.Scheme, d)
	}
	return describeDeployment(d, selector, internalDeployment, events, dd)
}
// describeDeployment renders a deployment: metadata, replica counts, rollout
// strategy, the pod template (from the internal representation), conditions,
// old/new replica sets, and any supplied events.
func describeDeployment(d *versionedextension.Deployment, selector labels.Selector, internalDeployment *extensions.Deployment, events *api.EventList, dd *DeploymentDescriber) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", d.ObjectMeta.Namespace)
		w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z))
		printLabelsMultiline(w, "Labels", d.Labels)
		printAnnotationsMultiline(w, "Annotations", d.Annotations)
		w.Write(LEVEL_0, "Selector:\t%s\n", selector)
		// NOTE(review): dereferences d.Spec.Replicas without a nil check —
		// presumably relies on API defaulting always setting it; confirm.
		w.Write(LEVEL_0, "Replicas:\t%d desired | %d updated | %d total | %d available | %d unavailable\n", *(d.Spec.Replicas), d.Status.UpdatedReplicas, d.Status.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas)
		w.Write(LEVEL_0, "StrategyType:\t%s\n", d.Spec.Strategy.Type)
		w.Write(LEVEL_0, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds)
		if d.Spec.Strategy.RollingUpdate != nil {
			ru := d.Spec.Strategy.RollingUpdate
			w.Write(LEVEL_0, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String())
		}
		DescribePodTemplate(&internalDeployment.Spec.Template, w)
		if len(d.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n")
			w.Write(LEVEL_1, "----\t------\t------\n")
			for _, c := range d.Status.Conditions {
				w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason)
			}
		}
		// Replica-set listing is best-effort: on error the section is simply
		// omitted rather than failing the whole description.
		oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.external.ExtensionsV1beta1())
		if err == nil {
			w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs))
			var newRSs []*versionedextension.ReplicaSet
			if newRS != nil {
				newRSs = append(newRSs, newRS)
			}
			w.Write(LEVEL_0, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs))
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// printReplicaSetsByLabels formats each ReplicaSet as
// "name (current/desired replicas created)" joined by commas, or "<none>"
// when the list is empty.
func printReplicaSetsByLabels(matchingRSs []*versionedextension.ReplicaSet) string {
	var rsStrings []string
	for _, rs := range matchingRSs {
		// NOTE(review): dereferences rs.Spec.Replicas without a nil check —
		// presumably API defaulting guarantees it is set; confirm.
		rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, *rs.Spec.Replicas))
	}
	if len(rsStrings) == 0 {
		return "<none>"
	}
	return strings.Join(rsStrings, ", ")
}
// getPodStatusForController lists pods matching the selector and counts, by
// phase, only those whose controller owner-reference UID matches the given
// controller UID (orphans and pods owned by other controllers are skipped).
func getPodStatusForController(c coreclient.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) {
	options := metav1.ListOptions{LabelSelector: selector.String()}
	rcPods, err := c.List(options)
	if err != nil {
		return
	}
	for i := range rcPods.Items {
		pod := &rcPods.Items[i]
		if ref := metav1.GetControllerOf(pod); ref == nil || ref.UID != uid {
			continue
		}
		switch pod.Status.Phase {
		case api.PodRunning:
			running++
		case api.PodPending:
			waiting++
		case api.PodSucceeded:
			succeeded++
		case api.PodFailed:
			failed++
		}
	}
	return
}
// ConfigMapDescriber generates information about a ConfigMap
type ConfigMapDescriber struct {
	clientset.Interface
}

// Describe renders the named ConfigMap: metadata followed by each data entry
// printed verbatim under its key, plus events when requested.
func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	c := d.Core().ConfigMaps(namespace)
	configMap, err := c.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", configMap.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", configMap.Namespace)
		printLabelsMultiline(w, "Labels", configMap.Labels)
		printAnnotationsMultiline(w, "Annotations", configMap.Annotations)
		w.Write(LEVEL_0, "\nData\n====\n")
		for k, v := range configMap.Data {
			w.Write(LEVEL_0, "%s:\n----\n", k)
			// Data values are already strings; the previous string(v)
			// conversion was redundant.
			w.Write(LEVEL_0, "%s\n", v)
		}
		if describerSettings.ShowEvents {
			events, err := d.Core().Events(namespace).Search(legacyscheme.Scheme, configMap)
			if err != nil {
				return err
			}
			if events != nil {
				DescribeEvents(events, w)
			}
		}
		return nil
	})
}
// NetworkPolicyDescriber generates information about a networking.NetworkPolicy
type NetworkPolicyDescriber struct {
	clientset.Interface
}

// Describe fetches the named NetworkPolicy in the given namespace and
// renders it via describeNetworkPolicy.
func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	c := d.Networking().NetworkPolicies(namespace)
	networkPolicy, err := c.Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describeNetworkPolicy(networkPolicy)
}
// describeNetworkPolicy renders a network policy's metadata followed by its
// spec (pod selector, ingress/egress rules, and policy types).
func describeNetworkPolicy(networkPolicy *networking.NetworkPolicy) (string, error) {
	return tabbedString(func(buf io.Writer) error {
		pw := NewPrefixWriter(buf)
		pw.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name)
		pw.Write(LEVEL_0, "Namespace:\t%s\n", networkPolicy.Namespace)
		// NOTE(review): prints the raw CreationTimestamp, unlike sibling
		// describers which format with RFC1123Z — confirm this is intended.
		pw.Write(LEVEL_0, "Created on:\t%s\n", networkPolicy.CreationTimestamp)
		printLabelsMultiline(pw, "Labels", networkPolicy.Labels)
		printAnnotationsMultiline(pw, "Annotations", networkPolicy.Annotations)
		describeNetworkPolicySpec(networkPolicy.Spec, pw)
		return nil
	})
}
// describeNetworkPolicySpec prints the policy spec: the pod selector the
// policy applies to, the allowed ingress and egress rules, and the policy
// types in effect.
func describeNetworkPolicySpec(nps networking.NetworkPolicySpec, w PrefixWriter) {
	w.Write(LEVEL_0, "Spec:\n")
	w.Write(LEVEL_1, "PodSelector: ")
	// An empty selector matches every pod in the namespace.
	if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 {
		w.Write(LEVEL_2, "<none> (Allowing the specific traffic to all pods in this namespace)\n")
	} else {
		w.Write(LEVEL_2, "%s\n", metav1.FormatLabelSelector(&nps.PodSelector))
	}
	w.Write(LEVEL_1, "Allowing ingress traffic:\n")
	printNetworkPolicySpecIngressFrom(nps.Ingress, "    ", w)
	w.Write(LEVEL_1, "Allowing egress traffic:\n")
	printNetworkPolicySpecEgressTo(nps.Egress, "    ", w)
	w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes))
}
// printNetworkPolicySpecIngressFrom prints each ingress rule: first the ports
// it opens, then the peers ("from") allowed to connect, with a divider line
// between consecutive rules. An empty rule list means ingress is isolated.
func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) {
	if len(npirs) == 0 {
		w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for ingress connectivity)")
		return
	}
	for i, npir := range npirs {
		if len(npir.Ports) == 0 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
		} else {
			for _, port := range npir.Ports {
				// Per the API contract, a nil protocol defaults to TCP.
				var proto api.Protocol
				if port.Protocol != nil {
					proto = *port.Protocol
				} else {
					proto = api.ProtocolTCP
				}
				w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
			}
		}
		if len(npir.From) == 0 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "From: <any> (traffic not restricted by source)")
		} else {
			// A peer may combine namespace+pod selectors, carry a single
			// selector, or be an IP block; each form prints differently.
			for _, from := range npir.From {
				w.Write(LEVEL_0, "%s%s\n", initialIndent, "From:")
				if from.PodSelector != nil && from.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector))
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector))
				} else if from.PodSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector))
				} else if from.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector))
				} else if from.IPBlock != nil {
					w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent)
					w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, from.IPBlock.CIDR)
					w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(from.IPBlock.Except, ", "))
				}
			}
		}
		if i != len(npirs)-1 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------")
		}
	}
}
// printNetworkPolicySpecEgressTo prints each egress rule: first the ports it
// opens, then the peers ("to") that traffic may reach, with a divider line
// between consecutive rules. An empty rule list means egress is isolated.
func printNetworkPolicySpecEgressTo(npers []networking.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) {
	if len(npers) == 0 {
		w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for egress connectivity)")
		return
	}
	for i, nper := range npers {
		if len(nper.Ports) == 0 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
		} else {
			for _, port := range nper.Ports {
				// Per the API contract, a nil protocol defaults to TCP.
				var proto api.Protocol
				if port.Protocol != nil {
					proto = *port.Protocol
				} else {
					proto = api.ProtocolTCP
				}
				w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
			}
		}
		if len(nper.To) == 0 {
			// Fixed copy-paste from the ingress printer: an egress "to"
			// clause restricts the destination, not the source.
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To: <any> (traffic not restricted by destination)")
		} else {
			for _, to := range nper.To {
				w.Write(LEVEL_0, "%s%s\n", initialIndent, "To:")
				if to.PodSelector != nil && to.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector))
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector))
				} else if to.PodSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector))
				} else if to.NamespaceSelector != nil {
					w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector))
				} else if to.IPBlock != nil {
					w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent)
					w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, to.IPBlock.CIDR)
					w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(to.IPBlock.Except, ", "))
				}
			}
		}
		if i != len(npers)-1 {
			w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------")
		}
	}
}
// StorageClassDescriber generates information about a storage class.
type StorageClassDescriber struct {
	clientset.Interface
}

// Describe fetches the named StorageClass (cluster-scoped), optionally
// gathers its events, and renders them via describeStorageClass.
func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	sc, err := s.Storage().StorageClasses().Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var events *api.EventList
	if describerSettings.ShowEvents {
		events, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, sc)
	}
	return describeStorageClass(sc, events)
}
// describeStorageClass renders a storage class: provisioner, parameters,
// expansion support, mount options, reclaim policy, binding mode, and any
// supplied events.
func describeStorageClass(sc *storage.StorageClass, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", sc.Name)
		w.Write(LEVEL_0, "IsDefaultClass:\t%s\n", storageutil.IsDefaultAnnotationText(sc.ObjectMeta))
		w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations))
		w.Write(LEVEL_0, "Provisioner:\t%s\n", sc.Provisioner)
		w.Write(LEVEL_0, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters))
		w.Write(LEVEL_0, "AllowVolumeExpansion:\t%s\n", printBoolPtr(sc.AllowVolumeExpansion))
		if len(sc.MountOptions) == 0 {
			w.Write(LEVEL_0, "MountOptions:\t<none>\n")
		} else {
			w.Write(LEVEL_0, "MountOptions:\n")
			for _, option := range sc.MountOptions {
				w.Write(LEVEL_1, "%s\n", option)
			}
		}
		// ReclaimPolicy and VolumeBindingMode are optional pointers; omit the
		// lines entirely when unset.
		if sc.ReclaimPolicy != nil {
			w.Write(LEVEL_0, "ReclaimPolicy:\t%s\n", *sc.ReclaimPolicy)
		}
		if sc.VolumeBindingMode != nil {
			w.Write(LEVEL_0, "VolumeBindingMode:\t%s\n", *sc.VolumeBindingMode)
		}
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
type PodDisruptionBudgetDescriber struct {
clientset.Interface
}
// Describe returns the textual description of the named PodDisruptionBudget,
// including its events when describerSettings.ShowEvents is set.
func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	budget, err := p.Policy().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Event lookup is best-effort; a failed search simply yields no events.
	var eventList *api.EventList
	if describerSettings.ShowEvents {
		eventList, _ = p.Core().Events(namespace).Search(legacyscheme.Scheme, budget)
	}
	return describePodDisruptionBudget(budget, eventList)
}
// describePodDisruptionBudget renders a PodDisruptionBudget (and optionally
// its events) as tab-aligned text.
func describePodDisruptionBudget(pdb *policy.PodDisruptionBudget, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name)
		w.Write(LEVEL_0, "Namespace:\t%s\n", pdb.Namespace)
		// MinAvailable and MaxUnavailable are mutually exclusive in the spec.
		if pdb.Spec.MinAvailable != nil {
			w.Write(LEVEL_0, "Min available:\t%s\n", pdb.Spec.MinAvailable.String())
		} else if pdb.Spec.MaxUnavailable != nil {
			w.Write(LEVEL_0, "Max unavailable:\t%s\n", pdb.Spec.MaxUnavailable.String())
		}
		if pdb.Spec.Selector != nil {
			w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(pdb.Spec.Selector))
		} else {
			w.Write(LEVEL_0, "Selector:\t<unset>\n")
		}
		w.Write(LEVEL_0, "Status:\n")
		w.Write(LEVEL_2, "Allowed disruptions:\t%d\n", pdb.Status.PodDisruptionsAllowed)
		w.Write(LEVEL_2, "Current:\t%d\n", pdb.Status.CurrentHealthy)
		w.Write(LEVEL_2, "Desired:\t%d\n", pdb.Status.DesiredHealthy)
		w.Write(LEVEL_2, "Total:\t%d\n", pdb.Status.ExpectedPods)
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// PriorityClassDescriber generates information about a PriorityClass.
type PriorityClassDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named PriorityClass,
// including its events when describerSettings.ShowEvents is set.
func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	priorityClass, err := s.Scheduling().PriorityClasses().Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Event lookup is best-effort; a failed search simply yields no events.
	var eventList *api.EventList
	if describerSettings.ShowEvents {
		eventList, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, priorityClass)
	}
	return describePriorityClass(priorityClass, eventList)
}
// describePriorityClass renders a PriorityClass (and optionally its events)
// as tab-aligned text.
func describePriorityClass(pc *scheduling.PriorityClass, events *api.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", pc.Name)
		w.Write(LEVEL_0, "Value:\t%v\n", pc.Value)
		w.Write(LEVEL_0, "GlobalDefault:\t%v\n", pc.GlobalDefault)
		w.Write(LEVEL_0, "Description:\t%s\n", pc.Description)
		w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(pc.Annotations))
		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
// PodSecurityPolicyDescriber generates information about a PodSecurityPolicy.
type PodSecurityPolicyDescriber struct {
	clientset.Interface
}
// Describe returns the textual description of the named PodSecurityPolicy.
// Events are never shown for this resource, so describerSettings is unused.
func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
	securityPolicy, err := d.Policy().PodSecurityPolicies().Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return describePodSecurityPolicy(securityPolicy)
}
// describePodSecurityPolicy renders a PodSecurityPolicy's settings and
// per-strategy security context rules as tab-aligned text.
func describePodSecurityPolicy(psp *policy.PodSecurityPolicy) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%s\n", psp.Name)
		w.Write(LEVEL_0, "\nSettings:\n")
		w.Write(LEVEL_1, "Allow Privileged:\t%t\n", psp.Spec.Privileged)
		w.Write(LEVEL_1, "Allow Privilege Escalation:\t%v\n", psp.Spec.AllowPrivilegeEscalation)
		w.Write(LEVEL_1, "Default Add Capabilities:\t%v\n", capsToString(psp.Spec.DefaultAddCapabilities))
		w.Write(LEVEL_1, "Required Drop Capabilities:\t%s\n", capsToString(psp.Spec.RequiredDropCapabilities))
		w.Write(LEVEL_1, "Allowed Capabilities:\t%s\n", capsToString(psp.Spec.AllowedCapabilities))
		w.Write(LEVEL_1, "Allowed Volume Types:\t%s\n", fsTypeToString(psp.Spec.Volumes))
		// FlexVolume whitelist is only shown when explicitly restricted.
		if len(psp.Spec.AllowedFlexVolumes) > 0 {
			w.Write(LEVEL_1, "Allowed FlexVolume Types:\t%s\n", flexVolumesToString(psp.Spec.AllowedFlexVolumes))
		}
		w.Write(LEVEL_1, "Allow Host Network:\t%t\n", psp.Spec.HostNetwork)
		w.Write(LEVEL_1, "Allow Host Ports:\t%s\n", hostPortRangeToString(psp.Spec.HostPorts))
		w.Write(LEVEL_1, "Allow Host PID:\t%t\n", psp.Spec.HostPID)
		w.Write(LEVEL_1, "Allow Host IPC:\t%t\n", psp.Spec.HostIPC)
		w.Write(LEVEL_1, "Read Only Root Filesystem:\t%v\n", psp.Spec.ReadOnlyRootFilesystem)
		w.Write(LEVEL_1, "SELinux Context Strategy: %s\t\n", string(psp.Spec.SELinux.Rule))
		// SELinuxOptions is optional; absent fields print as <none> below.
		var user, role, seLinuxType, level string
		if psp.Spec.SELinux.SELinuxOptions != nil {
			user = psp.Spec.SELinux.SELinuxOptions.User
			role = psp.Spec.SELinux.SELinuxOptions.Role
			seLinuxType = psp.Spec.SELinux.SELinuxOptions.Type
			level = psp.Spec.SELinux.SELinuxOptions.Level
		}
		w.Write(LEVEL_2, "User:\t%s\n", stringOrNone(user))
		w.Write(LEVEL_2, "Role:\t%s\n", stringOrNone(role))
		w.Write(LEVEL_2, "Type:\t%s\n", stringOrNone(seLinuxType))
		w.Write(LEVEL_2, "Level:\t%s\n", stringOrNone(level))
		w.Write(LEVEL_1, "Run As User Strategy: %s\t\n", string(psp.Spec.RunAsUser.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", userIDRangeToString(psp.Spec.RunAsUser.Ranges))
		w.Write(LEVEL_1, "FSGroup Strategy: %s\t\n", string(psp.Spec.FSGroup.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", groupIDRangeToString(psp.Spec.FSGroup.Ranges))
		w.Write(LEVEL_1, "Supplemental Groups Strategy: %s\t\n", string(psp.Spec.SupplementalGroups.Rule))
		w.Write(LEVEL_2, "Ranges:\t%s\n", groupIDRangeToString(psp.Spec.SupplementalGroups.Ranges))
		return nil
	})
}
// stringOrNone returns s, substituting "<none>" when s is empty.
func stringOrNone(s string) string {
	if s == "" {
		return "<none>"
	}
	return s
}
// stringOrDefaultValue returns s when non-empty, otherwise defaultValue.
func stringOrDefaultValue(s, defaultValue string) string {
	if s == "" {
		return defaultValue
	}
	return s
}
// fsTypeToString joins the allowed volume types with commas, returning
// "<none>" for an empty list.
func fsTypeToString(volumes []policy.FSType) string {
	names := make([]string, 0, len(volumes))
	for _, volume := range volumes {
		names = append(names, string(volume))
	}
	return stringOrNone(strings.Join(names, ","))
}
// flexVolumesToString joins the whitelisted FlexVolume drivers as
// "driver=<name>" entries, returning "<all>" for an empty list.
func flexVolumesToString(flexVolumes []policy.AllowedFlexVolume) string {
	drivers := make([]string, 0, len(flexVolumes))
	for _, flexVolume := range flexVolumes {
		drivers = append(drivers, "driver="+flexVolume.Driver)
	}
	return stringOrDefaultValue(strings.Join(drivers, ","), "<all>")
}
// hostPortRangeToString joins host port ranges as "min-max" entries,
// returning "<none>" for an empty list.
func hostPortRangeToString(ranges []policy.HostPortRange) string {
	parts := make([]string, 0, len(ranges))
	for _, r := range ranges {
		parts = append(parts, fmt.Sprintf("%d-%d", r.Min, r.Max))
	}
	return stringOrNone(strings.Join(parts, ","))
}
// userIDRangeToString joins user ID ranges as "min-max" entries,
// returning "<none>" for an empty list.
func userIDRangeToString(ranges []policy.UserIDRange) string {
	parts := make([]string, 0, len(ranges))
	for _, r := range ranges {
		parts = append(parts, fmt.Sprintf("%d-%d", r.Min, r.Max))
	}
	return stringOrNone(strings.Join(parts, ","))
}
// groupIDRangeToString joins group ID ranges as "min-max" entries,
// returning "<none>" for an empty list.
func groupIDRangeToString(ranges []policy.GroupIDRange) string {
	parts := make([]string, 0, len(ranges))
	for _, r := range ranges {
		parts = append(parts, fmt.Sprintf("%d-%d", r.Min, r.Max))
	}
	return stringOrNone(strings.Join(parts, ","))
}
// capsToString joins capabilities with commas, returning "<none>" for an
// empty list.
func capsToString(caps []api.Capability) string {
	names := make([]string, 0, len(caps))
	for _, capability := range caps {
		names = append(names, string(capability))
	}
	return stringOrNone(strings.Join(names, ","))
}
// policyTypesToString joins network policy types with ", ", returning
// "<none>" for an empty list.
func policyTypesToString(pts []networking.PolicyType) string {
	names := make([]string, 0, len(pts))
	for _, policyType := range pts {
		names = append(names, string(policyType))
	}
	return stringOrNone(strings.Join(names, ", "))
}
// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types.
func newErrNoDescriber(types ...reflect.Type) error {
	typeNames := make([]string, len(types))
	for i, t := range types {
		typeNames[i] = t.String()
	}
	return printers.ErrNoDescriber{Types: typeNames}
}
// Describers implements ObjectDescriber against functions registered via Add. Those functions can
// be strongly typed. Types are exactly matched (no conversion or assignable checks).
type Describers struct {
	// searchFns maps the exact type of the primary object to the describer
	// functions registered for it; each typeFunc also records its extra
	// argument types.
	searchFns map[reflect.Type][]typeFunc
}
// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string,
// if at least one describer function has been registered with the exact types passed, or if any
// describer can print the exact object in its first argument (the remainder will be provided empty
// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will
// be returned.
// TODO: reorder and partial match extra.
func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) {
	exactType := reflect.TypeOf(exact)
	fns, ok := d.searchFns[exactType]
	if !ok {
		return "", newErrNoDescriber(exactType)
	}
	if len(extra) == 0 {
		// Prefer a registered function that takes no extra arguments.
		for _, typeFn := range fns {
			if len(typeFn.Extra) == 0 {
				return typeFn.Describe(exact, extra...)
			}
		}
		// Otherwise fall back to the first registered function, padding the
		// call with zero values for each extra argument type it expects.
		typeFn := fns[0]
		for _, t := range typeFn.Extra {
			v := reflect.New(t).Elem()
			extra = append(extra, v.Interface())
		}
		return fns[0].Describe(exact, extra...)
	}
	// Extra arguments were supplied: require a function whose extra argument
	// types match them exactly.
	types := make([]reflect.Type, 0, len(extra))
	for _, obj := range extra {
		types = append(types, reflect.TypeOf(obj))
	}
	for _, typeFn := range fns {
		if typeFn.Matches(types) {
			return typeFn.Describe(exact, extra...)
		}
	}
	return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...)
}
// Add adds one or more describer functions to the printers.Describer. The passed function must
// match the signature:
//
//     func(...) (string, error)
//
// Any number of arguments may be provided. The first argument's type becomes
// the lookup key for DescribeObject; the remaining argument types are
// recorded as the function's "extra" types.
func (d *Describers) Add(fns ...interface{}) error {
	for _, fn := range fns {
		fv := reflect.ValueOf(fn)
		ft := fv.Type()
		if ft.Kind() != reflect.Func {
			return fmt.Errorf("expected func, got: %v", ft)
		}
		numIn := ft.NumIn()
		if numIn == 0 {
			return fmt.Errorf("expected at least one 'in' params, got: %v", ft)
		}
		if ft.NumOut() != 2 {
			return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft)
		}
		types := make([]reflect.Type, 0, numIn)
		for i := 0; i < numIn; i++ {
			types = append(types, ft.In(i))
		}
		if ft.Out(0) != reflect.TypeOf(string("")) {
			return fmt.Errorf("expected string return, got: %v", ft)
		}
		var forErrorType error
		// This convolution is necessary, otherwise TypeOf picks up on the fact
		// that forErrorType is nil.
		errorType := reflect.TypeOf(&forErrorType).Elem()
		if ft.Out(1) != errorType {
			return fmt.Errorf("expected error return, got: %v", ft)
		}
		// Register under the first argument's type; later lookups may pick
		// among multiple functions for the same exact type.
		exact := types[0]
		extra := types[1:]
		if d.searchFns == nil {
			d.searchFns = make(map[reflect.Type][]typeFunc)
		}
		fns := d.searchFns[exact]
		fn := typeFunc{Extra: extra, Fn: fv}
		fns = append(fns, fn)
		d.searchFns[exact] = fns
	}
	return nil
}
// typeFunc holds information about a describer function and the types it accepts.
type typeFunc struct {
	// Extra lists the types of the arguments after the first (exact) one.
	Extra []reflect.Type
	// Fn is the reflected describer function itself.
	Fn reflect.Value
}
// Matches returns true when the passed types contain exactly the same types
// as the Extra list, in any order. The comparison is a multiset comparison:
// duplicated types on either side must be matched one-for-one. (The previous
// set-based implementation wrongly accepted e.g. {A, A} against {A, B}.)
func (fn typeFunc) Matches(types []reflect.Type) bool {
	if len(fn.Extra) != len(types) {
		return false
	}
	// Count occurrences of each expected type, then consume them with the
	// candidate types; going negative means a type appears more often in
	// types than in Extra.
	counts := make(map[reflect.Type]int, len(fn.Extra))
	for _, t := range fn.Extra {
		counts[t]++
	}
	for _, t := range types {
		counts[t]--
		if counts[t] < 0 {
			return false
		}
	}
	return true
}
// Describe invokes the nested function with the exact object followed by the
// extra arguments, unpacking its (string, error) result pair.
func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) {
	args := make([]reflect.Value, 0, len(extra)+1)
	args = append(args, reflect.ValueOf(exact))
	for _, obj := range extra {
		args = append(args, reflect.ValueOf(obj))
	}
	results := fn.Fn.Call(args)
	text := results[0].Interface().(string)
	// A nil error value cannot be type-asserted directly; guard first.
	if results[1].IsNil() {
		return text, nil
	}
	return text, results[1].Interface().(error)
}
// printLabelsMultiline prints multiple labels with a proper alignment,
// one "key=value" per line, skipping nothing.
func printLabelsMultiline(w PrefixWriter, title string, labels map[string]string) {
	printLabelsMultilineWithIndent(w, "", title, "\t", labels, sets.NewString())
}
// printLabelsMultilineWithIndent prints labels one "key=value" per line in
// sorted key order, using the supplied indents. Keys present in skip are
// omitted; "<none>" is printed when nothing remains.
func printLabelsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, labels map[string]string, skip sets.String) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)

	// len() on a nil map is 0, so no separate nil check is needed.
	if len(labels) == 0 {
		w.WriteLine("<none>")
		return
	}

	// Collect keys (minus skipped ones) so output is in sorted order.
	keys := make([]string, 0, len(labels))
	for key := range labels {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)

	for i, key := range keys {
		// Every line after the first must re-emit the indents itself.
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s=%s\n", key, labels[key])
	}
}
// printNodeTaintsMultiline prints multiple taints with a proper alignment,
// one taint per line.
func printNodeTaintsMultiline(w PrefixWriter, title string, taints []api.Taint) {
	printTaintsMultilineWithIndent(w, "", title, "\t", taints)
}
// printTaintsMultilineWithIndent prints taints one per line with the given
// indents, ordered by effect and then by key; "<none>" is printed for an
// empty list. NOTE: the sort happens in place, so the caller's slice order
// is modified.
func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []api.Taint) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)

	// len() on a nil slice is 0, so no separate nil check is needed.
	if len(taints) == 0 {
		w.WriteLine("<none>")
		return
	}

	// Sort by effect first, then key, so related taints group together.
	sort.Slice(taints, func(i, j int) bool {
		cmpKey := func(taint api.Taint) string {
			return string(taint.Effect) + "," + taint.Key
		}
		return cmpKey(taints[i]) < cmpKey(taints[j])
	})

	for i, taint := range taints {
		// Every line after the first must re-emit the indents itself.
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s\n", taint.ToString())
	}
}
// printPodTolerationsMultiline prints multiple tolerations with a proper
// alignment, one toleration per line.
func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []api.Toleration) {
	printTolerationsMultilineWithIndent(w, "", title, "\t", tolerations)
}
// printTolerationsMultilineWithIndent prints tolerations one per line with
// the given indents, sorted by key, in the compact
// "key[=value][:effect][ for Ns]" form; "<none>" is printed for an empty
// list. NOTE: the sort happens in place, so the caller's slice order is
// modified.
func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []api.Toleration) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)

	// len() on a nil slice is 0, so no separate nil check is needed.
	if len(tolerations) == 0 {
		w.WriteLine("<none>")
		return
	}

	sort.Slice(tolerations, func(i, j int) bool {
		return tolerations[i].Key < tolerations[j].Key
	})
	for i, toleration := range tolerations {
		// Every line after the first must re-emit the indents itself.
		if i != 0 {
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		w.Write(LEVEL_0, "%s", toleration.Key)
		// Optional components are appended only when present.
		if len(toleration.Value) != 0 {
			w.Write(LEVEL_0, "=%s", toleration.Value)
		}
		if len(toleration.Effect) != 0 {
			w.Write(LEVEL_0, ":%s", toleration.Effect)
		}
		if toleration.TolerationSeconds != nil {
			w.Write(LEVEL_0, " for %ds", *toleration.TolerationSeconds)
		}
		w.Write(LEVEL_0, "\n")
	}
}
// flusher abstracts writers (such as tabwriter.Writer) that buffer output
// and must be flushed before their underlying buffer is read.
type flusher interface {
	Flush()
}
func tabbedString(f func(io.Writer) error) (string, error) {
out := new(tabwriter.Writer)
buf := &bytes.Buffer{}
out.Init(buf, 0, 8, 2, ' ', 0)
err := f(out)
if err != nil {
return "", err
}
out.Flush()
str := string(buf.String())
return str, nil
}
// SortableResourceNames sorts resource names lexicographically.
type SortableResourceNames []api.ResourceName

func (list SortableResourceNames) Len() int {
	return len(list)
}

func (list SortableResourceNames) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableResourceNames) Less(i, j int) bool {
	return list[i] < list[j]
}

// SortedResourceNames returns the sorted resource names of a resource list.
func SortedResourceNames(list api.ResourceList) []api.ResourceName {
	resources := make([]api.ResourceName, 0, len(list))
	for res := range list {
		resources = append(resources, res)
	}
	sort.Sort(SortableResourceNames(resources))
	return resources
}

// SortableResourceQuotas sorts resource quotas by object name.
type SortableResourceQuotas []api.ResourceQuota

func (list SortableResourceQuotas) Len() int {
	return len(list)
}

func (list SortableResourceQuotas) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableResourceQuotas) Less(i, j int) bool {
	return list[i].Name < list[j].Name
}

// SortableVolumeMounts sorts volume mounts by mount path.
type SortableVolumeMounts []api.VolumeMount

func (list SortableVolumeMounts) Len() int {
	return len(list)
}

func (list SortableVolumeMounts) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableVolumeMounts) Less(i, j int) bool {
	return list[i].MountPath < list[j].MountPath
}

// SortableVolumeDevices sorts volume devices by device path.
type SortableVolumeDevices []api.VolumeDevice

func (list SortableVolumeDevices) Len() int {
	return len(list)
}

func (list SortableVolumeDevices) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}

func (list SortableVolumeDevices) Less(i, j int) bool {
	return list[i].DevicePath < list[j].DevicePath
}
// maxAnnotationLen is the maximum number of characters of an annotation's
// "key=value" line printed before it is truncated with "...".
var maxAnnotationLen = 200

// printAnnotationsMultilineWithFilter prints filtered multiple annotations with a proper alignment.
func printAnnotationsMultilineWithFilter(w PrefixWriter, title string, annotations map[string]string, skip sets.String) {
	printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, skip)
}

// printAnnotationsMultiline prints multiple annotations with a proper alignment.
func printAnnotationsMultiline(w PrefixWriter, title string, annotations map[string]string) {
	printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, sets.NewString())
}
// printAnnotationsMultilineWithIndent prints annotations one "key=value"
// per line in sorted key order with a user-defined alignment, omitting keys
// present in skip. Lines longer than maxAnnotationLen are truncated with
// "...". "<none>" is printed when nothing remains.
func printAnnotationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, annotations map[string]string, skip sets.String) {
	w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent)

	if len(annotations) == 0 {
		w.WriteLine("<none>")
		return
	}

	// Collect keys (minus skipped ones) so output is in sorted order.
	keys := make([]string, 0, len(annotations))
	for key := range annotations {
		if skip.Has(key) {
			continue
		}
		keys = append(keys, key)
	}
	// Check the filtered key set, not the original map: skip may have
	// removed everything, in which case "<none>" must still be printed.
	if len(keys) == 0 {
		w.WriteLine("<none>")
		return
	}
	sort.Strings(keys)

	for i, key := range keys {
		if i != 0 {
			// Pass the indents as arguments to an explicit "%s" verb so a
			// literal '%' in them is not interpreted as a format directive.
			w.Write(LEVEL_0, "%s", initialIndent)
			w.Write(LEVEL_0, "%s", innerIndent)
		}
		line := fmt.Sprintf("%s=%s", key, annotations[key])
		if len(line) > maxAnnotationLen {
			w.Write(LEVEL_0, "%s...\n", line[:maxAnnotationLen])
		} else {
			w.Write(LEVEL_0, "%s\n", line)
		}
	}
}
| apache-2.0 |
raphaelning/resteasy-client-android | jaxrs/examples/oreilly-jaxrs-2.0-workbook/ex14_1/src/main/java/com/restfully/shop/services/ShoppingApplication.java | 200 | package com.restfully.shop.services;
import javax.ws.rs.ApplicationPath;
import javax.ws.rs.core.Application;
/**
 * JAX-RS entry point mounted at the "/services" URL prefix. The class body is
 * intentionally empty: with no overridden getClasses()/getSingletons(), the
 * runtime scans the deployment for annotated resource and provider classes.
 */
@ApplicationPath("/services")
public class ShoppingApplication extends Application
{
}
| apache-2.0 |
foxish/kubernetes | pkg/cloudprovider/providers/azure/azure_test.go | 93179 | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
)
// testClusterName is the cluster name shared by every test in this file.
var testClusterName = "testCluster"
// TestParseConfig verifies that every supported field of the azure.json
// cloud-provider configuration is decoded into the matching Config field.
func TestParseConfig(t *testing.T) {
	// NOTE(review): the unquoted `vmType` key below is not strict JSON;
	// presumably parseConfig accepts it via a YAML-tolerant decoder — confirm.
	azureConfig := `{
		"aadClientCertPassword": "aadClientCertPassword",
		"aadClientCertPath": "aadClientCertPath",
		"aadClientId": "aadClientId",
		"aadClientSecret": "aadClientSecret",
		"cloud":"AzurePublicCloud",
		"cloudProviderBackoff": true,
		"cloudProviderBackoffDuration": 1,
		"cloudProviderBackoffExponent": 1,
		"cloudProviderBackoffJitter": 1,
		"cloudProviderBackoffRetries": 1,
		"cloudProviderRatelimit": true,
		"cloudProviderRateLimitBucket": 1,
		"CloudProviderRateLimitBucketWrite": 1,
		"cloudProviderRateLimitQPS": 1,
		"CloudProviderRateLimitQPSWrite": 1,
		"location": "location",
		"maximumLoadBalancerRuleCount": 1,
		"primaryAvailabilitySetName": "primaryAvailabilitySetName",
		"primaryScaleSetName": "primaryScaleSetName",
		"resourceGroup": "resourceGroup",
		"routeTableName": "routeTableName",
		"securityGroupName": "securityGroupName",
		"subnetName": "subnetName",
		"subscriptionId": "subscriptionId",
		"tenantId": "tenantId",
		"useInstanceMetadata": true,
		"useManagedIdentityExtension": true,
		"vnetName": "vnetName",
		"vnetResourceGroup": "vnetResourceGroup",
		vmType: "standard"
	}`
	// Every field above must round-trip into this expected struct.
	expected := &Config{
		AzureAuthConfig: auth.AzureAuthConfig{
			AADClientCertPassword:       "aadClientCertPassword",
			AADClientCertPath:           "aadClientCertPath",
			AADClientID:                 "aadClientId",
			AADClientSecret:             "aadClientSecret",
			Cloud:                       "AzurePublicCloud",
			SubscriptionID:              "subscriptionId",
			TenantID:                    "tenantId",
			UseManagedIdentityExtension: true,
		},
		CloudProviderBackoff:              true,
		CloudProviderBackoffDuration:      1,
		CloudProviderBackoffExponent:      1,
		CloudProviderBackoffJitter:        1,
		CloudProviderBackoffRetries:       1,
		CloudProviderRateLimit:            true,
		CloudProviderRateLimitBucket:      1,
		CloudProviderRateLimitBucketWrite: 1,
		CloudProviderRateLimitQPS:         1,
		CloudProviderRateLimitQPSWrite:    1,
		Location:                          "location",
		MaximumLoadBalancerRuleCount:      1,
		PrimaryAvailabilitySetName:        "primaryAvailabilitySetName",
		PrimaryScaleSetName:               "primaryScaleSetName",
		ResourceGroup:                     "resourceGroup",
		RouteTableName:                    "routeTableName",
		SecurityGroupName:                 "securityGroupName",
		SubnetName:                        "subnetName",
		UseInstanceMetadata:               true,
		VMType:                            "standard",
		VnetName:                          "vnetName",
		VnetResourceGroup:                 "vnetResourceGroup",
	}

	buffer := bytes.NewBufferString(azureConfig)
	config, err := parseConfig(buffer)
	assert.NoError(t, err)
	assert.Equal(t, expected, config)
}
// TestFlipServiceInternalAnnotation verifies that flipServiceInternalAnnotation
// toggles a service between internal and external, in both directions, for
// services that start in either state.
func TestFlipServiceInternalAnnotation(t *testing.T) {
	// Start with an external service and flip it twice.
	svc := getTestService("servicea", v1.ProtocolTCP, 80)
	svcUpdated := flipServiceInternalAnnotation(&svc)
	if !requiresInternalLoadBalancer(svcUpdated) {
		t.Errorf("Expected svc to be an internal service")
	}
	svcUpdated = flipServiceInternalAnnotation(svcUpdated)
	if requiresInternalLoadBalancer(svcUpdated) {
		t.Errorf("Expected svc to be an external service")
	}
	// Start with an internal service and flip it twice.
	svc2 := getInternalTestService("serviceb", 8081)
	svc2Updated := flipServiceInternalAnnotation(&svc2)
	if requiresInternalLoadBalancer(svc2Updated) {
		t.Errorf("Expected svc to be an external service")
	}
	svc2Updated = flipServiceInternalAnnotation(svc2Updated)
	if !requiresInternalLoadBalancer(svc2Updated) {
		t.Errorf("Expected svc to be an internal service")
	}
}
// TestAddPort verifies that reconciling a service with an extra (UDP) port
// still yields a load balancer with a single frontend IP configuration and
// rules matching the service's ports.
func TestAddPort(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("servicea", v1.ProtocolTCP, 80)
	clusterResources := getClusterResources(az, 1, 1)

	svc.Spec.Ports = append(svc.Spec.Ports, v1.ServicePort{
		Name:     fmt.Sprintf("port-udp-%d", 1234),
		Protocol: v1.ProtocolUDP,
		Port:     1234,
		NodePort: getBackendPort(1234),
	})

	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}

	// ensure we got a frontend ip configuration
	if len(*lb.FrontendIPConfigurations) != 1 {
		t.Error("Expected the loadbalancer to have a frontend ip configuration")
	}

	validateLoadBalancer(t, lb, svc)
}
// TestLoadBalancerInternalServiceModeSelection runs every LB mode-selection
// scenario with internal services.
func TestLoadBalancerInternalServiceModeSelection(t *testing.T) {
	testLoadBalancerServiceDefaultModeSelection(t, true)
	testLoadBalancerServiceAutoModeSelection(t, true)
	testLoadBalancerServicesSpecifiedSelection(t, true)
	testLoadBalancerMaxRulesServices(t, true)
	testLoadBalancerServiceAutoModeDeleteSelection(t, true)
}

// TestLoadBalancerExternalServiceModeSelection runs the same scenarios with
// external services.
func TestLoadBalancerExternalServiceModeSelection(t *testing.T) {
	testLoadBalancerServiceDefaultModeSelection(t, false)
	testLoadBalancerServiceAutoModeSelection(t, false)
	testLoadBalancerServicesSpecifiedSelection(t, false)
	testLoadBalancerMaxRulesServices(t, false)
	testLoadBalancerServiceAutoModeDeleteSelection(t, false)
}
// testLoadBalancerServiceDefaultModeSelection checks that, without any mode
// annotation, every service lands on the single default load balancer and the
// rule count grows with each deployed service.
func testLoadBalancerServiceDefaultModeSelection(t *testing.T, isInternal bool) {
	az := getTestCloud()
	const vmCount = 8
	const availabilitySetCount = 4
	const serviceCount = 9

	clusterResources := getClusterResources(az, vmCount, availabilitySetCount)
	getTestSecurityGroup(az)

	for index := 1; index <= serviceCount; index++ {
		svcName := fmt.Sprintf("service-%d", index)
		var svc v1.Service
		if isInternal {
			svc = getInternalTestService(svcName, 8081)
			addTestSubnet(t, az, &svc)
		} else {
			svc = getTestService(svcName, v1.ProtocolTCP, 8081)
		}

		lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
		if err != nil {
			t.Errorf("Unexpected error: %q", err)
		}
		if lbStatus == nil {
			t.Errorf("Unexpected error: %s", svcName)
		}

		// Internal services get a dedicated "-internal" suffixed LB name.
		expectedLBName := testClusterName
		if isInternal {
			expectedLBName = testClusterName + "-internal"
		}

		result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
		lb := result.Values()[0]
		lbCount := len(result.Values())
		expectedNumOfLB := 1
		if lbCount != expectedNumOfLB {
			t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
		}

		if !strings.EqualFold(*lb.Name, expectedLBName) {
			// Fixed misspelled failure message ("Extected ... Fouund").
			t.Errorf("lb name should be the default LB name Expected (%s) Found (%s)", expectedLBName, *lb.Name)
		}

		ruleCount := len(*lb.LoadBalancingRules)
		if ruleCount != index {
			// Fixed misspelled failure message ("nuber").
			t.Errorf("lb rule count should be equal to number of services deployed, expected (%d) Found (%d)", index, ruleCount)
		}
	}
}
// Validate even distribution of services across load balancers
// based on number of availability sets: with the auto-mode annotation, the
// LB count should grow to MIN(services, availability sets) and the rules
// should stay balanced (max-min spread of at most 1).
func testLoadBalancerServiceAutoModeSelection(t *testing.T, isInternal bool) {
	az := getTestCloud()
	const vmCount = 8
	const availabilitySetCount = 4
	const serviceCount = 9

	clusterResources := getClusterResources(az, vmCount, availabilitySetCount)
	getTestSecurityGroup(az)

	for index := 1; index <= serviceCount; index++ {
		svcName := fmt.Sprintf("service-%d", index)
		var svc v1.Service
		if isInternal {
			svc = getInternalTestService(svcName, 8081)
			addTestSubnet(t, az, &svc)
		} else {
			svc = getTestService(svcName, v1.ProtocolTCP, 8081)
		}
		setLoadBalancerAutoModeAnnotation(&svc)
		lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
		if err != nil {
			t.Errorf("Unexpected error: %q", err)
		}
		if lbStatus == nil {
			t.Errorf("Unexpected error: %s", svcName)
		}

		// expected is MIN(index, availabilitySetCount)
		expectedNumOfLB := int(math.Min(float64(index), float64(availabilitySetCount)))
		result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
		lbCount := len(result.Values())
		if lbCount != expectedNumOfLB {
			t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
		}

		// Track the rule spread across all LBs to assert even distribution.
		maxRules := 0
		minRules := serviceCount
		for _, lb := range result.Values() {
			ruleCount := len(*lb.LoadBalancingRules)
			if ruleCount < minRules {
				minRules = ruleCount
			}
			if ruleCount > maxRules {
				maxRules = ruleCount
			}
		}

		delta := maxRules - minRules
		if delta > 1 {
			t.Errorf("Unexpected min or max rule in LB's in resource group: Service Index (%d) Min (%d) Max(%d)", index, minRules, maxRules)
		}
	}
}
// Validate availability set selection of services across load balancers
// based on provided availability sets through service annotation.
// The scenario is that there are 4 availability sets in the agent pool but the
// services will be assigned load balancers that are part of the 2 availability
// sets specified in the service annotation, so the LB count is capped at 2.
func testLoadBalancerServicesSpecifiedSelection(t *testing.T, isInternal bool) {
	az := getTestCloud()
	const vmCount = 8
	const availabilitySetCount = 4
	const serviceCount = 9

	clusterResources := getClusterResources(az, vmCount, availabilitySetCount)
	getTestSecurityGroup(az)

	selectedAvailabilitySetName1 := getAvailabilitySetName(az, 1, availabilitySetCount)
	selectedAvailabilitySetName2 := getAvailabilitySetName(az, 2, availabilitySetCount)
	for index := 1; index <= serviceCount; index++ {
		svcName := fmt.Sprintf("service-%d", index)
		var svc v1.Service
		if isInternal {
			svc = getInternalTestService(svcName, 8081)
			addTestSubnet(t, az, &svc)
		} else {
			svc = getTestService(svcName, v1.ProtocolTCP, 8081)
		}
		// Pin the service to the two selected availability sets.
		lbMode := fmt.Sprintf("%s,%s", selectedAvailabilitySetName1, selectedAvailabilitySetName2)
		setLoadBalancerModeAnnotation(&svc, lbMode)

		lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
		if err != nil {
			t.Errorf("Unexpected error: %q", err)
		}
		if lbStatus == nil {
			t.Errorf("Unexpected error: %s", svcName)
		}

		// expected is MIN(index, 2)
		expectedNumOfLB := int(math.Min(float64(index), float64(2)))
		result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
		lbCount := len(result.Values())
		if lbCount != expectedNumOfLB {
			t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
		}
	}
}
// testLoadBalancerMaxRulesServices checks that MaximumLoadBalancerRuleCount
// is enforced: services are accepted up to the limit, and the next service
// fails with the "exceeded maximum rule limit" error.
func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) {
	az := getTestCloud()
	const vmCount = 1
	const availabilitySetCount = 1

	clusterResources := getClusterResources(az, vmCount, availabilitySetCount)
	getTestSecurityGroup(az)

	// Shrink the limit so the test only needs a couple of services.
	az.Config.MaximumLoadBalancerRuleCount = 1

	for index := 1; index <= az.Config.MaximumLoadBalancerRuleCount; index++ {
		svcName := fmt.Sprintf("service-%d", index)
		var svc v1.Service
		if isInternal {
			svc = getInternalTestService(svcName, 8081)
			addTestSubnet(t, az, &svc)
		} else {
			svc = getTestService(svcName, v1.ProtocolTCP, 8081)
		}

		lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
		if err != nil {
			t.Errorf("Unexpected error: %q", err)
		}
		if lbStatus == nil {
			t.Errorf("Unexpected error: %s", svcName)
		}

		// expected is MIN(index, az.Config.MaximumLoadBalancerRuleCount)
		expectedNumOfLBRules := int(math.Min(float64(index), float64(az.Config.MaximumLoadBalancerRuleCount)))
		result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
		lbCount := len(result.Values())
		if lbCount != expectedNumOfLBRules {
			t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLBRules, lbCount)
		}
	}

	// validate adding a new service fails since it will exceed the max limit on LB
	svcName := fmt.Sprintf("service-%d", az.Config.MaximumLoadBalancerRuleCount+1)
	var svc v1.Service
	if isInternal {
		svc = getInternalTestService(svcName, 8081)
		addTestSubnet(t, az, &svc)
	} else {
		svc = getTestService(svcName, v1.ProtocolTCP, 8081)
	}
	_, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
	if err == nil {
		t.Errorf("Expect any new service to fail as max limit in lb has reached")
	} else {
		expectedErrMessageSubString := "all available load balancers have exceeded maximum rule limit"
		if !strings.Contains(err.Error(), expectedErrMessageSubString) {
			t.Errorf("Error message returned is not expected, expected sub string=%s, actual error message=%v", expectedErrMessageSubString, err)
		}
	}
}
// Validate service deletion in lb auto selection mode: after deploying
// serviceCount auto-mode services, delete them in reverse order and check the
// LB count shrinks back following MIN(remaining services, availability sets).
func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal bool) {
	az := getTestCloud()
	const vmCount = 8
	const availabilitySetCount = 4
	const serviceCount = 9

	clusterResources := getClusterResources(az, vmCount, availabilitySetCount)
	getTestSecurityGroup(az)

	// Deploy all services first.
	for index := 1; index <= serviceCount; index++ {
		svcName := fmt.Sprintf("service-%d", index)
		var svc v1.Service
		if isInternal {
			svc = getInternalTestService(svcName, 8081)
			addTestSubnet(t, az, &svc)
		} else {
			svc = getTestService(svcName, v1.ProtocolTCP, 8081)
		}
		setLoadBalancerAutoModeAnnotation(&svc)
		lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
		if err != nil {
			t.Errorf("Unexpected error: %q", err)
		}
		if lbStatus == nil {
			t.Errorf("Unexpected error: %s", svcName)
		}
	}

	// Delete in reverse order, asserting the LB count before each deletion.
	for index := serviceCount; index >= 1; index-- {
		svcName := fmt.Sprintf("service-%d", index)
		var svc v1.Service
		if isInternal {
			svc = getInternalTestService(svcName, 8081)
			addTestSubnet(t, az, &svc)
		} else {
			svc = getTestService(svcName, v1.ProtocolTCP, 8081)
		}

		setLoadBalancerAutoModeAnnotation(&svc)

		// expected is MIN(index, availabilitySetCount)
		expectedNumOfLB := int(math.Min(float64(index), float64(availabilitySetCount)))
		result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
		lbCount := len(result.Values())
		if lbCount != expectedNumOfLB {
			t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
		}

		err := az.EnsureLoadBalancerDeleted(context.TODO(), testClusterName, &svc)
		if err != nil {
			t.Errorf("Unexpected error: %q", err)
		}
	}
}
// Test addition of a new service on an internal LB with a subnet.
// Verifies the reconciled LB ends up with exactly one frontend IP configuration.
func TestReconcileLoadBalancerAddServiceOnInternalSubnet(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc := getInternalTestService("servicea", 80)
	addTestSubnet(t, az, &svc)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	// ensure we got a frontend ip configuration
	if len(*lb.FrontendIPConfigurations) != 1 {
		t.Error("Expected the loadbalancer to have a frontend ip configuration")
	}
	validateLoadBalancer(t, lb, svc)
}
// Verifies that a pre-Kubernetes-1.8 NSG rule (empty destination address prefix)
// is reconciled into one scoped to the service's LoadBalancerIP.
func TestReconcileSecurityGroupFromAnyDestinationAddressPrefixToLoadBalancerIP(t *testing.T) {
	az := getTestCloud()
	svc1 := getTestService("serviceea", v1.ProtocolTCP, 80)
	svc1.Spec.LoadBalancerIP = "192.168.0.0"
	sg := getTestSecurityGroup(az)
	// Simulate a pre-Kubernetes 1.8 NSG, where we do not specify the destination address prefix
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(""), true)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	// Second reconcile with the real LB IP must converge on the scoped rule.
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateSecurityGroup(t, sg, svc1)
}
// Verifies NSG reconciliation when the service has no static LoadBalancerIP and
// the IP is assigned dynamically (passed in by the caller instead).
func TestReconcileSecurityGroupDynamicLoadBalancerIP(t *testing.T) {
	az := getTestCloud()
	svc1 := getTestService("servicea", v1.ProtocolTCP, 80)
	svc1.Spec.LoadBalancerIP = ""
	sg := getTestSecurityGroup(az)
	dynamicallyAssignedIP := "192.168.0.0"
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(dynamicallyAssignedIP), true)
	if err != nil {
		t.Errorf("unexpected error: %q", err)
	}
	validateSecurityGroup(t, sg, svc1)
}
// Test addition of services on an internal LB using both default and explicit subnets.
// Each service must land on its own LB (internal vs external) with one frontend IP each.
func TestReconcileLoadBalancerAddServicesOnMultipleSubnets(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc1 := getTestService("service1", v1.ProtocolTCP, 8081)
	svc2 := getInternalTestService("service2", 8081)
	// Internal and External service cannot reside on the same LB resource
	addTestSubnet(t, az, &svc2)
	// svc1 is using LB without "-internal" suffix
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error reconciling svc1: %q", err)
	}
	// ensure we got a frontend ip configuration for each service
	if len(*lb.FrontendIPConfigurations) != 1 {
		t.Error("Expected the loadbalancer to have 1 frontend ip configurations")
	}
	validateLoadBalancer(t, lb, svc1)
	// svc2 is using LB with "-internal" suffix
	lb, err = az.reconcileLoadBalancer(testClusterName, &svc2, nil, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error reconciling svc2: %q", err)
	}
	// ensure we got a frontend ip configuration for each service
	if len(*lb.FrontendIPConfigurations) != 1 {
		t.Error("Expected the loadbalancer to have 1 frontend ip configurations")
	}
	validateLoadBalancer(t, lb, svc2)
}
// Test moving a service exposure from one subnet to another.
// After the subnet annotation changes, the LB must have exactly one frontend IP
// (the old subnet's configuration must be replaced, not accumulated).
func TestReconcileLoadBalancerEditServiceSubnet(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc := getInternalTestService("service1", 8081)
	addTestSubnet(t, az, &svc)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error reconciling initial svc: %q", err)
	}
	validateLoadBalancer(t, lb, svc)
	svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = "NewSubnet"
	addTestSubnet(t, az, &svc)
	lb, err = az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error reconciling edits to svc: %q", err)
	}
	// ensure we got a frontend ip configuration for the service
	if len(*lb.FrontendIPConfigurations) != 1 {
		t.Error("Expected the loadbalancer to have 1 frontend ip configuration")
	}
	validateLoadBalancer(t, lb, svc)
}
// Verifies LB reconciliation for a service using ExternalTrafficPolicy=Local with
// a HealthCheckNodePort (node-health probes instead of per-port probes).
func TestReconcileLoadBalancerNodeHealth(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc := getTestService("servicea", v1.ProtocolTCP, 80)
	svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
	svc.Spec.HealthCheckNodePort = int32(32456)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	// ensure we got a frontend ip configuration
	if len(*lb.FrontendIPConfigurations) != 1 {
		t.Error("Expected the loadbalancer to have a frontend ip configuration")
	}
	validateLoadBalancer(t, lb, svc)
}
// Test removing all services results in removing the frontend ip configuration.
func TestReconcileLoadBalancerRemoveService(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	// Reconcile again with wantLb=false to simulate service removal.
	lb, err = az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, false /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	// ensure we abandoned the frontend ip configuration
	if len(*lb.FrontendIPConfigurations) != 0 {
		t.Error("Expected the loadbalancer to have no frontend ip configuration")
	}
	validateLoadBalancer(t, lb)
}
// Test removing all service ports results in removing the frontend ip configuration.
func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc := getTestService("servicea", v1.ProtocolTCP, 80)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateLoadBalancer(t, lb, svc)
	// Same service, but with its port list emptied.
	svcUpdated := getTestService("servicea", v1.ProtocolTCP)
	// NOTE(review): wantLb is false here even though the test is about removing
	// ports from a still-wanted service — confirm this is intentional.
	lb, err = az.reconcileLoadBalancer(testClusterName, &svcUpdated, clusterResources.nodes, false /* wantLb*/)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	// ensure we abandoned the frontend ip configuration
	if len(*lb.FrontendIPConfigurations) != 0 {
		t.Error("Expected the loadbalancer to have no frontend ip configuration")
	}
	validateLoadBalancer(t, lb, svcUpdated)
}
// Test removal of a port from an existing service.
// Reconciles with ports {80, 443}, then with {80}; the 443 rule must be dropped.
func TestReconcileLoadBalancerRemovesPort(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80)
	lb, err = az.reconcileLoadBalancer(testClusterName, &svcUpdated, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateLoadBalancer(t, lb, svcUpdated)
}
// Test reconciliation of multiple services on same port.
// Both services must coexist on the resulting load balancer.
func TestReconcileLoadBalancerMultipleServices(t *testing.T) {
	az := getTestCloud()
	clusterResources := getClusterResources(az, 1, 1)
	svc1 := getTestService("servicea", v1.ProtocolTCP, 80, 443)
	svc2 := getTestService("serviceb", v1.ProtocolTCP, 80)
	updatedLoadBalancer, err := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	updatedLoadBalancer, err = az.reconcileLoadBalancer(testClusterName, &svc2, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateLoadBalancer(t, updatedLoadBalancer, svc1, svc2)
}
// findLBRuleForPort returns the first load-balancing rule whose frontend port
// equals port, or an error if no rule matches.
func findLBRuleForPort(lbRules []network.LoadBalancingRule, port int32) (network.LoadBalancingRule, error) {
	for i := range lbRules {
		if *lbRules[i].FrontendPort == port {
			return lbRules[i], nil
		}
	}
	var none network.LoadBalancingRule
	return none, fmt.Errorf("Expected LB rule with port %d but none found", port)
}
// Verifies a service without SessionAffinity set gets the Default (5-tuple)
// load distribution on its LB rule.
func TestServiceDefaultsToNoSessionPersistence(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("service-sa-omitted", v1.ProtocolTCP, 7170)
	clusterResources := getClusterResources(az, 1, 1)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error reconciling svc1: %q", err)
	}
	validateLoadBalancer(t, lb, svc)
	lbRule, err := findLBRuleForPort(*lb.LoadBalancingRules, 7170)
	if err != nil {
		t.Error(err)
	}
	if lbRule.LoadDistribution != network.Default {
		t.Errorf("Expected LB rule to have default load distribution but was %s", lbRule.LoadDistribution)
	}
}
// Verifies SessionAffinity=None maps to the Default LB load distribution.
func TestServiceRespectsNoSessionAffinity(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("service-sa-none", v1.ProtocolTCP, 7170)
	svc.Spec.SessionAffinity = v1.ServiceAffinityNone
	clusterResources := getClusterResources(az, 1, 1)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error reconciling svc1: %q", err)
	}
	validateLoadBalancer(t, lb, svc)
	lbRule, err := findLBRuleForPort(*lb.LoadBalancingRules, 7170)
	if err != nil {
		t.Error(err)
	}
	if lbRule.LoadDistribution != network.Default {
		t.Errorf("Expected LB rule to have default load distribution but was %s", lbRule.LoadDistribution)
	}
}
// Verifies SessionAffinity=ClientIP maps to the SourceIP LB load distribution.
func TestServiceRespectsClientIPSessionAffinity(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("service-sa-clientip", v1.ProtocolTCP, 7170)
	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
	clusterResources := getClusterResources(az, 1, 1)
	lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error reconciling svc1: %q", err)
	}
	validateLoadBalancer(t, lb, svc)
	lbRule, err := findLBRuleForPort(*lb.LoadBalancingRules, 7170)
	if err != nil {
		t.Error(err)
	}
	if lbRule.LoadDistribution != network.SourceIP {
		t.Errorf("Expected LB rule to have SourceIP load distribution but was %s", lbRule.LoadDistribution)
	}
}
// Verifies that reconciling a new external service adds its port rule to the NSG.
func TestReconcileSecurityGroupNewServiceAddsPort(t *testing.T) {
	az := getTestCloud()
	getTestSecurityGroup(az)
	svc1 := getTestService("servicea", v1.ProtocolTCP, 80)
	clusterResources := getClusterResources(az, 1, 1)
	lb, _ := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true)
	lbStatus, _ := az.getServiceLoadBalancerStatus(&svc1, lb)
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, &lbStatus.Ingress[0].IP, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateSecurityGroup(t, sg, svc1)
}
// Verifies that reconciling a new internal (subnet-bound) service adds its port rule to the NSG.
func TestReconcileSecurityGroupNewInternalServiceAddsPort(t *testing.T) {
	az := getTestCloud()
	getTestSecurityGroup(az)
	svc1 := getInternalTestService("serviceea", 80)
	addTestSubnet(t, az, &svc1)
	clusterResources := getClusterResources(az, 1, 1)
	lb, _ := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true)
	lbStatus, _ := az.getServiceLoadBalancerStatus(&svc1, lb)
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, &lbStatus.Ingress[0].IP, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateSecurityGroup(t, sg, svc1)
}
// Verifies that removing one of two services (wantLb=false) removes only that
// service's rules, leaving the other service's rules in the NSG.
func TestReconcileSecurityGroupRemoveService(t *testing.T) {
	az := getTestCloud()
	service1 := getTestService("servicea", v1.ProtocolTCP, 81)
	service2 := getTestService("serviceb", v1.ProtocolTCP, 82)
	clusterResources := getClusterResources(az, 1, 1)
	lb, _ := az.reconcileLoadBalancer(testClusterName, &service1, clusterResources.nodes, true)
	az.reconcileLoadBalancer(testClusterName, &service2, clusterResources.nodes, true)
	lbStatus, _ := az.getServiceLoadBalancerStatus(&service1, lb)
	sg := getTestSecurityGroup(az, service1, service2)
	validateSecurityGroup(t, sg, service1, service2)
	sg, err := az.reconcileSecurityGroup(testClusterName, &service1, &lbStatus.Ingress[0].IP, false /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateSecurityGroup(t, sg, service2)
}
// Verifies that dropping a port from a service removes that port's rule from the NSG.
func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
	clusterResources := getClusterResources(az, 1, 1)
	sg := getTestSecurityGroup(az, svc)
	svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80)
	lb, _ := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true)
	lbStatus, _ := az.getServiceLoadBalancerStatus(&svc, lb)
	sg, err := az.reconcileSecurityGroup(testClusterName, &svcUpdated, &lbStatus.Ingress[0].IP, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateSecurityGroup(t, sg, svcUpdated)
}
// Verifies NSG reconciliation honors Spec.LoadBalancerSourceRanges (one rule per
// source CIDR per port instead of the default "Internet" source).
func TestReconcileSecurityWithSourceRanges(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
	svc.Spec.LoadBalancerSourceRanges = []string{
		"192.168.0.0/24",
		"10.0.0.0/32",
	}
	clusterResources := getClusterResources(az, 1, 1)
	sg := getTestSecurityGroup(az, svc)
	lb, _ := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true)
	lbStatus, _ := az.getServiceLoadBalancerStatus(&svc, lb)
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc, &lbStatus.Ingress[0].IP, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validateSecurityGroup(t, sg, svc)
}
// Verifies public IP reconciliation is idempotent: a second reconcile of the
// same service must return the same public IP resource (same name and address).
func TestReconcilePublicIPWithNewService(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
	pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip, &svc, true)
	pip2, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip2, &svc, true)
	if pip.Name != pip2.Name ||
		pip.PublicIPAddressPropertiesFormat.IPAddress != pip2.PublicIPAddressPropertiesFormat.IPAddress {
		t.Errorf("We should get the exact same public ip resource after a second reconcile")
	}
}
// Verifies the public IP is released when the service no longer wants an LB.
func TestReconcilePublicIPRemoveService(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
	pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip, &svc, true)
	// Remove the service
	pip, err = az.reconcilePublicIP(testClusterName, &svc, false /* wantLb */)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip, &svc, false)
}
// Verifies that an internal service gets no public IP (validatePublicIP expects
// nil for internal services).
func TestReconcilePublicIPWithInternalService(t *testing.T) {
	az := getTestCloud()
	svc := getInternalTestService("servicea", 80, 443)
	pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip, &svc, true)
}
// Verifies public IP handling when a service flips internal -> external -> internal:
// the public IP must appear for the external phase and disappear again for internal.
func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) {
	az := getTestCloud()
	svc := getInternalTestService("servicea", 80, 443)
	pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip, &svc, true)
	// Update to external service
	svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80)
	pip, err = az.reconcilePublicIP(testClusterName, &svcUpdated, true /* wantLb*/)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip, &svcUpdated, true)
	// Update to internal service again
	pip, err = az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	validatePublicIP(t, pip, &svc, true)
}
// getTestCloud returns a Cloud wired entirely to in-memory fake Azure clients,
// with a fixed test configuration (resource group "rg", standard VM type, etc.)
// and fresh caches, suitable for unit tests with no network access.
func getTestCloud() (az *Cloud) {
	az = &Cloud{
		Config: Config{
			AzureAuthConfig: auth.AzureAuthConfig{
				TenantID:       "tenant",
				SubscriptionID: "subscription",
			},
			ResourceGroup:                "rg",
			VnetResourceGroup:            "rg",
			Location:                     "westus",
			VnetName:                     "vnet",
			SubnetName:                   "subnet",
			SecurityGroupName:            "nsg",
			RouteTableName:               "rt",
			PrimaryAvailabilitySetName:   "as",
			MaximumLoadBalancerRuleCount: 250,
			VMType:                       vmTypeStandard,
		},
	}
	// Replace every Azure client with its fake in-memory implementation.
	az.DisksClient = newFakeDisksClient()
	az.InterfacesClient = newFakeAzureInterfacesClient()
	az.LoadBalancerClient = newFakeAzureLBClient()
	az.PublicIPAddressesClient = newFakeAzurePIPClient(az.Config.SubscriptionID)
	az.RoutesClient = newFakeRoutesClient()
	az.RouteTablesClient = newFakeRouteTablesClient()
	az.SecurityGroupsClient = newFakeAzureNSGClient()
	az.SubnetsClient = newFakeAzureSubnetsClient()
	az.VirtualMachineScaleSetsClient = newFakeVirtualMachineScaleSetsClient()
	az.VirtualMachineScaleSetVMsClient = newFakeVirtualMachineScaleSetVMsClient()
	az.VirtualMachinesClient = newFakeAzureVirtualMachinesClient()
	az.vmSet = newAvailabilitySet(az)
	// Caches are rebuilt per test cloud; errors are ignored for the fakes.
	az.vmCache, _ = az.newVMCache()
	az.lbCache, _ = az.newLBCache()
	az.nsgCache, _ = az.newNSGCache()
	az.rtCache, _ = az.newRouteTableCache()
	return az
}
// ARM resource-ID templates used to fabricate NIC and IP-config IDs for fakes.
const networkInterfacesIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkInterfaces/%s"
const primaryIPConfigIDTemplate = "%s/ipConfigurations/ipconfig"
// getNetworkInterfaceID returns the full ARM identifier of a network interface.
func getNetworkInterfaceID(subscriptionID string, resourceGroupName, nicName string) string {
	return fmt.Sprintf(networkInterfacesIDTemplate, subscriptionID, resourceGroupName, nicName)
}
// getPrimaryIPConfigID returns the full ARM identifier of the NIC's private ipconfig.
func getPrimaryIPConfigID(nicID string) string {
	return fmt.Sprintf(primaryIPConfigIDTemplate, nicID)
}
// Naming scheme for fabricated test resources: "<base>-<index>".
const TestResourceNameFormat = "%s-%d"
const TestVMResourceBaseName = "vm"
const TestASResourceBaseName = "as"
// getTestResourceName builds an indexed resource name ("<base>-<index>").
func getTestResourceName(resourceBaseName string, index int) string {
	name := fmt.Sprintf(TestResourceNameFormat, resourceBaseName, index)
	return name
}
// getVMName returns the canonical test VM name for the given index ("vm-<n>").
func getVMName(vmIndex int) string {
	return getTestResourceName(TestVMResourceBaseName, vmIndex)
}
// getAvailabilitySetName maps a VM index onto one of numAS availability sets;
// slot 0 is the cloud's primary availability set, the rest are "as-<slot>".
func getAvailabilitySetName(az *Cloud, vmIndex int, numAS int) string {
	slot := vmIndex % numAS
	if slot != 0 {
		return getTestResourceName(TestASResourceBaseName, slot)
	}
	return az.Config.PrimaryAvailabilitySetName
}
// test supporting on 1 nic per vm
// we really dont care about the name of the nic
// just using the vm name for testing purposes
func getNICName(vmIndex int) string {
	return getVMName(vmIndex)
}
// ClusterResources bundles the fake cluster state created by getClusterResources:
// the Kubernetes node objects and the availability-set name assigned to each VM.
type ClusterResources struct {
	nodes                []*v1.Node
	availabilitySetNames []string
}
// getClusterResources fabricates vmCount VMs (each with one NIC and a primary
// ipconfig) spread across availabilitySetCount availability sets, registers them
// with the fake Azure clients, and returns the matching Kubernetes node objects.
// Returns nil when vmCount < availabilitySetCount (cannot fill every set).
func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clusterResources *ClusterResources) {
	if vmCount < availabilitySetCount {
		return nil
	}
	clusterResources = &ClusterResources{}
	clusterResources.nodes = []*v1.Node{}
	clusterResources.availabilitySetNames = []string{}
	for vmIndex := 0; vmIndex < vmCount; vmIndex++ {
		vmName := getVMName(vmIndex)
		asName := getAvailabilitySetName(az, vmIndex, availabilitySetCount)
		clusterResources.availabilitySetNames = append(clusterResources.availabilitySetNames, asName)
		nicName := getNICName(vmIndex)
		nicID := getNetworkInterfaceID(az.Config.SubscriptionID, az.Config.ResourceGroup, nicName)
		primaryIPConfigID := getPrimaryIPConfigID(nicID)
		isPrimary := true
		newNIC := network.Interface{
			ID:   &nicID,
			Name: &nicName,
			InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
				IPConfigurations: &[]network.InterfaceIPConfiguration{
					{
						ID: &primaryIPConfigID,
						InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
							// The NIC name doubles as the fake private IP address.
							PrivateIPAddress: &nicName,
							Primary:          &isPrimary,
						},
					},
				},
			},
		}
		az.InterfacesClient.CreateOrUpdate(az.Config.ResourceGroup, nicName, newNIC, nil)
		// create vm
		asID := az.getAvailabilitySetID(asName)
		newVM := compute.VirtualMachine{
			Name:     &vmName,
			Location: &az.Config.Location,
			VirtualMachineProperties: &compute.VirtualMachineProperties{
				AvailabilitySet: &compute.SubResource{
					ID: &asID,
				},
				NetworkProfile: &compute.NetworkProfile{
					NetworkInterfaces: &[]compute.NetworkInterfaceReference{
						{
							ID: &nicID,
						},
					},
				},
			},
		}
		// NOTE: the deferred cancels accumulate until the function returns;
		// harmless for the fake clients used here.
		ctx, cancel := getContextWithCancel()
		defer cancel()
		// The fake VirtualMachinesClient never fails; the error is deliberately
		// ignored (previously an empty `if err != nil {}` silently dropped it).
		_, _ = az.VirtualMachinesClient.CreateOrUpdate(ctx, az.Config.ResourceGroup, vmName, newVM)
		// add to kubernetes
		newNode := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: vmName,
				Labels: map[string]string{
					kubeletapis.LabelHostname: vmName,
				},
			},
		}
		clusterResources.nodes = append(clusterResources.nodes, newNode)
	}
	return clusterResources
}
// getBackendPort derives the fake NodePort for a service port (port + 10000).
func getBackendPort(port int32) int32 {
	const nodePortOffset int32 = 10000
	return nodePortOffset + port
}
// getTestService builds a LoadBalancer-type Service named identifier in the
// "default" namespace, with one ServicePort per requested port (NodePort derived
// via getBackendPort) and an empty annotations map ready for callers to fill.
// NOTE(review): the port name is always "port-tcp-%d" even when proto is UDP —
// confirm downstream rule naming does not depend on the protocol in the name.
func getTestService(identifier string, proto v1.Protocol, requestedPorts ...int32) v1.Service {
	ports := []v1.ServicePort{}
	for _, port := range requestedPorts {
		ports = append(ports, v1.ServicePort{
			Name:     fmt.Sprintf("port-tcp-%d", port),
			Protocol: proto,
			Port:     port,
			NodePort: getBackendPort(port),
		})
	}
	svc := v1.Service{
		Spec: v1.ServiceSpec{
			Type:  v1.ServiceTypeLoadBalancer,
			Ports: ports,
		},
	}
	svc.Name = identifier
	svc.Namespace = "default"
	// UID mirrors the name so generated Azure resource names are deterministic.
	svc.UID = types.UID(identifier)
	svc.Annotations = make(map[string]string)
	return svc
}
// getInternalTestService builds a TCP test service flagged as internal via the
// internal-load-balancer annotation.
func getInternalTestService(identifier string, requestedPorts ...int32) v1.Service {
	service := getTestService(identifier, v1.ProtocolTCP, requestedPorts...)
	service.Annotations[ServiceAnnotationLoadBalancerInternal] = "true"
	return service
}
// setLoadBalancerModeAnnotation sets the LB-mode annotation to lbMode.
func setLoadBalancerModeAnnotation(service *v1.Service, lbMode string) {
	service.Annotations[ServiceAnnotationLoadBalancerMode] = lbMode
}
// setLoadBalancerAutoModeAnnotation marks the service for automatic LB selection.
func setLoadBalancerAutoModeAnnotation(service *v1.Service) {
	setLoadBalancerModeAnnotation(service, ServiceAnnotationLoadBalancerAutoModeValue)
}
// getServiceSourceRanges returns the service's LoadBalancerSourceRanges, or the
// default ["Internet"] for an external service with no explicit ranges. Internal
// services with no ranges get the (empty) spec value back unchanged.
func getServiceSourceRanges(service *v1.Service) []string {
	ranges := service.Spec.LoadBalancerSourceRanges
	if len(ranges) == 0 && !requiresInternalLoadBalancer(service) {
		return []string{"Internet"}
	}
	return ranges
}
// getTestSecurityGroup builds an NSG containing one rule per (service, port,
// source range) triple, registers it with the fake SecurityGroupsClient, and
// returns it. The rule set mirrors what reconcileSecurityGroup would produce.
func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGroup {
	rules := []network.SecurityRule{}
	for _, service := range services {
		for _, port := range service.Spec.Ports {
			sources := getServiceSourceRanges(&service)
			for _, src := range sources {
				ruleName := getSecurityRuleName(&service, port, src)
				rules = append(rules, network.SecurityRule{
					Name: to.StringPtr(ruleName),
					SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
						SourceAddressPrefix:  to.StringPtr(src),
						DestinationPortRange: to.StringPtr(fmt.Sprintf("%d", port.Port)),
					},
				})
			}
		}
	}
	sg := network.SecurityGroup{
		Name: &az.SecurityGroupName,
		SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{
			SecurityRules: &rules,
		},
	}
	// Persist into the fake client so subsequent reconciles see this state.
	az.SecurityGroupsClient.CreateOrUpdate(
		az.ResourceGroup,
		az.SecurityGroupName,
		sg,
		nil)
	return &sg
}
// validateLoadBalancer asserts that loadBalancer contains exactly the frontend
// IP configurations, LB rules, and probes implied by services: one frontend IP
// per service with ports, one rule per port, and one probe per non-UDP port
// (HTTP probe when the service needs a health check, TCP NodePort probe otherwise).
// Fix: the probe-count mismatch message previously printed expectedRuleCount
// instead of expectedProbeCount.
func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, services ...v1.Service) {
	expectedRuleCount := 0
	expectedFrontendIPCount := 0
	expectedProbeCount := 0
	expectedFrontendIPs := []ExpectedFrontendIPInfo{}
	for _, svc := range services {
		if len(svc.Spec.Ports) > 0 {
			expectedFrontendIPCount++
			expectedFrontendIP := ExpectedFrontendIPInfo{
				Name:   getFrontendIPConfigName(&svc, subnet(&svc)),
				Subnet: subnet(&svc),
			}
			expectedFrontendIPs = append(expectedFrontendIPs, expectedFrontendIP)
		}
		for _, wantedRule := range svc.Spec.Ports {
			expectedRuleCount++
			wantedRuleName := getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
			foundRule := false
			for _, actualRule := range *loadBalancer.LoadBalancingRules {
				if strings.EqualFold(*actualRule.Name, wantedRuleName) &&
					*actualRule.FrontendPort == wantedRule.Port &&
					*actualRule.BackendPort == wantedRule.Port {
					foundRule = true
					break
				}
			}
			if !foundRule {
				t.Errorf("Expected load balancer rule but didn't find it: %q", wantedRuleName)
			}
			// if UDP rule, there is no probe
			if wantedRule.Protocol == v1.ProtocolUDP {
				continue
			}
			expectedProbeCount++
			foundProbe := false
			if serviceapi.NeedsHealthCheck(&svc) {
				// Health-checked services probe the HTTP health-check path/port.
				path, port := serviceapi.GetServiceHealthCheckPathPort(&svc)
				for _, actualProbe := range *loadBalancer.Probes {
					if strings.EqualFold(*actualProbe.Name, wantedRuleName) &&
						*actualProbe.Port == port &&
						*actualProbe.RequestPath == path &&
						actualProbe.Protocol == network.ProbeProtocolHTTP {
						foundProbe = true
						break
					}
				}
			} else {
				// Otherwise the probe targets the rule's NodePort.
				for _, actualProbe := range *loadBalancer.Probes {
					if strings.EqualFold(*actualProbe.Name, wantedRuleName) &&
						*actualProbe.Port == wantedRule.NodePort {
						foundProbe = true
						break
					}
				}
			}
			if !foundProbe {
				for _, actualProbe := range *loadBalancer.Probes {
					t.Logf("Probe: %s %d", *actualProbe.Name, *actualProbe.Port)
				}
				t.Errorf("Expected loadbalancer probe but didn't find it: %q", wantedRuleName)
			}
		}
	}
	frontendIPCount := len(*loadBalancer.FrontendIPConfigurations)
	if frontendIPCount != expectedFrontendIPCount {
		t.Errorf("Expected the loadbalancer to have %d frontend IPs. Found %d.\n%v", expectedFrontendIPCount, frontendIPCount, loadBalancer.FrontendIPConfigurations)
	}
	frontendIPs := *loadBalancer.FrontendIPConfigurations
	for _, expectedFrontendIP := range expectedFrontendIPs {
		if !expectedFrontendIP.existsIn(frontendIPs) {
			t.Errorf("Expected the loadbalancer to have frontend IP %s/%s. Found %s", expectedFrontendIP.Name, to.String(expectedFrontendIP.Subnet), describeFIPs(frontendIPs))
		}
	}
	lenRules := len(*loadBalancer.LoadBalancingRules)
	if lenRules != expectedRuleCount {
		t.Errorf("Expected the loadbalancer to have %d rules. Found %d.\n%v", expectedRuleCount, lenRules, loadBalancer.LoadBalancingRules)
	}
	lenProbes := len(*loadBalancer.Probes)
	if lenProbes != expectedProbeCount {
		// BUG FIX: was printing expectedRuleCount here, misreporting the expected probe count.
		t.Errorf("Expected the loadbalancer to have %d probes. Found %d.", expectedProbeCount, lenProbes)
	}
}
// ExpectedFrontendIPInfo is the (name, subnet) pair a frontend IP configuration
// is expected to carry; Subnet is nil for external (non-subnet) frontends.
type ExpectedFrontendIPInfo struct {
	Name   string
	Subnet *string
}
// matches reports whether frontendIP has the expected name and subnet
// (both compared case-insensitively; nil subnets compare as empty strings).
func (expected ExpectedFrontendIPInfo) matches(frontendIP network.FrontendIPConfiguration) bool {
	nameMatches := strings.EqualFold(expected.Name, to.String(frontendIP.Name))
	subnetMatches := strings.EqualFold(to.String(expected.Subnet), to.String(subnetName(frontendIP)))
	return nameMatches && subnetMatches
}
// existsIn reports whether any frontend IP configuration in the slice matches expected.
func (expected ExpectedFrontendIPInfo) existsIn(frontendIPs []network.FrontendIPConfiguration) bool {
	for i := range frontendIPs {
		if expected.matches(frontendIPs[i]) {
			return true
		}
	}
	return false
}
// subnetName returns the frontend IP's subnet name, or nil when it has no subnet.
func subnetName(frontendIP network.FrontendIPConfiguration) *string {
	if frontendIP.Subnet == nil {
		return nil
	}
	return frontendIP.Subnet.Name
}
// describeFIPs renders frontend IP configurations as "name/subnet " pairs,
// concatenated, for use in test failure messages.
func describeFIPs(frontendIPs []network.FrontendIPConfiguration) string {
	var description string
	for _, fip := range frontendIPs {
		subnet := ""
		if fip.Subnet != nil {
			subnet = to.String(fip.Subnet.Name)
		}
		description += fmt.Sprintf("%s/%s ", to.String(fip.Name), subnet)
	}
	return description
}
// validatePublicIP asserts the public IP resource is consistent with the service:
// nil for internal services or when no LB is wanted; otherwise present and tagged
// with tags["service"] == the service's full name.
func validatePublicIP(t *testing.T, publicIP *network.PublicIPAddress, service *v1.Service, wantLb bool) {
	isInternal := requiresInternalLoadBalancer(service)
	if isInternal || !wantLb {
		if publicIP != nil {
			t.Errorf("Expected publicIP resource to be nil, when it is an internal service or doesn't want LB")
		}
		return
	}
	// For external service
	if publicIP == nil {
		t.Errorf("Expected publicIP resource exists, when it is not an internal service")
	}
	if publicIP.Tags == nil || publicIP.Tags["service"] == nil {
		t.Errorf("Expected publicIP resource has tags[service]")
	}
	serviceName := getServiceName(service)
	if serviceName != *(publicIP.Tags["service"]) {
		t.Errorf("Expected publicIP resource has matching tags[service]")
	}
	// We cannot use service.Spec.LoadBalancerIP to compare with
	// Public IP's IPAddress
	// Because service properties are updated outside of cloudprovider code
}
// contains reports whether targetValue appears in ruleValues, ignoring case.
func contains(ruleValues []string, targetValue string) bool {
	for i := range ruleValues {
		if strings.EqualFold(ruleValues[i], targetValue) {
			return true
		}
	}
	return false
}
// securityRuleMatches checks that securityRule covers the given source range,
// destination port, and (when non-empty) destination service IP. Azure rules may
// carry either singular (SourceAddressPrefix) or plural (SourceAddressPrefixes)
// fields; each pair is normalized into a slice before comparison.
func securityRuleMatches(serviceSourceRange string, servicePort v1.ServicePort, serviceIP string, securityRule network.SecurityRule) error {
	// Normalize source: prefer the plural field, fall back to the singular one.
	ruleSource := securityRule.SourceAddressPrefixes
	if ruleSource == nil || len(*ruleSource) == 0 {
		if securityRule.SourceAddressPrefix == nil {
			ruleSource = &[]string{}
		} else {
			ruleSource = &[]string{*securityRule.SourceAddressPrefix}
		}
	}
	// Normalize destination port ranges the same way.
	rulePorts := securityRule.DestinationPortRanges
	if rulePorts == nil || len(*rulePorts) == 0 {
		if securityRule.DestinationPortRange == nil {
			rulePorts = &[]string{}
		} else {
			rulePorts = &[]string{*securityRule.DestinationPortRange}
		}
	}
	// Normalize destination address prefixes the same way.
	ruleDestination := securityRule.DestinationAddressPrefixes
	if ruleDestination == nil || len(*ruleDestination) == 0 {
		if securityRule.DestinationAddressPrefix == nil {
			ruleDestination = &[]string{}
		} else {
			ruleDestination = &[]string{*securityRule.DestinationAddressPrefix}
		}
	}
	if !contains(*ruleSource, serviceSourceRange) {
		return fmt.Errorf("Rule does not contain source %s", serviceSourceRange)
	}
	if !contains(*rulePorts, fmt.Sprintf("%d", servicePort.Port)) {
		return fmt.Errorf("Rule does not contain port %d", servicePort.Port)
	}
	// Destination is only checked when the caller supplies a service IP.
	if serviceIP != "" && !contains(*ruleDestination, serviceIP) {
		return fmt.Errorf("Rule does not contain destination %s", serviceIP)
	}
	return nil
}
// validateSecurityGroup asserts securityGroup contains exactly one rule per
// (service, port, source range) triple — each present with matching properties —
// and no extra rules.
// Fix: the rule-count failure message said "loadbalancer" although this function
// validates a security group.
func validateSecurityGroup(t *testing.T, securityGroup *network.SecurityGroup, services ...v1.Service) {
	seenRules := make(map[string]string)
	for _, svc := range services {
		for _, wantedRule := range svc.Spec.Ports {
			sources := getServiceSourceRanges(&svc)
			for _, source := range sources {
				wantedRuleName := getSecurityRuleName(&svc, wantedRule, source)
				seenRules[wantedRuleName] = wantedRuleName
				foundRule := false
				for _, actualRule := range *securityGroup.SecurityRules {
					if strings.EqualFold(*actualRule.Name, wantedRuleName) {
						err := securityRuleMatches(source, wantedRule, svc.Spec.LoadBalancerIP, actualRule)
						if err != nil {
							t.Errorf("Found matching security rule %q but properties were incorrect: %v", wantedRuleName, err)
						}
						foundRule = true
						break
					}
				}
				if !foundRule {
					t.Errorf("Expected security group rule but didn't find it: %q", wantedRuleName)
				}
			}
		}
	}
	lenRules := len(*securityGroup.SecurityRules)
	expectedRuleCount := len(seenRules)
	if lenRules != expectedRuleCount {
		// BUG FIX: message previously said "loadbalancer" for a security-group check.
		t.Errorf("Expected the security group to have %d rules. Found %d.\n", expectedRuleCount, lenRules)
	}
}
// Verifies getNextAvailablePriority skips the 50 already-used priorities and
// returns the first free one.
// Fix: "Unexpectected" typo in the failure message.
func TestSecurityRulePriorityPicksNextAvailablePriority(t *testing.T) {
	rules := []network.SecurityRule{}
	var expectedPriority int32 = loadBalancerMinimumPriority + 50
	var i int32
	// Occupy every priority below the expected one.
	for i = loadBalancerMinimumPriority; i < expectedPriority; i++ {
		rules = append(rules, network.SecurityRule{
			SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
				Priority: to.Int32Ptr(i),
			},
		})
	}
	priority, err := getNextAvailablePriority(rules)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}
	if priority != expectedPriority {
		t.Errorf("Expected priority %d. Got priority %d.", expectedPriority, priority)
	}
}
// Verifies getNextAvailablePriority errors when every priority in the allowed
// range is already taken.
// Fix: "Expectected" typo in the failure message.
func TestSecurityRulePriorityFailsIfExhausted(t *testing.T) {
	rules := []network.SecurityRule{}
	var i int32
	// Occupy the entire allowed priority range.
	for i = loadBalancerMinimumPriority; i < loadBalancerMaximumPriority; i++ {
		rules = append(rules, network.SecurityRule{
			SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
				Priority: to.Int32Ptr(i),
			},
		})
	}
	_, err := getNextAvailablePriority(rules)
	if err == nil {
		t.Error("Expected an error. There are no priority levels left.")
	}
}
// Verifies TCP maps to TCP transport, security-group, and probe protocols.
// NOTE(review): all three failure messages print transportProto — the last two
// probably meant securityGroupProto/probeProto; confirm before relying on output.
func TestProtocolTranslationTCP(t *testing.T) {
	proto := v1.ProtocolTCP
	transportProto, securityGroupProto, probeProto, err := getProtocolsFromKubernetesProtocol(proto)
	if err != nil {
		t.Error(err)
	}
	if *transportProto != network.TransportProtocolTCP {
		t.Errorf("Expected TCP LoadBalancer Rule Protocol. Got %v", transportProto)
	}
	if *securityGroupProto != network.SecurityRuleProtocolTCP {
		t.Errorf("Expected TCP SecurityGroup Protocol. Got %v", transportProto)
	}
	if *probeProto != network.ProbeProtocolTCP {
		t.Errorf("Expected TCP LoadBalancer Probe Protocol. Got %v", transportProto)
	}
}
// TestProtocolTranslationUDP verifies that a Kubernetes UDP protocol maps to
// the UDP transport and security-group protocols and yields no probe protocol.
func TestProtocolTranslationUDP(t *testing.T) {
	proto := v1.ProtocolUDP
	transportProto, securityGroupProto, probeProto, err := getProtocolsFromKubernetesProtocol(proto)
	if err != nil {
		// Previously the error was discarded; a non-nil error can leave the
		// pointers below nil, so stop before dereferencing them.
		t.Fatal(err)
	}
	if *transportProto != network.TransportProtocolUDP {
		t.Errorf("Expected UDP LoadBalancer Rule Protocol. Got %v", *transportProto)
	}
	if *securityGroupProto != network.SecurityRuleProtocolUDP {
		// Fixed: previously reported transportProto instead of the value under test.
		t.Errorf("Expected UDP SecurityGroup Protocol. Got %v", *securityGroupProto)
	}
	if probeProto != nil {
		// Fixed: previously reported transportProto instead of the value under test.
		t.Errorf("Expected no UDP LoadBalancer Probe Protocol. Got %v", *probeProto)
	}
}
// TestNewCloudFromJSON checks that a fully-populated JSON configuration is
// deserialized into the expected Cloud fields.
func TestNewCloudFromJSON(t *testing.T) {
	cfg := `{
		"tenantId": "--tenant-id--",
		"subscriptionId": "--subscription-id--",
		"aadClientId": "--aad-client-id--",
		"aadClientSecret": "--aad-client-secret--",
		"aadClientCertPath": "--aad-client-cert-path--",
		"aadClientCertPassword": "--aad-client-cert-password--",
		"resourceGroup": "--resource-group--",
		"location": "--location--",
		"subnetName": "--subnet-name--",
		"securityGroupName": "--security-group-name--",
		"vnetName": "--vnet-name--",
		"routeTableName": "--route-table-name--",
		"primaryAvailabilitySetName": "--primary-availability-set-name--",
		"cloudProviderBackoff": true,
		"cloudProviderRatelimit": true,
		"cloudProviderRateLimitQPS": 0.5,
		"cloudProviderRateLimitBucket": 5
	}`
	validateConfig(t, cfg)
}
// TestCloudDefaultConfigFromJSON checks backoff and rate-limit defaults when a
// minimal JSON configuration leaves them unset.
func TestCloudDefaultConfigFromJSON(t *testing.T) {
	cfg := `{
		"aadClientId": "--aad-client-id--",
		"aadClientSecret": "--aad-client-secret--"
	}`
	validateEmptyConfig(t, cfg)
}
// TestCloudDefaultConfigFromYAML checks backoff and rate-limit defaults when a
// minimal YAML configuration leaves them unset.
func TestCloudDefaultConfigFromYAML(t *testing.T) {
	cfg := `
aadClientId: --aad-client-id--
aadClientSecret: --aad-client-secret--
`
	validateEmptyConfig(t, cfg)
}
// TestNewCloudFromYAML checks that a fully-populated YAML configuration is
// deserialized into the expected Cloud fields.
func TestNewCloudFromYAML(t *testing.T) {
	cfg := `
tenantId: --tenant-id--
subscriptionId: --subscription-id--
aadClientId: --aad-client-id--
aadClientSecret: --aad-client-secret--
aadClientCertPath: --aad-client-cert-path--
aadClientCertPassword: --aad-client-cert-password--
resourceGroup: --resource-group--
location: --location--
subnetName: --subnet-name--
securityGroupName: --security-group-name--
routeTableName: --route-table-name--
primaryAvailabilitySetName: --primary-availability-set-name--
cloudProviderBackoff: true
cloudProviderBackoffRetries: 6
cloudProviderBackoffExponent: 1.5
cloudProviderBackoffDuration: 5
cloudProviderBackoffJitter: 1.0
cloudProviderRatelimit: true
cloudProviderRateLimitQPS: 0.5
cloudProviderRateLimitBucket: 5
vnetName: --vnet-name--
subnetName: --subnet-name--
securityGroupName: --security-group-name--
`
	validateConfig(t, cfg)
}
// validateConfig deserializes config and asserts that every field carries the
// value from the standard test fixture.
func validateConfig(t *testing.T, config string) {
	azureCloud := getCloudFromConfig(t, config)

	// check reports a field whose deserialized value does not match.
	check := func(field string, ok bool) {
		if !ok {
			t.Errorf("got incorrect value for %s", field)
		}
	}

	// String fields.
	check("TenantID", azureCloud.TenantID == "--tenant-id--")
	check("SubscriptionID", azureCloud.SubscriptionID == "--subscription-id--")
	check("AADClientID", azureCloud.AADClientID == "--aad-client-id--")
	check("AADClientSecret", azureCloud.AADClientSecret == "--aad-client-secret--")
	check("AADClientCertPath", azureCloud.AADClientCertPath == "--aad-client-cert-path--")
	check("AADClientCertPassword", azureCloud.AADClientCertPassword == "--aad-client-cert-password--")
	check("ResourceGroup", azureCloud.ResourceGroup == "--resource-group--")
	check("Location", azureCloud.Location == "--location--")
	check("SubnetName", azureCloud.SubnetName == "--subnet-name--")
	check("SecurityGroupName", azureCloud.SecurityGroupName == "--security-group-name--")
	check("VnetName", azureCloud.VnetName == "--vnet-name--")
	check("RouteTableName", azureCloud.RouteTableName == "--route-table-name--")
	check("PrimaryAvailabilitySetName", azureCloud.PrimaryAvailabilitySetName == "--primary-availability-set-name--")

	// Backoff settings.
	check("CloudProviderBackoff", azureCloud.CloudProviderBackoff == true)
	check("CloudProviderBackoffRetries", azureCloud.CloudProviderBackoffRetries == 6)
	check("CloudProviderBackoffExponent", azureCloud.CloudProviderBackoffExponent == 1.5)
	check("CloudProviderBackoffDuration", azureCloud.CloudProviderBackoffDuration == 5)
	check("CloudProviderBackoffJitter", azureCloud.CloudProviderBackoffJitter == 1.0)

	// Rate-limit settings.
	check("CloudProviderRateLimit", azureCloud.CloudProviderRateLimit == true)
	check("CloudProviderRateLimitQPS", azureCloud.CloudProviderRateLimitQPS == 0.5)
	check("CloudProviderRateLimitBucket", azureCloud.CloudProviderRateLimitBucket == 5)
}
// getCloudFromConfig deserializes config via NewCloud and returns the
// concrete *Cloud. Any failure aborts the calling test immediately.
func getCloudFromConfig(t *testing.T, config string) *Cloud {
	configReader := strings.NewReader(config)
	cloud, err := NewCloud(configReader)
	if err != nil {
		// Fatal (was Error): continuing would attempt a type assertion on a
		// nil interface and return a nil *Cloud that callers dereference.
		t.Fatal(err)
	}
	azureCloud, ok := cloud.(*Cloud)
	if !ok {
		// Fatal (was Error): returning nil here would panic in the caller.
		t.Fatal("NewCloud returned incorrect type")
	}
	return azureCloud
}
// TODO include checks for other appropriate default config parameters
// validateEmptyConfig asserts that optional features default to disabled when
// the configuration does not mention them.
func validateEmptyConfig(t *testing.T, config string) {
	cloud := getCloudFromConfig(t, config)
	// Backoff must stay off unless explicitly enabled in the config.
	if cloud.CloudProviderBackoff {
		t.Errorf("got incorrect value for CloudProviderBackoff")
	}
	// Rate limiting must stay off unless explicitly enabled in the config.
	if cloud.CloudProviderRateLimit {
		t.Errorf("got incorrect value for CloudProviderRateLimit")
	}
}
// TestGetZone checks that getZoneFromURL parses the fault domain from the
// instance-metadata payload and uses the cloud's location as the region.
func TestGetZone(t *testing.T) {
	payload := `{"ID":"_azdev","UD":"0","FD":"99"}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, payload)
	}))
	defer server.Close()

	cloud := &Cloud{}
	cloud.Location = "eastus"

	zone, err := cloud.getZoneFromURL(server.URL)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if zone.FailureDomain != "99" {
		t.Errorf("Unexpected value: %s, expected '99'", zone.FailureDomain)
	}
	if zone.Region != cloud.Location {
		t.Errorf("Expected: %s, saw: %s", cloud.Location, zone.Region)
	}
}
// TestFetchFaultDomain checks that fetchFaultDomain extracts the "FD" field
// from the metadata endpoint's response.
func TestFetchFaultDomain(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, `{"ID":"_azdev","UD":"0","FD":"99"}`)
	}))
	defer ts.Close()
	faultDomain, err := fetchFaultDomain(ts.URL)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if faultDomain == nil {
		// Fatal (was Error): the dereference below would panic on nil.
		t.Fatal("Unexpected nil fault domain")
	}
	if *faultDomain != "99" {
		t.Errorf("Expected '99', saw '%s'", *faultDomain)
	}
}
// TestDecodeInstanceInfo checks that readFaultDomain decodes the "FD" field
// from a raw instance-info JSON document.
func TestDecodeInstanceInfo(t *testing.T) {
	response := `{"ID":"_azdev","UD":"0","FD":"99"}`
	faultDomain, err := readFaultDomain(strings.NewReader(response))
	if err != nil {
		t.Errorf("Unexpected error in ReadFaultDomain: %v", err)
	}
	if faultDomain == nil {
		// Fatal (was Error): the dereference below would panic on nil.
		t.Fatal("Fault domain was unexpectedly nil")
	}
	if *faultDomain != "99" {
		t.Error("got incorrect fault domain")
	}
}
// TestGetNodeNameByProviderID table-tests provider-ID parsing: a well-formed
// Azure provider ID yields the VM's node name; malformed IDs or IDs for other
// cloud providers must fail.
func TestGetNodeNameByProviderID(t *testing.T) {
	az := getTestCloud()
	providers := []struct {
		providerID string
		name       types.NodeName
		fail       bool
	}{
		{
			// Canonical azure:///subscriptions/... form.
			providerID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
			name:       "k8s-agent-AAAAAAAA-0",
			fail:       false,
		},
		{
			// Single slash after the scheme: malformed.
			providerID: CloudProviderName + ":/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
			name:       "",
			fail:       true,
		},
		{
			// Scheme only, no resource path.
			providerID: CloudProviderName + "://",
			name:       "",
			fail:       true,
		},
		{
			// Missing provider scheme.
			providerID: ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
			name:       "",
			fail:       true,
		},
		{
			// Wrong cloud provider.
			providerID: "aws:///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
			name:       "",
			fail:       true,
		},
	}

	for _, test := range providers {
		name, err := az.vmSet.GetNodeNameByProviderID(test.providerID)
		if (err != nil) != test.fail {
			// Fixed typo: was "Expected to failt=%t".
			t.Errorf("Expected to fail=%t, with pattern %v", test.fail, test)
		}
		if test.fail {
			continue
		}
		if name != test.name {
			t.Errorf("Expected %v, but got %v", test.name, name)
		}
	}
}
// TestMetadataURLGeneration checks that makeMetadataURL joins a relative path
// onto the well-known instance-metadata endpoint.
func TestMetadataURLGeneration(t *testing.T) {
	im := NewInstanceMetadata()
	got := im.makeMetadataURL("some/path")
	const want = "http://169.254.169.254/metadata/some/path"
	if got != want {
		t.Errorf("Expected http://169.254.169.254/metadata/some/path saw %s", got)
	}
}
// TestMetadataParsing exercises NetworkMetadata in two ways: a direct
// json.Unmarshal of a sample payload, and a round trip through
// InstanceMetadata.Object against a stub HTTP server; both decodes must agree.
func TestMetadataParsing(t *testing.T) {
	payload := `
{
    "interface": [
      {
        "ipv4": {
          "ipAddress": [
            {
              "privateIpAddress": "10.0.1.4",
              "publicIpAddress": "X.X.X.X"
            }
          ],
          "subnet": [
            {
              "address": "10.0.1.0",
              "prefix": "24"
            }
          ]
        },
        "ipv6": {
          "ipAddress": [
          ]
        },
        "macAddress": "002248020E1E"
      }
    ]
}
`

	// Direct decode.
	var decoded NetworkMetadata
	if err := json.Unmarshal([]byte(payload), &decoded); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if addr := decoded.Interface[0].IPV4.IPAddress[0].PrivateIP; addr != "10.0.1.4" {
		t.Errorf("Unexpected value: %s, expected 10.0.1.4", addr)
	}

	// Decode via InstanceMetadata.Object against a stub server.
	stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, payload)
	}))
	defer stub.Close()

	im := &InstanceMetadata{
		baseURL: stub.URL,
	}
	var fetched NetworkMetadata
	if err := im.Object("/some/path", &fetched); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if !reflect.DeepEqual(decoded, fetched) {
		t.Errorf("Unexpected inequality:\n%#v\nvs\n%#v", decoded, fetched)
	}
}
// addTestSubnet registers the subnet referenced by an internal load-balancer
// service with the fake SubnetsClient, so that later reconciliation can
// resolve it. It also pins the resolved subnet name back onto the service
// annotation.
func addTestSubnet(t *testing.T, az *Cloud, svc *v1.Service) {
	// Subnets only make sense for internal load balancers; flag misuse.
	if svc.Annotations[ServiceAnnotationLoadBalancerInternal] != "true" {
		t.Error("Subnet added to non-internal service")
	}
	// Fall back to the cloud's configured subnet when the annotation is empty.
	subName := svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]
	if subName == "" {
		subName = az.SubnetName
	}
	subnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s",
		az.SubscriptionID,
		az.VnetResourceGroup,
		az.VnetName,
		subName)
	// CreateOrUpdate is asynchronous; the error arrives on a channel.
	_, errChan := az.SubnetsClient.CreateOrUpdate(az.VnetResourceGroup, az.VnetName, subName,
		network.Subnet{
			ID:   &subnetID,
			Name: &subName,
		}, nil)
	if err := <-errChan; err != nil {
		t.Errorf("Subnet cannot be created or update, %v", err)
	}
	// Record the effective subnet name so subsequent reconciles use it.
	svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = subName
}
// TestIfServiceSpecifiesSharedRuleAndRuleDoesNotExistItIsCreated verifies
// that reconciling a service annotated with the shared-security-rule
// annotation creates a "shared-<proto>-<port>-<source>" rule covering the
// service's load-balancer IP, with Allow/Inbound semantics and a priority.
func TestIfServiceSpecifiesSharedRuleAndRuleDoesNotExistItIsCreated(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("servicesr", v1.ProtocolTCP, 80)
	svc.Spec.LoadBalancerIP = "192.168.77.88"
	svc.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	sg := getTestSecurityGroup(az)

	// wantLb=true: the service is being added.
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc, to.StringPtr(svc.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}

	validateSecurityGroup(t, sg, svc)

	expectedRuleName := "shared-TCP-80-Internet"
	_, securityRule, ruleFound := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName)
	if !ruleFound {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName)
	}

	// The shared rule must cover this service's port and IP...
	err = securityRuleMatches("Internet", v1.ServicePort{Port: 80}, "192.168.77.88", securityRule)
	if err != nil {
		t.Errorf("Shared rule was not updated with new service IP: %v", err)
	}

	// ...and carry the expected NSG attributes.
	if securityRule.Priority == nil {
		t.Errorf("Shared rule %s had no priority", expectedRuleName)
	}

	if securityRule.Access != network.SecurityRuleAccessAllow {
		t.Errorf("Shared rule %s did not have Allow access", expectedRuleName)
	}

	if securityRule.Direction != network.SecurityRuleDirectionInbound {
		t.Errorf("Shared rule %s did not have Inbound direction", expectedRuleName)
	}
}
// TestIfServiceSpecifiesSharedRuleAndRuleExistsThenTheServicesPortAndAddressAreAdded
// verifies that when a matching shared rule already exists (seeded below with
// another service's destination IP), reconciling a new service appends its IP
// to the rule's DestinationAddressPrefixes instead of creating a second rule.
func TestIfServiceSpecifiesSharedRuleAndRuleExistsThenTheServicesPortAndAddressAreAdded(t *testing.T) {
	az := getTestCloud()
	svc := getTestService("servicesr", v1.ProtocolTCP, 80)
	svc.Spec.LoadBalancerIP = "192.168.77.88"
	svc.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	expectedRuleName := "shared-TCP-80-Internet"

	sg := getTestSecurityGroup(az)
	// Seed a pre-existing shared rule that already covers 192.168.33.44.
	sg.SecurityRules = &[]network.SecurityRule{
		{
			Name: &expectedRuleName,
			SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
				Protocol:                 network.SecurityRuleProtocolTCP,
				SourcePortRange:          to.StringPtr("*"),
				SourceAddressPrefix:      to.StringPtr("Internet"),
				DestinationPortRange:     to.StringPtr("80"),
				DestinationAddressPrefix: to.StringPtr("192.168.33.44"),
				Access:                   network.SecurityRuleAccessAllow,
				Direction:                network.SecurityRuleDirectionInbound,
			},
		},
	}

	sg, err := az.reconcileSecurityGroup(testClusterName, &svc, to.StringPtr(svc.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error: %q", err)
	}

	validateSecurityGroup(t, sg, svc)

	_, securityRule, ruleFound := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName)
	if !ruleFound {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName)
	}

	// Both the pre-existing IP and the new service's IP must now be present.
	expectedDestinationIPCount := 2
	if len(*securityRule.DestinationAddressPrefixes) != expectedDestinationIPCount {
		t.Errorf("Shared rule should have had %d destination IP addresses but had %d", expectedDestinationIPCount, len(*securityRule.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 80}, "192.168.33.44", securityRule)
	if err != nil {
		t.Errorf("Shared rule no longer matched other service IP: %v", err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 80}, "192.168.77.88", securityRule)
	if err != nil {
		t.Errorf("Shared rule was not updated with new service IP: %v", err)
	}
}
// TestIfServicesSpecifySharedRuleButDifferentPortsThenSeparateRulesAreCreated
// verifies that two shared-rule services on different ports produce two
// distinct shared rules, each covering only its own service's port and IP.
func TestIfServicesSpecifySharedRuleButDifferentPortsThenSeparateRulesAreCreated(t *testing.T) {
	az := getTestCloud()

	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 4444)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc2 := getTestService("servicesr2", v1.ProtocolTCP, 8888)
	svc2.Spec.LoadBalancerIP = "192.168.33.44"
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	// One rule per distinct (protocol, port, source) tuple.
	expectedRuleName1 := "shared-TCP-4444-Internet"
	expectedRuleName2 := "shared-TCP-8888-Internet"

	sg := getTestSecurityGroup(az)

	// Reconcile both services in sequence against the same group.
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc1: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc2, to.StringPtr(svc2.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc2: %q", err)
	}

	validateSecurityGroup(t, sg, svc1, svc2)

	_, securityRule1, rule1Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName1)
	if !rule1Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName1)
	}

	_, securityRule2, rule2Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !rule2Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}

	// Rule 1 covers only svc1's port/IP.
	expectedDestinationIPCount1 := 1
	if len(*securityRule1.DestinationAddressPrefixes) != expectedDestinationIPCount1 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName1, expectedDestinationIPCount1, len(*securityRule1.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule1)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName1, err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule1)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName1)
	}

	// Rule 2 covers only svc2's port/IP.
	expectedDestinationIPCount2 := 1
	if len(*securityRule2.DestinationAddressPrefixes) != expectedDestinationIPCount2 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName2, expectedDestinationIPCount2, len(*securityRule2.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule2)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName2, err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule2)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}
}
// TestIfServicesSpecifySharedRuleButDifferentProtocolsThenSeparateRulesAreCreated
// verifies that two shared-rule services on the same port but different
// protocols (TCP vs UDP) get two distinct shared rules with the correct
// protocol each, even though they share the same load-balancer IP.
func TestIfServicesSpecifySharedRuleButDifferentProtocolsThenSeparateRulesAreCreated(t *testing.T) {
	az := getTestCloud()

	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 4444)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc2 := getTestService("servicesr2", v1.ProtocolUDP, 4444)
	svc2.Spec.LoadBalancerIP = "192.168.77.88"
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	// The protocol is part of the shared-rule name, so the rules differ.
	expectedRuleName1 := "shared-TCP-4444-Internet"
	expectedRuleName2 := "shared-UDP-4444-Internet"

	sg := getTestSecurityGroup(az)

	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc1: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc2, to.StringPtr(svc2.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc2: %q", err)
	}

	validateSecurityGroup(t, sg, svc1, svc2)

	_, securityRule1, rule1Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName1)
	if !rule1Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName1)
	}

	_, securityRule2, rule2Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !rule2Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}

	// TCP rule: one destination, correct protocol.
	expectedDestinationIPCount1 := 1
	if len(*securityRule1.DestinationAddressPrefixes) != expectedDestinationIPCount1 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName1, expectedDestinationIPCount1, len(*securityRule1.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule1)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName1, err)
	}

	if securityRule1.Protocol != network.SecurityRuleProtocolTCP {
		t.Errorf("Shared rule %s should have been %s but was %s", expectedRuleName1, network.SecurityRuleProtocolTCP, securityRule1.Protocol)
	}

	// UDP rule: one destination, correct protocol.
	expectedDestinationIPCount2 := 1
	if len(*securityRule2.DestinationAddressPrefixes) != expectedDestinationIPCount2 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName2, expectedDestinationIPCount2, len(*securityRule2.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule2)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName2, err)
	}

	if securityRule2.Protocol != network.SecurityRuleProtocolUDP {
		t.Errorf("Shared rule %s should have been %s but was %s", expectedRuleName2, network.SecurityRuleProtocolUDP, securityRule2.Protocol)
	}
}
// TestIfServicesSpecifySharedRuleButDifferentSourceAddressesThenSeparateRulesAreCreated
// verifies that two shared-rule services with different
// LoadBalancerSourceRanges get two distinct shared rules (the source CIDR is
// part of the rule name, with '/' rendered as '_'), each matching only its
// own source range and destination IP.
func TestIfServicesSpecifySharedRuleButDifferentSourceAddressesThenSeparateRulesAreCreated(t *testing.T) {
	az := getTestCloud()

	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 80)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Spec.LoadBalancerSourceRanges = []string{"192.168.12.0/24"}
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc2 := getTestService("servicesr2", v1.ProtocolTCP, 80)
	svc2.Spec.LoadBalancerIP = "192.168.33.44"
	svc2.Spec.LoadBalancerSourceRanges = []string{"192.168.34.0/24"}
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	expectedRuleName1 := "shared-TCP-80-192.168.12.0_24"
	expectedRuleName2 := "shared-TCP-80-192.168.34.0_24"

	sg := getTestSecurityGroup(az)

	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc1: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc2, to.StringPtr(svc2.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc2: %q", err)
	}

	validateSecurityGroup(t, sg, svc1, svc2)

	_, securityRule1, rule1Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName1)
	if !rule1Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName1)
	}

	_, securityRule2, rule2Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !rule2Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}

	// Rule 1 must match svc1's source range / IP and reject svc2's.
	expectedDestinationIPCount1 := 1
	if len(*securityRule1.DestinationAddressPrefixes) != expectedDestinationIPCount1 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName1, expectedDestinationIPCount1, len(*securityRule1.DestinationAddressPrefixes))
	}

	err = securityRuleMatches(svc1.Spec.LoadBalancerSourceRanges[0], v1.ServicePort{Port: 80}, "192.168.77.88", securityRule1)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName1, err)
	}

	err = securityRuleMatches(svc2.Spec.LoadBalancerSourceRanges[0], v1.ServicePort{Port: 80}, "192.168.33.44", securityRule1)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName1)
	}

	// Rule 2 must match svc2's source range / IP and reject svc1's.
	expectedDestinationIPCount2 := 1
	if len(*securityRule2.DestinationAddressPrefixes) != expectedDestinationIPCount2 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName2, expectedDestinationIPCount2, len(*securityRule2.DestinationAddressPrefixes))
	}

	err = securityRuleMatches(svc2.Spec.LoadBalancerSourceRanges[0], v1.ServicePort{Port: 80}, "192.168.33.44", securityRule2)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName2, err)
	}

	err = securityRuleMatches(svc1.Spec.LoadBalancerSourceRanges[0], v1.ServicePort{Port: 80}, "192.168.77.88", securityRule2)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}
}
// TestIfServicesSpecifySharedRuleButSomeAreOnDifferentPortsThenRulesAreSeparatedOrConsoliatedByPort
// (name typo "Consoliated" preserved — renaming would change the test's
// identity) verifies mixed consolidation: svc1 and svc3 share port 4444 and
// are merged into one shared rule with two destination IPs, while svc2 on
// port 8888 gets its own rule.
func TestIfServicesSpecifySharedRuleButSomeAreOnDifferentPortsThenRulesAreSeparatedOrConsoliatedByPort(t *testing.T) {
	az := getTestCloud()

	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 4444)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc2 := getTestService("servicesr2", v1.ProtocolTCP, 8888)
	svc2.Spec.LoadBalancerIP = "192.168.33.44"
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc3 := getTestService("servicesr3", v1.ProtocolTCP, 4444)
	svc3.Spec.LoadBalancerIP = "192.168.99.11"
	svc3.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	// svc1 and svc3 consolidate into one rule; svc2 is separate.
	expectedRuleName13 := "shared-TCP-4444-Internet"
	expectedRuleName2 := "shared-TCP-8888-Internet"

	sg := getTestSecurityGroup(az)

	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc1: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc2, to.StringPtr(svc2.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc2: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc3, to.StringPtr(svc3.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc3: %q", err)
	}

	validateSecurityGroup(t, sg, svc1, svc2, svc3)

	_, securityRule13, rule13Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName13)
	if !rule13Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName13)
	}

	_, securityRule2, rule2Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !rule2Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}

	// Consolidated rule: both svc1's and svc3's IPs, not svc2's.
	expectedDestinationIPCount13 := 2
	if len(*securityRule13.DestinationAddressPrefixes) != expectedDestinationIPCount13 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName13, expectedDestinationIPCount13, len(*securityRule13.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule13)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName13, err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.99.11", securityRule13)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName13, err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule13)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName13)
	}

	if securityRule13.Priority == nil {
		t.Errorf("Shared rule %s had no priority", expectedRuleName13)
	}

	if securityRule13.Access != network.SecurityRuleAccessAllow {
		t.Errorf("Shared rule %s did not have Allow access", expectedRuleName13)
	}

	if securityRule13.Direction != network.SecurityRuleDirectionInbound {
		t.Errorf("Shared rule %s did not have Inbound direction", expectedRuleName13)
	}

	// Separate rule for svc2: only its own IP/port.
	expectedDestinationIPCount2 := 1
	if len(*securityRule2.DestinationAddressPrefixes) != expectedDestinationIPCount2 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName2, expectedDestinationIPCount2, len(*securityRule2.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule2)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName2, err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule2)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.99.11", securityRule2)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}
}
// TestIfServiceSpecifiesSharedRuleAndServiceIsDeletedThenTheServicesPortAndAddressAreRemoved
// verifies that deleting one of two services sharing a rule (reconcile with
// wantLb=false) removes only that service's IP from the rule, leaving the
// rule in place for the remaining service.
func TestIfServiceSpecifiesSharedRuleAndServiceIsDeletedThenTheServicesPortAndAddressAreRemoved(t *testing.T) {
	az := getTestCloud()

	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 80)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc2 := getTestService("servicesr2", v1.ProtocolTCP, 80)
	svc2.Spec.LoadBalancerIP = "192.168.33.44"
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	expectedRuleName := "shared-TCP-80-Internet"

	sg := getTestSecurityGroup(az)

	// Add both services first.
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc1: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc2, to.StringPtr(svc2.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc2: %q", err)
	}

	validateSecurityGroup(t, sg, svc1, svc2)

	// Delete svc1 (wantLb=false).
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), false)
	if err != nil {
		t.Errorf("Unexpected error removing svc1: %q", err)
	}

	validateSecurityGroup(t, sg, svc2)

	_, securityRule, ruleFound := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName)
	if !ruleFound {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName)
	}

	// Only svc2's IP should remain on the shared rule.
	expectedDestinationIPCount := 1
	if len(*securityRule.DestinationAddressPrefixes) != expectedDestinationIPCount {
		t.Errorf("Shared rule should have had %d destination IP addresses but had %d", expectedDestinationIPCount, len(*securityRule.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 80}, "192.168.33.44", securityRule)
	if err != nil {
		t.Errorf("Shared rule no longer matched other service IP: %v", err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 80}, "192.168.77.88", securityRule)
	if err == nil {
		t.Error("Shared rule was not updated to remove deleted service IP")
	}
}
// TestIfSomeServicesShareARuleAndOneIsDeletedItIsRemovedFromTheRightRule
// verifies targeted removal: with svc1+svc3 consolidated on port 4444 and
// svc2 alone on port 8888, deleting svc1 removes its IP only from the 4444
// rule, leaving svc3's IP there and svc2's rule untouched.
func TestIfSomeServicesShareARuleAndOneIsDeletedItIsRemovedFromTheRightRule(t *testing.T) {
	az := getTestCloud()

	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 4444)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc2 := getTestService("servicesr2", v1.ProtocolTCP, 8888)
	svc2.Spec.LoadBalancerIP = "192.168.33.44"
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc3 := getTestService("servicesr3", v1.ProtocolTCP, 4444)
	svc3.Spec.LoadBalancerIP = "192.168.99.11"
	svc3.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	expectedRuleName13 := "shared-TCP-4444-Internet"
	expectedRuleName2 := "shared-TCP-8888-Internet"

	sg := getTestSecurityGroup(az)

	// Add all three services.
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc1: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc2, to.StringPtr(svc2.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc2: %q", err)
	}

	sg, err = az.reconcileSecurityGroup(testClusterName, &svc3, to.StringPtr(svc3.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc3: %q", err)
	}

	validateSecurityGroup(t, sg, svc1, svc2, svc3)

	// Delete svc1 (wantLb=false).
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), false)
	if err != nil {
		t.Errorf("Unexpected error removing svc1: %q", err)
	}

	validateSecurityGroup(t, sg, svc2, svc3)

	_, securityRule13, rule13Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName13)
	if !rule13Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName13)
	}

	_, securityRule2, rule2Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !rule2Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}

	// Port-4444 rule: svc1's IP gone, svc3's IP still present.
	expectedDestinationIPCount13 := 1
	if len(*securityRule13.DestinationAddressPrefixes) != expectedDestinationIPCount13 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName13, expectedDestinationIPCount13, len(*securityRule13.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule13)
	if err == nil {
		t.Errorf("Shared rule %s should have had svc1 removed but did not", expectedRuleName13)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.99.11", securityRule13)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName13, err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule13)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName13)
	}

	if securityRule13.Priority == nil {
		t.Errorf("Shared rule %s had no priority", expectedRuleName13)
	}

	if securityRule13.Access != network.SecurityRuleAccessAllow {
		t.Errorf("Shared rule %s did not have Allow access", expectedRuleName13)
	}

	if securityRule13.Direction != network.SecurityRuleDirectionInbound {
		t.Errorf("Shared rule %s did not have Inbound direction", expectedRuleName13)
	}

	// Port-8888 rule is unaffected by svc1's deletion.
	expectedDestinationIPCount2 := 1
	if len(*securityRule2.DestinationAddressPrefixes) != expectedDestinationIPCount2 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName2, expectedDestinationIPCount2, len(*securityRule2.DestinationAddressPrefixes))
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule2)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName2, err)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule2)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}

	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.99.11", securityRule2)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}
}
// TestIfServiceSpecifiesSharedRuleAndLastServiceIsDeletedThenRuleIsDeleted verifies
// that a shared security rule disappears once every service that referenced it has
// been removed, while shared rules still backed by a remaining service survive.
func TestIfServiceSpecifiesSharedRuleAndLastServiceIsDeletedThenRuleIsDeleted(t *testing.T) {
	az := getTestCloud()

	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 4444)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc2 := getTestService("servicesr2", v1.ProtocolTCP, 8888)
	svc2.Spec.LoadBalancerIP = "192.168.33.44"
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	svc3 := getTestService("servicesr3", v1.ProtocolTCP, 4444)
	svc3.Spec.LoadBalancerIP = "192.168.99.11"
	svc3.Annotations[ServiceAnnotationSharedSecurityRule] = "true"

	// svc1 and svc3 use the same port, so they share one rule; svc2 gets its own.
	expectedRuleName13 := "shared-TCP-4444-Internet"
	expectedRuleName2 := "shared-TCP-8888-Internet"

	sg := getTestSecurityGroup(az)

	// reconcile applies (wantLb=true) or removes (wantLb=false) one service and
	// refreshes sg, reporting any failure with the caller-supplied message.
	reconcile := func(svc *v1.Service, wantLb bool, errFmt string) {
		var err error
		sg, err = az.reconcileSecurityGroup(testClusterName, svc, to.StringPtr(svc.Spec.LoadBalancerIP), wantLb)
		if err != nil {
			t.Errorf(errFmt, err)
		}
	}

	reconcile(&svc1, true, "Unexpected error adding svc1: %q")
	reconcile(&svc2, true, "Unexpected error adding svc2: %q")
	reconcile(&svc3, true, "Unexpected error adding svc3: %q")
	validateSecurityGroup(t, sg, svc1, svc2, svc3)

	reconcile(&svc1, false, "Unexpected error removing svc1: %q")
	reconcile(&svc3, false, "Unexpected error removing svc3: %q")
	validateSecurityGroup(t, sg, svc2)

	// Both contributors to the port-4444 rule are gone, so the rule must be gone too.
	if _, _, found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName13); found {
		t.Fatalf("Expected security rule %q to have been deleted but it was still present", expectedRuleName13)
	}

	_, securityRule2, found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}

	// The surviving rule should only cover svc2's load-balancer IP and port.
	if wantIPs := 1; len(*securityRule2.DestinationAddressPrefixes) != wantIPs {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName2, wantIPs, len(*securityRule2.DestinationAddressPrefixes))
	}
	if err := securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule2); err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName2, err)
	}
	if err := securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule2); err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}
	if err := securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.99.11", securityRule2); err == nil {
		t.Errorf("Shared rule %s matched wrong service's port and IP", expectedRuleName2)
	}
}
// TestCanCombineSharedAndPrivateRulesInSameGroup verifies that shared security
// rules (one rule per port, multiple destination IPs) and unshared per-service
// rules (single destination IP) coexist in one security group, and that removing
// a service only shrinks or deletes the rules that service contributed to.
func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
	az := getTestCloud()
	svc1 := getTestService("servicesr1", v1.ProtocolTCP, 4444)
	svc1.Spec.LoadBalancerIP = "192.168.77.88"
	svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true"
	svc2 := getTestService("servicesr2", v1.ProtocolTCP, 8888)
	svc2.Spec.LoadBalancerIP = "192.168.33.44"
	svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true"
	svc3 := getTestService("servicesr3", v1.ProtocolTCP, 4444)
	svc3.Spec.LoadBalancerIP = "192.168.99.11"
	svc3.Annotations[ServiceAnnotationSharedSecurityRule] = "true"
	// svc4/svc5 opt out of rule sharing and therefore get private rules.
	svc4 := getTestService("servicesr4", v1.ProtocolTCP, 4444)
	svc4.Spec.LoadBalancerIP = "192.168.22.33"
	svc4.Annotations[ServiceAnnotationSharedSecurityRule] = "false"
	svc5 := getTestService("servicesr5", v1.ProtocolTCP, 8888)
	svc5.Spec.LoadBalancerIP = "192.168.22.33"
	svc5.Annotations[ServiceAnnotationSharedSecurityRule] = "false"
	expectedRuleName13 := "shared-TCP-4444-Internet"
	expectedRuleName2 := "shared-TCP-8888-Internet"
	expectedRuleName4 := getSecurityRuleName(&svc4, v1.ServicePort{Port: 4444, Protocol: v1.ProtocolTCP}, "Internet")
	expectedRuleName5 := getSecurityRuleName(&svc5, v1.ServicePort{Port: 8888, Protocol: v1.ProtocolTCP}, "Internet")
	sg := getTestSecurityGroup(az)
	sg, err := az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc1: %q", err)
	}
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc2, to.StringPtr(svc2.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc2: %q", err)
	}
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc3, to.StringPtr(svc3.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc3: %q", err)
	}
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc4, to.StringPtr(svc4.Spec.LoadBalancerIP), true)
	if err != nil {
		t.Errorf("Unexpected error adding svc4: %q", err)
	}
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc5, to.StringPtr(svc5.Spec.LoadBalancerIP), true)
	if err != nil {
		// Bug fix: this message previously said "svc4", hiding which service failed.
		t.Errorf("Unexpected error adding svc5: %q", err)
	}
	validateSecurityGroup(t, sg, svc1, svc2, svc3, svc4, svc5)
	// Two shared rules (4444, 8888) plus two private rules (svc4, svc5).
	expectedRuleCount := 4
	if len(*sg.SecurityRules) != expectedRuleCount {
		t.Errorf("Expected security group to have %d rules but it had %d", expectedRuleCount, len(*sg.SecurityRules))
	}
	_, securityRule13, rule13Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName13)
	if !rule13Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName13)
	}
	_, securityRule2, rule2Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !rule2Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}
	_, securityRule4, rule4Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName4)
	if !rule4Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName4)
	}
	_, securityRule5, rule5Found := findSecurityRuleByName(*sg.SecurityRules, expectedRuleName5)
	if !rule5Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName5)
	}
	// The shared 4444 rule must cover svc1 and svc3 but not the unshared svc4.
	expectedDestinationIPCount13 := 2
	if len(*securityRule13.DestinationAddressPrefixes) != expectedDestinationIPCount13 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName13, expectedDestinationIPCount13, len(*securityRule13.DestinationAddressPrefixes))
	}
	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.77.88", securityRule13)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName13, err)
	}
	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.99.11", securityRule13)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName13, err)
	}
	err = securityRuleMatches("Internet", v1.ServicePort{Port: 4444}, "192.168.22.33", securityRule13)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong (unshared) service's port and IP", expectedRuleName13)
	}
	expectedDestinationIPCount2 := 1
	if len(*securityRule2.DestinationAddressPrefixes) != expectedDestinationIPCount2 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName2, expectedDestinationIPCount2, len(*securityRule2.DestinationAddressPrefixes))
	}
	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.33.44", securityRule2)
	if err != nil {
		t.Errorf("Shared rule %s did not match service IP: %v", expectedRuleName2, err)
	}
	err = securityRuleMatches("Internet", v1.ServicePort{Port: 8888}, "192.168.22.33", securityRule2)
	if err == nil {
		t.Errorf("Shared rule %s matched wrong (unshared) service's port and IP", expectedRuleName2)
	}
	// Private rules use the scalar DestinationAddressPrefix, never the collection.
	if securityRule4.DestinationAddressPrefixes != nil {
		t.Errorf("Expected unshared rule %s to use single destination IP address but used collection", expectedRuleName4)
	}
	if securityRule4.DestinationAddressPrefix == nil {
		t.Errorf("Expected unshared rule %s to have a destination IP address", expectedRuleName4)
	} else {
		if !strings.EqualFold(*securityRule4.DestinationAddressPrefix, svc4.Spec.LoadBalancerIP) {
			t.Errorf("Expected unshared rule %s to have a destination %s but had %s", expectedRuleName4, svc4.Spec.LoadBalancerIP, *securityRule4.DestinationAddressPrefix)
		}
	}
	if securityRule5.DestinationAddressPrefixes != nil {
		t.Errorf("Expected unshared rule %s to use single destination IP address but used collection", expectedRuleName5)
	}
	if securityRule5.DestinationAddressPrefix == nil {
		t.Errorf("Expected unshared rule %s to have a destination IP address", expectedRuleName5)
	} else {
		if !strings.EqualFold(*securityRule5.DestinationAddressPrefix, svc5.Spec.LoadBalancerIP) {
			t.Errorf("Expected unshared rule %s to have a destination %s but had %s", expectedRuleName5, svc5.Spec.LoadBalancerIP, *securityRule5.DestinationAddressPrefix)
		}
	}
	// Removing svc1 shrinks the shared 4444 rule; removing svc5 deletes its private rule.
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc1, to.StringPtr(svc1.Spec.LoadBalancerIP), false)
	if err != nil {
		t.Errorf("Unexpected error removing svc1: %q", err)
	}
	sg, err = az.reconcileSecurityGroup(testClusterName, &svc5, to.StringPtr(svc5.Spec.LoadBalancerIP), false)
	if err != nil {
		t.Errorf("Unexpected error removing svc5: %q", err)
	}
	_, securityRule13, rule13Found = findSecurityRuleByName(*sg.SecurityRules, expectedRuleName13)
	if !rule13Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName13)
	}
	_, securityRule2, rule2Found = findSecurityRuleByName(*sg.SecurityRules, expectedRuleName2)
	if !rule2Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName2)
	}
	_, securityRule4, rule4Found = findSecurityRuleByName(*sg.SecurityRules, expectedRuleName4)
	if !rule4Found {
		t.Fatalf("Expected security rule %q but it was not present", expectedRuleName4)
	}
	_, _, rule5Found = findSecurityRuleByName(*sg.SecurityRules, expectedRuleName5)
	if rule5Found {
		t.Fatalf("Expected security rule %q to have been removed but it was not present", expectedRuleName5)
	}
	expectedDestinationIPCount13 = 1
	if len(*securityRule13.DestinationAddressPrefixes) != expectedDestinationIPCount13 {
		t.Errorf("Shared rule %s should have had %d destination IP addresses but had %d", expectedRuleName13, expectedDestinationIPCount13, len(*securityRule13.DestinationAddressPrefixes))
	}
}
// TODO: sanity check if the same IP address incorrectly gets put in twice?
// (shouldn't happen but...)
// func TestIfServiceIsEditedFromOwnRuleToSharedRuleThenOwnRuleIsDeletedAndSharedRuleIsCreated(t *testing.T) {
// t.Error()
// }
// func TestIfServiceIsEditedFromSharedRuleToOwnRuleThenItIsRemovedFromSharedRuleAndOwnRuleIsCreated(t *testing.T) {
// t.Error()
// }
| apache-2.0 |
glevand/coreos--etcd | integration/metrics_test.go | 4579 | // Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"strconv"
"testing"
"time"
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
)
// TestMetricDbSizeBoot checks that the db size metric is already non-zero on a
// freshly booted single-member cluster.
func TestMetricDbSizeBoot(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	size, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if size == "0" {
		t.Fatalf("expected non-zero, got %q", size)
	}
}
// TestMetricDbSizeDefrag exercises the defrag size metrics under the current
// "etcd" metric namespace.
func TestMetricDbSizeDefrag(t *testing.T) {
	testMetricDbSizeDefrag(t, "etcd")
}

// TestMetricDbSizeDefragDebugging exercises the same metrics under the legacy
// "etcd_debugging" namespace, which is kept for backward compatibility.
func TestMetricDbSizeDefragDebugging(t *testing.T) {
	testMetricDbSizeDefrag(t, "etcd_debugging")
}
// testMetricDbSizeDefrag checks that the db size metric is set after defrag:
// it grows the db with puts, compacts to free pages (in-use size drops), then
// defragments (total size drops and in-use <= total). "name" selects the metric
// namespace prefix ("etcd" or "etcd_debugging").
func testMetricDbSizeDefrag(t *testing.T, name string) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kvc := toGRPC(clus.Client(0)).KV
	mc := toGRPC(clus.Client(0)).Maintenance

	// expand the db size
	numPuts := 25 // large enough to write more than 1 page
	putreq := &pb.PutRequest{Key: []byte("k"), Value: make([]byte, 4096)}
	for i := 0; i < numPuts; i++ {
		time.Sleep(10 * time.Millisecond) // to execute multiple backend txn
		if _, err := kvc.Put(context.TODO(), putreq); err != nil {
			t.Fatal(err)
		}
	}

	// wait for backend txn sync
	time.Sleep(500 * time.Millisecond)

	expected := numPuts * len(putreq.Value)
	beforeDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	bv, err := strconv.Atoi(beforeDefrag)
	if err != nil {
		t.Fatal(err)
	}
	if bv < expected {
		t.Fatalf("expected db size greater than %d, got %d", expected, bv)
	}
	beforeDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	biu, err := strconv.Atoi(beforeDefragInUse)
	if err != nil {
		t.Fatal(err)
	}
	if biu < expected {
		t.Fatalf("expected db size in use is greater than %d, got %d", expected, biu)
	}

	// clear out historical keys, in use bytes should free pages
	creq := &pb.CompactionRequest{Revision: int64(numPuts), Physical: true}
	if _, kerr := kvc.Compact(context.TODO(), creq); kerr != nil {
		t.Fatal(kerr)
	}

	// Put to move PendingPages to FreePages
	if _, err = kvc.Put(context.TODO(), putreq); err != nil {
		t.Fatal(err)
	}
	time.Sleep(500 * time.Millisecond)

	afterCompactionInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	aciu, err := strconv.Atoi(afterCompactionInUse)
	if err != nil {
		t.Fatal(err)
	}
	if biu <= aciu {
		t.Fatalf("expected less than %d, got %d after compaction", biu, aciu)
	}

	// defrag should give freed space back to fs
	// Bug fix: the Defragment error was silently ignored; a failed defrag would
	// make the size assertions below fail with a misleading message.
	if _, err = mc.Defragment(context.TODO(), &pb.DefragmentRequest{}); err != nil {
		t.Fatal(err)
	}

	afterDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	av, err := strconv.Atoi(afterDefrag)
	if err != nil {
		t.Fatal(err)
	}
	if bv <= av {
		t.Fatalf("expected less than %d, got %d after defrag", bv, av)
	}

	afterDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	adiu, err := strconv.Atoi(afterDefragInUse)
	if err != nil {
		t.Fatal(err)
	}
	if adiu > av {
		t.Fatalf("db size in use (%d) is expected less than db size (%d) after defrag", adiu, av)
	}
}
// TestMetricQuotaBackendBytes checks that the exported backend quota metric
// equals the server's default quota.
func TestMetricQuotaBackendBytes(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	raw, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes")
	if err != nil {
		t.Fatal(err)
	}
	// Prometheus renders the gauge as a float; parse it back before comparing.
	quota, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		t.Fatal(err)
	}
	if int64(quota) != etcdserver.DefaultQuotaBytes {
		t.Fatalf("expected %d, got %f", etcdserver.DefaultQuotaBytes, quota)
	}
}
| apache-2.0 |
RobotControlTechnologies/lego_communication_library | EV3/Commands.cs | 24456 | using System;
using System.Text;
using MonoBrick;
using System.Collections.Generic;
namespace MonoBrick.EV3
{
/// <summary>
/// Encoded parameter format: a parameter is either packed into a single byte
/// (short) or into a header byte followed by payload bytes (long). Bit 7 of the
/// header selects the format.
/// </summary>
[Flags]
public enum ParameterFormat : byte {
	#pragma warning disable
	Short = 0x00,
	Long = 0x80
	#pragma warning restore
}
/// <summary>
/// Encoded parameter type (bit 6 of the header): a literal constant or a
/// reference to a VM variable.
/// </summary>
[Flags]
public enum ParameterType : byte {
	#pragma warning disable
	Constant = 0x00,
	Variable = 0x40
	#pragma warning restore
}
/// <summary>
/// Encoded parameter sign (bit 5) when using the short constant format.
/// </summary>
[Flags]
enum ShortSign : byte {
	#pragma warning disable
	Positive = 0x00,
	Negative = 0x20
	#pragma warning restore
}
/// <summary>
/// Encoded parameter type (bit 5) when using the long constant format:
/// an immediate value or a label reference.
/// </summary>
[Flags]
public enum ConstantParameterType : byte {
	#pragma warning disable
	Value = 0x00,
	Label = 0x20
	#pragma warning restore
}
/// <summary>
/// Encoded parameter scope (bit 5) when using the long variable format.
/// </summary>
[Flags]
public enum VariableScope : byte {
	#pragma warning disable
	Local = 0x00,
	Global = 0x20,
	#pragma warning restore
}
/// <summary>
/// Encoded parameter type (bit 4) when using the long variable format:
/// a plain value or a memory handle.
/// </summary>
[Flags]
public enum VariableType : byte {
	#pragma warning disable
	Value = 0x00,
	Handle = 0x10
	#pragma warning restore
}
/// <summary>
/// Encoded payload size following a long-format header byte (low bits):
/// 1/2/4 bytes or a zero-terminated string.
/// </summary>
[Flags]
enum FollowType : byte {
	#pragma warning disable
	OneByte = 0x01,
	TwoBytes = 0x02,
	FourBytes = 0x03,
	TerminatedString = 0x00,
	TerminatedString2 = 0x04
	#pragma warning restore
}
/// <summary>
/// Program slots used by the EV3 virtual machine.
/// </summary>
public enum ProgramSlots{
	/// <summary>
	/// Program slot reserved for executing the user interface
	/// </summary>
	Gui = 0,
	/// <summary>
	/// Program slot used to execute user projects, apps and tools
	/// </summary>
	User = 1,
	/// <summary>
	/// Program slot used for direct commands coming from c_com
	/// </summary>
	Cmd = 2,
	/// <summary>
	/// Program slot used for direct commands coming from c_ui
	/// </summary>
	Term = 3,
	/// <summary>
	/// Program slot used to run the debug ui
	/// </summary>
	Debug = 4,
	/// <summary>
	/// Refers to the currently running slot; ONLY VALID IN opPROGRAM_STOP
	/// </summary>
	Current = -1
}
/// <summary>
/// The daisy-chain layer, i.e. which brick in a chained setup a command
/// is addressed to.
/// </summary>
public enum DaisyChainLayer{
	/// <summary>
	/// The directly connected EV3
	/// </summary>
	EV3 = 0,
	/// <summary>
	/// First EV3 in the Daisychain
	/// </summary>
	First = 1,
	/// <summary>
	/// Second EV3 in the Daisychain
	/// </summary>
	Second = 2,
	/// <summary>
	/// Third EV3 in the Daisychain
	/// </summary>
	Third = 3,
}
/// <summary>
/// EV3 command type. Values 0x00/0x01 are requests; 0x02/0x03 replies;
/// 0x04/0x05 error replies. Bit 7 (not part of these values) marks
/// "no reply requested".
/// </summary>
public enum CommandType{
	/// <summary>
	/// Direct command
	/// </summary>
	DirectCommand = 0x00,
	/// <summary>
	/// System command.
	/// </summary>
	SystemCommand = 0x01,
	/// <summary>
	/// Direct command reply.
	/// </summary>
	DirectReply = 0x02,
	/// <summary>
	/// System command reply.
	/// </summary>
	SystemReply = 0x03,
	/// <summary>
	/// Direct reply with error.
	/// </summary>
	DirectReplyWithError = 0x04,
	/// <summary>
	/// System reply with error.
	/// </summary>
	SystemReplyWithError = 0x05
}
/// <summary>
/// EV3 system commands. The numeric values are the system-command opcodes
/// used on the wire (file transfer, mailbox and firmware operations).
/// </summary>
public enum SystemCommand {
	#pragma warning disable
	None = 0x00,
	BeginDownload = 0x92,
	ContinueDownload = 0x93,
	BeginUpload = 0x94,
	ContinueUpload = 0x95,
	BeginGetFile = 0x96,
	ContinueGetFile = 0x97,
	CloseFileHandle = 0x98,
	ListFiles = 0x99,
	ContinueListFiles = 0x9a,
	CreateDir = 0x9b,
	DeleteFile = 0x9c,
	ListOpenHandles = 0x9d,
	WriteMailbox = 0x9e,
	BluetoothPin = 0x9f,
	EnterFirmwareUpdate = 0xa0
	#pragma warning restore
}
/// <summary>
/// EV3 byte codes. The numeric values are the VM opcodes as defined by the
/// LEGO EV3 firmware (bytecodes.h).
/// </summary>
public enum ByteCodes{
	#pragma warning disable
	// VM
	ProgramStop = 0x02,
	ProgramStart = 0x03,
	// Move
	InitBytes = 0x2F,
	// VM
	Info = 0x7C,
	String = 0x7D,
	MemoryWrite = 0x7E,
	MemoryRead = 0x7F,
	// Sound
	Sound = 0x94,
	// Bug fix: was the decimal literal 095 (= 0x5F); the firmware opcode
	// opSOUND_TEST is 0x95, between Sound (0x94) and SoundReady (0x96).
	SoundTest = 0x95,
	SoundReady = 0x96,
	// Input
	InputSample = 0x97,
	InputDeviceList = 0x98,
	InputDevice = 0x99,
	InputRead = 0x9a,
	InputTest = 0x9b,
	InputReady = 0x9c,
	InputReadSI = 0x9d,
	InputReadExt = 0x9e,
	InputWrite = 0x9f,
	// Output
	OutputGetType = 0xa0,
	OutputSetType = 0xa1,
	OutputReset = 0xa2,
	OutputStop = 0xA3,
	OutputPower = 0xA4,
	OutputSpeed = 0xA5,
	OutputStart = 0xA6,
	OutputPolarity = 0xA7,
	OutputRead = 0xA8,
	OutputTest = 0xA9,
	OutputReady = 0xAA,
	OutputPosition = 0xAB,
	OutputStepPower = 0xAC,
	OutputTimePower = 0xAD,
	OutputStepSpeed = 0xAE,
	OutputTimeSpeed = 0xAF,
	OutputStepSync = 0xB0,
	OutputTimeSync = 0xB1,
	OutputClrCount = 0xB2,
	OutputGetCount = 0xB3,
	// Memory
	File = 0xC0,
	Array = 0xc1,
	ArrayWrite = 0xc2,
	ArrayRead = 0xc3,
	ArrayAppend = 0xc4,
	MemoryUsage = 0xc5,
	FileName = 0xc6,
	// Mailbox
	MailboxOpen = 0xD8,
	MailboxWrite = 0xD9,
	MailboxRead = 0xDA,
	MailboxTest = 0xDB,
	MailboxReady = 0xDC,
	MailboxClose = 0xDD,
	#pragma warning restore
}
/// <summary>
/// EV3 sound sub codes: the sub-command byte following the Sound opcode.
/// </summary>
public enum SoundSubCodes{
	#pragma warning disable
	Break = 0,
	Tone = 1,
	Play = 2,
	Repeat = 3,
	Service = 4
	#pragma warning restore
}
/// <summary>
/// EV3 input sub codes: the sub-command byte following the InputDevice opcode.
/// </summary>
public enum InputSubCodes{
	#pragma warning disable
	GetFormat = 2,
	CalMinMax = 3,
	CalDefault = 4,
	GetTypeMode = 5,
	GetSymbol = 6,
	CalMin = 7,
	CalMax = 8,
	Setup = 9,
	ClearAll = 10,
	GetRaw = 11,
	GetConnection = 12,
	StopAll = 13,
	GetName = 21,
	GetModeName = 22,
	SetRaw = 23,
	GetFigures = 24,
	GetChanges = 25,
	ClrChanges = 26,
	ReadyPCT = 27,
	ReadyRaw = 28,
	ReadySI = 29,
	GetMinMax = 30,
	GetBumps = 31
	// Bug fix: the closing directive said "disable", which left warnings
	// suppressed for the remainder of the file instead of restoring them.
	#pragma warning restore
}
/// <summary>
/// EV3 file sub codes: the sub-command byte following the File opcode.
/// </summary>
public enum FileSubCodes{
	#pragma warning disable
	OpenAppend = 0,
	OpenRead = 1,
	OpenWrite = 2,
	ReadValue = 3,
	WriteValue = 4,
	ReadText = 5,
	WriteText = 6,
	Close = 7,
	LoadImage = 8,
	GetHandle = 9,
	LoadPicture = 10,
	GetPool = 11,
	Unload = 12,
	GetFolders = 13,
	GetIcon = 14,
	GetSubfolderName = 15,
	WriteLog = 16,
	// NOTE(review): "CLoseLog" casing typo is part of the public API surface;
	// renaming it would break existing callers, so it is kept as-is.
	CLoseLog = 17,
	GetImage = 18,
	GetItem = 19,
	GetCacheFiles = 20,
	PutCacheFile = 21,
	GetCacheFile = 22,
	DelCacheFile = 23,
	DelSubfolder = 24,
	GetLogName = 25,
	GetCacheName = 26,
	OpenLog = 27,
	ReadBytes = 28,
	WriteBytes = 29,
	Remove = 30,
	Move = 31,
	#pragma warning restore
}
/// <summary>
/// Memory sub codes: the sub-command byte following the Array opcode.
/// </summary>
public enum MemorySubCodes{
	#pragma warning disable
	Delete = 0,
	Create8 = 1,
	Create16 = 2,
	Create32 = 3,
	CreateTEF = 4,
	Resize = 5,
	Fill = 6,
	Copy = 7,
	Init8 = 8,
	Init16 = 9,
	Init32 = 10,
	InitF = 11,
	Size = 12,
	#pragma warning restore
}
/// <summary>
/// Class for creating a EV3 system command.
/// </summary>
public class Command: BrickCommand{
private SystemCommand systemCommand;
private CommandType commandType;
private UInt16 sequenceNumber;
/// <summary>
/// The short value maximum size
/// </summary>
public const sbyte ShortValueMax = 31;
/// <summary>
/// The short value minimum size
/// </summary>
public const sbyte ShortValueMin = -32;
/// <summary>
/// Initializes a new instance of the <see cref="MonoBrick.EV3.Command"/> class
/// from raw message bytes.
/// </summary>
/// <param name="data">Raw EV3 message: bytes 0-1 hold the little-endian sequence
/// number, byte 2 the command type (bit 7 set = no reply expected), byte 3 the
/// system command when the type is a system command.</param>
public Command(byte [] data){
	if(data.Length < 4){
		throw new System.ArgumentException("Invalid EV3 Command");
	}
	for(int i = 0; i < data.Length; i++){
		dataArr.Add(data[i]);
	}
	// Bug fix: the sequence number is a 16-bit little-endian value, so the high
	// byte must be shifted by 8 bits; the previous "<< 2" corrupted every
	// sequence number above 255.
	this.sequenceNumber = (UInt16)(0x0000 | dataArr[0] | (dataArr[1] << 8));
	try{
		commandType = (CommandType) (data[2] & 0x7f);
		if(commandType == CommandType.SystemCommand){
			systemCommand = (SystemCommand) data[3];
		}
		else{
			systemCommand = SystemCommand.None;
		}
	}
	catch(BrickException){
		throw new System.ArgumentException("Invalid EV3 Command");
	}
	replyRequired = !Convert.ToBoolean(data[2]&0x80);
}
/// <summary>
/// Initializes a new instance of the <see cref="MonoBrick.EV3.Command"/> class as a system command
/// </summary>
/// <param name="systemCommand">System command.</param>
/// <param name="sequenceNumber">Sequence number.</param>
/// <param name="reply">If set to <c>true</c> a reply will be sent from the brick</param>
public Command(SystemCommand systemCommand, UInt16 sequenceNumber, bool reply)
{
	this.systemCommand = systemCommand;
	this.commandType = CommandType.SystemCommand;
	this.sequenceNumber = sequenceNumber;
	this.Append(sequenceNumber);
	// Bit 7 of the type byte signals "no reply requested" to the brick.
	replyRequired = reply;
	dataArr.Add(reply ? (byte)commandType : (byte)((byte)commandType | 0x80));
	dataArr.Add((byte)systemCommand);
}
/// <summary>
/// Initializes a new instance of the <see cref="MonoBrick.EV3.Command"/> class as a direct command
/// </summary>
/// <param name="byteCode">Bytecode to use for the direct command</param>
/// <param name="globalVariables">Number of global (reply) bytes to reserve</param>
/// <param name="localVariables">Number of local bytes to reserve</param>
/// <param name="sequenceNumber">Sequence number of the message</param>
/// <param name="reply">If set to <c>true</c> a reply will be sent from the brick</param>
public Command(ByteCodes byteCode, int globalVariables, int localVariables, UInt16 sequenceNumber, bool reply): this(globalVariables, localVariables, sequenceNumber, reply){
	this.Append(byteCode);
}
/// <summary>
/// Initializes a new instance of the <see cref="MonoBrick.EV3.Command"/> as a direct command
/// </summary>
/// <param name="globalVariables">Number of global (reply) bytes to reserve</param>
/// <param name="localVariables">Number of local bytes to reserve</param>
/// <param name="sequenceNumber">Sequence number of the message</param>
/// <param name="reply">If set to <c>true</c> a reply will be sent from the brick</param>
public Command(int globalVariables, int localVariables, UInt16 sequenceNumber, bool reply)
{
	this.systemCommand = SystemCommand.None;
	this.commandType = CommandType.DirectCommand;
	this.sequenceNumber = sequenceNumber;
	this.Append(sequenceNumber);
	if (reply){
		replyRequired = true;
		dataArr.Add((byte)commandType);
	}
	else{
		replyRequired = false;
		// Bit 7 of the type byte tells the brick not to send a reply.
		dataArr.Add((byte)((byte) commandType | 0x80));
	}
	// Variable-allocation header: first byte holds the low 8 bits of the global
	// byte count; the second packs 6 bits of local count over the 2 high global bits.
	byte firstByte = (byte)(globalVariables & 0xFF);
	byte secondByte = (byte)((localVariables << 2) | (globalVariables >> 8));
	this.Append(firstByte);
	this.Append(secondByte);
}
/// <summary>
/// Gets the EV3 system command.
/// </summary>
/// <value>The system command; <c>SystemCommand.None</c> for direct commands.</value>
public SystemCommand SystemCommandType{
	get{return systemCommand;}
}
/// <summary>
/// Gets the EV3 command type.
/// </summary>
/// <value>The type of the command.</value>
public CommandType CommandType {
	get{return commandType;}
}
/// <summary>
/// Gets the sequence number the command was created with.
/// </summary>
/// <value>The sequence number.</value>
public UInt16 SequenceNumber {
	get{return sequenceNumber;}
}
/// <summary>
/// Append a sensor type value, encoded as a short constant parameter.
/// </summary>
/// <param name="type">Sensor type to append</param>
public void Append (SensorType type)
{
	Append((byte) type, ParameterFormat.Short);
}
/// <summary>
/// Append a sensor mode value, encoded as a short constant parameter.
/// </summary>
/// <param name="mode">Sensor mode to append</param>
public void Append (SensorMode mode)
{
	Append((byte) mode, ParameterFormat.Short);
}
/// <summary>
/// Append a byte code value as a raw (unencoded) opcode byte.
/// </summary>
/// <param name="byteCode">Byte code to append</param>
public void Append(ByteCodes byteCode){
	Append((byte) byteCode);
}
/// <summary>
/// Append a file sub code, encoded as a short constant parameter.
/// </summary>
/// <param name="code">Code to append.</param>
public void Append(FileSubCodes code){
	Append((sbyte) code, ParameterFormat.Short);
}
/// <summary>
/// Append a sound sub code, encoded as a short constant parameter.
/// </summary>
/// <param name="code">Code to append.</param>
public void Append(SoundSubCodes code){
	Append((sbyte) code, ParameterFormat.Short);
}
/// <summary>
/// Append a daisy chain layer, encoded as a short constant parameter.
/// </summary>
/// <param name="chain">Daisy chain layer to append</param>
public void Append(DaisyChainLayer chain){
	Append((sbyte) chain, ParameterFormat.Short);
}
/// <summary>
/// Append an input sub code, encoded as a short constant parameter.
/// </summary>
/// <param name="subCode">Sub code to append</param>
public void Append (InputSubCodes subCode)
{
	Append((sbyte) subCode, ParameterFormat.Short);
}
/// <summary>
/// Append a memory sub code, encoded as a short constant parameter.
/// </summary>
/// <param name="subCode">Sub code to append</param>
public void Append (MemorySubCodes subCode)
{
	Append((sbyte) subCode, ParameterFormat.Short);
}
/// <summary>
/// Append a sensor port, encoded as a short constant parameter.
/// </summary>
/// <param name="port">Sensor port to append</param>
public void Append (SensorPort port)
{
	Append((sbyte) port, ParameterFormat.Short);
}
/// <summary>
/// Append a motor port, encoded as a short constant parameter.
/// </summary>
/// <param name="port">Motor port to append</param>
public void Append(MotorPort port){
	Append((sbyte) port, ParameterFormat.Short);
}
/// <summary>
/// Append an output bit field, encoded as a short constant parameter.
/// </summary>
/// <param name="bitField">Bit field to append</param>
public void Append (OutputBitfield bitField)
{
	Append((sbyte) bitField, ParameterFormat.Short);
}
/// <summary>
/// Append a constant parameter encoded byte in either short or long format.
/// Note that if format is long the parameter constant type will be a value.
/// </summary>
/// <param name="value">Value to append (clamped to 31 in short format)</param>
/// <param name="format">Use either short or long format</param>
public void Append (byte value, ParameterFormat format)
{
	if (format != ParameterFormat.Short) {
		// Long format: header byte announcing a one-byte constant value, then the payload.
		Append ((byte)((byte)ParameterFormat.Long | (byte)ParameterType.Constant | (byte)ConstantParameterType.Value | (byte)FollowType.OneByte));
		Append (value);
		return;
	}
	// Short format packs the (non-negative) value into the low 5 bits of a single byte.
	byte clamped = value > (byte)ShortValueMax ? (byte)ShortValueMax : value;
	Append ((byte)((byte)ParameterFormat.Short | (byte)ParameterType.Constant | (byte)ShortSign.Positive | (byte)(clamped & ((byte)0x1f))));
}
/// <summary>
/// Append a constant parameter encoded sbyte in either short or long format.
/// Note that if format is long the parameter constant type will be a value.
/// </summary>
/// <param name="value">Value to append (clamped to -32..31 in short format)</param>
/// <param name="format">Use either short or long format</param>
public void Append(sbyte value, ParameterFormat format)
{
	if (format != ParameterFormat.Short) {
		// Long format: header byte announcing a one-byte constant value, then the payload.
		Append ((byte)((byte)ParameterFormat.Long | (byte)ParameterType.Constant | (byte)ConstantParameterType.Value | (byte)FollowType.OneByte));
		Append (value);
		return;
	}
	// Short format packs the magnitude into the low 5 bits and the sign into bit 5.
	sbyte clamped = value;
	byte signBit;
	if (clamped < 0) {
		if (clamped < ShortValueMin) {
			clamped = ShortValueMin;
		}
		signBit = (byte)ShortSign.Negative;
	}
	else {
		if (clamped > ShortValueMax) {
			clamped = ShortValueMax;
		}
		signBit = (byte)ShortSign.Positive;
	}
	Append ((byte)((byte)ParameterFormat.Short | (byte)ParameterType.Constant | signBit | (byte)(clamped & ((byte)0x1f))));
}
/// <summary>
/// Append a constant parameter in long format.
/// </summary>
/// <param name="value">sbyte to append</param>
/// <param name="type">Use either value or label type</param>
public void Append(sbyte value, ConstantParameterType type)
{
	Append(type, FollowType.OneByte);
	Append (value);
}
/// <summary>
/// Append a constant parameter in long format.
/// </summary>
/// <param name="value">byte to append</param>
/// <param name="type">Use either value or label type</param>
public void Append(byte value, ConstantParameterType type)
{
	Append(type, FollowType.OneByte);
	Append (value);
}
/// <summary>
/// Append a constant parameter in long format.
/// </summary>
/// <param name="value">Int16 to append</param>
/// <param name="type">Use either value or label type</param>
public void Append(Int16 value , ConstantParameterType type){
	Append(type, FollowType.TwoBytes);
	Append (value);
}
/// <summary>
/// Append a constant parameter in long format.
/// </summary>
/// <param name="value">Int32 to append</param>
/// <param name="type">Use either value or label type</param>
public void Append(Int32 value, ConstantParameterType type){
	Append(type, FollowType.FourBytes);
	Append(value);
}
/// <summary>
/// Append a constant parameter in long format.
/// </summary>
/// <param name="value">UInt32 to append</param>
/// <param name="type">Use either value or label type</param>
public void Append(UInt32 value, ConstantParameterType type){
	Append(type, FollowType.FourBytes);
	Append(value);
}
/// <summary>
/// Append a constant parameter in long format.
/// </summary>
/// <param name="value">Float to append</param>
/// <param name="type">Use either value or label type</param>
public void Append(float value, ConstantParameterType type){
	Append(type, FollowType.FourBytes);
	Append(value);
}
/// <summary>
/// Append a constant parameter in long format as a zero-terminated string.
/// </summary>
/// <param name="s">String to append</param>
/// <param name="type">Use either value or label type</param>
public void Append(string s, ConstantParameterType type){
	Append(type, FollowType.TerminatedString2);
	Append (s);
}
/// <summary>
/// Append a variable parameter encoded byte in short format
/// </summary>
/// <param name="value">Value to append</param>
/// <param name="scope">Select either global or local scope</param>
public void Append(byte value, VariableScope scope)
{
byte b = (byte)((byte)ParameterFormat.Short| (byte) ParameterType.Variable | (byte) scope | (byte)(value & ((byte)0x1f)));
Append (b);
}
/// <summary>
/// Append the long-format header byte for a variable parameter, combining
/// format, parameter type, scope, variable type and follow type flags.
/// </summary>
/// <param name="scopeType">Global or local scope</param>
/// <param name="variableType">Value or handle</param>
/// <param name="followType">Size/kind of the payload that follows</param>
private void Append(VariableScope scopeType, VariableType variableType, FollowType followType){
byte b = (byte)((byte)ParameterFormat.Long| (byte) ParameterType.Variable | (byte) scopeType | (byte)variableType | (byte)followType);
Append (b);
}
/// <summary>
/// Append a variable parameter encoded byte in long format
/// </summary>
/// <param name="value">Value to append</param>
/// <param name="scope">Select either global or local scope</param>
/// <param name="type">Select either value or handle type</param>
public void Append (byte value, VariableScope scope, VariableType type)
{
// Header byte first, then the one-byte payload.
Append(scope, type, FollowType.OneByte);
Append (value);
}
/// <summary>
/// Append a variable parameter encoded Int16
/// </summary>
/// <param name="value">Value to append</param>
/// <param name="scope">Select either global or local scope</param>
/// <param name="type">Select either value or handle type</param>
public void Append(Int16 value , VariableScope scope, VariableType type){
Append(scope, type, FollowType.TwoBytes);
Append (value);
}
/// <summary>
/// Append a variable parameter encoded Int32
/// </summary>
/// <param name="value">Value to append</param>
/// <param name="scope">Select either global or local scope</param>
/// <param name="type">Select either value or handle type</param>
public void Append(Int32 value, VariableScope scope, VariableType type){
Append(scope, type, FollowType.FourBytes);
Append(value);
}
/// <summary>
/// Append a variable parameter encoded string
/// </summary>
/// <param name="s">String to append</param>
/// <param name="scope">Select either global or local scope</param>
/// <param name="type">Select either value or handle type</param>
public void Append(string s, VariableScope scope, VariableType type){
Append(scope, type, FollowType.TerminatedString2);
Append (s);
}
/// <summary>
/// Append the long-format header byte for a constant parameter, combining
/// format, parameter type, constant type and follow type flags.
/// </summary>
/// <param name="longType">Constant type (value or label).</param>
/// <param name="followType">Size/kind of the payload that follows.</param>
private void Append(ConstantParameterType longType, FollowType followType){
byte b = (byte)((byte)ParameterFormat.Long| (byte) ParameterType.Constant | (byte) longType | (byte)followType);
Append (b);
}
/// <summary>
/// Debug helper: write every command byte to the console in hexadecimal.
/// </summary>
internal void Print(){
byte[] arr = Data;
// Iterate up to Length (the logical command size), not arr.Length.
for(int i = 0; i < Length; i++){
Console.WriteLine("Command["+i+"]: " + arr[i].ToString("X"));
}
}
}
/// <summary>
/// Class for creating a EV3 reply
/// </summary>
public class Reply: BrickReply
{
	/// <summary>
	/// Gets a value indicating whether this instance has error.
	/// </summary>
	/// <value>
	/// <c>true</c> if this instance has error; otherwise, <c>false</c>.
	/// </value>
	public bool HasError{
		get{
			// DirectReply and SystemReply are the only non-error reply types;
			// any other type byte at offset 2 indicates an error reply.
			CommandType type = (CommandType)dataArray[2];
			if(type == CommandType.DirectReply || type == CommandType.SystemReply){
				return false;
			}
			return true;
		}
	}

	/// <summary>
	/// Gets the type of error.
	/// </summary>
	/// <value>
	/// The type of error
	/// </value>
	internal ErrorType ErrorType{
		get{
			return Error.ToErrorType(ErrorCode);
		}
	}

	/// <summary>
	/// Gets the error code.
	/// </summary>
	/// <value>
	/// The error code, or zero when the reply carries no error
	/// </value>
	public byte ErrorCode {
		get {
			if (HasError) {
				// Only system replies with error carry an explicit error byte
				// (at offset 4); everything else is reported as unknown.
				if(CommandType == CommandType.SystemReplyWithError){
					if(dataArray.Length >=5){
						byte error = dataArray[4];
						if(Enum.IsDefined(typeof(EV3.BrickError),(int) error)){
							return error;
						}
					}
				}
				return (byte)BrickError.UnknownError;
			}
			return 0;//no error
		}
	}

	/// <summary>
	/// Initializes a new instance of the <see cref="MonoBrick.EV3.Reply"/> class.
	/// </summary>
	public Reply ()
	{
	}

	/// <summary>
	/// Initializes a new instance of the <see cref="MonoBrick.EV3.Reply"/> class.
	/// </summary>
	/// <param name='data'>
	/// The byte array to be used for the reply
	/// </param>
	/// <exception cref="System.ArgumentException">
	/// Thrown when the array is shorter than four bytes or does not describe a valid reply
	/// </exception>
	public Reply(byte[] data){
		dataArray = data;
		if(data.Length < 4){
			throw new System.ArgumentException("Invalid EV3 Reply");
		}
		if(!Enum.IsDefined(typeof (CommandType),data[2])){
			throw new System.ArgumentException("Invalid EV3 Reply");
		}
		CommandType type = (CommandType)data[2];
		if(type == CommandType.SystemReply){
			if(!Enum.IsDefined(typeof(SystemCommand),data[3])){
				throw new System.ArgumentException("Invalid EV3 Reply");
			}
		}
		// A reply must never carry a command (request) type. The original check
		// compared against CommandType.SystemCommand twice, so direct commands
		// slipped through as valid replies; test both command types here.
		if( type == CommandType.SystemCommand || type == CommandType.DirectCommand){
			throw new System.ArgumentException("Invalid EV3 Reply");
		}
	}

	/// <summary>
	/// Gets the EV3 system command.
	/// </summary>
	/// <value>The system command, or None when this is not a system reply.</value>
	public SystemCommand SystemCommandType{
		get{
			if(CommandType == CommandType.SystemReply){
				return (SystemCommand) dataArray[3];
			}
			return SystemCommand.None;
		}
	}

	/// <summary>
	/// Gets the EV3 command type
	/// </summary>
	/// <value>The type of the command.</value>
	public CommandType CommandType {
		get{return (CommandType) dataArray[2];}
	}

	/// <summary>
	/// Gets the sequence number.
	/// </summary>
	/// <value>The sequence number.</value>
	public UInt16 SequenceNumber {
		// Bytes 0 and 1 hold the sequence number in little-endian order, so the
		// high byte must be shifted by 8 bits. The original shifted by 2, which
		// corrupted every sequence number above 255.
		get{return (UInt16)(dataArray[0] | (dataArray[1] << 8));}
	}

	/// <summary>
	/// Gets the command byte as string.
	/// </summary>
	/// <value>
	/// The command byte as string
	/// </value>
	public string CommandTypeAsString{
		get{return BrickCommand.AddSpacesToString(CommandType.ToString()); }
	}

	/// <summary>
	/// Debug helper: write type, length, error code and every reply byte to the console.
	/// </summary>
	internal void print(){
		Console.WriteLine("Command: " + CommandType.ToString());
		Console.WriteLine("Length: " + Length);
		Console.WriteLine("Errorcode: " + ErrorCode);
		for(int i = 0; i < Length; i++){
			Console.WriteLine("Reply["+i+"]: " + dataArray[i].ToString("X"));
		}
	}
}
}
| apache-2.0 |
smanvi-pivotal/geode | geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizedBasedLoadProbe.java | 3005 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.cache.partitioned;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.geode.distributed.internal.DistributionConfig;
import org.apache.geode.internal.DataSerializableFixedID;
import org.apache.geode.internal.Version;
import org.apache.geode.internal.cache.BucketAdvisor;
import org.apache.geode.internal.cache.PartitionedRegion;
import org.apache.geode.internal.cache.PartitionedRegionDataStore;
/**
 * A load probe which calculates the load of a pr using the size of the buckets in bytes.
 *
 * @since GemFire 6.0
 */
public class SizedBasedLoadProbe implements LoadProbe, DataSerializableFixedID {
  private static final long serialVersionUID = 7040814060882774875L;

  // TODO rebalancing come up with a better threshold for minimum bucket size?
  /** Lower bound (in bytes) applied to every bucket's reported size. */
  public static final int MIN_BUCKET_SIZE =
      Integer.getInteger(DistributionConfig.GEMFIRE_PREFIX + "MIN_BUCKET_SIZE", 1).intValue();

  /**
   * Computes the load of the given partitioned region from the byte size of
   * its locally hosted buckets.
   *
   * @param pr the partitioned region to probe
   * @return the per-bucket load contribution of this member
   */
  @Override
  public PRLoad getLoad(PartitionedRegion pr) {
    PartitionedRegionDataStore ds = pr.getDataStore();
    int configuredBucketCount = pr.getTotalNumberOfBuckets();
    PRLoad prLoad = new PRLoad(configuredBucketCount, pr.getLocalMaxMemory());

    // key: bid, value: size
    for (Integer bidInt : ds.getAllLocalBucketIds()) {
      int bid = bidInt.intValue();
      long bucketSize = ds.getBucketSize(bid);
      if (bucketSize < MIN_BUCKET_SIZE) {
        bucketSize = MIN_BUCKET_SIZE;
      }

      BucketAdvisor bucketAdvisor = pr.getRegionAdvisor().getBucket(bid).getBucketAdvisor();
      // Wait for a primary to exist for this bucket, because
      // it might be this member.
      bucketAdvisor.getPrimary();
      // Reuse the advisor fetched above instead of walking the region
      // advisor a second time for the same bucket.
      boolean isPrimary = bucketAdvisor.isPrimary();
      prLoad.addBucket(bid, bucketSize, isPrimary ? 1 : 0);
    }
    return prLoad;
  }

  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {}

  @Override
  public void toData(DataOutput out) throws IOException {}

  @Override
  public int getDSFID() {
    return SIZED_BASED_LOAD_PROBE;
  }

  @Override
  public Version[] getSerializationVersions() {
    // Single serialized form across versions.
    return null;
  }
}
| apache-2.0 |
halober/ovirt-engine | frontend/webadmin/modules/gwt-common/src/main/java/org/ovirt/engine/ui/common/widget/uicommon/storage/ImportIscsiStorageView.java | 9818 | package org.ovirt.engine.ui.common.widget.uicommon.storage;
import java.util.ArrayList;
import java.util.List;
import com.google.gwt.user.client.ui.SplitLayoutPanel;
import com.google.gwt.view.client.MultiSelectionModel;
import com.google.gwt.view.client.SelectionModel;
import org.ovirt.engine.core.common.businessentities.StorageDomain;
import org.ovirt.engine.ui.common.widget.HasValidation;
import org.ovirt.engine.ui.common.widget.ValidatedPanelWidget;
import org.ovirt.engine.ui.common.widget.editor.EntityModelCellTable;
import org.ovirt.engine.ui.common.widget.editor.ListModelObjectCellTable;
import org.ovirt.engine.ui.common.widget.table.column.CheckboxColumn;
import org.ovirt.engine.ui.common.widget.table.column.TextColumnWithTooltip;
import org.ovirt.engine.ui.common.widget.table.header.SelectAllCheckBoxHeader;
import org.ovirt.engine.ui.uicommonweb.models.EntityModel;
import org.ovirt.engine.ui.uicommonweb.models.ListModel;
import org.ovirt.engine.ui.uicommonweb.models.storage.ImportIscsiStorageModel;
import com.google.gwt.core.client.GWT;
import com.google.gwt.editor.client.SimpleBeanEditorDriver;
import com.google.gwt.resources.client.CssResource;
import com.google.gwt.uibinder.client.UiBinder;
import com.google.gwt.uibinder.client.UiField;
import com.google.gwt.user.client.ui.Widget;
import org.ovirt.engine.ui.uicommonweb.models.storage.SanTargetModel;
import org.ovirt.engine.ui.uicompat.Event;
import org.ovirt.engine.ui.uicompat.EventArgs;
import org.ovirt.engine.ui.uicompat.IEventListener;
import org.ovirt.engine.ui.uicompat.PropertyChangedEventArgs;
/**
 * View that lets the user discover and log in to iSCSI targets and pick the
 * storage domains found on them for import. Bound to
 * {@link ImportIscsiStorageModel} through a generated editor driver.
 */
public class ImportIscsiStorageView extends AbstractStorageView<ImportIscsiStorageModel> implements HasValidation {

    /** Generated editor driver binding the model to this view. */
    interface Driver extends SimpleBeanEditorDriver<ImportIscsiStorageModel, ImportIscsiStorageView> {
    }

    interface ViewUiBinder extends UiBinder<Widget, ImportIscsiStorageView> {
        ViewUiBinder uiBinder = GWT.create(ViewUiBinder.class);
    }

    private final Driver driver = GWT.create(Driver.class);

    @UiField
    WidgetStyle style;

    @UiField(provided = true)
    @Ignore
    IscsiDiscoverTargetsView iscsiDiscoverTargetsView;

    @UiField(provided = true)
    SplitLayoutPanel splitLayoutPanel;

    @UiField(provided = true)
    @Ignore
    EntityModelCellTable<ListModel<SanTargetModel>> targetsTable;

    @UiField(provided = true)
    @Ignore
    ListModelObjectCellTable<StorageDomain, ListModel> storageDomainsTable;

    @UiField
    ValidatedPanelWidget storageDomainsPanel;

    public ImportIscsiStorageView() {
        // Provided widgets must exist before the UiBinder template is bound.
        initViews();
        initWidget(ViewUiBinder.uiBinder.createAndBindUi(this));
        addStyles();
        driver.initialize(this);
    }

    void addStyles() {
        iscsiDiscoverTargetsView.setLoginButtonStyle(style.loginButton());
    }

    @Override
    public void edit(final ImportIscsiStorageModel object) {
        driver.edit(object);
        // The sub-view and tables are @Ignore'd by the driver and therefore
        // need to be edited explicitly.
        iscsiDiscoverTargetsView.edit(object);
        targetsTable.asEditor().edit(object.getTargets());
        storageDomainsTable.asEditor().edit(object.getStorageDomains());
        addEventsHandlers(object);
    }

    private void addEventsHandlers(final ImportIscsiStorageModel object) {
        object.getPropertyChangedEvent().addListener(new IEventListener() {
            @Override
            public void eventRaised(Event ev, Object sender, EventArgs args) {
                String propName = ((PropertyChangedEventArgs) args).propertyName;
                if (propName.equals("IsValid")) { //$NON-NLS-1$
                    onIsValidPropertyChange(object);
                }
            }
        });

        object.getTargets().getSelectedItemsChangedEvent().addListener(new IEventListener() {
            @Override
            public void eventRaised(Event ev, Object sender, EventArgs args) {
                if (object.getTargets().getSelectedItems() != null && object.getTargets().getSelectedItems().isEmpty()) {
                    // Clear items selection
                    ((MultiSelectionModel) targetsTable.getSelectionModel()).clear();
                }
            }
        });
    }

    private void initViews() {
        // Create split layout panel
        splitLayoutPanel = new SplitLayoutPanel(4);

        // Create discover panel
        iscsiDiscoverTargetsView = new IscsiDiscoverTargetsView();

        // Create tables
        createTargetsTable();
        createStorageDomainsTable();
    }

    private void createTargetsTable() {
        targetsTable = new EntityModelCellTable<ListModel<SanTargetModel>>(true, true);
        targetsTable.enableColumnResizing();
        addTargetsSelectionColumn();

        TextColumnWithTooltip<SanTargetModel> iqnColumn = new TextColumnWithTooltip<SanTargetModel>() {
            @Override
            public String getValue(SanTargetModel model) {
                return model.getEntity().getiqn();
            }
        };
        targetsTable.addColumn(iqnColumn, constants.iqn(), "60%"); //$NON-NLS-1$

        TextColumnWithTooltip<SanTargetModel> addressColumn = new TextColumnWithTooltip<SanTargetModel>() {
            @Override
            public String getValue(SanTargetModel model) {
                return model.getEntity().getconnection();
            }
        };
        targetsTable.addColumn(addressColumn, constants.addressSanStorage(), "130px"); //$NON-NLS-1$

        TextColumnWithTooltip<SanTargetModel> portColumn = new TextColumnWithTooltip<SanTargetModel>() {
            @Override
            public String getValue(SanTargetModel model) {
                return model.getEntity().getport();
            }
        };
        targetsTable.addColumn(portColumn, constants.portSanStorage(), "70px"); //$NON-NLS-1$
    }

    private void addTargetsSelectionColumn() {
        SelectAllCheckBoxHeader<SanTargetModel> selectAllHeader = new SelectAllCheckBoxHeader<SanTargetModel>() {
            @Override
            protected void selectionChanged(Boolean value) {
                ListModel listModel = targetsTable.asEditor().flush();
                if (listModel == null || listModel.getItems() == null) {
                    return;
                }
                handleSelection(value, listModel, targetsTable.getSelectionModel());
            }

            @Override
            public void handleSelection(Boolean value, ListModel listModel, SelectionModel selectionModel) {
                if (!listModel.getItems().iterator().hasNext()) {
                    return;
                }
                ArrayList<SanTargetModel> selectedItems = new ArrayList<SanTargetModel>();
                for (SanTargetModel entity : (Iterable<SanTargetModel>) listModel.getItems()) {
                    // Targets that are already logged in are not toggled by
                    // the header checkbox (see canEdit below).
                    if (!entity.getIsLoggedIn()) {
                        if (value) {
                            selectedItems.add(entity);
                        }
                        selectionModel.setSelected(entity, value);
                    }
                }
                listModel.setSelectedItems(selectedItems);
            }

            @Override
            public Boolean getValue() {
                ListModel listModel = targetsTable.asEditor().flush();
                if (listModel == null || listModel.getItems() == null) {
                    return false;
                }
                return getCheckValue(listModel.getItems(), targetsTable.getSelectionModel());
            }
        };

        CheckboxColumn<SanTargetModel> checkColumn = new CheckboxColumn<SanTargetModel>() {
            @Override
            protected boolean canEdit(SanTargetModel object) {
                return !object.getIsLoggedIn();
            }

            @Override
            public Boolean getValue(SanTargetModel object) {
                return targetsTable.getSelectionModel().isSelected(object) || object.getIsLoggedIn();
            }
        };
        targetsTable.addColumn(checkColumn, selectAllHeader, "25px"); //$NON-NLS-1$
    }

    // Renamed from the misspelled "createSotrageDomainsTable" (private, no external callers).
    private void createStorageDomainsTable() {
        storageDomainsTable = new ListModelObjectCellTable<StorageDomain, ListModel>(true, true);
        storageDomainsTable.enableColumnResizing();

        TextColumnWithTooltip<StorageDomain> nameColumn = new TextColumnWithTooltip<StorageDomain>() {
            @Override
            public String getValue(StorageDomain object) {
                return object.getStorageName();
            }
        };
        storageDomainsTable.addColumn(nameColumn, constants.storageName(), "50%"); //$NON-NLS-1$

        TextColumnWithTooltip<StorageDomain> storageIdColumn = new TextColumnWithTooltip<StorageDomain>() {
            @Override
            public String getValue(StorageDomain object) {
                return object.getId().toString();
            }
        };
        storageDomainsTable.addColumn(storageIdColumn, constants.storageIdVgName(), "50%"); //$NON-NLS-1$
    }

    private void onIsValidPropertyChange(EntityModel model) {
        if (model.getIsValid()) {
            markAsValid();
        } else {
            markAsInvalid(model.getInvalidityReasons());
        }
    }

    @Override
    public void markAsValid() {
        storageDomainsPanel.markAsValid();
    }

    @Override
    public void markAsInvalid(List<String> validationHints) {
        storageDomainsPanel.markAsInvalid(validationHints);
    }

    @Override
    public boolean isValid() {
        return storageDomainsPanel.isValid();
    }

    @Override
    public boolean isSubViewFocused() {
        return iscsiDiscoverTargetsView.isDiscoverPanelFocused();
    }

    @Override
    public ImportIscsiStorageModel flush() {
        return driver.flush();
    }

    @Override
    public void focus() {
    }

    interface WidgetStyle extends CssResource {
        String loginButton();
    }
}
| apache-2.0 |
ParBLiSS/parconnect | ext/CombBLAS/ReleaseTests/SpAsgnTiming.cpp | 2631 | #include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <functional>
#include <algorithm>
#include <vector>
#include <sstream>
#include "../CombBLAS.h"
using namespace std;
#define ITERATIONS 10
#define EDGEFACTOR 8
int main(int argc, char* argv[])
{
	int nprocs, myrank;
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD,&myrank);

	if(argc < 2)
	{
		if(myrank == 0)
		{
			cout << "Usage: ./IndexingTiming <Scale>" << endl;
		}
		MPI_Finalize();
		return -1;
	}
	{
		// Extra scope so every distributed object is destructed before
		// MPI_Finalize() runs.
		typedef SpParMat <int, double, SpDCCols<int,double> > PARDBMAT;
		PARDBMAT *A, *B;	// declare objects
		double initiator[4] = {.6, .4/3, .4/3, .4/3};
		DistEdgeList<int64_t> * DEL = new DistEdgeList<int64_t>();

		int scale = static_cast<unsigned>(atoi(argv[1]));
		ostringstream outs, outs2;	// dropped unused outs3
		outs << "Forcing scale to : " << scale << endl;
		SpParHelper::Print(outs.str());

		DEL->GenGraph500Data(initiator, scale, EDGEFACTOR, true, true );	// generate packed edges
		SpParHelper::Print("Generated renamed edge lists\n");

		// conversion from distributed edge list, keeps self-loops, sums duplicates
		A = new PARDBMAT(*DEL, false);	// already creates renumbered vertices (hence balanced)
		delete DEL;	// free memory before symmetricizing
		SpParHelper::Print("Created double Sparse Matrix\n");

		float balance = A->LoadImbalance();
		outs2 << "Load balance: " << balance << endl;
		SpParHelper::Print(outs2.str());
		A->PrintInfo();

		for(unsigned i=1; i<4; i++)
		{
			// RHS matrix is "i" scales smaller, with proportionally fewer edges.
			DEL = new DistEdgeList<int64_t>();
			DEL->GenGraph500Data(initiator, scale-i, ((double) EDGEFACTOR) / pow(2.0,i) , true, true );
			B = new PARDBMAT(*DEL, false);
			delete DEL;
			SpParHelper::Print("Created RHS Matrix\n");
			B->PrintInfo();

			FullyDistVec<int,int> perm;	// get a different permutation
			perm.iota(A->getnrow(), 0);
			perm.RandPerm();

			FullyDistVec<int,int> sel;
			sel.iota(B->getnrow(), 0);
			perm = perm(sel);	// just get the first B->getnrow() entries of the permutation
			perm.PrintInfo("Index vector");

			A->SpAsgn(perm,perm,*B);	// warm-up: overriding A with a structurally similar piece
			A->PrintInfo();

			double t1 = MPI_Wtime();
			for(int j=0; j< ITERATIONS; ++j)
			{
				A->SpAsgn(perm,perm,*B);
			}
			double t2 = MPI_Wtime();
			if(myrank == 0)
			{
				cout<< "Scale " << scale-i << " assignment iterations finished"<<endl;
				printf("%.6lf seconds elapsed per iteration\n", (t2-t1)/(double)ITERATIONS);
			}
			delete B;
		}
		delete A;	// was leaked in the original; release before MPI_Finalize
	}
	MPI_Finalize();
	return 0;
}
| apache-2.0 |
ckamm/gerrit | gerrit-httpd/src/main/java/com/google/gerrit/httpd/auth/container/HttpAuthFilter.java | 6541 | // Copyright (C) 2009 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.httpd.auth.container;
import static com.google.common.base.Objects.firstNonNull;
import static com.google.common.base.Strings.emptyToNull;
import static com.google.common.net.HttpHeaders.AUTHORIZATION;
import static com.google.gerrit.reviewdb.client.AccountExternalId.SCHEME_GERRIT;
import com.google.gerrit.httpd.HtmlDomUtil;
import com.google.gerrit.httpd.WebSession;
import com.google.gerrit.httpd.raw.HostPageServlet;
import com.google.gerrit.reviewdb.client.AccountExternalId;
import com.google.gerrit.server.config.AuthConfig;
import com.google.gwtexpui.server.CacheHeaders;
import com.google.gwtjsonrpc.server.RPCServletUtils;
import com.google.inject.Inject;
import com.google.inject.Provider;
import com.google.inject.Singleton;
import org.eclipse.jgit.util.Base64;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Watches request for the host page and requires login if not yet signed in.
 * <p>
 * If HTTP authentication has been enabled on this server this filter is bound
 * in front of the {@link HostPageServlet} and redirects users who are not yet
 * signed in to visit {@code /login/}, so the web container can force login.
 * This redirect is performed with JavaScript, such that any existing anchor
 * token in the URL can be rewritten and preserved through the authentication
 * process of any enterprise single sign-on solutions.
 */
@Singleton
class HttpAuthFilter implements Filter {
  private final Provider<WebSession> sessionProvider;
  private final byte[] signInRaw;
  private final byte[] signInGzip;
  private final String loginHeader;

  @Inject
  HttpAuthFilter(final Provider<WebSession> webSession,
      final AuthConfig authConfig) throws IOException {
    this.sessionProvider = webSession;

    // Load the login redirect page once; keep both a raw and a pre-gzipped
    // copy so doFilter can serve whichever encoding the client accepts.
    final String pageName = "LoginRedirect.html";
    final String doc = HtmlDomUtil.readFile(getClass(), pageName);
    if (doc == null) {
      throw new FileNotFoundException("No " + pageName + " in webapp");
    }
    signInRaw = doc.getBytes(HtmlDomUtil.ENC);
    signInGzip = HtmlDomUtil.compress(signInRaw);
    loginHeader = firstNonNull(
        emptyToNull(authConfig.getLoginHttpHeader()),
        AUTHORIZATION);
  }

  @Override
  public void doFilter(final ServletRequest request,
      final ServletResponse response, final FilterChain chain)
      throws IOException, ServletException {
    if (isSessionValid((HttpServletRequest) request)) {
      chain.doFilter(request, response);
    } else {
      // Not signed in yet. Since the browser state might have an anchor
      // token which we want to capture and carry through the auth process
      // we send back JavaScript now to capture that, and do the real work
      // of redirecting to the authentication area.
      //
      final HttpServletRequest req = (HttpServletRequest) request;
      final HttpServletResponse rsp = (HttpServletResponse) response;
      final byte[] tosend;
      if (RPCServletUtils.acceptsGzipEncoding(req)) {
        rsp.setHeader("Content-Encoding", "gzip");
        tosend = signInGzip;
      } else {
        tosend = signInRaw;
      }

      CacheHeaders.setNotCacheable(rsp);
      rsp.setContentType("text/html");
      rsp.setCharacterEncoding(HtmlDomUtil.ENC);
      rsp.setContentLength(tosend.length);
      final OutputStream out = rsp.getOutputStream();
      try {
        out.write(tosend);
      } finally {
        out.close();
      }
    }
  }

  private boolean isSessionValid(HttpServletRequest req) {
    WebSession session = sessionProvider.get();
    if (session.isSignedIn()) {
      // A signed-in session is only valid if it belongs to the identity
      // the front-end says is making this request (or no identity is sent).
      String user = getRemoteUser(req);
      return user == null || correctUser(user, session);
    }
    return false;
  }

  private static boolean correctUser(String user, WebSession session) {
    AccountExternalId.Key id = session.getLastLoginExternalId();
    return id != null
        && id.equals(new AccountExternalId.Key(SCHEME_GERRIT, user));
  }

  String getRemoteUser(HttpServletRequest req) {
    if (AUTHORIZATION.equals(loginHeader)) {
      String user = emptyToNull(req.getRemoteUser());
      if (user != null) {
        // The container performed the authentication, and has the user
        // identity already decoded for us. Honor that as we have been
        // configured to honor HTTP authentication.
        return user;
      }

      // If the container didn't do the authentication we might
      // have done it in the front-end web server. Try to split
      // the identity out of the Authorization header and honor it.
      //
      String auth = emptyToNull(req.getHeader(AUTHORIZATION));
      if (auth == null) {
        return null;

      } else if (auth.startsWith("Basic ")) {
        auth = auth.substring("Basic ".length());
        auth = new String(Base64.decode(auth));
        final int c = auth.indexOf(':');
        return c > 0 ? auth.substring(0, c) : null;

      } else if (auth.startsWith("Digest ")) {
        int u = auth.indexOf("username=\"");
        if (u <= 0) {
          return null;
        }
        auth = auth.substring(u + 10);
        int e = auth.indexOf('"');
        // Reuse the index computed above (the original re-ran indexOf here).
        return e > 0 ? auth.substring(0, e) : null;

      } else {
        return null;
      }
    } else {
      // Nonstandard HTTP header. We have been told to trust this
      // header blindly as-is.
      //
      return emptyToNull(req.getHeader(loginHeader));
    }
  }

  String getLoginHeader() {
    return loginHeader;
  }

  @Override
  public void init(final FilterConfig filterConfig) {
  }

  @Override
  public void destroy() {
  }
}
| apache-2.0 |
gemxd/gemfirexd-oss | gemfirexd/tools/src/testing/java/org/apache/derbyTesting/functionTests/tests/store/Derby3625Test.java | 10912 | package org.apache.derbyTesting.functionTests.tests.store;
import org.apache.derbyTesting.junit.BaseJDBCTestCase;
import org.apache.derbyTesting.junit.CleanDatabaseTestSetup;
import org.apache.derbyTesting.junit.DatabasePropertyTestSetup;
import org.apache.derbyTesting.junit.TestConfiguration;
import com.pivotal.gemfirexd.internal.shared.common.sanity.SanityManager;
import junit.framework.Assert;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.SQLException;
/*
Class org.apache.derbyTesting.functionTests.tests.jdbc4.Derby3650Test
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
Test to reproduce DERBY-3625, failure in inline compress, in some
circumstances depending on exact size of data and state of pages during
the defragment phase.
Would throw following error:
ERROR XSDA3: Limitation: Record cannot be updated or inserted due to lack of
space on the page. Use the parameters gemfirexd.storage.pageSize and/or
gemfirexd.storage.pageReservedSpace to work around this limitation.^M
**/
public class Derby3625Test extends StoreBaseTest
{
/**************************************************************************
* Fields of the class
**************************************************************************
*/
/**************************************************************************
* Constructors for This class:
**************************************************************************
*/
/**************************************************************************
* Private/Protected methods of This class:
**************************************************************************
*/
/**************************************************************************
* Public Methods of This class:
**************************************************************************
*/
/**************************************************************************
* Public Methods of XXXX class:
**************************************************************************
*/
/**
 * Creates a test case with the given name.
 *
 * @param name the name of the test method to run
 */
public Derby3625Test(String name)
{
super(name);
}
/**
* DERBY-3625 test case
* <p>
* Derby 3625 is caused by a bug where compress calculates the space
* needed to fit a moved row from page A to B, and assumes that the space
* required on page B is the same on page A. The problem is that in
* some cases due to the stored format of the changing record id the space
* required on B may be more than A. In the case where there is exactly
* enough space by the initial calculation the move fails because one or
* 3 more bytes may be necessary to make the move and the compress fails.
* <p>
* To test:
* fill page 1 with dummy rows, page 1 has a special control row on it
* so it can't ever be empty so use page 2 instead.
* fill page 2 with dummy rows such and empty it such that the
* next row id on it is greater that 64 which takes 2 bytes to store
* vs. 1 for rowid's less * that 64.
* fill page 3 and 4 with some dummy rows which will be deleted to give
* compress table room to work during defragment.
* fill page 4 with 2 rows which fit on page 2 with 1 byte stored record
* id's but will not fit with 2 byte stored record id's.
* These will not be deleted and the bug is exercised as
* defragment tries to move these rows to page 2 after it has
* been reclaimed as a free page.
**/
public void testTwoToOneByteCase()
        throws SQLException
{
    PreparedStatement insert_stmt =
        prepareStatement("INSERT INTO testCompress VALUES(?, ?, ?)");

    // page 0 - container info/bit map, does not affect test

    // page 1 - fill it up and leave rows on it. page 1 has a special
    // row on it that can never be deleted so this page never can be
    // made free.

    // insert one blob padded row that will fill page 1
    byte[] pad_blob = new byte[32630];
    insert_stmt.setInt(    1, 1);
    insert_stmt.setBytes(  2, pad_blob);
    insert_stmt.setString( 3, "page 1");
    insert_stmt.executeUpdate();

    // page 2 - fill it completely with enough rows such that future
    // rows will force a 2 byte row id, ie. more than 64 rows. Later
    // in this test all the rows will be deleted from this page so that
    // the page is on the free list for compress defragment to use it.
    pad_blob = new byte[302];
    insert_stmt.setInt(    1, 2);
    insert_stmt.setBytes(  2, pad_blob);
    insert_stmt.setString( 3, "page 2");
    for (int i = 0; i < 98; i++)
    {
        insert_stmt.executeUpdate();
    }

    // page 3 - fill it for another free page.
    insert_stmt.setInt(    1, 3);
    insert_stmt.setBytes(  2, pad_blob);
    insert_stmt.setString( 3, "page 3");
    for (int i = 0; i < 98; i++)
    {
        insert_stmt.executeUpdate();
    }

    // page 4 - 2 rows, with one byte free. When these are moved to
    // a free page with bigger rowid's they will take 2 more bytes and
    // will not both fit on the page.
    //
    // I didn't track it down, but for some reason I could not fill a page
    // completely if there was only one row on the page, it kept turning
    // the blob column into a long row. I was just picking magic numbers
    // for the blob column to make it fit.
    //
    // With 2 rows I was able to fill the page up to one empty byte.
    // Then with the bug the first row would move to page 2 which is
    // now free but take one more byte than it did on this page. And
    // finally when the second row was moved it would think it would fit
    // but throw an exception when the rowid compressed version would
    // cause it to be one byte bigger than the original row.
    pad_blob = new byte[100];
    insert_stmt.setInt(    1, 4);
    insert_stmt.setBytes(  2, pad_blob);
    insert_stmt.setString( 3, "page 4");
    insert_stmt.executeUpdate();
    pad_blob = new byte[32534];
    insert_stmt.setInt(    1, 4);
    insert_stmt.setBytes(  2, pad_blob);
    insert_stmt.setString( 3, "page 4");
    insert_stmt.executeUpdate();
    commit();

    int space_info[] = getSpaceInfo("APP", "TESTCOMPRESS", true);

    // space after initial insert setup should be 4 pages
    // 0 - container info - not reflected in allocated page count,
    // 1 - dummy data left on the page,
    // 2 - bunch of short records to be deleted to make free page
    // 3 - bunch of short records to be deleted to make free page
    // 4 - short and long record to exercise bug.
    Assert.assertEquals(
        "wrong allocated page count in test setup",
        4, space_info[SPACE_INFO_NUM_ALLOC]);

    Statement stmt = createStatement();

    // Delete rows on page 2 and 3 to allow defragment to try and move
    // the page 4 row up.
    stmt.executeUpdate("DELETE FROM testCompress where id = 2 or id = 3");
    commit();

    // Before fixing the bug, this compress call would throw the
    // following exception:
    //
    // ERROR XSDA3: Limitation: Record cannot be updated or inserted due
    // to lack of space on the page. Use the parameters
    // gemfirexd.storage.pageSize and/or gemfirexd.storage.pageReservedSpace to
    // work around this limitation.
    CallableStatement call_compress =
        prepareCall(
            "CALL SYSCS_UTIL.INPLACE_COMPRESS_TABLE(?, ?, 1, 1, 1)");
    call_compress.setString(1, "APP");
    call_compress.setString(2, "TESTCOMPRESS");
    call_compress.executeUpdate();
    commit();

    space_info = getSpaceInfo("APP", "TESTCOMPRESS", true);

    // space after the test should be 3 pages:
    // 0 - container info - not reflected in allocated page count,
    // 1 - dummy data left on the page,
    // 2 - one short record, but long record did not fit
    // 3 - long record on an empty page.
    Assert.assertEquals(
        "wrong allocated page count", 3, space_info[SPACE_INFO_NUM_ALLOC]);

    // Release the statements created here; the original leaked "stmt".
    call_compress.close();
    stmt.close();
    insert_stmt.close();
}
protected static Test baseSuite(String name)
{
    // Collect the tests, shorten the lock timeouts, and wrap everything in
    // a decorator that recreates a clean database before the suite runs.
    TestSuite suite = new TestSuite(name);
    suite.addTestSuite(Derby3625Test.class);
    Test withShortTimeouts = DatabasePropertyTestSetup.setLockTimeouts(suite, 2, 4);
    return new CleanDatabaseTestSetup(withShortTimeouts)
    {
        /**
         * Creates the tables used in the test cases.
         * @exception SQLException if a database error occurs
         */
        protected void decorateSQL(Statement stmt) throws SQLException
        {
            Connection conn = stmt.getConnection();
            // Temporarily drop the per-page reserved space to 0 so the test
            // can fill pages completely.
            CallableStatement set_dbprop = conn.prepareCall(
                "CALL SYSCS_UTIL.SET_DATABASE_PROPERTY(?, ?)");
            set_dbprop.setString(1, "gemfirexd.storage.pageReservedSpace");
            set_dbprop.setString(2, "0");
            set_dbprop.executeUpdate();
            // create a table, with blob it will be 32k page size
            stmt.executeUpdate(
                "CREATE TABLE testCompress " +
                "(id int, padcol blob(1M), c varchar(200))");
            // Reset the property to its default for subsequent tests.
            set_dbprop.setString(2, null);
            set_dbprop.executeUpdate();
            set_dbprop.close();
            conn.setAutoCommit(false);
        }
    };
}
/**
 * Builds the top-level suite for this fixture (embedded configuration only).
 */
public static Test suite()
{
    TestSuite suite = new TestSuite("Derby3625Test");
    // Fixed label typo: was "Derby36625Test:embedded", inconsistent with the
    // class/suite name "Derby3625Test" used everywhere else.
    suite.addTest(baseSuite("Derby3625Test:embedded"));
    return suite;
}
}
| apache-2.0 |
mbebenita/shumway.ts | tests/baselines/reference/aliasUsageInFunctionExpression.js | 983 | //// [aliasUsageInFunctionExpression_backbone.js]
// NOTE(review): generated TypeScript-compiler baseline output — regenerate
// from the .ts test case rather than hand-editing. The IIFE below is the
// ES5 lowering the compiler emits for `export class Model`.
var Model = (function () {
    function Model() {
    }
    return Model;
})();
exports.Model = Model;
//// [aliasUsageInFunctionExpression_moduleA.js]
// Compiler-emitted `__extends` helper (generated baseline — do not hand-edit):
// copies own (static) properties from base `b` onto derived `d`, then links
// d.prototype to a fresh object whose prototype is b.prototype and whose
// `constructor` points back at `d`. The statement order is what establishes
// the prototype chain.
var __extends = this.__extends || function (d, b) {
    for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p];
    function __() { this.constructor = d; }
    __.prototype = b.prototype;
    d.prototype = new __();
};
// Generated baseline: ES5 lowering of
// `class VisualizationModel extends Backbone.Model`, where `Backbone` is an
// import alias — the pattern this test case exercises.
var Backbone = require("aliasUsageInFunctionExpression_backbone");
var VisualizationModel = (function (_super) {
    __extends(VisualizationModel, _super);
    function VisualizationModel() {
        _super.apply(this, arguments);
    }
    return VisualizationModel;
})(Backbone.Model);
exports.VisualizationModel = VisualizationModel;
//// [aliasUsageInFunctionExpression_main.js]
var moduleA = require("aliasUsageInFunctionExpression_moduleA");
var f = function (x) {
return x;
};
f = function (x) {
return moduleA;
};
| apache-2.0 |
li-ang/kubernetes | staging/src/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go | 14143 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"context"
"strconv"
"strings"
"sync"
"time"
compbasemetrics "k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
basemetricstestutil "k8s.io/component-base/metrics/testutil"
"k8s.io/utils/clock"
)
const (
	// namespace and subsystem together form the "apiserver_flowcontrol_"
	// prefix shared by every metric declared in this package.
	namespace = "apiserver"
	subsystem = "flowcontrol"
)
// Metric label names used by the vectors below.
const (
	requestKind   = "request_kind"
	priorityLevel = "priority_level"
	flowSchema    = "flow_schema"
	phase         = "phase"
	mark          = "mark"
)
var (
	// queueLengthBuckets bounds the request_queue_length_after_enqueue histogram.
	queueLengthBuckets = []float64{0, 10, 25, 50, 100, 250, 500, 1000}
	// requestDurationSecondsBuckets bounds the wait/execution duration histograms.
	requestDurationSecondsBuckets = []float64{0, 0.005, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 30}
)
var registerMetrics sync.Once
// Register registers every metric in this package with the legacy registry.
// Safe to call repeatedly; registration is performed exactly once.
func Register() {
	registerMetrics.Do(func() {
		for i := range metrics {
			legacyregistry.MustRegister(metrics[i])
		}
	})
}
// resettable is implemented by metric vectors that can be cleared back to
// their initial state; see Reset.
type resettable interface {
	Reset()
}
// Reset clears every registered metric back to its zero state.
// Panics (via the type assertion) if any metric does not implement resettable.
func Reset() {
	for i := range metrics {
		metrics[i].(resettable).Reset()
	}
}
// GatherAndCompare gathers the named metrics from the default legacy registry
// and compares them against expected, which is given in Prometheus text syntax.
func GatherAndCompare(expected string, metricNames ...string) error {
	reader := strings.NewReader(expected)
	return basemetricstestutil.GatherAndCompare(legacyregistry.DefaultGatherer, reader, metricNames...)
}
// Registerables is a slice of compbasemetrics.Registerable, with a helper
// (Append) for building up the package-wide metrics list.
type Registerables []compbasemetrics.Registerable
// Append returns the receiver extended with the given additional Registerables.
func (rs Registerables) Append(more ...compbasemetrics.Registerable) Registerables {
	combined := append(rs, more...)
	return combined
}
var (
	// apiserverRejectedRequestsTotal counts requests rejected by APF, labeled by reason.
	apiserverRejectedRequestsTotal = compbasemetrics.NewCounterVec(
		&compbasemetrics.CounterOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "rejected_requests_total",
			Help:           "Number of requests rejected by API Priority and Fairness system",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema, "reason"},
	)
	// apiserverDispatchedRequestsTotal counts requests released for service.
	apiserverDispatchedRequestsTotal = compbasemetrics.NewCounterVec(
		&compbasemetrics.CounterOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "dispatched_requests_total",
			Help:           "Number of requests released by API Priority and Fairness system for service",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema},
	)
	// PriorityLevelConcurrencyObserverPairGenerator creates pairs that observe concurrency for priority levels
	PriorityLevelConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond,
		&compbasemetrics.HistogramOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "priority_level_request_count_samples",
			Help:           "Periodic observations of the number of requests",
			Buckets:        []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
			StabilityLevel: compbasemetrics.ALPHA,
		},
		&compbasemetrics.HistogramOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "priority_level_request_count_watermarks",
			Help:           "Watermarks of the number of requests",
			Buckets:        []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	// ReadWriteConcurrencyObserverPairGenerator creates pairs that observe concurrency broken down by mutating vs readonly
	ReadWriteConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond,
		&compbasemetrics.HistogramOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "read_vs_write_request_count_samples",
			Help:           "Periodic observations of the number of requests",
			Buckets:        []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
			StabilityLevel: compbasemetrics.ALPHA,
		},
		&compbasemetrics.HistogramOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "read_vs_write_request_count_watermarks",
			Help:           "Watermarks of the number of requests",
			Buckets:        []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{requestKind},
	)
	// apiserverCurrentR tracks the fair-queuing progress meter R at the last change.
	apiserverCurrentR = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "current_r",
			Help:           "R(time of last change)",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	// apiserverDispatchR tracks R at the time of the last dispatch.
	apiserverDispatchR = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "dispatch_r",
			Help:           "R(time of last dispatch)",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	// apiserverLatestS tracks the virtual start time S of the most recently dispatched request.
	apiserverLatestS = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "latest_s",
			Help:           "S(most recently dispatched request)",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	// apiserverNextSBounds tracks per-queue min/max of the next S value.
	apiserverNextSBounds = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "next_s_bounds",
			Help:           "min and max, over queues, of S(oldest waiting request in queue)",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, "bound"},
	)
	// apiserverNextDiscountedSBounds is apiserverNextSBounds discounted by estimated work in progress.
	apiserverNextDiscountedSBounds = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "next_discounted_s_bounds",
			Help:           "min and max, over queues, of S(oldest waiting request in queue) - estimated work in progress",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, "bound"},
	)
	// apiserverCurrentInqueueRequests gauges how many requests are currently queued.
	apiserverCurrentInqueueRequests = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "current_inqueue_requests",
			Help:           "Number of requests currently pending in queues of the API Priority and Fairness system",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema},
	)
	// apiserverRequestQueueLength samples queue length as seen by each enqueued request.
	apiserverRequestQueueLength = compbasemetrics.NewHistogramVec(
		&compbasemetrics.HistogramOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "request_queue_length_after_enqueue",
			Help:           "Length of queue in the API Priority and Fairness system, as seen by each request after it is enqueued",
			Buckets:        queueLengthBuckets,
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema},
	)
	// apiserverRequestConcurrencyLimit gauges the shared concurrency limit per priority level.
	apiserverRequestConcurrencyLimit = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "request_concurrency_limit",
			Help:           "Shared concurrency limit in the API Priority and Fairness system",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	// apiserverCurrentExecutingRequests gauges requests in their regular execution phase.
	apiserverCurrentExecutingRequests = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "current_executing_requests",
			Help:           "Number of requests in regular execution phase in the API Priority and Fairness system",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema},
	)
	// apiserverRequestConcurrencyInUse gauges seats occupied by executing requests.
	apiserverRequestConcurrencyInUse = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "request_concurrency_in_use",
			// Fixed typo in user-visible help text: "occupided" -> "occupied".
			Help:           "Concurrency (number of seats) occupied by the currently executing (all phases count) requests in the API Priority and Fairness system",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema},
	)
	// apiserverRequestWaitingSeconds observes time spent waiting in queue.
	apiserverRequestWaitingSeconds = compbasemetrics.NewHistogramVec(
		&compbasemetrics.HistogramOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "request_wait_duration_seconds",
			Help:           "Length of time a request spent waiting in its queue",
			Buckets:        requestDurationSecondsBuckets,
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema, "execute"},
	)
	// apiserverRequestExecutionSeconds observes the duration of regular execution.
	apiserverRequestExecutionSeconds = compbasemetrics.NewHistogramVec(
		&compbasemetrics.HistogramOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "request_execution_seconds",
			Help:           "Duration of regular phase of request execution in the API Priority and Fairness system",
			Buckets:        requestDurationSecondsBuckets,
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, flowSchema},
	)
	// apiserverEpochAdvances counts progress-meter epoch advances (see AddEpochAdvance).
	apiserverEpochAdvances = compbasemetrics.NewCounterVec(
		&compbasemetrics.CounterOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "epoch_advance_total",
			Help:           "Number of times the queueset's progress meter jumped backward",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel, "success"},
	)
	// metrics is the complete list registered by Register.
	metrics = Registerables{
		apiserverRejectedRequestsTotal,
		apiserverDispatchedRequestsTotal,
		apiserverCurrentR,
		apiserverDispatchR,
		apiserverLatestS,
		apiserverNextSBounds,
		apiserverNextDiscountedSBounds,
		apiserverCurrentInqueueRequests,
		apiserverRequestQueueLength,
		apiserverRequestConcurrencyLimit,
		apiserverRequestConcurrencyInUse,
		apiserverCurrentExecutingRequests,
		apiserverRequestWaitingSeconds,
		apiserverRequestExecutionSeconds,
		apiserverEpochAdvances,
	}.
		Append(PriorityLevelConcurrencyObserverPairGenerator.metrics()...).
		Append(ReadWriteConcurrencyObserverPairGenerator.metrics()...)
)
// AddRequestsInQueues adjusts, by delta, the gauge of requests of the given
// flowSchema waiting in the queues of the given priorityLevel.
// The ctx parameter is currently unused.
func AddRequestsInQueues(ctx context.Context, priorityLevel, flowSchema string, delta int) {
	gauge := apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema)
	gauge.Add(float64(delta))
}
// AddRequestsExecuting adjusts, by delta, the gauge of executing requests of
// the given flowSchema and priorityLevel. The ctx parameter is currently unused.
func AddRequestsExecuting(ctx context.Context, priorityLevel, flowSchema string, delta int) {
	gauge := apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema)
	gauge.Add(float64(delta))
}
// SetCurrentR sets the current-R (virtual time) gauge for the given priority level.
func SetCurrentR(priorityLevel string, r float64) {
	gauge := apiserverCurrentR.WithLabelValues(priorityLevel)
	gauge.Set(r)
}
// SetDispatchMetrics sets the dispatch-time gauges for the given priority
// level: R at dispatch, latest S, and the min/max bounds of next-S both raw
// and discounted by estimated work in progress.
// (Previous doc comment misnamed this function "SetLatestS".)
func SetDispatchMetrics(priorityLevel string, r, s, sMin, sMax, discountedSMin, discountedSMax float64) {
	apiserverDispatchR.WithLabelValues(priorityLevel).Set(r)
	apiserverLatestS.WithLabelValues(priorityLevel).Set(s)
	bounds := apiserverNextSBounds
	bounds.WithLabelValues(priorityLevel, "min").Set(sMin)
	bounds.WithLabelValues(priorityLevel, "max").Set(sMax)
	discounted := apiserverNextDiscountedSBounds
	discounted.WithLabelValues(priorityLevel, "min").Set(discountedSMin)
	discounted.WithLabelValues(priorityLevel, "max").Set(discountedSMax)
}
// AddRequestConcurrencyInUse adjusts, by delta, the gauge of seats occupied
// by the currently executing requests of the given flowSchema and priorityLevel.
func AddRequestConcurrencyInUse(priorityLevel, flowSchema string, delta int) {
	gauge := apiserverRequestConcurrencyInUse.WithLabelValues(priorityLevel, flowSchema)
	gauge.Add(float64(delta))
}
// UpdateSharedConcurrencyLimit records the shared concurrency limit for the
// given priority level.
func UpdateSharedConcurrencyLimit(priorityLevel string, limit int) {
	gauge := apiserverRequestConcurrencyLimit.WithLabelValues(priorityLevel)
	gauge.Set(float64(limit))
}
// AddReject increments the count of rejected requests for the given
// priorityLevel and flowSchema, labeled with the rejection reason.
func AddReject(ctx context.Context, priorityLevel, flowSchema, reason string) {
	counter := apiserverRejectedRequestsTotal.WithContext(ctx)
	counter.WithLabelValues(priorityLevel, flowSchema, reason).Add(1)
}
// AddDispatch increments the count of dispatched requests for the given
// priorityLevel and flowSchema.
func AddDispatch(ctx context.Context, priorityLevel, flowSchema string) {
	counter := apiserverDispatchedRequestsTotal.WithContext(ctx)
	counter.WithLabelValues(priorityLevel, flowSchema).Add(1)
}
// ObserveQueueLength records a queue-length observation for the given
// priorityLevel and flowSchema.
func ObserveQueueLength(ctx context.Context, priorityLevel, flowSchema string, length int) {
	hist := apiserverRequestQueueLength.WithContext(ctx)
	hist.WithLabelValues(priorityLevel, flowSchema).Observe(float64(length))
}
// ObserveWaitingDuration records how long a request waited in its queue,
// labeled by whether it went on to execute. (Previous doc comment incorrectly
// said it observed queue length.)
func ObserveWaitingDuration(ctx context.Context, priorityLevel, flowSchema, execute string, waitTime time.Duration) {
	hist := apiserverRequestWaitingSeconds.WithContext(ctx)
	hist.WithLabelValues(priorityLevel, flowSchema, execute).Observe(waitTime.Seconds())
}
// ObserveExecutionDuration records how long the regular execution phase of a
// request took, for the given priorityLevel and flowSchema.
func ObserveExecutionDuration(ctx context.Context, priorityLevel, flowSchema string, executionTime time.Duration) {
	hist := apiserverRequestExecutionSeconds.WithContext(ctx)
	hist.WithLabelValues(priorityLevel, flowSchema).Observe(executionTime.Seconds())
}
// AddEpochAdvance increments the epoch-advance counter for the given priority
// level, labeled by whether the advance succeeded.
func AddEpochAdvance(ctx context.Context, priorityLevel string, success bool) {
	counter := apiserverEpochAdvances.WithContext(ctx)
	counter.WithLabelValues(priorityLevel, strconv.FormatBool(success)).Inc()
}
| apache-2.0 |
AdityaMili95/Wallte | vendor/github.com/knq/sysutil/sysutil_linux.go | 536 | // +build linux
package sysutil
import (
"bytes"
"io/ioutil"
"strconv"
"time"
)
var (
	// btimePrefix marks the boot-time line in /proc/stat ("btime <unix-seconds>").
	btimePrefix = []byte("btime ")
	// lineEnd is the separator used to split /proc/stat into lines.
	lineEnd = []byte("\n")
)
// init determines the system boot time once at package load by scanning
// /proc/stat for the "btime <unix-seconds>" line. On any failure — file
// unreadable, field unparsable, or field absent — it falls back to
// time.Now() so that btime is always populated. (Previously an absent
// "btime" line left btime as the zero time.)
func init() {
	buf, err := ioutil.ReadFile("/proc/stat")
	if err != nil {
		btime = time.Now()
		return
	}
	// bytes.Split is exactly equivalent to the former SplitN(buf, lineEnd, -1).
	for _, line := range bytes.Split(buf, lineEnd) {
		if !bytes.HasPrefix(line, btimePrefix) {
			continue
		}
		t, err := strconv.ParseInt(string(line[len(btimePrefix):]), 10, 64)
		if err != nil {
			btime = time.Now()
		} else {
			btime = time.Unix(t, 0)
		}
		return
	}
	// No btime line found at all; fall back to the current time.
	btime = time.Now()
}
| apache-2.0 |
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/brain/brain_collections.py | 1406 | # Copyright (c) 2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import astroid
def _collections_transform():
    """Return an astroid module of pure-Python stubs for ``collections``.

    ``defaultdict``, ``deque`` and ``OrderedDict`` are implemented in C, so
    astroid cannot introspect them; these skeletons are used for
    attribute/method *inference* only and their bodies are never executed.

    Bug fix: ``OrderedDict`` was previously declared with ``def`` instead of
    ``class``, so it was inferred as a function rather than a dict subclass.
    """
    return astroid.parse('''
    class defaultdict(dict):
        default_factory = None
        def __missing__(self, key): pass
        def __getitem__(self, key): return default_factory
    class deque(object):
        maxlen = 0
        def __init__(self, iterable=None, maxlen=None):
            self.iterable = iterable
        def append(self, x): pass
        def appendleft(self, x): pass
        def clear(self): pass
        def count(self, x): return 0
        def extend(self, iterable): pass
        def extendleft(self, iterable): pass
        def pop(self): pass
        def popleft(self): pass
        def remove(self, value): pass
        def reverse(self): pass
        def rotate(self, n): pass
        def __iter__(self): return self
        def __reversed__(self): return self.iterable[::-1]
        def __getitem__(self, index): pass
        def __setitem__(self, index, value): pass
        def __delitem__(self, index): pass
    class OrderedDict(dict):
        def __reversed__(self): return self[::-1]
    ''')
astroid.register_module_extender(astroid.MANAGER, 'collections', _collections_transform)
| apache-2.0 |
peridotperiod/isis | mothballed/component/viewer/dnd/impl/src/main/java/org/apache/isis/viewer/dnd/DndViewer.java | 20039 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.viewer.dnd;
import java.awt.Dimension;
import java.util.StringTokenizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.isis.core.commons.authentication.AuthenticationSession;
import org.apache.isis.core.commons.config.IsisConfigurationException;
import org.apache.isis.core.commons.exceptions.IsisException;
import org.apache.isis.core.commons.factory.InstanceCreationException;
import org.apache.isis.core.commons.factory.InstanceUtil;
import org.apache.isis.core.runtime.authentication.AuthenticationRequest;
import org.apache.isis.core.runtime.authentication.exploration.AuthenticationRequestExploration;
import org.apache.isis.core.runtime.fixtures.authentication.AuthenticationRequestLogonFixture;
import org.apache.isis.core.runtime.system.context.IsisContext;
import org.apache.isis.core.runtime.userprofile.UserProfile;
import org.apache.isis.core.runtime.viewer.IsisViewerAbstract;
import org.apache.isis.viewer.dnd.awt.AwtImageFactory;
import org.apache.isis.viewer.dnd.awt.AwtToolkit;
import org.apache.isis.viewer.dnd.awt.LoginDialog;
import org.apache.isis.viewer.dnd.awt.ViewerFrame;
import org.apache.isis.viewer.dnd.awt.XViewer;
import org.apache.isis.viewer.dnd.calendar.CalendarSpecification;
import org.apache.isis.viewer.dnd.combined.ExpandableListSpecification;
import org.apache.isis.viewer.dnd.combined.FormWithTableSpecification;
import org.apache.isis.viewer.dnd.combined.TwoPartViewSpecification;
import org.apache.isis.viewer.dnd.configurable.ConfigurableObjectViewSpecification;
import org.apache.isis.viewer.dnd.configurable.GridListSpecification;
import org.apache.isis.viewer.dnd.configurable.NewViewSpecification;
import org.apache.isis.viewer.dnd.configurable.PanelViewSpecification;
import org.apache.isis.viewer.dnd.drawing.Bounds;
import org.apache.isis.viewer.dnd.drawing.Location;
import org.apache.isis.viewer.dnd.drawing.Size;
import org.apache.isis.viewer.dnd.field.CheckboxField;
import org.apache.isis.viewer.dnd.field.ColorField;
import org.apache.isis.viewer.dnd.field.DateFieldSpecification;
import org.apache.isis.viewer.dnd.field.EmptyField;
import org.apache.isis.viewer.dnd.field.FieldOfSpecification;
import org.apache.isis.viewer.dnd.field.ImageField;
import org.apache.isis.viewer.dnd.field.PasswordFieldSpecification;
import org.apache.isis.viewer.dnd.field.TextFieldSpecification;
import org.apache.isis.viewer.dnd.form.ExpandableFormSpecification;
import org.apache.isis.viewer.dnd.form.FormSpecification;
import org.apache.isis.viewer.dnd.form.FormWithDetailSpecification;
import org.apache.isis.viewer.dnd.form.InternalFormSpecification;
import org.apache.isis.viewer.dnd.form.SummaryFormSpecification;
import org.apache.isis.viewer.dnd.grid.GridSpecification;
import org.apache.isis.viewer.dnd.help.HelpViewer;
import org.apache.isis.viewer.dnd.help.InternalHelpViewer;
import org.apache.isis.viewer.dnd.histogram.HistogramSpecification;
import org.apache.isis.viewer.dnd.icon.LargeIconSpecification;
import org.apache.isis.viewer.dnd.icon.RootIconSpecification;
import org.apache.isis.viewer.dnd.icon.SubviewIconSpecification;
import org.apache.isis.viewer.dnd.list.InternalListSpecification;
import org.apache.isis.viewer.dnd.list.SimpleListSpecification;
import org.apache.isis.viewer.dnd.service.PerspectiveContent;
import org.apache.isis.viewer.dnd.service.ServiceIconSpecification;
import org.apache.isis.viewer.dnd.table.WindowTableSpecification;
import org.apache.isis.viewer.dnd.tree.ListWithDetailSpecification;
import org.apache.isis.viewer.dnd.tree.TreeSpecification;
import org.apache.isis.viewer.dnd.tree.TreeWithDetailSpecification;
import org.apache.isis.viewer.dnd.tree2.CollectionTreeNodeSpecification;
import org.apache.isis.viewer.dnd.tree2.TreeNodeSpecification;
import org.apache.isis.viewer.dnd.util.Properties;
import org.apache.isis.viewer.dnd.view.Axes;
import org.apache.isis.viewer.dnd.view.ShutdownListener;
import org.apache.isis.viewer.dnd.view.Toolkit;
import org.apache.isis.viewer.dnd.view.View;
import org.apache.isis.viewer.dnd.view.ViewRequirement;
import org.apache.isis.viewer.dnd.view.ViewSpecification;
import org.apache.isis.viewer.dnd.view.ViewUpdateNotifier;
import org.apache.isis.viewer.dnd.view.base.ViewUpdateNotifierImpl;
import org.apache.isis.viewer.dnd.view.message.DetailedMessageViewSpecification;
import org.apache.isis.viewer.dnd.view.message.MessageDialogSpecification;
import org.apache.isis.viewer.dnd.viewer.SkylarkViewFactory;
import org.apache.isis.viewer.dnd.viewer.basic.DragContentSpecification;
import org.apache.isis.viewer.dnd.viewer.basic.InnerWorkspaceSpecification;
import org.apache.isis.viewer.dnd.viewer.basic.RootWorkspaceSpecification;
import org.apache.isis.viewer.dnd.viewer.basic.WrappedTextFieldSpecification;
public class DndViewer extends IsisViewerAbstract {
private static final Logger LOG = LoggerFactory.getLogger(DndViewer.class);
// Prefix for configuration keys that override individual view specifications.
private static final String SPECIFICATION_BASE = Properties.PROPERTY_BASE + "specification.";
private ViewUpdateNotifier updateNotifier; // created in init()
private ViewerFrame frame; // top-level AWT window; created in openViewer()
private XViewer viewer; // the viewer rendered inside the frame
private ShutdownListener shutdownListener; // receives logOut()/quit() callbacks
private Bounds bounds; // initial window bounds; lazily computed if not injected
private HelpViewer helpViewer; // defaults to an InternalHelpViewer if not injected
private boolean acceptingLogIns = true; // login-loop guard; cleared by quit()
// ////////////////////////////////////
// shutdown
// ////////////////////////////////////
@Override
public void shutdown() {
    // Terminates the JVM immediately; any cleanup must have happened before
    // this is called (see quit()).
    System.exit(0);
}
/**
 * Computes the initial window bounds from the screen size, capped at
 * 800x600 with an 80px margin, unless overridden by the
 * "initial.size"/"initial.location" configuration properties.
 */
private Bounds calculateInitialWindowSize(final Dimension screenSize) {
    // On very wide setups (aspect ratio >= 2, e.g. spanned monitors),
    // restrict the usable width to a single-screen-like slice.
    int usableWidth = screenSize.width;
    final int usableHeight = screenSize.height;
    final int aspect = screenSize.width / screenSize.height;
    if (aspect >= 2) {
        usableWidth = screenSize.width / aspect;
    }
    final Size defaultWindowSize = new Size(usableWidth - 80, usableHeight - 80);
    defaultWindowSize.limitWidth(800);
    defaultWindowSize.limitHeight(600);
    final Size size = Properties.getSize(Properties.PROPERTY_BASE + "initial.size", defaultWindowSize);
    final Location location = Properties.getLocation(Properties.PROPERTY_BASE + "initial.location", new Location(100, 100));
    return new Bounds(location, size);
}
/**
 * Instantiates the view specification for the given name, preferring a
 * class name configured under SPECIFICATION_BASE + name and falling back
 * to the supplied default class.
 */
private ViewSpecification loadSpecification(final String name, final Class<?> cls) {
    final String configuredClassName = IsisContext.getConfiguration().getString(SPECIFICATION_BASE + name);
    final String className = (configuredClassName != null) ? configuredClassName : cls.getName();
    return InstanceUtil.createInstance(className, ViewSpecification.class);
}
// Saves open views, closes the authenticated session and the viewer, then
// wakes the init() thread (blocked in wait()) so it loops back to the login
// prompt. Synchronized because it shares the monitor with init()'s wait().
private synchronized void logOut() {
    LOG.info("user log out");
    saveDesktop();
    final AuthenticationSession session = IsisContext.getAuthenticationSession();
    getAuthenticationManager().closeSession(session);
    viewer.close();
    // Pairs with the wait() in init().
    notify();
}
/**
 * Persists the currently open objects, provided a session is active;
 * without a session there is nothing that can be saved.
 */
private void saveDesktop() {
    if (IsisContext.inSession()) {
        viewer.saveOpenObjects();
    }
}
// Saves open views, stops the login loop in init(), and terminates the JVM
// via shutdown() (which calls System.exit).
protected void quit() {
    LOG.info("user quit");
    saveDesktop();
    acceptingLogIns = false;
    shutdown();
}
@Override
public synchronized void init() {
    super.init();
    // Constructed for their side effects only — presumably these register
    // themselves as the active image factory / toolkit. TODO confirm.
    new AwtImageFactory(IsisContext.getTemplateImageLoader());
    new AwtToolkit();
    setShutdownListener(new ShutdownListener() {
        @Override
        public void logOut() {
            DndViewer.this.logOut();
        }
        @Override
        public void quit() {
            DndViewer.this.quit();
        }
    });
    updateNotifier = new ViewUpdateNotifierImpl();
    // NOTE(review): this check can never fire — updateNotifier was assigned
    // on the previous line; likely a leftover from an injected-dependency era.
    if (updateNotifier == null) {
        throw new NullPointerException("No update notifier set for " + this);
    }
    if (shutdownListener == null) {
        throw new NullPointerException("No shutdown listener set for " + this);
    }
    // Login loop: each successful login opens a viewer and blocks here until
    // logOut() calls notify(); quit() clears acceptingLogIns to exit the loop.
    while (acceptingLogIns) {
        if (login()) {
            openViewer();
            try {
                wait();
            } catch (final InterruptedException e) {
            }
        } else {
            quit();
        }
    }
}
// ////////////////////////////////////
// login
// ////////////////////////////////////
// TODO: nasty
private boolean loggedInUsingLogonFixture = false;
/**
* TODO: there is similar code in
* <tt>AuthenticationSessionLookupStrategyDefault</tt>; should try to
* combine somehow...
*/
/**
 * Attempts to establish a session: first from an implicit request (args,
 * exploration mode, or logon fixture), then by prompting via the login
 * dialog. Opens the session on success.
 *
 * @return true when a session was opened, false when the user gave up
 */
private boolean login() {
    final AuthenticationRequest request = determineRequestIfPossible();
    // An implicit request may be enough to authenticate without prompting.
    AuthenticationSession session = getAuthenticationManager().authenticate(request);
    clearAuthenticationRequestViaArgs();
    if (session == null) {
        session = loginDialogPrompt(request);
    }
    if (session == null) {
        return false;
    }
    IsisContext.openSession(session);
    return true;
}
/**
 * Shows the modal login dialog, pre-filling the user name from the given
 * request when available, and returns whatever session the dialog produced
 * (null if authentication failed or was cancelled).
 */
private AuthenticationSession loginDialogPrompt(final AuthenticationRequest request) {
    final LoginDialog dialog = new LoginDialog(getAuthenticationManager());
    if (request != null) {
        dialog.setUserName(request.getName());
    }
    dialog.setVisible(true);
    dialog.toFront();
    dialog.login();
    dialog.setVisible(false);
    dialog.dispose();
    return dialog.getSession();
}
/**
 * Builds an implicit authentication request when one can be derived without
 * prompting, in priority order: command-line arguments, exploration mode,
 * then a logon fixture (honoured only once per run).
 *
 * @return the request, or null if the user must be prompted
 */
private AuthenticationRequest determineRequestIfPossible() {
    // command line args
    AuthenticationRequest request = getAuthenticationRequestViaArgs();
    // (removed a stray empty statement ';' that followed the line above)
    // exploration mode implies an exploration request, optionally seeded
    // with the logon fixture
    if (request == null && getDeploymentType().isExploring()) {
        request = new AuthenticationRequestExploration(getLogonFixture());
    }
    // logon fixture provided — only used for the first login of the run
    if (request == null && getLogonFixture() != null && !loggedInUsingLogonFixture) {
        loggedInUsingLogonFixture = true;
        request = new AuthenticationRequestLogonFixture(getLogonFixture());
    }
    return request;
}
// Builds the frame and viewer for the freshly opened session, wires them
// together, installs the view factory, and shows the user's perspective.
// The ordering below is load-bearing: pack() before setBounds(), viewer
// configuration before frame.setViewer(), and setupViewFactory() before the
// root view is created.
private void openViewer() {
    frame = new ViewerFrame();
    if (bounds == null) {
        bounds = calculateInitialWindowSize(frame.getToolkit().getScreenSize());
    }
    frame.pack(); // forces insets to be calculated, hence need to then set
    // bounds
    frame.setBounds(bounds.getX(), bounds.getY(), bounds.getWidth(), bounds.getHeight());
    viewer = (XViewer) Toolkit.getViewer();
    viewer.setRenderingArea(frame);
    viewer.setUpdateNotifier(updateNotifier);
    viewer.setListener(shutdownListener);
    viewer.setExploration(isInExplorationMode());
    viewer.setPrototype(isInPrototypeMode());
    if (helpViewer == null) {
        helpViewer = new InternalHelpViewer(viewer);
    }
    viewer.setHelpViewer(helpViewer);
    frame.setViewer(viewer);
    final AuthenticationSession currentSession = IsisContext.getAuthenticationSession();
    if (currentSession == null) {
        throw new NullPointerException("No session for " + this);
    }
    setupViewFactory();
    final UserProfile userProfiler = IsisContext.getUserProfile();
    // TODO viewer should be shown during init() (so login can take place on
    // main window, and can quit
    // before
    // logging in), and should be updated during start to show context.
    // TODO resolving should be done by the views?
    // resolveApplicationContextCollection(rootObject, "services");
    // resolveApplicationContextCollection(rootObject, "objects");
    final RootWorkspaceSpecification spec = new RootWorkspaceSpecification();
    final PerspectiveContent content = new PerspectiveContent(userProfiler.getPerspective());
    if (spec.canDisplay(new ViewRequirement(content, ViewRequirement.CLOSED))) {
        // View view = spec.createView(new RootObject(rootObject), null);
        final View view = spec.createView(content, new Axes(), -1);
        viewer.setRootView(view);
    } else {
        throw new IsisException();
    }
    viewer.init();
    final String name = userProfiler.getPerspective().getName();
    frame.setTitle(name);
    frame.init();
    viewer.initSize();
    viewer.scheduleRepaint();
    frame.setVisible(true);
    frame.toFront();
}
// True when the framework was deployed in exploration mode.
private boolean isInExplorationMode() {
    return getDeploymentType().isExploring();
}
// True when the framework was deployed in prototyping mode.
private boolean isInPrototypeMode() {
    return getDeploymentType().isPrototyping();
}
// Overrides the initial window bounds that openViewer() would otherwise compute.
public void setBounds(final Bounds bounds) {
    this.bounds = bounds;
}
// Injects an alternative help viewer; otherwise an InternalHelpViewer is
// created in openViewer().
public void setHelpViewer(final HelpViewer helpViewer) {
    this.helpViewer = helpViewer;
}
// Registers the listener invoked on log-out and quit; init() installs its own.
public void setShutdownListener(final ShutdownListener shutdownListener) {
    this.shutdownListener = shutdownListener;
}
    /**
     * Registers the framework-provided default view specifications with the
     * global view factory obtained from the Toolkit.
     * <p>
     * Registration order matters: the factory presumably resolves a view by
     * scanning specifications in registration order — TODO confirm against
     * SkylarkViewFactory. Optional defaults can be suppressed via the
     * configuration flag {@code SPECIFICATION_BASE + "defaults"} (true by
     * default); user-configured and user-saved specifications are loaded last.
     *
     * @throws IsisConfigurationException if a configured specification is invalid
     * @throws InstanceCreationException  if a specification class cannot be instantiated
     */
    private void setupViewFactory() throws IsisConfigurationException, InstanceCreationException {
        final SkylarkViewFactory viewFactory = (SkylarkViewFactory) Toolkit.getViewFactory();
        LOG.debug("setting up default views (provided by the framework)");
        /*
         * viewFactory.addValueFieldSpecification(loadSpecification("field.option"
         * , OptionSelectionField.Specification.class));
         * viewFactory.addValueFieldSpecification
         * (loadSpecification("field.percentage",
         * PercentageBarField.Specification.class));
         * viewFactory.addValueFieldSpecification
         * (loadSpecification("field.timeperiod",
         * TimePeriodBarField.Specification.class));
         */
        // Field-level views; each name may be overridden in configuration via loadSpecification.
        viewFactory.addSpecification(loadSpecification("field.image", ImageField.Specification.class));
        viewFactory.addSpecification(loadSpecification("field.color", ColorField.Specification.class));
        viewFactory.addSpecification(loadSpecification("field.password", PasswordFieldSpecification.class));
        viewFactory.addSpecification(loadSpecification("field.wrappedtext", WrappedTextFieldSpecification.class));
        viewFactory.addSpecification(loadSpecification("field.checkbox", CheckboxField.Specification.class));
        viewFactory.addSpecification(loadSpecification("field.date", DateFieldSpecification.class));
        viewFactory.addSpecification(loadSpecification("field.text", TextFieldSpecification.class));
        // Top-level workspace views.
        viewFactory.addSpecification(new RootWorkspaceSpecification());
        viewFactory.addSpecification(new InnerWorkspaceSpecification());
        // Optional default object/collection views, enabled unless the
        // "<SPECIFICATION_BASE>defaults" configuration flag is set to false.
        if (IsisContext.getConfiguration().getBoolean(SPECIFICATION_BASE + "defaults", true)) {
            viewFactory.addSpecification(new FieldOfSpecification());
            viewFactory.addSpecification(new InternalListSpecification());
            viewFactory.addSpecification(new SimpleListSpecification());
            viewFactory.addSpecification(new GridSpecification());
            // TBA viewFactory.addSpecification(new
            // ListWithExpandableElementsSpecification());
            // TBA
            viewFactory.addSpecification(new CalendarSpecification());
            viewFactory.addSpecification(new ListWithDetailSpecification());
            viewFactory.addSpecification(new HistogramSpecification());
            viewFactory.addSpecification(new TreeWithDetailSpecification());
            viewFactory.addSpecification(new FormSpecification());
            viewFactory.addSpecification(new FormWithTableSpecification());
            viewFactory.addSpecification(new WindowTableSpecification());
            // TBA
            viewFactory.addSpecification(new ExpandableFormSpecification());
            viewFactory.addSpecification(new InternalFormSpecification());
            viewFactory.addSpecification(new TwoPartViewSpecification());
            // TBA
            viewFactory.addSpecification(new FormWithDetailSpecification());
            viewFactory.addSpecification(new SummaryFormSpecification());
            viewFactory.addSpecification(new TreeSpecification());
            // TODO allow window form to be used for objects with limited number
            // of collections
            // viewFactory.addSpecification(new TreeWithDetailSpecification(0,
            // 3));
            // Design-mode (user-configurable) view specifications.
            viewFactory.addDesignSpecification(new GridListSpecification());
            viewFactory.addDesignSpecification(new ConfigurableObjectViewSpecification());
            viewFactory.addDesignSpecification(new PanelViewSpecification());
            viewFactory.addDesignSpecification(new NewViewSpecification());
        }
        // Dialog / message views are always registered, regardless of the defaults flag.
        viewFactory.addSpecification(new MessageDialogSpecification());
        viewFactory.addSpecification(new DetailedMessageViewSpecification());
        viewFactory.addEmptyFieldSpecification(loadSpecification("field.empty", EmptyField.Specification.class));
        viewFactory.addSpecification(loadSpecification("icon.object", RootIconSpecification.class));
        viewFactory.addSpecification(loadSpecification("icon.subview", SubviewIconSpecification.class));
        viewFactory.addSpecification(loadSpecification("icon.collection", ExpandableListSpecification.class));
        viewFactory.addSpecification(new LargeIconSpecification());
        viewFactory.addSpecification(loadSpecification("icon.service", ServiceIconSpecification.class));
        viewFactory.setDragContentSpecification(loadSpecification("drag-content", DragContentSpecification.class));
        // TODO remove or move to better position
        final ViewSpecification[] specifications = CollectionTreeNodeSpecification.create();
        viewFactory.addSpecification(specifications[0]);
        viewFactory.addSpecification(specifications[1]);
        viewFactory.addSpecification(new TreeNodeSpecification());
        // Finally, register specs named in configuration and any user-saved views.
        installSpecsFromConfiguration(viewFactory);
        viewFactory.loadUserViewSpecifications();
    }
private void installSpecsFromConfiguration(final SkylarkViewFactory viewFactory) {
final String viewParams = IsisContext.getConfiguration().getString(SPECIFICATION_BASE + "view");
if (viewParams != null) {
final StringTokenizer st = new StringTokenizer(viewParams, ",");
while (st.hasMoreTokens()) {
final String specName = st.nextToken().trim();
if (specName != null && !specName.trim().equals("")) {
viewFactory.addSpecification(specName);
}
}
}
}
}
| apache-2.0 |
tomaszrogalski/gwt-jackson | gwt-jackson/src/test/java/com/github/nmorel/gwtjackson/jackson/annotations/JsonPropertyOrderJacksonTest.java | 2657 | /*
* Copyright 2013 Nicolas Morel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nmorel.gwtjackson.jackson.annotations;
import com.github.nmorel.gwtjackson.jackson.AbstractJacksonTest;
import com.github.nmorel.gwtjackson.shared.annotations.JsonPropertyOrderTester;
import org.junit.Ignore;
import org.junit.Test;
/**
* @author Nicolas Morel
*/
/**
 * Runs the shared {@link JsonPropertyOrderTester} scenarios against the
 * plain-Jackson writer/reader supplied by {@link AbstractJacksonTest}, to
 * verify gwt-jackson's property-ordering behaviour matches Jackson's.
 * Scenarios where Jackson deliberately differs are {@code @Ignore}d.
 */
public class JsonPropertyOrderJacksonTest extends AbstractJacksonTest {
    // Ignored: Jackson's default ("natural") property order differs from gwt-jackson's.
    @Test
    @Ignore("jackson has a different natural order")
    public void testSerializeBeanWithPropertiesNotOrdered() {
        JsonPropertyOrderTester.INSTANCE
                .testSerializeBeanWithPropertiesNotOrdered( createWriter( JsonPropertyOrderTester.BeanWithPropertiesNotOrdered.class ) );
    }
    // @JsonPropertyOrder listing every property explicitly.
    @Test
    public void testSerializeBeanWithDefinedOrder() {
        JsonPropertyOrderTester.INSTANCE
                .testSerializeBeanWithDefinedOrder( createWriter( JsonPropertyOrderTester.BeanWithDefinedOrder.class ) );
    }
    // @JsonPropertyOrder listing only some properties; the rest follow in default order.
    @Test
    public void testSerializeBeanWithSomeDefinedOrder() {
        JsonPropertyOrderTester.INSTANCE
                .testSerializeBeanWithSomeDefinedOrder( createWriter( JsonPropertyOrderTester.BeanWithSomeDefinedOrder.class ) );
    }
    // @JsonPropertyOrder(alphabetic = true) with no explicit list.
    @Test
    public void testSerializeBeanWithAlphabeticOrder() {
        JsonPropertyOrderTester.INSTANCE
                .testSerializeBeanWithAlphabeticOrder( createWriter( JsonPropertyOrderTester.BeanWithAlphabeticOrder.class ) );
    }
    // Explicit prefix followed by alphabetic ordering of the remainder.
    @Test
    public void testSerializeBeanWithSomeDefinedAndRestAlphabeticOrder() {
        JsonPropertyOrderTester.INSTANCE
                .testSerializeBeanWithSomeDefinedAndRestAlphabeticOrder( createWriter( JsonPropertyOrderTester
                        .BeanWithSomeDefinedAndRestAlphabeticOrder.class ) );
    }
    // Ignored: Jackson does not yet enforce required properties on deserialization.
    @Test
    @Ignore("jackson doesn't support it yet")
    public void testDeserializeBeanWithMissingRequiredProperties() {
        JsonPropertyOrderTester.INSTANCE
                .testDeserializeBeanWithMissingRequiredProperties( createReader( JsonPropertyOrderTester.BeanWithPropertiesNotOrdered
                        .class ) );
    }
}
| apache-2.0 |
ocono-tech/presto | presto-main/src/main/java/com/facebook/presto/type/DecimalParametricType.java | 1752 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.type;
import com.facebook.presto.spi.type.DecimalType;
import com.facebook.presto.spi.type.ParametricType;
import com.facebook.presto.spi.type.StandardTypes;
import com.facebook.presto.spi.type.Type;
import com.facebook.presto.spi.type.TypeParameter;
import java.util.List;
/**
 * Parametric type handler for SQL DECIMAL. Accepts zero, one or two literal
 * parameters — (), (precision) or (precision, scale) — and delegates to the
 * corresponding {@link DecimalType} factory method.
 */
public class DecimalParametricType
        implements ParametricType
{
    public static final DecimalParametricType DECIMAL = new DecimalParametricType();

    @Override
    public String getName()
    {
        return StandardTypes.DECIMAL;
    }

    @Override
    public Type createType(List<TypeParameter> parameters)
    {
        int parameterCount = parameters.size();
        if (parameterCount == 0) {
            // DECIMAL with default precision and scale.
            return DecimalType.createDecimalType();
        }
        if (parameterCount == 1) {
            // DECIMAL(p) — precision only, default scale.
            int precision = parameters.get(0).getLongLiteral().intValue();
            return DecimalType.createDecimalType(precision);
        }
        if (parameterCount == 2) {
            // DECIMAL(p, s) — explicit precision and scale.
            int precision = parameters.get(0).getLongLiteral().intValue();
            int scale = parameters.get(1).getLongLiteral().intValue();
            return DecimalType.createDecimalType(precision, scale);
        }
        throw new IllegalArgumentException("Expected 0, 1 or 2 parameters for DECIMAL type constructor.");
    }
}
| apache-2.0 |
contextio/contextio-ruby | lib/contextio/message_collection.rb | 1242 | require_relative 'api/resource_collection'
require_relative 'message'
class ContextIO
  # Collection wrapper for Context.IO messages belonging to an account.
  # Gains listing/lazy-loading behaviour from API::ResourceCollection.
  class MessageCollection
    include ContextIO::API::ResourceCollection
    self.resource_class = ContextIO::Message
    self.association_name = :messages
    belongs_to :account
    # You can pass a Folder object and this'll use the source from it, or you
    # can pass a folder label and a source label (from the API), if that's
    # easier for you.
    #
    # This is private because AFAICT, the oauth gem doesn't do POST requests
    # with Content-Type = 'multipart/form-data' easily. I think it might behoove
    # us to replace that dependency in the future, anyway, and we can fix this
    # at that point. In any case, this functionality was missing from previous
    # releases of the contextio gem, too.
    #
    # raw_message - the full RFC-822 message to upload.
    # folder      - a ContextIO::Folder (source inferred) or a folder label String.
    # source      - optional source label; required semantics when folder is a
    #               String — TODO confirm the API rejects a nil source here.
    #
    # Returns the API's 'success' flag (truthy when the upload succeeded).
    def create(raw_message, folder, source = nil)
      if folder.is_a?(ContextIO::Folder)
        # Folder object carries both its own name and its source's label.
        folder_label = folder.name
        source_label = source || folder.source.label
      else
        folder_label = folder.to_s
        source_label = source.to_s
      end
      api.request(:post, resource_url, message: raw_message, dst_folder: folder_label, dst_source: source_label)['success']
    end
    private :create
  end
end
| apache-2.0 |
nickjvm/grommet | src/js/components/icons/base/DocumentExe.js | 1828 | // (C) Copyright 2014-2015 Hewlett Packard Enterprise Development LP
import React, { Component, PropTypes } from 'react';
import classnames from 'classnames';
import CSSClassnames from '../../../utils/CSSClassnames';
import Intl from '../../../utils/Intl';
import Props from '../../../utils/Props';
const CLASS_ROOT = CSSClassnames.CONTROL_ICON;
const COLOR_INDEX = CSSClassnames.COLOR_INDEX;
// Grommet control icon "DocumentExe": renders an accessible inline SVG.
// Size, color index and responsiveness are mapped onto grommet CSS classes;
// the accessible title falls back to the intl message for 'document-exe'.
export default class Icon extends Component {
  render () {
    const { className, colorIndex } = this.props;
    let { a11yTitle, size, responsive } = this.props;
    let { intl } = this.context;
    // Compose BEM-style classes: base icon class, icon-specific modifier,
    // caller-supplied classes, then conditional size/responsive/color modifiers.
    const classes = classnames(
      CLASS_ROOT,
      `${CLASS_ROOT}-document-exe`,
      className,
      {
        [`${CLASS_ROOT}--${size}`]: size,
        [`${CLASS_ROOT}--responsive`]: responsive,
        [`${COLOR_INDEX}-${colorIndex}`]: colorIndex
      }
    );
    // Default the ARIA label from the intl catalog when none was provided.
    a11yTitle = a11yTitle || Intl.getMessage(intl, 'document-exe');
    // Forward only the props that are not consumed by this component.
    const restProps = Props.omit(this.props, Object.keys(Icon.propTypes));
    return <svg {...restProps} version="1.1" viewBox="0 0 24 24" width="24px" height="24px" role="img" className={classes} aria-label={a11yTitle}><path fill="none" stroke="#000000" strokeWidth="2" d="M4.99787498,8.99999999 L4.99787498,0.999999992 L19.4999998,0.999999992 L22.9999998,4.50000005 L23,23 L4,23 M18,1 L18,6 L23,6 M14.25,11.5 L8.25,18.5 M8.25,11.5 L14.25,18.5 M20.5,12 L16.5,12 L16.5,18 L20.5,18 M19.5,15 L16.5,15 M7,12 L3,12 L3,18 L7,18 M6,15 L3,15"/></svg>;
  }
};
// Pull the intl context so the default a11y title can be localized.
Icon.contextTypes = {
  intl: PropTypes.object
};
Icon.defaultProps = {
  responsive: true
};
Icon.displayName = 'DocumentExe';
// Marker used by grommet tooling to identify icon components.
Icon.icon = true;
Icon.propTypes = {
  a11yTitle: PropTypes.string,
  colorIndex: PropTypes.string,
  size: PropTypes.oneOf(['small', 'medium', 'large', 'xlarge', 'huge']),
  responsive: PropTypes.bool
};
| apache-2.0 |
adinath/AxonFramework | core/src/main/java/org/axonframework/commandhandling/GenericCommandMessage.java | 3522 | /*
* Copyright (c) 2010-2016. Axon Framework
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.axonframework.commandhandling;
import org.axonframework.messaging.GenericMessage;
import org.axonframework.messaging.Message;
import org.axonframework.messaging.MessageDecorator;
import org.axonframework.messaging.MetaData;
import java.util.Map;
/**
* Implementation of the CommandMessage that takes all properties as constructor parameters.
*
* @param <T> The type of payload contained in this Message
* @author Allard Buijze
* @since 2.0
*/
public class GenericCommandMessage<T> extends MessageDecorator<T> implements CommandMessage<T> {
    // Name under which command handlers are resolved; defaults to the
    // payload's fully-qualified class name when not given explicitly.
    private final String commandName;
    /**
     * Returns the given command as a CommandMessage. If {@code command} already implements CommandMessage, it is
     * returned as-is. Otherwise, the given {@code command} is wrapped into a GenericCommandMessage as its
     * payload.
     *
     * @param command the command to wrap as CommandMessage
     * @return a CommandMessage containing given {@code command} as payload, or {@code command} if it already implements
     * CommandMessage.
     */
    @SuppressWarnings("unchecked")
    public static <C> CommandMessage<C> asCommandMessage(Object command) {
        if (CommandMessage.class.isInstance(command)) {
            return (CommandMessage<C>) command;
        }
        return new GenericCommandMessage<>((C) command, MetaData.emptyInstance());
    }
    /**
     * Create a CommandMessage with the given {@code command} as payload and empty metaData
     *
     * @param payload the payload for the Message
     */
    public GenericCommandMessage(T payload) {
        this(payload, MetaData.emptyInstance());
    }
    /**
     * Create a CommandMessage with the given {@code command} as payload.
     *
     * @param payload the payload for the Message
     * @param metaData The meta data for this message
     */
    public GenericCommandMessage(T payload, Map<String, ?> metaData) {
        this(new GenericMessage<>(payload, metaData), payload.getClass().getName());
    }
    /**
     * Create a CommandMessage from the given {@code delegate} message containing payload, metadata and message
     * identifier, and the given {@code commandName}.
     *
     * @param delegate the delegate message
     * @param commandName The name of the command
     */
    public GenericCommandMessage(Message<T> delegate, String commandName) {
        super(delegate);
        this.commandName = commandName;
    }
    @Override
    public String getCommandName() {
        return commandName;
    }
    // The with/and variants return new instances: messages are immutable, so
    // metadata changes never mutate this message.
    @Override
    public GenericCommandMessage<T> withMetaData(Map<String, ?> metaData) {
        return new GenericCommandMessage<>(getDelegate().withMetaData(metaData), commandName);
    }
    @Override
    public GenericCommandMessage<T> andMetaData(Map<String, ?> metaData) {
        return new GenericCommandMessage<>(getDelegate().andMetaData(metaData), commandName);
    }
}
| apache-2.0 |
etirelli/jbpm | jbpm-human-task/jbpm-human-task-core/src/test/java/org/jbpm/services/task/TaskReminderBaseTest.java | 6582 | /*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.services.task;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.mail.internet.MimeMessage;
import org.jbpm.services.task.impl.factories.TaskFactory;
import org.jbpm.services.task.util.CountDownTaskEventListener;
import org.junit.Test;
import org.kie.internal.task.api.model.InternalTask;
import org.subethamail.wiser.Wiser;
import org.subethamail.wiser.WiserMessage;
/**
 * Base test verifying that task reminders produce the expected notification
 * e-mails. Tasks are built from MVEL fixtures; e-mails are captured by an
 * embedded Wiser SMTP server (started by the concrete subclass — TODO confirm).
 */
public abstract class TaskReminderBaseTest extends HumanTaskServicesBaseTest {
    // Embedded SMTP server capturing outgoing reminder mails.
    protected Wiser wiser;
    @Test(timeout=10000)
    public void testTaskReminderWithoutNotification() throws Exception {
        // Listener released once the (single) reminder has been processed.
        CountDownTaskEventListener countDownListener = new CountDownTaskEventListener(1, false, true);
        addCountDownListner(countDownListener);
        Map<String, Object> vars = new HashMap<String, Object>();
        vars.put("now", new Date());
        Reader reader = new InputStreamReader(getClass().getResourceAsStream(MvelFilePath.ReminderWithoutNotification));
        InternalTask task = (InternalTask) TaskFactory.evalTask(reader, vars);
        System.out.println("testTaskReminderWithoutNotification " + task.getTaskData().getStatus());
        // Fixture defines no deadlines, so the reminder uses a default notification.
        assertNull(task.getDeadlines());
        long taskId = taskService.addTask(task, new HashMap<String, Object>());
        taskService.executeReminderForTask(taskId, "Luke Cage");
        countDownListener.waitTillCompleted();
        // Exactly one mail, to the task's assignee.
        assertEquals(1, wiser.getMessages().size());
        String receiver = wiser.getMessages().get(0).getEnvelopeReceiver();
        assertEquals("tony@domain.com", receiver);
        MimeMessage msg = ((WiserMessage) wiser.getMessages().get(0)).getMimeMessage();
        assertEquals("You have a task ( Simple Test Task ) of process ( taskReminder )",
                msg.getSubject());
    }
    @Test(timeout=10000)
    public void testTaskReminderWithNotificationByTaskNostarted() throws Exception {
        CountDownTaskEventListener countDownListener = new CountDownTaskEventListener(1, false, true);
        addCountDownListner(countDownListener);
        Map<String, Object> vars = new HashMap<String, Object>();
        vars.put("now", new Date());
        Reader reader = new InputStreamReader(getClass().getResourceAsStream(MvelFilePath
                .ReminderWithNotificationReserved));
        InternalTask task = (InternalTask) TaskFactory.evalTask(reader, vars);
        System.out.println("testTaskReminderWithNotificationByTaskNostarted " + task.getTaskData().getStatus());
        // Fixture defines both a start and an end deadline with notifications.
        assertEquals(1, task.getDeadlines().getEndDeadlines().size());
        assertEquals(1, task.getDeadlines().getStartDeadlines().size());
        long taskId = taskService.addTask(task, new HashMap<String, Object>());
        taskService.executeReminderForTask(taskId, "Luke Cage");
        countDownListener.waitTillCompleted();
        // Both configured recipients must be notified; delivery order is not guaranteed.
        assertEquals(2, wiser.getMessages().size());
        final List<String> list = new ArrayList<String>(2);
        list.add(wiser.getMessages().get(0).getEnvelopeReceiver());
        list.add(wiser.getMessages().get(1).getEnvelopeReceiver());
        assertTrue(list.contains("tony@domain.com"));
        assertTrue(list.contains("darth@domain.com"));
        MimeMessage msg = ((WiserMessage) wiser.getMessages().get(0)).getMimeMessage();
        assertEquals("ReminderWithNotificationReserved:you have new task to be started", msg.getSubject());
        assertEquals("task is not started", msg.getContent());
        msg = ((WiserMessage) wiser.getMessages().get(1)).getMimeMessage();
        assertEquals("ReminderWithNotificationReserved:you have new task to be started", msg.getSubject());
        assertEquals("task is not started", msg.getContent());
    }
    @Test(timeout=10000)
    public void testTaskReminderWithNotificationByTaskNoCompleted() throws Exception {
        CountDownTaskEventListener countDownListener = new CountDownTaskEventListener(1, false, true);
        addCountDownListner(countDownListener);
        Map<String, Object> vars = new HashMap<String, Object>();
        vars.put("now", new Date());
        Reader reader = new InputStreamReader(getClass().getResourceAsStream(MvelFilePath
                .ReminderWithNotificationInProgress));
        InternalTask task = (InternalTask) TaskFactory.evalTask(reader, vars);
        System.out.println("testTaskReminderWithNotificationByTaskNoCompleted " + task.getTaskData().getStatus());
        assertEquals(1, task.getDeadlines().getEndDeadlines().size());
        assertEquals(1, task.getDeadlines().getStartDeadlines().size());
        long taskId = taskService.addTask(task, new HashMap<String, Object>());
        taskService.executeReminderForTask(taskId, "Luke Cage");
        countDownListener.waitTillCompleted();
        assertEquals(2, wiser.getMessages().size());
        List<String> list = new ArrayList<String>(2);
        list.add(wiser.getMessages().get(0).getEnvelopeReceiver());
        list.add(wiser.getMessages().get(1).getEnvelopeReceiver());
        assertTrue(list.contains("tony@domain.com"));
        assertTrue(list.contains("darth@domain.com"));
        MimeMessage msg = ((WiserMessage) wiser.getMessages().get(0)).getMimeMessage();
        assertEquals("ReminderWithNotificationInProgress:you have new task to be completed",
                msg.getSubject());
        assertEquals("task is not completed", msg.getContent());
        msg = ((WiserMessage) wiser.getMessages().get(1)).getMimeMessage();
        assertEquals("ReminderWithNotificationInProgress:you have new task to be completed",
                msg.getSubject());
        assertEquals("task is not completed", msg.getContent());
    }
}
| apache-2.0 |
kjniemi/activemq-artemis | artemis-core-client/src/main/java/org/apache/activemq/artemis/core/message/impl/CoreMessage.java | 39609 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.core.message.impl;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Objects;
import java.util.Set;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.Unpooled;
import io.netty.buffer.UnpooledByteBufAllocator;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.api.core.ActiveMQBuffers;
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.ActiveMQPropertyConversionException;
import org.apache.activemq.artemis.api.core.ICoreMessage;
import org.apache.activemq.artemis.api.core.Message;
import org.apache.activemq.artemis.api.core.RefCountMessage;
import org.apache.activemq.artemis.api.core.RoutingType;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.core.buffers.impl.ChannelBufferWrapper;
import org.apache.activemq.artemis.core.buffers.impl.ResetLimitWrappedActiveMQBuffer;
import org.apache.activemq.artemis.core.message.LargeBodyReader;
import org.apache.activemq.artemis.core.persistence.CoreMessageObjectPools;
import org.apache.activemq.artemis.core.persistence.Persister;
import org.apache.activemq.artemis.core.protocol.core.impl.PacketImpl;
import org.apache.activemq.artemis.reader.MessageUtil;
import org.apache.activemq.artemis.utils.DataConstants;
import org.apache.activemq.artemis.utils.UUID;
import org.apache.activemq.artemis.utils.collections.TypedProperties;
import org.jboss.logging.Logger;
/** Note: you shouldn't change properties using multi-threads. Change your properties before you can send it to multiple
* consumers */
public class CoreMessage extends RefCountMessage implements ICoreMessage {
public static final int BUFFER_HEADER_SPACE = PacketImpl.PACKET_HEADERS_SIZE;
private volatile int memoryEstimate = -1;
private static final Logger logger = Logger.getLogger(CoreMessage.class);
// There's an integer with the number of bytes for the body
public static final int BODY_OFFSET = DataConstants.SIZE_INT;
/** That is the readInto for the whole message, including properties..
it does not include the buffer for the Packet send and receive header on core protocol */
protected ByteBuf buffer;
private volatile boolean validBuffer = false;
protected volatile ResetLimitWrappedActiveMQBuffer writableBuffer;
Object body;
protected int endOfBodyPosition = -1;
protected int messageIDPosition = -1;
protected long messageID;
protected SimpleString address;
protected byte type;
protected boolean durable;
/**
* GMT milliseconds at which this message expires. 0 means never expires *
*/
private long expiration;
protected long timestamp;
protected byte priority;
private UUID userID;
private int propertiesLocation = -1;
protected volatile TypedProperties properties;
private final CoreMessageObjectPools coreMessageObjectPools;
private volatile Object owner;
   /** Creates an empty message that shares the given object pools for SimpleString interning. */
   public CoreMessage(final CoreMessageObjectPools coreMessageObjectPools) {
      this.coreMessageObjectPools = coreMessageObjectPools;
   }
   /** Creates an empty message with no shared object pools. */
   public CoreMessage() {
      this.coreMessageObjectPools = null;
   }
   /** On core there's no delivery annotation */
   @Override
   public Object getAnnotation(SimpleString key) {
      return getObjectProperty(key);
   }
   /** On core there's no delivery annotation */
   @Override
   public Object removeAnnotation(SimpleString key) {
      return removeProperty(key);
   }
   // Clears internal properties; flags the encoded buffer as stale only if
   // something was actually removed.
   @Override
   public void clearInternalProperties() {
      final TypedProperties properties = this.properties;
      if (properties != null && properties.clearInternalProperties()) {
         messageChanged();
      }
   }
   @Override
   public Persister<Message> getPersister() {
      return CoreMessagePersister.getInstance();
   }
   /** Allocates the backing buffer and positions reader/writer at the body offset. */
   public CoreMessage initBuffer(final int initialMessageBufferSize) {
      buffer = Unpooled.buffer(initialMessageBufferSize);
      // There's a bug in netty which means a dynamic buffer won't resize until you write a byte
      buffer.writeByte((byte) 0);
      buffer.setIndex(BODY_OFFSET, BODY_OFFSET);
      return this;
   }
   // Reply-to and routing type are stored as regular message properties,
   // not as dedicated fields.
   @Override
   public SimpleString getReplyTo() {
      return getSimpleStringProperty(MessageUtil.REPLYTO_HEADER_NAME);
   }
   @Override
   public RoutingType getRoutingType() {
      if (containsProperty(Message.HDR_ROUTING_TYPE)) {
         return RoutingType.getType(getByteProperty(Message.HDR_ROUTING_TYPE));
      }
      return null;
   }
   // null clears the routing type property instead of storing a null byte.
   @Override
   public Message setRoutingType(RoutingType routingType) {
      if (routingType == null) {
         removeProperty(Message.HDR_ROUTING_TYPE);
      } else {
         putByteProperty(Message.HDR_ROUTING_TYPE, routingType.getType());
      }
      return this;
   }
   @Override
   public CoreMessage setReplyTo(SimpleString address) {
      if (address == null) {
         getProperties().removeProperty(MessageUtil.REPLYTO_HEADER_NAME);
      } else {
         putStringProperty(MessageUtil.REPLYTO_HEADER_NAME, address);
      }
      return this;
   }
   // Adopts an incoming wire buffer (retaining it) and decodes headers/properties.
   @Override
   public void receiveBuffer(ByteBuf buffer) {
      this.buffer = buffer;
      this.buffer.retain();
      decode(false);
   }
   /** This will fix the incoming body of 1.x messages */
   @Override
   public void receiveBuffer_1X(ByteBuf buffer) {
      this.buffer = buffer;
      this.buffer.retain();
      decode(true);
      // 1.x layout differs, so the received bytes cannot be re-sent as-is.
      validBuffer = false;
   }
   // Read-only slice over just the body region (excludes the packet header space).
   @Override
   public ActiveMQBuffer getReadOnlyBodyBuffer() {
      checkEncode();
      return new ChannelBufferWrapper(buffer.slice(BODY_OFFSET, endOfBodyPosition - BUFFER_HEADER_SPACE).setIndex(0, endOfBodyPosition - BUFFER_HEADER_SPACE).asReadOnly());
   }
   @Override
   public int getBodyBufferSize() {
      checkEncode();
      return endOfBodyPosition - BUFFER_HEADER_SPACE;
   }
   /**
    * This will return the proper buffer to represent the data of the Message. If compressed it will decompress.
    * If large, it will read from the file or streaming.
    *
    * @return the (possibly decompressed) body data; on any failure the raw
    *         read-only body buffer is returned instead and the error is logged.
    */
   @Override
   public ActiveMQBuffer getDataBuffer() {
      ActiveMQBuffer buffer;
      try {
         if (isLargeMessage()) {
            buffer = getLargeMessageBuffer();
         } else {
            buffer = getReadOnlyBodyBuffer();
         }
         // HDR_LARGE_COMPRESSED marks a zlib-compressed body that must be inflated.
         if (Boolean.TRUE.equals(getBooleanProperty(Message.HDR_LARGE_COMPRESSED))) {
            buffer = inflate(buffer);
         }
      } catch (Exception e) {
         logger.warn(e.getMessage(), e);
         return getReadOnlyBodyBuffer();
      }
      return buffer;
   }
   // Reads the entire large-message body into one heap buffer via the large-body reader.
   private ActiveMQBuffer getLargeMessageBuffer() throws ActiveMQException {
      LargeBodyReader encoder = getLargeBodyReader();
      encoder.open();
      int bodySize = (int) encoder.getSize();
      final ActiveMQBuffer buffer = new ChannelBufferWrapper(UnpooledByteBufAllocator.DEFAULT.heapBuffer(bodySize));
      buffer.byteBuf().ensureWritable(bodySize);
      final ByteBuffer nioBuffer = buffer.byteBuf().internalNioBuffer(0, bodySize);
      encoder.readInto(nioBuffer);
      buffer.writerIndex(bodySize);
      encoder.close();
      return buffer;
   }
   // zlib-inflates the given buffer; the uncompressed size is taken from the
   // HDR_LARGE_BODY_SIZE property.
   private ActiveMQBuffer inflate(ActiveMQBuffer buffer) throws DataFormatException {
      final int bytesToRead = buffer.readableBytes();
      Inflater inflater = new Inflater();
      final byte[] input = new byte[bytesToRead];
      buffer.readBytes(input);
      inflater.setInput(input);
      //get the real size of large message
      long sizeBody = getLongProperty(Message.HDR_LARGE_BODY_SIZE);
      byte[] data = new byte[(int) sizeBody];
      inflater.inflate(data);
      inflater.end();
      ActiveMQBuffer qbuff = ActiveMQBuffers.wrappedBuffer(data);
      qbuff.resetReaderIndex();
      // NOTE(review): resetWriterIndex + writeBytes re-writes the bytes the
      // buffer already wraps — looks redundant; verify against ActiveMQBuffers
      // wrappedBuffer index semantics before simplifying.
      qbuff.resetWriterIndex();
      qbuff.writeBytes(data);
      buffer = qbuff;
      return buffer;
   }
   // Group id/sequence and correlation id are stored as regular properties.
   @Override
   public SimpleString getGroupID() {
      return this.getSimpleStringProperty(Message.HDR_GROUP_ID);
   }
   @Override
   public CoreMessage setGroupID(SimpleString groupId) {
      return this.putStringProperty(Message.HDR_GROUP_ID, groupId);
   }
   // String overload interns via the shared pool when one is available.
   @Override
   public CoreMessage setGroupID(String groupId) {
      return this.setGroupID(SimpleString.toSimpleString(groupId, coreMessageObjectPools == null ? null : coreMessageObjectPools.getGroupIdStringSimpleStringPool()));
   }
   @Override
   public int getGroupSequence() {
      return containsProperty(Message.HDR_GROUP_SEQUENCE) ? getIntProperty(Message.HDR_GROUP_SEQUENCE) : 0;
   }
   @Override
   public CoreMessage setGroupSequence(int sequence) {
      return this.putIntProperty(Message.HDR_GROUP_SEQUENCE, sequence);
   }
   @Override
   public Object getCorrelationID() {
      return getObjectProperty(MessageUtil.CORRELATIONID_HEADER_NAME);
   }
   @Override
   public Message setCorrelationID(final Object correlationID) {
      putObjectProperty(MessageUtil.CORRELATIONID_HEADER_NAME, correlationID);
      return this;
   }
   /**
    * @param sendBuffer
    * @param deliveryCount Some protocols (AMQP) will have this as part of the message. ignored on core
    */
   @Override
   public synchronized void sendBuffer(ByteBuf sendBuffer, int deliveryCount) {
      checkEncode();
      sendBuffer.writeBytes(buffer, 0, buffer.writerIndex());
   }
   /**
    * Recast the message as an 1.4 message
    */
   @Override
   public synchronized void sendBuffer_1X(ByteBuf sendBuffer) {
      checkEncode();
      ByteBuf tmpBuffer = buffer.duplicate();
      // 1.x wire format: body-size prefix, then body, then total-size, then properties.
      sendBuffer.writeInt(endOfBodyPosition + DataConstants.SIZE_INT);
      tmpBuffer.readerIndex(DataConstants.SIZE_INT);
      tmpBuffer.readBytes(sendBuffer, endOfBodyPosition - BUFFER_HEADER_SPACE);
      sendBuffer.writeInt(tmpBuffer.writerIndex() + DataConstants.SIZE_INT + BUFFER_HEADER_SPACE);
      tmpBuffer.readBytes(sendBuffer, tmpBuffer.readableBytes());
      sendBuffer.readerIndex(0);
   }
   // Re-encodes the buffer if it was invalidated, then ensures the writable wrapper exists.
   protected synchronized void checkEncode() {
      if (!validBuffer) {
         encode();
      }
      internalWritableBuffer();
   }
   // Returns 0 when no scheduled delivery time is set (never null).
   @Override
   public Long getScheduledDeliveryTime() {
      Object property = getProperties().getProperty(Message.HDR_SCHEDULED_DELIVERY_TIME);
      if (property != null && property instanceof Number) {
         return ((Number) property).longValue();
      }
      return 0L;
   }
   // null or 0 clears the scheduled delivery property.
   @Override
   public CoreMessage setScheduledDeliveryTime(Long time) {
      if (time == null || time == 0) {
         getProperties().removeProperty(Message.HDR_SCHEDULED_DELIVERY_TIME);
      } else {
         putLongProperty(Message.HDR_SCHEDULED_DELIVERY_TIME, time);
      }
      return this;
   }
   // Core messages never expose their body as a stream.
   @Override
   public InputStream getBodyInputStream() {
      return null;
   }
   /**
    * {@inheritDoc}
    *
    * Returns the lazily-created writable wrapper over the body region;
    * properties are parsed first so later property access stays consistent.
    */
   @Override
   public ActiveMQBuffer getBodyBuffer() {
      // if using the writable buffer, we must parse properties
      getProperties();
      internalWritableBuffer();
      return writableBuffer;
   }
   // Double-checked lazy init of the writable body wrapper (writableBuffer is volatile).
   private void internalWritableBuffer() {
      if (writableBuffer == null) {
         synchronized (this) {
            if (writableBuffer == null) {
               ResetLimitWrappedActiveMQBuffer writableBuffer = new ResetLimitWrappedActiveMQBuffer(BODY_OFFSET, buffer.duplicate(), this);
               if (endOfBodyPosition > 0) {
                  writableBuffer.byteBuf().setIndex(BODY_OFFSET, endOfBodyPosition - BUFFER_HEADER_SPACE + BODY_OFFSET);
                  writableBuffer.resetReaderIndex();
               }
               this.writableBuffer = writableBuffer;
            }
         }
      }
   }
   // Lazily derives the end-of-body position from the writer index when unknown (< 0).
   @Override
   public int getEndOfBodyPosition() {
      if (endOfBodyPosition < 0) {
         endOfBodyPosition = getBodyBuffer().writerIndex();
      }
      return endOfBodyPosition;
   }
   @Override
   public synchronized void messageChanged() {
      //a volatile store is a costly operation: better to check if is necessary
      if (validBuffer) {
         validBuffer = false;
      }
   }
   /** Copy constructor sharing the other message's properties instance for the copy. */
   protected CoreMessage(CoreMessage other) {
      this(other, other.properties);
   }
   public CoreMessage(long id, int bufferSize) {
      this(id, bufferSize, null);
   }
   public CoreMessage(long id, int bufferSize, CoreMessageObjectPools coreMessageObjectPools) {
      this.initBuffer(bufferSize);
      this.setMessageID(id);
      this.coreMessageObjectPools = coreMessageObjectPools;
   }
   protected CoreMessage(CoreMessage other, TypedProperties copyProperties) {
      // This MUST be synchronized using the monitor on the other message to prevent it running concurrently
      // with getEncodedBuffer(), otherwise can introduce race condition when delivering concurrently to
      // many subscriptions and bridging to other nodes in a cluster
      synchronized (other) {
         this.body = other.body;
         this.endOfBodyPosition = other.endOfBodyPosition;
         internalSetMessageID(other.messageID);
         this.address = other.address;
         this.type = other.type;
         this.durable = other.durable;
         this.expiration = other.expiration;
         this.timestamp = other.timestamp;
         this.priority = other.priority;
         this.userID = other.userID;
         this.coreMessageObjectPools = other.coreMessageObjectPools;
         if (copyProperties != null) {
            this.properties = new TypedProperties(copyProperties);
         }
         if (other.buffer != null) {
            this.buffer = other.buffer.copy();
         }
      }
   }
   /** This method serves as a purpose of extension.
    * Large Message on a Core Message will have to set the messageID on the attached NewLargeMessage */
   protected void internalSetMessageID(final long messageID) {
      this.messageID = messageID;
   }
@Override
public void moveHeadersAndProperties(final Message msg) {
internalSetMessageID(msg.getMessageID());
address = msg.getAddressSimpleString();
userID = (UUID) msg.getUserID();
type = msg.toCore().getType();
durable = msg.isDurable();
expiration = msg.getExpiration();
timestamp = msg.getTimestamp();
priority = msg.getPriority();
if (msg instanceof CoreMessage) {
properties = new TypedProperties(((CoreMessage) msg).getProperties());
}
}
/** Returns a full copy; properties are decoded and the buffer re-encoded first so the copy is self-contained. */
@Override
public Message copy() {
   getProperties();
   checkEncode();
   return new CoreMessage(this);
}

@Override
public Message copy(long newID) {
   return copy().setMessageID(newID);
}

@Override
public long getExpiration() {
   return expiration;
}

@Override
public long getTimestamp() {
   return timestamp;
}

@Override
public CoreMessage setTimestamp(long timestamp) {
   this.timestamp = timestamp;
   return this;
}

@Override
public long getMessageID() {
   return messageID;
}

@Override
public byte getPriority() {
   return priority;
}

@Override
public UUID getUserID() {
   return userID;
}

@Override
public CoreMessage setUserID(Object uuid) {
   this.userID = (UUID) uuid;
   return this;
}

@Override
public String getValidatedUserID() {
   return getStringProperty(Message.HDR_VALIDATED_USER);
}

@Override
public CoreMessage setValidatedUserID(String validatedUserID) {
   putStringProperty(Message.HDR_VALIDATED_USER, value(validatedUserID));
   return this;
}

/** Sets the id; if the buffer is still valid, patches the id in place at its recorded encode position. */
@Override
public CoreMessage setMessageID(long messageID) {
   internalSetMessageID(messageID);
   if (messageIDPosition >= 0 && validBuffer) {
      buffer.setLong(messageIDPosition, messageID);
   }
   return this;
}

/** Sets the address, invalidating the encoded buffer only when the address actually changes. */
@Override
public CoreMessage setAddress(SimpleString address) {
   if (address == null && this.address == null) {
      // no-op so just return
      return this;
   }
   if (validBuffer && (address == null || !address.equals(this.address))) {
      messageChanged();
   }
   this.address = address;
   return this;
}

@Override
public SimpleString getAddressSimpleString() {
   return address;
}

@Override
public CoreMessage setExpiration(long expiration) {
   this.expiration = expiration;
   messageChanged();
   return this;
}

@Override
public CoreMessage setPriority(byte priority) {
   this.priority = priority;
   messageChanged();
   return this;
}

public CoreMessage setUserID(UUID userID) {
   this.userID = userID;
   messageChanged();
   return this;
}
/**
 * I am keeping this synchronized as the decode of the Properties is lazy
 */
public final TypedProperties getProperties() {
   // Read the field once; fall into the synchronized slow path only when still null.
   TypedProperties properties = this.properties;
   if (properties == null) {
      properties = getOrInitializeTypedProperties();
   }
   return properties;
}

// Slow path of getProperties(): decodes the properties from the buffer (at the
// recorded propertiesLocation) exactly once, under the monitor.
private synchronized TypedProperties getOrInitializeTypedProperties() {
   try {
      TypedProperties properties = this.properties;
      if (properties == null) {
         properties = new TypedProperties(INTERNAL_PROPERTY_NAMES_PREDICATE);
         if (buffer != null && propertiesLocation >= 0) {
            // Duplicate so the shared buffer's own indexes are never disturbed.
            final ByteBuf byteBuf = buffer.duplicate().readerIndex(propertiesLocation);
            properties.decode(byteBuf, coreMessageObjectPools == null ? null : coreMessageObjectPools.getPropertiesDecoderPools());
         }
         this.properties = properties;
      }
      return properties;
   } catch (Throwable e) {
      throw onCheckPropertiesError(e);
   }
}

// Diagnostic path for a failed property decode: dumps the message headers and a hex
// dump of the raw buffer, then wraps the cause in a RuntimeException for the caller to throw.
private RuntimeException onCheckPropertiesError(Throwable e) {
   // This is not an expected error, hence no specific logger created
   logger.warn("Could not decode properties for CoreMessage[messageID=" + messageID + ",durable=" + durable + ",userID=" + userID + ",priority=" + priority +
      ", timestamp=" + timestamp + ",expiration=" + expiration + ",address=" + address + ", propertiesLocation=" + propertiesLocation, e);
   final ByteBuf buffer = this.buffer;
   if (buffer != null) {
      //risky: a racy modification to buffer indexes could break this duplicate operation
      final ByteBuf duplicatebuffer = buffer.duplicate();
      duplicatebuffer.readerIndex(0);
      logger.warn("Failed message has messageID=" + messageID + " and the following buffer:\n" + ByteBufUtil.prettyHexDump(duplicatebuffer));
   } else {
      logger.warn("Failed message has messageID=" + messageID + " and the buffer was null");
   }
   return new RuntimeException(e.getMessage(), e);
}
/**
 * Rough memory footprint: fixed overhead plus buffer capacity plus decoded properties.
 * Cached after the first call; not recomputed if the message later changes.
 */
@Override
public int getMemoryEstimate() {
   if (memoryEstimate == -1) {
      memoryEstimate = memoryOffset +
         (buffer != null ? buffer.capacity() : 0) +
         (properties != null ? properties.getMemoryOffset() : 0);
   }
   return memoryEstimate;
}

@Override
public boolean isServerMessage() {
   // even though CoreMessage is used both on server and client
   // callers are interested in knowing if this is a server large message
   // as it will be used to send the body from the files.
   //
   // this may need further refactoring when we improve large messages
   // and expose that functionality to other protocols.
   return false;
}

@Override
public byte getType() {
   return type;
}

@Override
public CoreMessage setType(byte type) {
   this.type = type;
   return this;
}

private void decode(boolean beforeAddress) {
   decode(beforeAddress, coreMessageObjectPools);
}

// Decodes the whole message from the backing buffer: reads the end-of-body marker,
// skips the body bytes, then decodes headers/properties that follow the body.
private void decode(boolean beforeAddress, CoreMessageObjectPools pools) {
   endOfBodyPosition = buffer.readInt();
   buffer.skipBytes(endOfBodyPosition - BUFFER_HEADER_SPACE);
   decodeHeadersAndProperties(buffer, true, pools);
   buffer.readerIndex(0);
   validBuffer = true;
   if (beforeAddress) {
      // Adjust for wire formats that carry the marker before the address field.
      endOfBodyPosition = endOfBodyPosition - DataConstants.SIZE_INT;
   }
   internalWritableBuffer();
}

public void decodeHeadersAndProperties(final ByteBuf buffer) {
   decodeHeadersAndProperties(buffer, false, coreMessageObjectPools);
}

// Reads the fixed header fields in wire order; when lazyProperties is true only the
// properties offset is recorded and decoding is deferred to getProperties().
private void decodeHeadersAndProperties(final ByteBuf buffer, boolean lazyProperties, CoreMessageObjectPools pools) {
   messageIDPosition = buffer.readerIndex();
   internalSetMessageID(buffer.readLong());
   address = SimpleString.readNullableSimpleString(buffer, pools == null ? null : pools.getAddressDecoderPool());
   if (buffer.readByte() == DataConstants.NOT_NULL) {
      byte[] bytes = new byte[16];
      buffer.readBytes(bytes);
      userID = new UUID(UUID.TYPE_TIME_BASED, bytes);
   } else {
      userID = null;
   }
   type = buffer.readByte();
   durable = buffer.readBoolean();
   expiration = buffer.readLong();
   timestamp = buffer.readLong();
   priority = buffer.readByte();
   if (lazyProperties) {
      properties = null;
      propertiesLocation = buffer.readerIndex();
   } else {
      properties = new TypedProperties(INTERNAL_PROPERTY_NAMES_PREDICATE);
      properties.decode(buffer, pools == null ? null : pools.getPropertiesDecoderPools());
   }
}
/**
 * Re-encodes the message into the backing buffer: the end-of-body marker first,
 * then (after the body region) the headers and properties. Marks the buffer valid.
 */
public synchronized CoreMessage encode() {
   getProperties();
   if (writableBuffer != null) {
      // The message encode takes into consideration the PacketImpl which is not part of this encoding
      // so we always need to take the BUFFER_HEADER_SPACE from packet impl into consideration
      endOfBodyPosition = writableBuffer.writerIndex() + BUFFER_HEADER_SPACE - 4;
   } else if (endOfBodyPosition <= 0) {
      endOfBodyPosition = BUFFER_HEADER_SPACE + DataConstants.SIZE_INT;
   }
   buffer.setIndex(0, 0);
   buffer.writeInt(endOfBodyPosition);
   // The end of body position
   buffer.writerIndex(endOfBodyPosition - BUFFER_HEADER_SPACE + DataConstants.SIZE_INT);
   encodeHeadersAndProperties(buffer);
   validBuffer = true;
   return this;
}

/** Writes the fixed header fields in wire order, recording where the message id lands so it can be patched later. */
public void encodeHeadersAndProperties(final ByteBuf buffer) {
   final TypedProperties properties = getProperties();
   messageIDPosition = buffer.writerIndex();
   buffer.writeLong(messageID);
   SimpleString.writeNullableSimpleString(buffer, address);
   if (userID == null) {
      buffer.writeByte(DataConstants.NULL);
   } else {
      buffer.writeByte(DataConstants.NOT_NULL);
      buffer.writeBytes(userID.asBytes());
   }
   buffer.writeByte(type);
   buffer.writeBoolean(durable);
   buffer.writeLong(expiration);
   buffer.writeLong(timestamp);
   buffer.writeByte(priority);
   properties.encode(buffer);
}

/** Byte size of the header+properties section, mirroring encodeHeadersAndProperties() field for field. */
@Override
public int getHeadersAndPropertiesEncodeSize() {
   return DataConstants.SIZE_LONG + // Message ID
      DataConstants.SIZE_BYTE + // user id null?
      (userID == null ? 0 : 16) +
      /* address */SimpleString.sizeofNullableString(address) +
      DataConstants./* Type */SIZE_BYTE +
      DataConstants./* Durable */SIZE_BOOLEAN +
      DataConstants./* Expiration */SIZE_LONG +
      DataConstants./* Timestamp */SIZE_LONG +
      DataConstants./* Priority */SIZE_BYTE +
      /* PropertySize and Properties */getProperties().getEncodeSize();
}

@Override
public Object getDuplicateProperty() {
   return getObjectProperty(Message.HDR_DUPLICATE_DETECTION_ID);
}

@Override
public SimpleString getLastValueProperty() {
   return getSimpleStringProperty(Message.HDR_LAST_VALUE_NAME);
}

@Override
public Message setLastValueProperty(SimpleString lastValueName) {
   return putStringProperty(Message.HDR_LAST_VALUE_NAME, lastValueName);
}
/** Encoded size in bytes, re-encoding first if stale; -1 when there is no backing buffer. */
@Override
public int getEncodeSize() {
   if (buffer == null) {
      return -1;
   }
   checkEncode();
   return buffer.writerIndex();
}

@Override
public boolean isLargeMessage() {
   return false;
}

/** String form of the address, or null when unset. */
@Override
public String getAddress() {
   if (address == null) {
      return null;
   } else {
      return address.toString();
   }
}

@Override
public CoreMessage setAddress(String address) {
   messageChanged();
   this.address = SimpleString.toSimpleString(address, coreMessageObjectPools == null ? null : coreMessageObjectPools.getAddressStringSimpleStringPool());
   return this;
}

@Override
public CoreMessage setBuffer(ByteBuf buffer) {
   this.buffer = buffer;
   return this;
}

@Override
public ByteBuf getBuffer() {
   return buffer;
}

@Override
public boolean isDurable() {
   return durable;
}

@Override
public CoreMessage setDurable(boolean durable) {
   messageChanged();
   this.durable = durable;
   return this;
}
// ---------------------------------------------------------------------------
// Typed property accessors.
// Every getter delegates to the (lazily decoded) TypedProperties; every setter
// calls messageChanged() first so the encoded buffer is rebuilt on next encode.
// String-keyed overloads convert through key()/value(), which use the pooled
// SimpleString caches when available.
// ---------------------------------------------------------------------------
@Override
public CoreMessage putBooleanProperty(final String key, final boolean value) {
   return putBooleanProperty(key(key), value);
}

@Override
public CoreMessage putBooleanProperty(final SimpleString key, final boolean value) {
   messageChanged();
   getProperties().putBooleanProperty(key, value);
   return this;
}

@Override
public Boolean getBooleanProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getBooleanProperty(key);
}

@Override
public Boolean getBooleanProperty(final String key) throws ActiveMQPropertyConversionException {
   return getBooleanProperty(key(key));
}

@Override
public CoreMessage putByteProperty(final SimpleString key, final byte value) {
   messageChanged();
   getProperties().putByteProperty(key, value);
   return this;
}

@Override
public CoreMessage putByteProperty(final String key, final byte value) {
   return putByteProperty(key(key), value);
}

@Override
public Byte getByteProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getByteProperty(key);
}

@Override
public Byte getByteProperty(final String key) throws ActiveMQPropertyConversionException {
   return getByteProperty(key(key));
}

@Override
public CoreMessage putBytesProperty(final SimpleString key, final byte[] value) {
   messageChanged();
   getProperties().putBytesProperty(key, value);
   return this;
}

@Override
public CoreMessage putBytesProperty(final String key, final byte[] value) {
   return putBytesProperty(key(key), value);
}

@Override
public byte[] getBytesProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getBytesProperty(key);
}

@Override
public byte[] getBytesProperty(final String key) throws ActiveMQPropertyConversionException {
   return getBytesProperty(key(key));
}

@Override
public CoreMessage putCharProperty(SimpleString key, char value) {
   messageChanged();
   getProperties().putCharProperty(key, value);
   return this;
}

@Override
public CoreMessage putCharProperty(String key, char value) {
   return putCharProperty(key(key), value);
}

@Override
public CoreMessage putShortProperty(final SimpleString key, final short value) {
   messageChanged();
   getProperties().putShortProperty(key, value);
   return this;
}

@Override
public CoreMessage putShortProperty(final String key, final short value) {
   return putShortProperty(key(key), value);
}

@Override
public CoreMessage putIntProperty(final SimpleString key, final int value) {
   messageChanged();
   getProperties().putIntProperty(key, value);
   return this;
}

@Override
public CoreMessage putIntProperty(final String key, final int value) {
   return putIntProperty(key(key), value);
}

@Override
public Integer getIntProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getIntProperty(key);
}

@Override
public Integer getIntProperty(final String key) throws ActiveMQPropertyConversionException {
   return getIntProperty(key(key));
}

@Override
public CoreMessage putLongProperty(final SimpleString key, final long value) {
   messageChanged();
   getProperties().putLongProperty(key, value);
   return this;
}

@Override
public CoreMessage putLongProperty(final String key, final long value) {
   return putLongProperty(key(key), value);
}

@Override
public Long getLongProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getLongProperty(key);
}

@Override
public Long getLongProperty(final String key) throws ActiveMQPropertyConversionException {
   return getLongProperty(key(key));
}

@Override
public CoreMessage putFloatProperty(final SimpleString key, final float value) {
   messageChanged();
   getProperties().putFloatProperty(key, value);
   return this;
}

@Override
public CoreMessage putFloatProperty(final String key, final float value) {
   return putFloatProperty(key(key), value);
}

@Override
public CoreMessage putDoubleProperty(final SimpleString key, final double value) {
   messageChanged();
   getProperties().putDoubleProperty(key, value);
   return this;
}

@Override
public CoreMessage putDoubleProperty(final String key, final double value) {
   return putDoubleProperty(key(key), value);
}

@Override
public Double getDoubleProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getDoubleProperty(key);
}

@Override
public Double getDoubleProperty(final String key) throws ActiveMQPropertyConversionException {
   return getDoubleProperty(key(key));
}

@Override
public CoreMessage putStringProperty(final SimpleString key, final SimpleString value) {
   messageChanged();
   getProperties().putSimpleStringProperty(key, value);
   return this;
}

@Override
public CoreMessage putStringProperty(final SimpleString key, final String value) {
   return putStringProperty(key, value(value));
}

@Override
public CoreMessage putStringProperty(final String key, final String value) {
   return putStringProperty(key(key), value(value));
}

@Override
public CoreMessage putObjectProperty(final SimpleString key,
                                     final Object value) throws ActiveMQPropertyConversionException {
   messageChanged();
   TypedProperties.setObjectProperty(key, value, getProperties());
   return this;
}

@Override
public Object getObjectProperty(final String key) {
   return getObjectProperty(key(key));
}

@Override
public Object getObjectProperty(final SimpleString key) {
   return getProperties().getProperty(key);
}

@Override
public CoreMessage putObjectProperty(final String key, final Object value) throws ActiveMQPropertyConversionException {
   return putObjectProperty(key(key), value);
}

@Override
public Short getShortProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getShortProperty(key);
}

@Override
public Short getShortProperty(final String key) throws ActiveMQPropertyConversionException {
   return getShortProperty(key(key));
}

@Override
public Float getFloatProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getFloatProperty(key);
}

@Override
public Float getFloatProperty(final String key) throws ActiveMQPropertyConversionException {
   return getFloatProperty(key(key));
}

/** String view of a SimpleString property; null when the property is absent. */
@Override
public String getStringProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   SimpleString str = getSimpleStringProperty(key);
   if (str == null) {
      return null;
   } else {
      return str.toString();
   }
}

@Override
public String getStringProperty(final String key) throws ActiveMQPropertyConversionException {
   return getStringProperty(key(key));
}

@Override
public SimpleString getSimpleStringProperty(final SimpleString key) throws ActiveMQPropertyConversionException {
   return getProperties().getSimpleStringProperty(key);
}

@Override
public SimpleString getSimpleStringProperty(final String key) throws ActiveMQPropertyConversionException {
   return getSimpleStringProperty(key(key));
}

/** Removes a property, invalidating the encoded buffer only if something was actually removed. */
@Override
public Object removeProperty(final SimpleString key) {
   Object oldValue = getProperties().removeProperty(key);
   if (oldValue != null) {
      messageChanged();
   }
   return oldValue;
}

@Override
public Object removeProperty(final String key) {
   return removeProperty(key(key));
}
@Override
public boolean hasScheduledDeliveryTime() {
   return searchProperty(Message.HDR_SCHEDULED_DELIVERY_TIME);
}

/**
 * Differently from {@link #containsProperty(SimpleString)}, this method can save decoding the message,
 * performing a search of the {@code key} property and falling back to {@link #containsProperty(SimpleString)}
 * if not possible or if already decoded.
 */
public boolean searchProperty(SimpleString key) {
   Objects.requireNonNull(key, "key cannot be null");
   // Fast path: properties already decoded, just ask them.
   TypedProperties properties = this.properties;
   if (properties != null) {
      return properties.containsProperty(key);
   }
   synchronized (this) {
      final ByteBuf buffer = this.buffer;
      // acquiring the lock here, although heavy-weight, is the safer way to do this,
      // because we cannot trust that a racing thread won't modify buffer
      if (buffer == null) {
         throw new NullPointerException("buffer cannot be null");
      }
      final int propertiesLocation = this.propertiesLocation;
      if (propertiesLocation < 0) {
         throw new IllegalStateException("propertiesLocation = " + propertiesLocation);
      }
      // Scan the still-encoded property section without materializing TypedProperties.
      return TypedProperties.searchProperty(key, buffer, propertiesLocation);
   }
}

@Override
public boolean containsProperty(final SimpleString key) {
   return getProperties().containsProperty(key);
}

@Override
public boolean containsProperty(final String key) {
   return containsProperty(key(key));
}

@Override
public Set<SimpleString> getPropertyNames() {
   return getProperties().getPropertyNames();
}

@Override
public LargeBodyReader getLargeBodyReader() throws ActiveMQException {
   return new CoreLargeBodyReaderImpl();
}
/**
 * LargeBodyReader over the in-memory buffer: tracks a read cursor and copies
 * ranges of the backing buffer into caller-provided ByteBuffers.
 */
private final class CoreLargeBodyReaderImpl implements LargeBodyReader {

   // Current read offset into the backing buffer.
   private int lastPos = 0;

   private CoreLargeBodyReaderImpl() {
   }

   @Override
   public void open() {
   }

   @Override
   public void position(long position) throws ActiveMQException {
      // NOTE(review): narrowing long -> int; positions beyond Integer.MAX_VALUE would
      // truncate. Presumably safe because the body lives in a ByteBuf (int-indexed) — confirm.
      lastPos = (int)position;
   }

   @Override
   public long position() {
      return lastPos;
   }

   @Override
   public void close() {
   }

   @Override
   public long getSize() {
      return buffer.writerIndex();
   }

   /** Fills {@code bufferRead} from the current position and advances the cursor by the amount requested. */
   @Override
   public int readInto(final ByteBuffer bufferRead) {
      final int remaining = bufferRead.remaining();
      buffer.getBytes(lastPos, bufferRead);
      lastPos += remaining;
      return remaining;
   }
}
/** Journal record size: the encoded message plus the leading length int written by persist(). */
@Override
public int getPersistSize() {
   checkEncode();
   return buffer.writerIndex() + DataConstants.SIZE_INT;
}

/** Writes the encoded message (length-prefixed) into the journal record. */
@Override
public void persist(ActiveMQBuffer targetRecord) {
   checkEncode();
   targetRecord.writeInt(buffer.writerIndex());
   targetRecord.writeBytes(buffer, 0, buffer.writerIndex());
}

/** Rebuilds this message from a journal record: reads the length prefix, copies the bytes, then decodes. */
@Override
public void reloadPersistence(ActiveMQBuffer record, CoreMessageObjectPools pools) {
   int size = record.readInt();
   initBuffer(size);
   buffer.setIndex(0, 0).writeBytes(record.byteBuf(), size);
   decode(false, pools);
}

@Override
public CoreMessage toCore() {
   return this;
}

@Override
public CoreMessage toCore(CoreMessageObjectPools coreMessageObjectPools) {
   return this;
}

@Override
public String toString() {
   try {
      final TypedProperties properties = getProperties();
      // NOTE(review): "durable" appears twice in this output (isDurable() and the field) — harmless but redundant.
      return "CoreMessage[messageID=" + messageID + ",durable=" + isDurable() + ",userID=" + getUserID() + ",priority=" + this.getPriority() +
         ", timestamp=" + toDate(getTimestamp()) + ",expiration=" + toDate(getExpiration()) +
         ", durable=" + durable + ", address=" + getAddress() + ",size=" + getPersistentSize() + ",properties=" + properties + "]@" + System.identityHashCode(this);
   } catch (Throwable e) {
      logger.warn("Error creating String for message: ", e);
      return "ServerMessage[messageID=" + messageID + "]";
   }
}
/**
 * Renders a millisecond timestamp for {@link #toString()}.
 * A zero timestamp (meaning "unset") is printed literally as "0" rather than as the epoch date.
 */
private static String toDate(long timestamp) {
   return timestamp == 0 ? "0" : new java.util.Date(timestamp).toString();
}
// Interns a property key through the shared pool (when pools are configured).
private SimpleString key(String key) {
   return SimpleString.toSimpleString(key, getPropertyKeysPool());
}

// Interns a property value through the shared pool (when pools are configured).
private SimpleString value(String value) {
   return SimpleString.toSimpleString(value, getPropertyValuesPool());
}

private SimpleString.StringSimpleStringPool getPropertyKeysPool() {
   return coreMessageObjectPools == null ? null : coreMessageObjectPools.getPropertiesStringSimpleStringPools().getPropertyKeysPool();
}

private SimpleString.StringSimpleStringPool getPropertyValuesPool() {
   return coreMessageObjectPools == null ? null : coreMessageObjectPools.getPropertiesStringSimpleStringPools().getPropertyValuesPool();
}

/** For a regular (non-large) core message the persistent size is just the encoded size. */
@Override
public long getPersistentSize() throws ActiveMQException {
   return getEncodeSize();
}

@Override
public Object getOwner() {
   return owner;
}

@Override
public void setOwner(Object object) {
   this.owner = object;
}
/**
 * Returns the text body of a TEXT_TYPE message.
 *
 * @return the decoded body string, or {@code null} if the message is not a text
 *         message, its body is null, or the body cannot be decoded
 */
@Override
public String getStringBody() {
   String body = null;
   if (type == TEXT_TYPE) {
      try {
         SimpleString simpleBody = getDataBuffer().readNullableSimpleString();
         if (simpleBody != null) {
            body = simpleBody.toString();
         }
      } catch (Exception e) {
         // Use the class logger instead of printStackTrace(): keeps the failure in the
         // broker log (with context) rather than dumping to stderr. Decoding failures
         // remain best-effort and still yield null.
         logger.warn("Failed to read the string body of message " + messageID, e);
      }
   }
   return body;
}
}
| apache-2.0 |
jcshen007/cloudstack | test/integration/smoke/test_affinity_groups.py | 5577 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.codes import FAILED
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.sshClient import SshClient
from nose.plugins.attrib import attr
class TestDeployVmWithAffinityGroup(cloudstackTestCase):
    """
    Deploys two virtual machines into the same host anti-affinity group and
    verifies the scheduler places them on distinct hosts.
    """

    @classmethod
    def setUpClass(cls):
        """Create the shared account, tiny service offering and the
        anti-affinity group used by the tests in this class."""
        cls.testClient = super(TestDeployVmWithAffinityGroup, cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.domain = get_domain(cls.apiclient)
        cls.services = cls.testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"]
        )
        if cls.template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["template"] = cls.template.id
        cls.services["zoneid"] = cls.zone.id

        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"]
        )
        cls.ag = AffinityGroup.create(cls.apiclient, cls.services["virtual_machine"]["affinity"],
                                      account=cls.account.name, domainid=cls.domain.id)

        # Resources are cleaned up in list order, so the account goes last.
        cls._cleanup = [
            cls.service_offering,
            cls.ag,
            cls.account,
        ]
        return

    def _deploy_vm_in_group(self):
        """Deploy one VM into the shared anti-affinity group, verify it lists
        and is Running, and return the id of the host it landed on."""
        vm = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            affinitygroupnames=[self.ag.name]
        )
        vm_list = list_virtual_machines(
            self.apiclient,
            id=vm.id
        )
        self.assertEqual(
            isinstance(vm_list, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(vm_list),
            0,
            "Check VM available in List Virtual Machines"
        )
        vm_response = vm_list[0]
        self.assertEqual(
            vm_response.state,
            'Running',
            msg="VM is not in Running state"
        )
        return vm_response.hostid

    @attr(tags=["basic", "advanced", "multihost"], required_hardware="false")
    def test_DeployVmAntiAffinityGroup(self):
        """
        test DeployVM in anti-affinity groups

        deploy VM1 and VM2 in the same host-anti-affinity groups
        Verify that the vms are deployed on separate hosts
        """
        # Both deployments share the identical create/verify sequence, so it is
        # factored into _deploy_vm_in_group(); only the host comparison differs.
        host_of_vm1 = self._deploy_vm_in_group()
        host_of_vm2 = self._deploy_vm_in_group()

        self.assertNotEqual(host_of_vm1, host_of_vm2,
                            msg="Both VMs of affinity group %s are on the same host" % self.ag.name)

    @classmethod
    def tearDownClass(cls):
        """Release every resource registered in _cleanup."""
        try:
            # Clean up, terminate the created templates
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
| apache-2.0 |
jawilson/home-assistant | homeassistant/components/brother/const.py | 235 | """Constants for Brother integration."""
from __future__ import annotations
from typing import Final
DATA_CONFIG_ENTRY: Final = "config_entry"
DOMAIN: Final = "brother"
PRINTER_TYPES: Final = ["laser", "ink"]
SNMP: Final = "snmp"
| apache-2.0 |
svagionitis/aws-sdk-cpp | aws-cpp-sdk-elasticache/source/model/Parameter.cpp | 7215 | /*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/elasticache/model/Parameter.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <utility>
using namespace Aws::Utils::Xml;
using namespace Aws::Utils;
namespace Aws
{
namespace ElastiCache
{
namespace Model
{
// Default-constructs a Parameter with every "HasBeenSet" flag cleared, so that
// OutputToStream() serializes nothing until fields are explicitly populated.
// m_isModifiable and m_changeType get explicit sentinel values since they are
// not self-initializing class types.
Parameter::Parameter() :
    m_parameterNameHasBeenSet(false),
    m_parameterValueHasBeenSet(false),
    m_descriptionHasBeenSet(false),
    m_sourceHasBeenSet(false),
    m_dataTypeHasBeenSet(false),
    m_allowedValuesHasBeenSet(false),
    m_isModifiable(false),
    m_isModifiableHasBeenSet(false),
    m_minimumEngineVersionHasBeenSet(false),
    m_changeType(ChangeType::NOT_SET),
    m_changeTypeHasBeenSet(false)
{
}
// Constructs a Parameter from an XML node by delegating to the default
// constructor (which establishes all the "not set" defaults) and then reusing
// operator=(const XmlNode&) for the parsing. This removes the previously
// duplicated 11-entry member-initializer list, so the defaults now live in
// exactly one place. Delegating constructors are C++11, which this file
// already requires (scoped enum ChangeType::NOT_SET).
Parameter::Parameter(const XmlNode& xmlNode) :
    Parameter()
{
  *this = xmlNode;
}
// Populates this Parameter from the given XML node. Every child element is
// optional: a member (and its HasBeenSet flag) is assigned only when the
// corresponding child node is present, so absent fields keep their defaults.
Parameter& Parameter::operator =(const XmlNode& xmlNode)
{
  XmlNode resultNode = xmlNode;

  if(!resultNode.IsNull())
  {
    XmlNode parameterNameNode = resultNode.FirstChild("ParameterName");
    if(!parameterNameNode.IsNull())
    {
      m_parameterName = StringUtils::Trim(parameterNameNode.GetText().c_str());
      m_parameterNameHasBeenSet = true;
    }
    XmlNode parameterValueNode = resultNode.FirstChild("ParameterValue");
    if(!parameterValueNode.IsNull())
    {
      m_parameterValue = StringUtils::Trim(parameterValueNode.GetText().c_str());
      m_parameterValueHasBeenSet = true;
    }
    XmlNode descriptionNode = resultNode.FirstChild("Description");
    if(!descriptionNode.IsNull())
    {
      m_description = StringUtils::Trim(descriptionNode.GetText().c_str());
      m_descriptionHasBeenSet = true;
    }
    XmlNode sourceNode = resultNode.FirstChild("Source");
    if(!sourceNode.IsNull())
    {
      m_source = StringUtils::Trim(sourceNode.GetText().c_str());
      m_sourceHasBeenSet = true;
    }
    XmlNode dataTypeNode = resultNode.FirstChild("DataType");
    if(!dataTypeNode.IsNull())
    {
      m_dataType = StringUtils::Trim(dataTypeNode.GetText().c_str());
      m_dataTypeHasBeenSet = true;
    }
    XmlNode allowedValuesNode = resultNode.FirstChild("AllowedValues");
    if(!allowedValuesNode.IsNull())
    {
      m_allowedValues = StringUtils::Trim(allowedValuesNode.GetText().c_str());
      m_allowedValuesHasBeenSet = true;
    }
    XmlNode isModifiableNode = resultNode.FirstChild("IsModifiable");
    if(!isModifiableNode.IsNull())
    {
      // Boolean element: text such as "true"/"false" converted via StringUtils.
      m_isModifiable = StringUtils::ConvertToBool(StringUtils::Trim(isModifiableNode.GetText().c_str()).c_str());
      m_isModifiableHasBeenSet = true;
    }
    XmlNode minimumEngineVersionNode = resultNode.FirstChild("MinimumEngineVersion");
    if(!minimumEngineVersionNode.IsNull())
    {
      m_minimumEngineVersion = StringUtils::Trim(minimumEngineVersionNode.GetText().c_str());
      m_minimumEngineVersionHasBeenSet = true;
    }
    XmlNode changeTypeNode = resultNode.FirstChild("ChangeType");
    if(!changeTypeNode.IsNull())
    {
      // Enum element: mapped from its wire name to the ChangeType enum.
      m_changeType = ChangeTypeMapper::GetChangeTypeForName(StringUtils::Trim(changeTypeNode.GetText().c_str()).c_str());
      m_changeTypeHasBeenSet = true;
    }
  }

  return *this;
}
// Serializes this Parameter as query-string pairs for a list member, i.e. with
// an "<location><index><locationValue>." prefix on every key. Only fields whose
// HasBeenSet flag is true are emitted; string values are URL-encoded.
void Parameter::OutputToStream(Aws::OStream& oStream, const char* location, unsigned index, const char* locationValue) const
{
  if(m_parameterNameHasBeenSet)
  {
      oStream << location << index << locationValue << ".ParameterName=" << StringUtils::URLEncode(m_parameterName.c_str()) << "&";
  }
  if(m_parameterValueHasBeenSet)
  {
      oStream << location << index << locationValue << ".ParameterValue=" << StringUtils::URLEncode(m_parameterValue.c_str()) << "&";
  }
  if(m_descriptionHasBeenSet)
  {
      oStream << location << index << locationValue << ".Description=" << StringUtils::URLEncode(m_description.c_str()) << "&";
  }
  if(m_sourceHasBeenSet)
  {
      oStream << location << index << locationValue << ".Source=" << StringUtils::URLEncode(m_source.c_str()) << "&";
  }
  if(m_dataTypeHasBeenSet)
  {
      oStream << location << index << locationValue << ".DataType=" << StringUtils::URLEncode(m_dataType.c_str()) << "&";
  }
  if(m_allowedValuesHasBeenSet)
  {
      oStream << location << index << locationValue << ".AllowedValues=" << StringUtils::URLEncode(m_allowedValues.c_str()) << "&";
  }
  if(m_isModifiableHasBeenSet)
  {
      // boolalpha renders the bool as "true"/"false" rather than 1/0.
      oStream << location << index << locationValue << ".IsModifiable=" << std::boolalpha << m_isModifiable << "&";
  }
  if(m_minimumEngineVersionHasBeenSet)
  {
      oStream << location << index << locationValue << ".MinimumEngineVersion=" << StringUtils::URLEncode(m_minimumEngineVersion.c_str()) << "&";
  }
  if(m_changeTypeHasBeenSet)
  {
      oStream << location << index << locationValue << ".ChangeType=" << ChangeTypeMapper::GetNameForChangeType(m_changeType) << "&";
  }
}
// Serializes this Parameter into AWS Query string form as a single (non-list)
// member, e.g. "<location>.ParameterName=value&". Same field set, encoding,
// and emission order as the indexed overload above, minus the index prefix.
void Parameter::OutputToStream(Aws::OStream& oStream, const char* location) const
{
  if(m_parameterNameHasBeenSet)
  {
      oStream << location << ".ParameterName=" << StringUtils::URLEncode(m_parameterName.c_str()) << "&";
  }
  if(m_parameterValueHasBeenSet)
  {
      oStream << location << ".ParameterValue=" << StringUtils::URLEncode(m_parameterValue.c_str()) << "&";
  }
  if(m_descriptionHasBeenSet)
  {
      oStream << location << ".Description=" << StringUtils::URLEncode(m_description.c_str()) << "&";
  }
  if(m_sourceHasBeenSet)
  {
      oStream << location << ".Source=" << StringUtils::URLEncode(m_source.c_str()) << "&";
  }
  if(m_dataTypeHasBeenSet)
  {
      oStream << location << ".DataType=" << StringUtils::URLEncode(m_dataType.c_str()) << "&";
  }
  if(m_allowedValuesHasBeenSet)
  {
      oStream << location << ".AllowedValues=" << StringUtils::URLEncode(m_allowedValues.c_str()) << "&";
  }
  if(m_isModifiableHasBeenSet)
  {
      oStream << location << ".IsModifiable=" << std::boolalpha << m_isModifiable << "&";
  }
  if(m_minimumEngineVersionHasBeenSet)
  {
      oStream << location << ".MinimumEngineVersion=" << StringUtils::URLEncode(m_minimumEngineVersion.c_str()) << "&";
  }
  if(m_changeTypeHasBeenSet)
  {
      oStream << location << ".ChangeType=" << ChangeTypeMapper::GetNameForChangeType(m_changeType) << "&";
  }
}
} // namespace Model
} // namespace ElastiCache
} // namespace Aws
| apache-2.0 |
amdharness/amdharness-app | lib/dojotoolkit.org/dojox/mobile/ComboBox.js | 10509 | define([
"dojo/_base/kernel",
"dojo/_base/declare",
"dojo/_base/lang",
"dojo/_base/window",
"dojo/dom-geometry",
"dojo/dom-style",
"dojo/dom-attr",
"dojo/window",
"dojo/touch",
"dijit/form/_AutoCompleterMixin",
"dijit/popup",
"./_ComboBoxMenu",
"./TextBox",
"./sniff"
], function(kernel, declare, lang, win, domGeometry, domStyle, domAttr, windowUtils, touch, AutoCompleterMixin, popup, ComboBoxMenu, TextBox, has){
	kernel.experimental("dojox.mobile.ComboBox"); // should be using a more native search-type UI

	// Mobile combo box: a TextBox mixed with dijit's auto-completer, showing
	// its choices in a dojox/mobile/_ComboBoxMenu popup.
	return declare("dojox.mobile.ComboBox", [TextBox, AutoCompleterMixin], {
		// summary:
		//		A non-templated auto-completing text box widget.

		// dropDownClass: [protected extension] String
		//		Name of the drop-down widget class used to select a date/time.
		//		Should be specified by subclasses.
		dropDownClass: "dojox.mobile._ComboBoxMenu",

		// initially disable selection since iphone displays selection handles
		// that makes it hard to pick from the list

		// selectOnClick: Boolean
		//		Flag which enables the selection on click.
		selectOnClick: false,

		// autoComplete: Boolean
		//		Flag which enables the auto-completion.
		autoComplete: false,

		// dropDown: [protected] Widget
		//		The widget to display as a popup. This widget *must* be
		//		defined before the startup function is called.
		dropDown: null,

		// maxHeight: [protected] int
		//		The maximum height for the drop-down.
		//		Any drop-down taller than this value will have scrollbars.
		//		Set to -1 to limit the height to the available space in the viewport.
		maxHeight: -1,

		// dropDownPosition: [const] String[]
		//		This variable controls the position of the drop-down.
		//		It is an array of strings with the following values:
		//
		//		- before: places drop down to the left of the target node/widget, or to the right in
		//		  the case of RTL scripts like Hebrew and Arabic
		//		- after: places drop down to the right of the target node/widget, or to the left in
		//		  the case of RTL scripts like Hebrew and Arabic
		//		- above: drop down goes above target node
		//		- below: drop down goes below target node
		//
		//		The list is positions is tried, in order, until a position is found where the drop down fits
		//		within the viewport.
		dropDownPosition: ["below","above"],
_throttleOpenClose: function(){
// summary:
// Prevents the open/close in rapid succession.
// tags:
// private
if(this._throttleHandler){
this._throttleHandler.remove();
}
this._throttleHandler = this.defer(function(){ this._throttleHandler = null; }, 500);
},
		_onFocus: function(){
			// summary:
			//		Shows drop-down if the user is selecting Next/Previous from the virtual keyboard.
			// tags:
			//		private
			this.inherited(arguments);
			// Skip if already open or inside the open/close throttle window.
			if(!this._opened && !this._throttleHandler){
				this._startSearchAll();
			}
			if(has("windows-theme")) {
				// On the Windows theme the input is blurred so focus does not
				// raise the soft keyboard over the drop-down.
				this.domNode.blur();
			}
		},
		onInput: function(e){
			// summary:
			//		Forwards input events to the search key handler; events with
			//		charCode === 0 are ignored (see ticket #18047).
			if(!e || e.charCode !== 0){ // #18047
				this._onKey(e);
				this.inherited(arguments);
			}
		},
		_setListAttr: function(v){
			// summary:
			//		Custom setter for the "list" attribute: records the value
			//		without reflecting it to the DOM.
			// tags:
			//		private
			this._set('list', v); // needed for Firefox 4+ to prevent HTML5 mode
		},
		closeDropDown: function(){
			// summary:
			//		Closes the drop down on this widget
			// tags:
			//		protected
			this._throttleOpenClose();
			// Tear down the gesture handlers and reposition timer installed by
			// openDropDown(); endHandler doubles as the "installed" flag.
			if(this.endHandler){
				this.disconnect(this.startHandler);
				this.disconnect(this.endHandler);
				this.disconnect(this.moveHandler);
				clearInterval(this.repositionTimer);
				this.repositionTimer = this.endHandler = null;
			}
			this.inherited(arguments);
			domAttr.remove(this.domNode, "aria-owns");
			domAttr.set(this.domNode, "aria-expanded", "false");
			popup.close(this.dropDown);
			this._opened = false;
			// Remove disable attribute to make input element clickable after context menu closed
			if(has("windows-theme") && this.domNode.disabled){
				this.defer(function(){
					this.domNode.removeAttribute("disabled");
				}, 300);
			}
		},
		openDropDown: function(){
			// summary:
			//		Opens the dropdown for this widget. To be called only when this.dropDown
			//		has been created and is ready to display (that is, its data is loaded).
			// returns:
			//		Returns the value of popup.open().
			// tags:
			//		protected
			var wasClosed = !this._opened;
			var dropDown = this.dropDown,
				ddNode = dropDown.domNode,
				aroundNode = this.domNode,
				self = this;

			// ARIA wiring: the input owns and expands a listbox popup.
			domAttr.set(dropDown.domNode, "role", "listbox");
			domAttr.set(this.domNode, "aria-expanded", "true");
			if(dropDown.id){
				domAttr.set(this.domNode, "aria-owns", dropDown.id);
			}

			if(has('touch')){
				win.global.scrollBy(0, domGeometry.position(aroundNode, false).y); // don't call scrollIntoView since it messes up ScrollableView
			}

			// TODO: isn't maxHeight dependent on the return value from popup.open(),
			// i.e., dependent on how much space is available (BK)

			// One-time check whether the drop-down came with explicit CSS
			// dimensions, so later sizing code knows what it may override.
			if(!this._preparedNode){
				this._preparedNode = true;
				// Check if we have explicitly set width and height on the dropdown widget dom node
				if(ddNode.style.width){
					this._explicitDDWidth = true;
				}
				if(ddNode.style.height){
					this._explicitDDHeight = true;
				}
			}

			// Code for resizing dropdown (height limitation, or increasing width to match my width)
			var myStyle = {
				display: "",
				overflow: "hidden",
				visibility: "hidden"
			};
			if(!this._explicitDDWidth){
				myStyle.width = "";
			}
			if(!this._explicitDDHeight){
				myStyle.height = "";
			}
			domStyle.set(ddNode, myStyle);

			// Figure out maximum height allowed (if there is a height restriction)
			var maxHeight = this.maxHeight;
			if(maxHeight == -1){
				// limit height to space available in viewport either above or below my domNode
				// (whichever side has more room)
				var viewport = windowUtils.getBox(),
					position = domGeometry.position(aroundNode, false);
				maxHeight = Math.floor(Math.max(position.y, viewport.h - (position.y + position.h)));
			}

			// Attach dropDown to DOM and make make visibility:hidden rather than display:none
			// so we call startup() and also get the size
			popup.moveOffScreen(dropDown);
			if(dropDown.startup && !dropDown._started){
				dropDown.startup(); // this has to be done after being added to the DOM
			}
			// Get size of drop down, and determine if vertical scroll bar needed
			var mb = domGeometry.position(this.dropDown.containerNode, false);
			var overHeight = (maxHeight && mb.h > maxHeight);
			if(overHeight){
				mb.h = maxHeight;
			}

			// Adjust dropdown width to match or be larger than my width
			mb.w = Math.max(mb.w, aroundNode.offsetWidth);
			domGeometry.setMarginBox(ddNode, mb);

			var retVal = popup.open({
				parent: this,
				popup: dropDown,
				around: aroundNode,
				orient: has("windows-theme") ? ["above"] : this.dropDownPosition,
				onExecute: function(){
					self.closeDropDown();
				},
				onCancel: function(){
					self.closeDropDown();
				},
				onClose: function(){
					self._opened = false;
				}
			});
			this._opened=true;

			if(wasClosed){
				// Gesture tracking: distinguish taps (which close the drop-down)
				// from drags/scrolls, and keep the popup glued to the input while
				// the page underneath it moves.
				var isGesture = false,
					skipReposition = false,
					active = false,
					wrapper = dropDown.domNode.parentNode,
					aroundNodePos = domGeometry.position(aroundNode, false),
					popupPos = domGeometry.position(wrapper, false),
					deltaX = popupPos.x - aroundNodePos.x,
					deltaY = popupPos.y - aroundNodePos.y,
					startX = -1, startY = -1;

				// touchstart isn't really needed since touchmove implies touchstart, but
				// mousedown is needed since mousemove doesn't know if the left button is down or not
				this.startHandler = this.connect(win.doc.documentElement, touch.press,
					function(e){
						skipReposition = true;
						active = true;
						isGesture = false;
						startX = e.clientX;
						startY = e.clientY;
					}
				);
				this.moveHandler = this.connect(win.doc.documentElement, touch.move,
					function(e){
						skipReposition = true;
						if(e.touches){
							active = isGesture = true; // touchmove implies touchstart
						}else if(active && (e.clientX != startX || e.clientY != startY)){
							isGesture = true;
						}
					}
				);
				this.clickHandler = this.connect(dropDown.domNode, "onclick",
					function(){
						skipReposition = true;
						active = isGesture = false; // click implies no gesture movement
					}
				);
				this.endHandler = this.connect(win.doc.documentElement, touch.release,
					function(){
						this.defer(function(){ // allow onclick to go first
							skipReposition = true;
							if(!isGesture && active){ // if click without move, then close dropdown
								this.closeDropDown();
							}
							active = false;
						});
					}
				);
				this.repositionTimer = setInterval(lang.hitch(this, function(){
					if(skipReposition){ // don't reposition if busy
						skipReposition = false;
						return;
					}
					var currentAroundNodePos = domGeometry.position(aroundNode, false),
						currentPopupPos = domGeometry.position(wrapper, false),
						currentDeltaX = currentPopupPos.x - currentAroundNodePos.x,
						currentDeltaY = currentPopupPos.y - currentAroundNodePos.y;

					// if the popup is no longer placed correctly, relocate it
					if(Math.abs(currentDeltaX - deltaX) >= 1 || Math.abs(currentDeltaY - deltaY) >= 1){ // Firefox plays with partial pixels
						domStyle.set(wrapper, { left: parseInt(domStyle.get(wrapper, "left")) + deltaX - currentDeltaX + 'px', top: parseInt(domStyle.get(wrapper, "top")) + deltaY - currentDeltaY + 'px' });
					}
				}), 50); // yield a short time to allow for consolidation for better CPU throughput
			}

			// We need to disable input control in order to prevent opening the soft keyboard in IE
			if(has("windows-theme")){
				this.domNode.setAttribute("disabled", true);
			}
			return retVal;
		},
postCreate: function(){
this.inherited(arguments);
this.connect(this.domNode, "onclick", "_onClick");
domAttr.set(this.domNode, "role", "combobox");
domAttr.set(this.domNode, "aria-expanded", "false");
},
destroy: function(){
if(this.repositionTimer){
clearInterval(this.repositionTimer);
}
this.inherited(arguments);
},
_onClick: function(/*Event*/ e){
// tags:
// private
// throttle clicks to prevent double click from doing double actions
if(!this._throttleHandler){
if(this.opened){
this.closeDropDown();
}else{
this._startSearchAll();
}
}
}
});
});
| apache-2.0 |
foryou2030/incubator-carbondata | core/src/main/java/org/apache/carbondata/scan/expression/conditional/BinaryConditionalExpression.java | 1326 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.carbondata.scan.expression.conditional;
import org.apache.carbondata.scan.expression.Expression;
import org.apache.carbondata.scan.expression.logical.BinaryLogicalExpression;
/**
 * Base class for conditional filter expressions that operate on exactly two
 * child expressions (left and right operands). It combines the two-operand
 * plumbing inherited from {@link BinaryLogicalExpression} with the
 * {@link ConditionalExpression} contract implemented by concrete subclasses.
 */
public abstract class BinaryConditionalExpression extends BinaryLogicalExpression
    implements ConditionalExpression {

  /**
   * Serialization version id.
   */
  private static final long serialVersionUID = 1L;

  // NOTE(review): public mutable flag — presumably marks null-comparison
  // semantics for subclasses; confirm against its writers before relying on it.
  public boolean isNull;

  /**
   * @param left  left operand of the condition
   * @param right right operand of the condition
   */
  public BinaryConditionalExpression(Expression left, Expression right) {
    super(left, right);
  }
}
| apache-2.0 |
rhdedgar/openshift-tools | ansible/roles/lib_gcloud/build/lib/base.py | 16433 | # pylint: skip-file
'''
GcloudCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import random
# Not all genearated modules use this.
# pylint: disable=unused-import
import re
import shutil
import string
import subprocess
import tempfile
import yaml
# Not all genearated modules use this.
# pylint: disable=unused-import
import copy
# pylint: disable=import-error
from apiclient.discovery import build
# pylint: disable=import-error
from oauth2client.client import GoogleCredentials
from ansible.module_utils.basic import AnsibleModule
class GcloudCLIError(Exception):
    """Raised when a gcloud CLI invocation cannot be performed."""
# pylint: disable=too-few-public-methods
class GcloudCLI(object):
''' Class to wrap the command line tools '''
def __init__(self, credentials=None, project=None, verbose=False):
''' Constructor for GcloudCLI '''
self.scope = None
self._project = project
if not credentials:
self.credentials = GoogleCredentials.get_application_default()
else:
tmp = tempfile.NamedTemporaryFile()
tmp.write(json.dumps(credentials))
tmp.seek(0)
self.credentials = GoogleCredentials.from_stream(tmp.name)
tmp.close()
self.scope = build('compute', 'beta', credentials=self.credentials)
self.verbose = verbose
@property
def project(self):
'''property for project'''
return self._project
def _create_image(self, image_name, image_info):
'''create an image name'''
cmd = ['compute', 'images', 'create', image_name]
for key, val in image_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_image(self, image_name):
'''delete image by name '''
cmd = ['compute', 'images', 'delete', image_name]
if image_name:
cmd.extend(['describe', image_name])
else:
cmd.append('list')
cmd.append('-q')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_images(self, image_name=None):
'''list images.
if name is supplied perform a describe and return
'''
cmd = ['compute', 'images']
if image_name:
cmd.extend(['describe', image_name])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_deployments(self, simple=True):
'''list deployments by name '''
cmd = ['deployment-manager', 'deployments', 'list']
if simple:
cmd.append('--simple-list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_deployment(self, dname):
'''list deployments by name '''
cmd = ['deployment-manager', 'deployments', 'delete', dname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_deployment(self, dname, config=None, opts=None):
''' create a deployment'''
cmd = ['deployment-manager', 'deployments', 'create', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _update_deployment(self, dname, config=None, opts=None):
''' create a deployment'''
cmd = ['deployment-manager', 'deployments', 'update', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_manifests(self, deployment, mname=None):
''' list manifests
if a name is specified then perform a describe
'''
cmd = ['deployment-manager', 'manifests', '--deployment', deployment]
if mname:
cmd.extend(['describe', mname])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_address(self, aname):
''' list addresses
if a name is specified then perform a describe
'''
cmd = ['compute', 'addresses', 'delete', aname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_addresses(self, aname=None):
''' list addresses
if a name is specified then perform a describe
'''
cmd = ['compute', 'addresses']
if aname:
cmd.extend(['describe', aname])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_address(self, address_name, address_info, address=None, isglobal=False):
''' create a deployment'''
cmd = ['compute', 'addresses', 'create', address_name]
if address:
cmd.append(address)
if isglobal:
cmd.append('--global')
for key, val in address_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_metadata(self, resource_type, name=None, zone=None):
''' list metadata'''
cmd = ['compute', resource_type, 'describe']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _delete_metadata(self, resource_type, keys, remove_all=False, name=None, zone=None):
'''create metadata'''
cmd = ['compute', resource_type, 'remove-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
if remove_all:
cmd.append('--all')
else:
cmd.append('--keys')
cmd.append(','.join(keys))
cmd.append('-q')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _create_metadata(self, resource_type, metadata=None, metadata_from_file=None, name=None, zone=None):
'''create metadata'''
cmd = ['compute', resource_type, 'add-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
data = None
if metadata_from_file:
cmd.append('--metadata-from-file')
data = metadata_from_file
else:
cmd.append('--metadata')
data = metadata
cmd.append(','.join(['%s=%s' % (key, val) for key, val in data.items()]))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_accounts(self, sa_name=None):
'''return service accounts '''
cmd = ['iam', 'service-accounts']
if sa_name:
cmd.extend(['describe', sa_name])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account(self, sa_name):
'''delete service account '''
cmd = ['iam', 'service-accounts', 'delete', sa_name, '-q']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account(self, sa_name, display_name=None):
'''create service account '''
cmd = ['iam', 'service-accounts', 'create', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _update_service_account(self, sa_name, display_name=None):
'''update service account '''
cmd = ['iam', 'service-accounts', 'update', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account_key(self, sa_name, key_id):
'''delete service account key'''
cmd = ['iam', 'service-accounts', 'keys', 'delete', key_id, '--iam-account', sa_name, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_account_keys(self, sa_name):
'''return service account keys '''
cmd = ['iam', 'service-accounts', 'keys', 'list', '--iam-account', sa_name]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account_key(self, sa_name, outputfile, key_format='p12'):
'''create service account key '''
# Ensure we remove the key file
atexit.register(Utils.cleanup, [outputfile])
cmd = ['iam', 'service-accounts', 'keys', 'create', outputfile,
'--iam-account', sa_name, '--key-file-type', key_format]
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_project_policy(self, project):
'''create service account key '''
cmd = ['projects', 'get-iam-policy', project]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _add_project_policy(self, project, member, role):
'''create service account key '''
cmd = ['projects', 'add-iam-policy-binding', project, '--member', member, '--role', role]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _remove_project_policy(self, project, member, role):
'''create service account key '''
cmd = ['projects', 'remove-iam-policy-binding', project, '--member', member, '--role', role]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _set_project_policy(self, project, policy_path):
'''create service account key '''
cmd = ['projects', 'set-iam-policy', project, policy_path]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_zones(self):
''' list zones '''
cmd = ['compute', 'zones', 'list']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _config_set(self, config_param, config_value, config_section):
''' set config params with gcloud config set '''
param = config_section + '/' + config_param
cmd = ['config', 'set', param, config_value]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_config(self):
'''return config '''
cmd = ['config', 'list']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def list_disks(self, zone=None, disk_name=None):
'''return a list of disk objects in this project and zone'''
cmd = ['beta', 'compute', 'disks']
if disk_name and zone:
cmd.extend(['describe', disk_name, '--zone', zone])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
# disabling too-many-arguments as these are all required for the disk labels
# pylint: disable=too-many-arguments
def _set_disk_labels(self, project, zone, dname, labels, finger_print):
'''create service account key '''
if labels == None:
labels = {}
self.scope = build('compute', 'beta', credentials=self.credentials)
body = {'labels': labels, 'labelFingerprint': finger_print}
result = self.scope.disks().setLabels(project=project,
zone=zone,
resource=dname,
body=body,
).execute()
return result
def gcloud_cmd(self, cmd, output=False, output_type='json'):
'''Base command for gcloud '''
cmds = ['/usr/bin/gcloud']
if self.project:
cmds.extend(['--project', self.project])
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={})
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
################################################################################
# utilities and helpers for generation
################################################################################
class Utils(object):
    '''Helper utilities shared by the generated gcloud modules.'''

    COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'

    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        '''Write ``data`` to /tmp/<rname> (yaml, json or raw text) and return
           the path. The file is scheduled for removal at interpreter exit.
        '''
        path = os.path.join('/tmp', rname)
        if ftype == 'yaml':
            contents = yaml.safe_dump(data, default_flow_style=False)
        elif ftype == 'json':
            contents = json.dumps(data)
        else:
            contents = data
        with open(path, 'w') as fds:
            fds.write(contents)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path

    @staticmethod
    def global_compute_url(project, collection, rname):
        '''Build the global compute URL for a resource.'''
        return '%sprojects/%s/global/%s/%s' % (
            Utils.COMPUTE_URL_BASE, project, collection, rname)

    @staticmethod
    def zonal_compute_url(project, zone, collection, rname):
        '''Build the zonal compute URL for a resource.'''
        return '%sprojects/%s/zones/%s/%s/%s' % (
            Utils.COMPUTE_URL_BASE, project, zone, collection, rname)

    @staticmethod
    def generate_random_name(size):
        '''Return a random string of lowercase letters and digits of length size.'''
        alphabet = string.ascii_lowercase + string.digits
        return ''.join(random.choice(alphabet) for _ in range(size))

    @staticmethod
    def cleanup(files):
        '''Remove each listed path (file or directory tree) if it still exists.'''
        for sfile in files:
            if not os.path.exists(sfile):
                continue
            if os.path.isdir(sfile):
                shutil.rmtree(sfile)
            elif os.path.isfile(sfile):
                os.remove(sfile)
| apache-2.0 |
gurbuzali/hazelcast-jet | hazelcast-jet-core/src/test/java/com/hazelcast/jet/pipeline/SourceBuilder_TopologyChangeTest.java | 5969 | /*
* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.jet.pipeline;
import com.hazelcast.collection.IList;
import com.hazelcast.internal.util.UuidUtil;
import com.hazelcast.jet.JetInstance;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.aggregate.AggregateOperations;
import com.hazelcast.jet.config.JetConfig;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.core.JetTestSupport;
import com.hazelcast.jet.core.JobStatus;
import com.hazelcast.jet.datamodel.WindowResult;
import com.hazelcast.jet.impl.JobRepository;
import com.hazelcast.test.HazelcastSerialClassRunner;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.io.Serializable;
import java.util.Iterator;
import java.util.function.Consumer;
import java.util.function.Supplier;
import static com.hazelcast.jet.config.ProcessingGuarantee.EXACTLY_ONCE;
import static com.hazelcast.jet.pipeline.WindowDefinition.tumbling;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@RunWith(HazelcastSerialClassRunner.class)
public class SourceBuilder_TopologyChangeTest extends JetTestSupport {

    // Set from the source's restoreSnapshotFn; volatile because the job runs
    // on cluster threads while the test thread polls it.
    private static volatile boolean stateRestored;

    @Test
    public void test_restartJob_nodeShutDown() {
        testTopologyChange(() -> createJetMember(), node -> node.shutdown(), true);
    }

    @Test
    public void test_restartJob_nodeTerminated() {
        // Hard termination is not graceful, so result monotonicity is not asserted.
        testTopologyChange(() -> createJetMember(), node -> node.getHazelcastInstance().getLifecycleService().terminate(),
                false);
    }

    @Test
    public void test_restartJob_nodeAdded() {
        testTopologyChange(() -> null, ignore -> createJetMember(), true);
    }

    /**
     * Runs a windowed streaming job backed by a custom SourceBuilder source
     * with snapshot/restore support, triggers a topology change mid-run, and
     * verifies the source's restoreSnapshotFn ran and results keep flowing.
     *
     * @param secondMemberSupplier supplies an optional second member created before the job
     * @param changeTopologyFn     the change to apply once the first snapshot exists
     * @param assertMonotonicity   whether window start times must advance without gaps
     */
    private void testTopologyChange(
            Supplier<JetInstance> secondMemberSupplier,
            Consumer<JetInstance> changeTopologyFn,
            boolean assertMonotonicity) {
        stateRestored = false;
        StreamSource<Integer> source = SourceBuilder
                .timestampedStream("src", ctx -> new NumberGeneratorContext())
                .<Integer>fillBufferFn((src, buffer) -> {
                    // Emit roughly one item per elapsed millisecond, capped at
                    // 100 items per call to bound each batch.
                    long expectedCount = NANOSECONDS.toMillis(System.nanoTime() - src.startTime);
                    expectedCount = Math.min(expectedCount, src.current + 100);
                    while (src.current < expectedCount) {
                        buffer.add(src.current, src.current);
                        src.current++;
                    }
                })
                .createSnapshotFn(src -> {
                    System.out.println("Will save " + src.current + " to snapshot");
                    return src;
                })
                .restoreSnapshotFn((src, states) -> {
                    stateRestored = true;
                    assert states.size() == 1;
                    src.restore(states.get(0));
                    System.out.println("Restored " + src.current + " from snapshot");
                })
                .build();

        JetConfig jetConfig = new JetConfig();
        jetConfig.getInstanceConfig().setScaleUpDelayMillis(1000); // restart sooner after member add
        JetInstance jet = createJetMember(jetConfig);
        JetInstance possibleSecondNode = secondMemberSupplier.get();

        long windowSize = 100;
        IList<WindowResult<Long>> result = jet.getList("result-" + UuidUtil.newUnsecureUuidString());

        Pipeline p = Pipeline.create();
        p.readFrom(source)
         .withNativeTimestamps(0)
         .window(tumbling(windowSize))
         .aggregate(AggregateOperations.counting())
         .peek()
         .writeTo(Sinks.list(result));

        Job job = jet.newJob(p, new JobConfig().setProcessingGuarantee(EXACTLY_ONCE).setSnapshotIntervalMillis(500));
        assertTrueEventually(() -> assertFalse("result list is still empty", result.isEmpty()));
        assertJobStatusEventually(job, JobStatus.RUNNING);
        JobRepository jr = new JobRepository(jet);
        // Only change topology after at least one snapshot exists, so the
        // restarted job restores rather than starting from scratch.
        waitForFirstSnapshot(jr, job.getId(), 10, false);

        assertFalse(stateRestored);
        changeTopologyFn.accept(possibleSecondNode);
        assertTrueEventually(() -> assertTrue("restoreSnapshotFn was not called", stateRestored));

        // wait until more results are added
        int oldSize = result.size();
        assertTrueEventually(() -> assertTrue("no more results added to the list", result.size() > oldSize));
        cancelAndJoin(job);

        // results should contain sequence of results, each with count=windowSize, monotonic, if job was
        // allowed to terminate gracefully
        Iterator<WindowResult<Long>> iterator = result.iterator();
        for (int i = 0; i < result.size(); i++) {
            WindowResult<Long> next = iterator.next();
            assertEquals(windowSize, (long) next.result());
            if (assertMonotonicity) {
                assertEquals(i * windowSize, next.start());
            }
        }
    }

    /**
     * Serializable source state: a monotonically increasing counter plus the
     * wall-clock origin used to pace emission.
     */
    private static final class NumberGeneratorContext implements Serializable {
        long startTime = System.nanoTime();
        int current;

        void restore(NumberGeneratorContext other) {
            this.startTime = other.startTime;
            this.current = other.current;
        }
    }
}
| apache-2.0 |
AutorestCI/azure-sdk-for-node | lib/services/networkManagement2/lib/models/azureReachabilityReportParameters.js | 2974 | /*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is
* regenerated.
*/
'use strict';
const models = require('./index');
/**
* Geographic and time constraints for Azure reachability report.
*
*/
// NOTE: this file is generated by AutoRest (see the header); edits here are
// limited to comments and will be lost when the client is regenerated.
class AzureReachabilityReportParameters {
  /**
   * Create a AzureReachabilityReportParameters.
   * @member {object} providerLocation
   * @member {string} [providerLocation.country] The name of the country.
   * @member {string} [providerLocation.state] The name of the state.
   * @member {string} [providerLocation.city] The name of the city or town.
   * @member {array} [providers] List of Internet service providers.
   * @member {array} [azureLocations] Optional Azure regions to scope the query
   * to.
   * @member {date} startTime The start time for the Azure reachability report.
   * @member {date} endTime The end time for the Azure reachability report.
   */
  constructor() {
    // Intentionally empty: properties are populated by the serialization
    // layer according to the mapper() metadata below.
  }

  /**
   * Defines the metadata of AzureReachabilityReportParameters
   *
   * @returns {object} metadata of AzureReachabilityReportParameters
   *
   */
  mapper() {
    // Serialization metadata — presumably consumed by the ms-rest
    // (de)serializer used by this generated client; confirm against callers.
    return {
      required: false,
      serializedName: 'AzureReachabilityReportParameters',
      type: {
        name: 'Composite',
        className: 'AzureReachabilityReportParameters',
        modelProperties: {
          providerLocation: {
            required: true,
            serializedName: 'providerLocation',
            type: {
              name: 'Composite',
              className: 'AzureReachabilityReportLocation'
            }
          },
          providers: {
            required: false,
            serializedName: 'providers',
            type: {
              name: 'Sequence',
              element: {
                required: false,
                serializedName: 'StringElementType',
                type: {
                  name: 'String'
                }
              }
            }
          },
          azureLocations: {
            required: false,
            serializedName: 'azureLocations',
            type: {
              name: 'Sequence',
              element: {
                required: false,
                serializedName: 'StringElementType',
                type: {
                  name: 'String'
                }
              }
            }
          },
          startTime: {
            required: true,
            serializedName: 'startTime',
            type: {
              name: 'DateTime'
            }
          },
          endTime: {
            required: true,
            serializedName: 'endTime',
            type: {
              name: 'DateTime'
            }
          }
        }
      }
    };
  }
}

module.exports = AzureReachabilityReportParameters;
| apache-2.0 |
BrynCooke/incubator-tinkerpop | gremlin-test/src/main/java/org/apache/tinkerpop/gremlin/structure/GraphWritePerformanceTest.java | 5493 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin.structure;
import com.carrotsearch.junitbenchmarks.BenchmarkOptions;
import com.carrotsearch.junitbenchmarks.BenchmarkRule;
import com.carrotsearch.junitbenchmarks.annotation.AxisRange;
import com.carrotsearch.junitbenchmarks.annotation.BenchmarkHistoryChart;
import com.carrotsearch.junitbenchmarks.annotation.BenchmarkMethodChart;
import com.carrotsearch.junitbenchmarks.annotation.LabelType;
import org.apache.tinkerpop.gremlin.AbstractGremlinTest;
import org.apache.tinkerpop.gremlin.LoadGraphWith;
import org.apache.tinkerpop.gremlin.structure.io.GraphWriter;
import org.apache.tinkerpop.gremlin.structure.io.graphml.GraphMLIo;
import org.apache.tinkerpop.gremlin.structure.io.graphson.GraphSONIo;
import org.apache.tinkerpop.gremlin.structure.io.gryo.GryoIo;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.runners.Enclosed;
import org.junit.rules.TestRule;
import org.junit.runner.RunWith;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.util.Optional;
/**
* @author Stephen Mallette (http://stephen.genoprime.com)
* @deprecated As of release 3.2.1, replaced by gremlin-benchmark.
*/
@RunWith(Enclosed.class)
@Deprecated
public class GraphWritePerformanceTest {

    // Benchmarks raw vertex/edge insertion into the graph under test.
    @AxisRange(min = 0, max = 1)
    @BenchmarkMethodChart(filePrefix = "structure-write")
    @BenchmarkHistoryChart(labelWith = LabelType.CUSTOM_KEY, maxRuns = 20, filePrefix = "hx-structure-write")
    public static class WriteToGraph extends AbstractGremlinTest {

        @Rule
        public TestRule benchmarkRun = new BenchmarkRule();

        @Test
        @BenchmarkOptions(benchmarkRounds = 10, warmupRounds = 0, concurrency = BenchmarkOptions.CONCURRENCY_SEQUENTIAL)
        public void writeEmptyVertices() throws Exception {
            final int verticesToGenerate = 100000;
            for (int ix = 0; ix < verticesToGenerate; ix++) {
                graph.addVertex();
                tryBatchCommit(graph, ix);
            }
            assertVertexEdgeCounts(graph, verticesToGenerate, 0);
        }

        @Test
        @BenchmarkOptions(benchmarkRounds = 10, warmupRounds = 0, concurrency = BenchmarkOptions.CONCURRENCY_SEQUENTIAL)
        public void writeEmptyVerticesAndEdges() throws Exception {
            final int verticesToGenerate = 100000;
            Optional<Vertex> lastVertex = Optional.empty();
            for (int ix = 0; ix < verticesToGenerate; ix++) {
                final Vertex v = graph.addVertex();
                // Link each new vertex to the previous one, forming a single path;
                // hence verticesToGenerate - 1 edges are expected below.
                if (lastVertex.isPresent())
                    v.addEdge("parent", lastVertex.get());
                lastVertex = Optional.of(v);
                tryBatchCommit(graph, ix);
            }
            assertVertexEdgeCounts(graph, verticesToGenerate, verticesToGenerate - 1);
        }
    }

    // Benchmarks serializing the GRATEFUL sample graph with each supported writer.
    @AxisRange(min = 0, max = 1)
    @BenchmarkMethodChart(filePrefix = "io-write")
    @BenchmarkHistoryChart(labelWith = LabelType.CUSTOM_KEY, maxRuns = 20, filePrefix = "hx-io-write")
    public static class WriteToIO extends AbstractGremlinTest {

        @Rule
        public TestRule benchmarkRun = new BenchmarkRule();

        @Test
        @LoadGraphWith(LoadGraphWith.GraphData.GRATEFUL)
        @BenchmarkOptions(benchmarkRounds = 10, warmupRounds = 0, concurrency = BenchmarkOptions.CONCURRENCY_SEQUENTIAL)
        public void writeGryo() throws Exception {
            final GraphWriter writer = graph.io(GryoIo.build()).writer().create();
            final OutputStream os = new ByteArrayOutputStream();
            writer.writeGraph(os, graph);
        }

        @Test
        @LoadGraphWith(LoadGraphWith.GraphData.GRATEFUL)
        @BenchmarkOptions(benchmarkRounds = 10, warmupRounds = 0, concurrency = BenchmarkOptions.CONCURRENCY_SEQUENTIAL)
        public void writeGraphML() throws Exception {
            final GraphWriter writer = graph.io(GraphMLIo.build()).writer().create();
            final OutputStream os = new ByteArrayOutputStream();
            writer.writeGraph(os, graph);
        }

        @Test
        @LoadGraphWith(LoadGraphWith.GraphData.GRATEFUL)
        @BenchmarkOptions(benchmarkRounds = 10, warmupRounds = 0, concurrency = BenchmarkOptions.CONCURRENCY_SEQUENTIAL)
        public void writeGraphSON() throws Exception {
            final GraphWriter writer = graph.io(GraphSONIo.build()).writer().create();
            final OutputStream os = new ByteArrayOutputStream();
            writer.writeGraph(os, graph);
        }
    }

    // Commits every 1000th insertion (including ix == 0) on graphs that support
    // transactions, keeping uncommitted state bounded during the benchmark.
    private static void tryBatchCommit(final Graph g, int ix) {
        if (g.features().graph().supportsTransactions() && ix % 1000 == 0)
            g.tx().commit();
    }
}
| apache-2.0 |
elevennl/stagemonitor | stagemonitor-web/src/main/java/org/stagemonitor/web/monitor/widget/RequestTraceServlet.java | 2976 | package org.stagemonitor.web.monitor.widget;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.stagemonitor.core.Stagemonitor;
import org.stagemonitor.core.configuration.Configuration;
import org.stagemonitor.requestmonitor.RequestMonitor;
import org.stagemonitor.requestmonitor.RequestMonitorPlugin;
import org.stagemonitor.web.monitor.HttpRequestTrace;
// Serves, as a JSON array, the HTTP request traces collected for a given widget
// connection id.  Traces are gathered by a WidgetAjaxRequestTraceReporter that
// this servlet registers with the RequestMonitor at construction time.
public class RequestTraceServlet extends HttpServlet {

    // Default time budget (ms) handed to the reporter when fetching traces —
    // presumably the long-poll wait window; confirm in WidgetAjaxRequestTraceReporter.
    private static final long DEFAULT_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(25);

    private final Logger logger = LoggerFactory.getLogger(getClass());
    private final RequestMonitor requestMonitor;
    // Per-request maximum time (ms) passed to getRequestTraces().
    private final long requestTimeout;
    private WidgetAjaxRequestTraceReporter widgetAjaxRequestTraceReporter;

    public RequestTraceServlet() {
        this(Stagemonitor.getConfiguration(), new WidgetAjaxRequestTraceReporter(), DEFAULT_REQUEST_TIMEOUT);
    }

    // configuration: used to look up the RequestMonitorPlugin's RequestMonitor.
    // reporter: collects traces per connection id; registered with the monitor here.
    // requestTimeout: maximum time in ms to wait for traces of one request.
    public RequestTraceServlet(Configuration configuration, WidgetAjaxRequestTraceReporter reporter, long requestTimeout) {
        this.widgetAjaxRequestTraceReporter = reporter;
        this.requestTimeout = requestTimeout;
        this.requestMonitor = configuration.getConfig(RequestMonitorPlugin.class).getRequestMonitor();
        requestMonitor.addReporter(widgetAjaxRequestTraceReporter);
    }

    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
        final String connectionId = req.getParameter("connectionId");
        if (connectionId != null && !connectionId.trim().isEmpty()) {
            writeRequestTracesToResponse(resp, widgetAjaxRequestTraceReporter.getRequestTraces(connectionId, requestTimeout));
        } else {
            // A non-blank connectionId is mandatory; without it there is nothing to look up.
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST);
        }
    }

    // Serializes the given traces as a JSON array and writes it to the response.
    // A null collection is treated like an empty one.
    private void writeRequestTracesToResponse(HttpServletResponse response, Collection<HttpRequestTrace> requestTraces)
            throws IOException {
        if (requestTraces == null) {
            requestTraces = Collections.emptyList();
        }
        // Mark the response as non-cacheable JSON (headers cover HTTP/1.0 and 1.1 caches).
        response.setContentType("application/json");
        response.setHeader("Pragma", "no-cache");
        response.setHeader("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate");
        response.setHeader("Expires", "0");
        response.setCharacterEncoding("UTF-8");
        final ArrayList<String> jsonResponse = new ArrayList<String>(requestTraces.size());
        for (HttpRequestTrace requestTrace : requestTraces) {
            logger.debug("writeRequestTracesToResponse {} ({})", requestTrace.getName(), requestTrace.getTimestamp());
            jsonResponse.add(requestTrace.toJson());
        }
        // ArrayList.toString() yields "[elem, elem, ...]"; since each element is
        // itself a JSON document, the result is a valid JSON array.
        response.getWriter().print(jsonResponse.toString());
        response.getWriter().close();
    }

    @Override
    public void destroy() {
        widgetAjaxRequestTraceReporter.close();
    }
}
| apache-2.0 |
karuradev/open-source-search-engine | test_norm.cpp | 799 | #include "gb-include.h"
//#include <unicode/unorm.h>
#include "Unicode.h"
// Shutdown hook required by the framework; this standalone test has no
// cleanup to perform, so it always reports success.
bool mainShutdown(bool urgent);

bool mainShutdown(bool urgent) {
	(void)urgent; // no shutdown work in this test harness
	return true;
}
// Test first 255 chars of unicode (iso-8859-1) for normalization
// Scans the first 0x10000 Unicode code points (the Basic Multilingual Plane)
// and prints every one that the Unicode library classifies as whitespace,
// followed by the total count.
//
// Fixes vs. the original:
//  - main's second parameter was declared `char*argv` (non-standard); it is
//    now the standard `char** argv`.
//  - `count` is a long but was printed with "%d", which is undefined behavior
//    on platforms where long is wider than int; now printed with "%ld".
int main(int argc, char** argv) {
	(void)argc;
	(void)argv; // command-line arguments are not used by this test
	ucInit();
	long count = 0;
	for (UChar32 c = 0; c < 0x10000; c++) {
		// (Historical ICU unorm_isNormalized() checks were commented out
		// here; removed — see version control history if needed.)
		if (ucIsWhiteSpace(c)) {
			count++;
			printf("0x%02x (%c): whitespace\n", c, c);
		}
	}
	printf("Count: %ld\n", count);
	return 0;
}
| apache-2.0 |
keighrim/kaldi-yesno-tutorial | steps/cleanup/make_biased_lms.py | 4190 | #!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import math
import subprocess
from collections import defaultdict
# Command-line interface: this wrapper reads integerized text from stdin,
# groups utterances, and shells out to make_one_biased_lm.py per group.
parser = argparse.ArgumentParser(description="""
This script is a wrapper for make_one_biased_lm.py that reads a Kaldi archive
of (integerized) text data from the standard input and writes a Kaldi archive of
backoff-language-model FSTs to the standard-output. It takes care of
grouping utterances to respect the --min-words-per-graph option. It writes
the graphs to the standard output and also outputs a map from input utterance-ids
to the per-group utterance-ids that index the output graphs.""")

parser.add_argument("--lm-opts", type = str, default = "",
                    help = "Options to pass in to make_one_biased_lm.py (which "
                    "creates the individual LM graphs), e.g. '--word-disambig-symbol=8721'.")
parser.add_argument("--min-words-per-graph", type = int, default = 100,
                    help = "Minimum number of words per utterance group; this program "
                    "will try to arrange the input utterances into groups such that each "
                    "one has at least this many words in total.")
parser.add_argument("utterance_map", type = str,
                    help = "Filename to which a map from input utterances to grouped "
                    "utterances, is written")

args = parser.parse_args()

# Open the utterance-map output file up front so a bad path fails fast,
# before any graphs are written to stdout.
try:
    utterance_map_file = open(args.utterance_map, "w")
except:
    sys.exit("make_biased_lms.py: error opening {0} to write utterance map".format(
        args.utterance_map))
# This processes one group of input lines; 'group_of_lines' is
# an array of lines of input integerized text, e.g.
# [ 'utt1 67 89 432', 'utt2 89 48 62' ]
def ProcessGroupOfLines(group_of_lines):
    """Emits one biased-LM FST (via make_one_biased_lm.py) for a group of lines.

    'group_of_lines' is a list of lines of integerized text, e.g.
    [ 'utt1 67 89 432', 'utt2 89 48 62' ].  The group's FST is written to
    stdout in Kaldi text-archive form, keyed by a synthetic group
    utterance-id derived from the first utterance, and a line
    '<utt-id> <group-utt-id>' is written to utterance_map_file for every
    member utterance.  Exits the program on malformed (empty) input lines
    or on subprocess failure.
    """
    num_lines = len(group_of_lines)
    try:
        first_utterance_id = group_of_lines[0].split()[0]
    except:
        sys.exit("make_biased_lms.py: empty input line")
    group_utterance_id = '{0}-group-of-{1}'.format(first_utterance_id, num_lines)
    # print the group utterance-id to the stdout; it forms the name in
    # the text-form archive.
    print(group_utterance_id)
    sys.stdout.flush()
    try:
        command = "steps/cleanup/internal/make_one_biased_lm.py " + args.lm_opts
        # universal_newlines=True makes p.stdin a text stream so that the
        # print(..., file=p.stdin) calls below work under both python2 and
        # python3.  (Without it, python3 opens a binary pipe and printing a
        # str raises TypeError.)
        p = subprocess.Popen(command, shell = True, stdin = subprocess.PIPE,
                             stdout = sys.stdout, stderr = sys.stderr,
                             universal_newlines = True)
        for line in group_of_lines:
            a = line.split()
            if len(a) == 0:
                sys.exit("make_biased_lms.py: empty input line")
            utterance_id = a[0]
            # print <utt> <utt-group> to utterance-map file
            print(utterance_id, group_utterance_id, file = utterance_map_file)
            rest_of_line = ' '.join(a[1:]) # get rid of utterance id.
            print(rest_of_line, file=p.stdin)
        p.stdin.close()
        assert p.wait() == 0
    except Exception as e:
        sys.exit("make_biased_lms.py: error calling subprocess, command was: " +
                 command + ", error was : " + str(e))
    # Print a blank line; this terminates the FST in the Kaldi fst-archive
    # format.
    print("")
    sys.stdout.flush()
# Main loop: read utterances from stdin and accumulate them into groups of at
# least --min-words-per-graph words (note: the count includes the utterance-id
# token of each line), flushing each full group to ProcessGroupOfLines.
# EOF flushes any final partial group.
num_words_this_group = 0
this_group_of_lines = []  # An array of strings, one per line
while True:
    line = sys.stdin.readline();
    num_words_this_group += len(line.split())
    if line != '':
        this_group_of_lines.append(line)
    # Emit the group when it is big enough, or at EOF if it is non-empty.
    if num_words_this_group >= args.min_words_per_graph or \
       (line == '' and len(this_group_of_lines) != 0):
        ProcessGroupOfLines(this_group_of_lines)
        num_words_this_group = 0
        this_group_of_lines = []
    if line == '':
        # readline() returns '' only at end-of-file.
        break
# test comand [to be run from ../..]
#
# (echo 1 0.5; echo 2 0.25) > top_words.txt
# (echo utt1 6 7 8 4; echo utt2 7 8 9; echo utt3 7 8) | steps/cleanup/make_biased_lms.py --lm-opts='--word-disambig-symbol=1000 --top-words=top_words.txt' foo; cat foo
# (echo utt1 6 7 8 4; echo utt2 7 8 9; echo utt3 7 8) | steps/cleanup/make_biased_lms.py --min-words-per-graph=4 --lm-opts='--word-disambig-symbol=1000 --top-words=top_words.txt' foo; cat foo
| apache-2.0 |
sonatype/owf | web-app/js-lib/dojo-release-1.5.0/dijit/_HasDropDown.js | 13475 | /*
Copyright (c) 2004-2010, The Dojo Foundation All Rights Reserved.
Available via Academic Free License >= 2.1 OR the modified BSD license.
see: http://dojotoolkit.org/license for details
*/
if(!dojo._hasResource["dijit._HasDropDown"]){ //_hasResource checks added by build. Do not use _hasResource directly in your code.
dojo._hasResource["dijit._HasDropDown"] = true;
dojo.provide("dijit._HasDropDown");
dojo.require("dijit._base.place");
dojo.require("dijit._Widget");
dojo.declare("dijit._HasDropDown",
	null,
	{
		// summary:
		//		Mixin for widgets that need drop down ability.

		// _buttonNode: [protected] DomNode
		//		The button/icon/node to click to display the drop down.
		//		Can be set via a dojoAttachPoint assignment.
		//		If missing, then either focusNode or domNode (if focusNode is also missing) will be used.
		_buttonNode: null,

		// _arrowWrapperNode: [protected] DomNode
		//		Will set CSS class dijitUpArrow, dijitDownArrow, dijitRightArrow etc. on this node depending
		//		on where the drop down is set to be positioned.
		//		Can be set via a dojoAttachPoint assignment.
		//		If missing, then _buttonNode will be used.
		_arrowWrapperNode: null,

		// _popupStateNode: [protected] DomNode
		//		The node to set the popupActive class on.
		//		Can be set via a dojoAttachPoint assignment.
		//		If missing, then focusNode or _buttonNode (if focusNode is missing) will be used.
		_popupStateNode: null,

		// _aroundNode: [protected] DomNode
		//		The node to display the popup around.
		//		Can be set via a dojoAttachPoint assignment.
		//		If missing, then domNode will be used.
		_aroundNode: null,

		// dropDown: [protected] Widget
		//		The widget to display as a popup.  This widget *must* be
		//		defined before the startup function is called.
		dropDown: null,

		// autoWidth: [protected] Boolean
		//		Set to true to make the drop down at least as wide as this
		//		widget.  Set to false if the drop down should just be its
		//		default width
		autoWidth: true,

		// forceWidth: [protected] Boolean
		//		Set to true to make the drop down exactly as wide as this
		//		widget.  Overrides autoWidth.
		forceWidth: false,

		// maxHeight: [protected] Integer
		//		The max height for our dropdown.  Set to 0 for no max height.
		//		any dropdown taller than this will have scrollbars
		maxHeight: 0,

		// dropDownPosition: [const] String[]
		//		This variable controls the position of the drop down.
		//		It's an array of strings with the following values:
		//
		//			* before: places drop down to the left of the target node/widget, or to the right in
		//			  the case of RTL scripts like Hebrew and Arabic
		//			* after: places drop down to the right of the target node/widget, or to the left in
		//			  the case of RTL scripts like Hebrew and Arabic
		//			* above: drop down goes above target node
		//			* below: drop down goes below target node
		//
		//		The list is positions is tried, in order, until a position is found where the drop down fits
		//		within the viewport.
		//
		dropDownPosition: ["below","above"],

		// _stopClickEvents: Boolean
		//		When set to false, the click events will not be stopped, in
		//		case you want to use them in your subwidget
		_stopClickEvents: true,

		_onDropDownMouseDown: function(/*Event*/ e){
			// summary:
			//		Callback when the user mousedown's on the arrow icon
			if(this.disabled || this.readOnly){ return; }

			// Listen for mouseup anywhere in the document so _onDropDownMouseUp()
			// fires even if the pointer has left the button by then; the handle is
			// disconnected again in _onDropDownMouseUp().
			this._docHandler = this.connect(dojo.doc, "onmouseup", "_onDropDownMouseUp");

			this.toggleDropDown();
		},

		_onDropDownMouseUp: function(/*Event?*/ e){
			// summary:
			//		Callback when the user lifts their mouse after mouse down on the arrow icon.
			//		If the drop is a simple menu and the mouse is over the menu, we execute it, otherwise, we focus our
			//		dropDown node.  If the event is missing, then we are not
			//		a mouseup event.
			//
			//		This is useful for the common mouse movement pattern
			//		with native browser <select> nodes:
			//			1. mouse down on the select node (probably on the arrow)
			//			2. move mouse to a menu item while holding down the mouse button
			//			3. mouse up.  this selects the menu item as though the user had clicked it.
			if(e && this._docHandler){
				this.disconnect(this._docHandler);
			}
			var dropDown = this.dropDown, overMenu = false;

			if(e && this._opened){
				// This code deals with the corner-case when the drop down covers the original widget,
				// because it's so large.  In that case mouse-up shouldn't select a value from the menu.
				// Find out if our target is somewhere in our dropdown widget,
				// but not over our _buttonNode (the clickable node)
				var c = dojo.position(this._buttonNode, true);
				if(!(e.pageX >= c.x && e.pageX <= c.x + c.w) ||
					!(e.pageY >= c.y && e.pageY <= c.y + c.h)){
					// Walk up from the event target looking for the popup wrapper node.
					var t = e.target;
					while(t && !overMenu){
						if(dojo.hasClass(t, "dijitPopup")){
							overMenu = true;
						}else{
							t = t.parentNode;
						}
					}
					if(overMenu){
						// Mouse released over the popup: if it behaves like a menu,
						// dispatch the click to the menu item under the pointer.
						t = e.target;
						if(dropDown.onItemClick){
							var menuItem;
							while(t && !(menuItem = dijit.byNode(t))){
								t = t.parentNode;
							}
							if(menuItem && menuItem.onClick && menuItem.getParent){
								menuItem.getParent().onItemClick(menuItem, e);
							}
						}
						return;
					}
				}
			}
			if(this._opened && dropDown.focus){
				// Focus the dropdown widget - do it on a delay so that we
				// don't steal our own focus.
				window.setTimeout(dojo.hitch(dropDown, "focus"), 1);
			}
		},

		_onDropDownClick: function(/*Event*/ e){
			// the drop down was already opened on mousedown/keydown; just need to call stopEvent()
			if(this._stopClickEvents){
				dojo.stopEvent(e);
			}
		},

		_setupDropdown: function(){
			// summary:
			//		set up nodes and connect our mouse and keypress events
			// Fall back to sensible defaults for the attach-point nodes (see the
			// field documentation above for the fallback order).
			this._buttonNode = this._buttonNode || this.focusNode || this.domNode;
			this._popupStateNode = this._popupStateNode || this.focusNode || this._buttonNode;
			this._aroundNode = this._aroundNode || this.domNode;
			this.connect(this._buttonNode, "onmousedown", "_onDropDownMouseDown");
			this.connect(this._buttonNode, "onclick", "_onDropDownClick");
			this.connect(this._buttonNode, "onkeydown", "_onDropDownKeydown");
			this.connect(this._buttonNode, "onkeyup", "_onKey");

			// If we have a _setStateClass function (which happens when
			// we are a form widget), then we need to connect our open/close
			// functions to it
			if(this._setStateClass){
				this.connect(this, "openDropDown", "_setStateClass");
				this.connect(this, "closeDropDown", "_setStateClass");
			}

			// Add a class to the "dijitDownArrowButton" type class to _buttonNode so theme can set direction of arrow
			// based on where drop down will normally appear
			var defaultPos = {
					"after" : this.isLeftToRight() ? "Right" : "Left",
					"before" : this.isLeftToRight() ? "Left" : "Right",
					"above" : "Up",
					"below" : "Down",
					"left" : "Left",
					"right" : "Right"
			}[this.dropDownPosition[0]] || this.dropDownPosition[0] || "Down";
			dojo.addClass(this._arrowWrapperNode || this._buttonNode, "dijit" + defaultPos + "ArrowButton");
		},

		postCreate: function(){
			this._setupDropdown();
			this.inherited(arguments);
		},

		destroyDescendants: function(){
			if(this.dropDown){
				// Destroy the drop down, unless it's already been destroyed.  This can happen because
				// the drop down is a direct child of <body> even though it's logically my child.
				if(!this.dropDown._destroyed){
					this.dropDown.destroyRecursive();
				}
				delete this.dropDown;
			}
			this.inherited(arguments);
		},

		_onDropDownKeydown: function(/*Event*/ e){
			if(e.keyCode == dojo.keys.DOWN_ARROW || e.keyCode == dojo.keys.ENTER || e.keyCode == dojo.keys.SPACE){
				e.preventDefault(); // stop IE screen jump
			}
		},

		_onKey: function(/*Event*/ e){
			// summary:
			//		Callback when the user presses a key while focused on the button node
			if(this.disabled || this.readOnly){ return; }
			var d = this.dropDown;
			if(d && this._opened && d.handleKey){
				// Give the open dropdown first chance at the key; a false return
				// means it fully consumed the event.
				if(d.handleKey(e) === false){ return; }
			}
			if(d && this._opened && e.keyCode == dojo.keys.ESCAPE){
				// ESCAPE closes an open dropdown.
				this.toggleDropDown();
			}else if(d && !this._opened &&
				(e.keyCode == dojo.keys.DOWN_ARROW || e.keyCode == dojo.keys.ENTER || e.keyCode == dojo.keys.SPACE)){
				// DOWN/ENTER/SPACE opens a closed dropdown and moves focus into it.
				this.toggleDropDown();
				if(d.focus){
					setTimeout(dojo.hitch(d, "focus"), 1);
				}
			}
		},

		_onBlur: function(){
			// summary:
			//		Called magically when focus has shifted away from this widget and it's dropdown
			this.closeDropDown();
			// don't focus on button.  the user has explicitly focused on something else.
			this.inherited(arguments);
		},

		isLoaded: function(){
			// summary:
			//		Returns whether or not the dropdown is loaded.  This can
			//		be overridden in order to force a call to loadDropDown().
			// tags:
			//		protected
			return true;
		},

		loadDropDown: function(/* Function */ loadCallback){
			// summary:
			//		Loads the data for the dropdown, and at some point, calls
			//		the given callback
			// tags:
			//		protected
			loadCallback();
		},

		toggleDropDown: function(){
			// summary:
			//		Toggle the drop-down widget; if it is up, close it, if not, open it
			// tags:
			//		protected
			if(this.disabled || this.readOnly){ return; }
			this.focus();
			var dropDown = this.dropDown;
			if(!dropDown){ return; }
			if(!this._opened){
				// If we aren't loaded, load it first so there isn't a flicker
				if(!this.isLoaded()){
					this.loadDropDown(dojo.hitch(this, "openDropDown"));
					return;
				}else{
					this.openDropDown();
				}
			}else{
				this.closeDropDown();
			}
		},

		openDropDown: function(){
			// summary:
			//		Opens the dropdown for this widget - it returns the
			//		return value of dijit.popup.open
			// tags:
			//		protected
			var dropDown = this.dropDown;
			var ddNode = dropDown.domNode;
			var self = this;

			// Prepare our popup's height and honor maxHeight if it exists.

			// TODO: isn't maxHeight dependent on the return value from dijit.popup.open(),
			// ie, dependent on how much space is available (BK)

			if(!this._preparedNode){
				dijit.popup.moveOffScreen(ddNode);
				this._preparedNode = true;
				// Check if we have explicitly set width and height on the dropdown widget dom node
				if(ddNode.style.width){
					this._explicitDDWidth = true;
				}
				if(ddNode.style.height){
					this._explicitDDHeight = true;
				}
			}

			// Code for resizing dropdown (height limitation, or increasing width to match my width)
			if(this.maxHeight || this.forceWidth || this.autoWidth){
				// Temporarily make the node renderable-but-invisible so its
				// natural size can be measured.
				var myStyle = {
					display: "",
					visibility: "hidden"
				};
				if(!this._explicitDDWidth){
					myStyle.width = "";
				}
				if(!this._explicitDDHeight){
					myStyle.height = "";
				}
				dojo.style(ddNode, myStyle);

				// Get size of drop down, and determine if vertical scroll bar needed
				var mb = dojo.marginBox(ddNode);
				var overHeight = (this.maxHeight && mb.h > this.maxHeight);
				dojo.style(ddNode, {
					overflowX: "hidden",
					overflowY: overHeight ? "auto" : "hidden"
				});
				if(overHeight){
					mb.h = this.maxHeight;
					if("w" in mb){
						mb.w += 16;		// room for vertical scrollbar
					}
				}else{
					delete mb.h;
				}
				// Don't reposition the node here; dijit.popup.open() does that.
				delete mb.t;
				delete mb.l;

				// Adjust dropdown width to match or be larger than my width
				if(this.forceWidth){
					mb.w = this.domNode.offsetWidth;
				}else if(this.autoWidth){
					mb.w = Math.max(mb.w, this.domNode.offsetWidth);
				}else{
					delete mb.w;
				}

				// And finally, resize the dropdown to calculated height and width
				if(dojo.isFunction(dropDown.resize)){
					dropDown.resize(mb);
				}else{
					dojo.marginBox(ddNode, mb);
				}
			}

			var retVal = dijit.popup.open({
				parent: this,
				popup: dropDown,
				around: this._aroundNode,
				orient: dijit.getPopupAroundAlignment((this.dropDownPosition && this.dropDownPosition.length) ? this.dropDownPosition : ["below"],this.isLeftToRight()),
				// Both executing and cancelling the popup close it and return
				// focus to this widget.
				onExecute: function(){
					self.closeDropDown(true);
				},
				onCancel: function(){
					self.closeDropDown(true);
				},
				onClose: function(){
					dojo.attr(self._popupStateNode, "popupActive", false);
					dojo.removeClass(self._popupStateNode, "dijitHasDropDownOpen");
					self._opened = false;
					self.state = "";
				}
			});
			dojo.attr(this._popupStateNode, "popupActive", "true");
			dojo.addClass(self._popupStateNode, "dijitHasDropDownOpen");
			this._opened=true;
			this.state="Opened";
			// TODO: set this.checked and call setStateClass(), to affect button look while drop down is shown
			return retVal;
		},

		closeDropDown: function(/*Boolean*/ focus){
			// summary:
			//		Closes the drop down on this widget
			// tags:
			//		protected
			if(this._opened){
				if(focus){ this.focus(); }
				dijit.popup.close(this.dropDown);
				this._opened = false;
				this.state = "";
			}
		}

	}
);
}
| apache-2.0 |
googleapis/google-cloud-php-compute | src/V1/AllocationSpecificSKUReservation.php | 4932 | <?php
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/compute/v1/compute.proto
namespace Google\Cloud\Compute\V1;
use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
* This reservation type allows to pre allocate specific instance configuration. Next ID: 5
*
* Generated from protobuf message <code>google.cloud.compute.v1.AllocationSpecificSKUReservation</code>
*/
class AllocationSpecificSKUReservation extends \Google\Protobuf\Internal\Message
{
/**
* Specifies the number of resources that are allocated.
*
* Generated from protobuf field <code>optional int64 count = 94851343;</code>
*/
private $count = null;
/**
* [Output Only] Indicates how many instances are in use.
*
* Generated from protobuf field <code>optional int64 in_use_count = 493458877;</code>
*/
private $in_use_count = null;
/**
* The instance properties for the reservation.
*
* Generated from protobuf field <code>optional .google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties instance_properties = 215355165;</code>
*/
private $instance_properties = null;
/**
* Constructor.
*
* @param array $data {
* Optional. Data for populating the Message object.
*
* @type int|string $count
* Specifies the number of resources that are allocated.
* @type int|string $in_use_count
* [Output Only] Indicates how many instances are in use.
* @type \Google\Cloud\Compute\V1\AllocationSpecificSKUAllocationReservedInstanceProperties $instance_properties
* The instance properties for the reservation.
* }
*/
public function __construct($data = NULL) {
\GPBMetadata\Google\Cloud\Compute\V1\Compute::initOnce();
parent::__construct($data);
}
/**
* Specifies the number of resources that are allocated.
*
* Generated from protobuf field <code>optional int64 count = 94851343;</code>
* @return int|string
*/
public function getCount()
{
return isset($this->count) ? $this->count : 0;
}
public function hasCount()
{
return isset($this->count);
}
public function clearCount()
{
unset($this->count);
}
/**
* Specifies the number of resources that are allocated.
*
* Generated from protobuf field <code>optional int64 count = 94851343;</code>
* @param int|string $var
* @return $this
*/
public function setCount($var)
{
GPBUtil::checkInt64($var);
$this->count = $var;
return $this;
}
/**
* [Output Only] Indicates how many instances are in use.
*
* Generated from protobuf field <code>optional int64 in_use_count = 493458877;</code>
* @return int|string
*/
public function getInUseCount()
{
return isset($this->in_use_count) ? $this->in_use_count : 0;
}
public function hasInUseCount()
{
return isset($this->in_use_count);
}
public function clearInUseCount()
{
unset($this->in_use_count);
}
/**
* [Output Only] Indicates how many instances are in use.
*
* Generated from protobuf field <code>optional int64 in_use_count = 493458877;</code>
* @param int|string $var
* @return $this
*/
public function setInUseCount($var)
{
GPBUtil::checkInt64($var);
$this->in_use_count = $var;
return $this;
}
/**
* The instance properties for the reservation.
*
* Generated from protobuf field <code>optional .google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties instance_properties = 215355165;</code>
* @return \Google\Cloud\Compute\V1\AllocationSpecificSKUAllocationReservedInstanceProperties|null
*/
public function getInstanceProperties()
{
return $this->instance_properties;
}
public function hasInstanceProperties()
{
return isset($this->instance_properties);
}
public function clearInstanceProperties()
{
unset($this->instance_properties);
}
/**
* The instance properties for the reservation.
*
* Generated from protobuf field <code>optional .google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties instance_properties = 215355165;</code>
* @param \Google\Cloud\Compute\V1\AllocationSpecificSKUAllocationReservedInstanceProperties $var
* @return $this
*/
public function setInstanceProperties($var)
{
GPBUtil::checkMessage($var, \Google\Cloud\Compute\V1\AllocationSpecificSKUAllocationReservedInstanceProperties::class);
$this->instance_properties = $var;
return $this;
}
}
| apache-2.0 |
atxwebs/lxd | lxd/db/query/objects_test.go | 4617 | package query_test
import (
"database/sql"
"testing"
"github.com/lxc/lxd/lxd/db/query"
"github.com/mpvl/subtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Exercise possible failure modes.
func TestSelectObjects_Error(t *testing.T) {
cases := []struct {
dest query.Dest
query string
error string
}{
{
func(int) []interface{} { return make([]interface{}, 1) },
"SELECT id, name FROM test",
"sql: expected 2 destination arguments in Scan, not 1",
},
}
for _, c := range cases {
t.Run(c.query, func(t *testing.T) {
tx := newTxForObjects(t)
stmt, err := tx.Prepare(c.query)
require.NoError(t, err)
err = query.SelectObjects(stmt, c.dest)
assert.EqualError(t, err, c.error)
})
}
}
// Scan rows yielded by the query.
func TestSelectObjects(t *testing.T) {
tx := newTxForObjects(t)
objects := make([]struct {
ID int
Name string
}, 1)
object := objects[0]
dest := func(i int) []interface{} {
require.Equal(t, 0, i, "expected at most one row to be yielded")
return []interface{}{&object.ID, &object.Name}
}
stmt, err := tx.Prepare("SELECT id, name FROM test WHERE name=?")
require.NoError(t, err)
err = query.SelectObjects(stmt, dest, "bar")
require.NoError(t, err)
assert.Equal(t, 1, object.ID)
assert.Equal(t, "bar", object.Name)
}
// Exercise possible failure modes.
func TestUpsertObject_Error(t *testing.T) {
	cases := []struct {
		columns []string
		values  []interface{}
		error   string
	}{
		{
			columns: []string{},
			values:  []interface{}{},
			error:   "columns length is zero",
		},
		{
			columns: []string{"id"},
			values:  []interface{}{2, "egg"},
			error:   "columns length does not match values length",
		},
	}
	for _, tc := range cases {
		subtest.Run(t, tc.error, func(t *testing.T) {
			tx := newTxForObjects(t)
			rowID, err := query.UpsertObject(tx, "foo", tc.columns, tc.values)
			assert.Equal(t, int64(-1), rowID)
			assert.EqualError(t, err, tc.error)
		})
	}
}
// Insert a new row.
func TestUpsertObject_Insert(t *testing.T) {
	tx := newTxForObjects(t)

	id, err := query.UpsertObject(tx, "test", []string{"name"}, []interface{}{"egg"})
	require.NoError(t, err)
	// Seed data occupies ids 0 and 1, so the new row gets id 2.
	assert.Equal(t, int64(2), id)

	// Read the row back to confirm it was actually inserted.
	// (Plain struct variable replaces the original dead one-element slice.)
	var object struct {
		ID   int
		Name string
	}
	dest := func(i int) []interface{} {
		require.Equal(t, 0, i, "expected at most one row to be yielded")
		return []interface{}{&object.ID, &object.Name}
	}

	stmt, err := tx.Prepare("SELECT id, name FROM test WHERE name=?")
	require.NoError(t, err)
	defer stmt.Close()

	err = query.SelectObjects(stmt, dest, "egg")
	require.NoError(t, err)

	assert.Equal(t, 2, object.ID)
	assert.Equal(t, "egg", object.Name)
}
// Update an existing row.
func TestUpsertObject_Update(t *testing.T) {
	tx := newTxForObjects(t)

	// Row with id 1 already exists ('bar'), so this upsert is an update.
	id, err := query.UpsertObject(tx, "test", []string{"id", "name"}, []interface{}{1, "egg"})
	require.NoError(t, err)
	assert.Equal(t, int64(1), id)

	// Read the row back to confirm the name was actually changed.
	// (Plain struct variable replaces the original dead one-element slice.)
	var object struct {
		ID   int
		Name string
	}
	dest := func(i int) []interface{} {
		require.Equal(t, 0, i, "expected at most one row to be yielded")
		return []interface{}{&object.ID, &object.Name}
	}

	stmt, err := tx.Prepare("SELECT id, name FROM test WHERE name=?")
	require.NoError(t, err)
	defer stmt.Close()

	err = query.SelectObjects(stmt, dest, "egg")
	require.NoError(t, err)

	assert.Equal(t, 1, object.ID)
	assert.Equal(t, "egg", object.Name)
}
// Exercise possible failure modes.
func TestDeleteObject_Error(t *testing.T) {
	tx := newTxForObjects(t)

	// Deleting from a table that does not exist surfaces the SQLite error.
	deleted, err := query.DeleteObject(tx, "foo", 1)
	assert.EqualError(t, err, "no such table: foo")
	assert.False(t, deleted)
}
// A true flag is returned when the target row existed and was removed.
func TestDeleteObject_Deleted(t *testing.T) {
	tx := newTxForObjects(t)

	// Row with id 1 is seeded by newTxForObjects, so the delete succeeds.
	deleted, err := query.DeleteObject(tx, "test", 1)
	assert.NoError(t, err)
	assert.True(t, deleted)
}
// A false flag is returned when no row matched the given ID.
func TestDeleteObject_NotDeleted(t *testing.T) {
	tx := newTxForObjects(t)

	// No row with id 1000 exists, so nothing is deleted and no error occurs.
	deleted, err := query.DeleteObject(tx, "test", 1000)
	assert.NoError(t, err)
	assert.False(t, deleted)
}
// Return a new transaction against an in-memory SQLite database with a single
// test table populated with a few rows for testing object-related queries.
func newTxForObjects(t *testing.T) *sql.Tx {
db, err := sql.Open("sqlite3", ":memory:")
assert.NoError(t, err)
_, err = db.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT)")
assert.NoError(t, err)
_, err = db.Exec("INSERT INTO test VALUES (0, 'foo'), (1, 'bar')")
assert.NoError(t, err)
tx, err := db.Begin()
assert.NoError(t, err)
return tx
}
| apache-2.0 |
lincoln-lil/flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/MergingSharedSlotProfileRetrieverTest.java | 10048 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.scheduler;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.clusterframework.types.SlotProfile;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.TestLogger;
import org.junit.Test;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
/**
 * Tests for {@link org.apache.flink.runtime.scheduler.MergingSharedSlotProfileRetrieverFactory}.
 */
public class MergingSharedSlotProfileRetrieverTest extends TestLogger {

    /** Locations retriever that reports no preferred locations for any execution vertex. */
    private static final SyncPreferredLocationsRetriever EMPTY_PREFERRED_LOCATIONS_RETRIEVER =
            (executionVertexId, producersToIgnore) -> Collections.emptyList();

    /** An empty sharing group with no prior data yields an all-empty slot profile. */
    @Test
    public void testGetEmptySlotProfile() throws ExecutionException, InterruptedException {
        SharedSlotProfileRetriever sharedSlotProfileRetriever =
                new MergingSharedSlotProfileRetrieverFactory(
                                EMPTY_PREFERRED_LOCATIONS_RETRIEVER,
                                executionVertexID -> new AllocationID(),
                                () -> Collections.emptySet())
                        .createFromBulk(Collections.emptySet());

        SlotProfile slotProfile =
                sharedSlotProfileRetriever.getSlotProfile(
                        new ExecutionSlotSharingGroup(), ResourceProfile.ZERO);

        assertThat(slotProfile.getTaskResourceProfile(), is(ResourceProfile.ZERO));
        assertThat(slotProfile.getPhysicalSlotResourceProfile(), is(ResourceProfile.ZERO));
        assertThat(slotProfile.getPreferredLocations(), hasSize(0));
        assertThat(slotProfile.getPreferredAllocations(), hasSize(0));
        assertThat(slotProfile.getReservedAllocations(), hasSize(0));
    }

    /** The requested resource profile is passed through to the resulting slot profile. */
    @Test
    public void testResourceProfileOfSlotProfile() throws ExecutionException, InterruptedException {
        ResourceProfile resourceProfile =
                ResourceProfile.newBuilder()
                        .setCpuCores(1.0)
                        .setTaskHeapMemory(MemorySize.ofMebiBytes(1))
                        .build();

        SlotProfile slotProfile =
                getSlotProfile(resourceProfile, Collections.nCopies(3, new AllocationID()), 2);

        assertThat(slotProfile.getTaskResourceProfile(), is(resourceProfile));
        assertThat(slotProfile.getPhysicalSlotResourceProfile(), is(resourceProfile));
    }

    /**
     * Preferred locations of the vertices in the sharing group are merged; a location
     * preferred by several vertices appears once per vertex that prefers it.
     */
    @Test
    public void testPreferredLocationsOfSlotProfile()
            throws ExecutionException, InterruptedException {
        // preferred locations
        List<ExecutionVertexID> executions =
                IntStream.range(0, 3)
                        .mapToObj(i -> new ExecutionVertexID(new JobVertexID(), 0))
                        .collect(Collectors.toList());

        List<TaskManagerLocation> allLocations =
                executions.stream()
                        .map(e -> createTaskManagerLocation())
                        .collect(Collectors.toList());

        // Vertex 0 prefers locations 0 and 1; vertex 1 prefers locations 1 and 2.
        // NOTE(review): executions.get(2) has no entry, so the retriever below
        // returns null for it — presumably treated as "no preference"; confirm
        // against SyncPreferredLocationsRetriever's contract.
        Map<ExecutionVertexID, Collection<TaskManagerLocation>> locations = new HashMap<>();
        locations.put(executions.get(0), Arrays.asList(allLocations.get(0), allLocations.get(1)));
        locations.put(executions.get(1), Arrays.asList(allLocations.get(1), allLocations.get(2)));

        List<AllocationID> prevAllocationIds = Collections.nCopies(3, new AllocationID());

        SlotProfile slotProfile =
                getSlotProfile(
                        (executionVertexId, producersToIgnore) -> {
                            // All executions of the bulk must be ignored as producers.
                            assertThat(producersToIgnore, containsInAnyOrder(executions.toArray()));
                            return locations.get(executionVertexId);
                        },
                        executions,
                        ResourceProfile.ZERO,
                        prevAllocationIds,
                        prevAllocationIds,
                        2);

        // Location 1 is preferred by two vertices, locations 0 and 2 by one each.
        assertThat(
                slotProfile.getPreferredLocations().stream()
                        .filter(allLocations.get(0)::equals)
                        .count(),
                is(1L));
        assertThat(
                slotProfile.getPreferredLocations().stream()
                        .filter(allLocations.get(1)::equals)
                        .count(),
                is(2L));
        assertThat(
                slotProfile.getPreferredLocations().stream()
                        .filter(allLocations.get(2)::equals)
                        .count(),
                is(1L));
    }

    /**
     * Only the previous allocations of the vertices inside the sharing group (the first
     * two of three here) become preferred allocations of the slot profile.
     */
    @Test
    public void testPreferredAllocationsOfSlotProfile()
            throws ExecutionException, InterruptedException {
        AllocationID prevAllocationID1 = new AllocationID();
        AllocationID prevAllocationID2 = new AllocationID();
        List<AllocationID> prevAllocationIDs =
                Arrays.asList(prevAllocationID1, prevAllocationID2, new AllocationID());

        SlotProfile slotProfile = getSlotProfile(ResourceProfile.ZERO, prevAllocationIDs, 2);

        assertThat(
                slotProfile.getPreferredAllocations(),
                containsInAnyOrder(prevAllocationID1, prevAllocationID2));
    }

    /** All allocations reported as reserved end up in the slot profile unchanged. */
    @Test
    public void testReservedAllocationsOfSlotProfile()
            throws ExecutionException, InterruptedException {
        List<AllocationID> reservedAllocationIds =
                Arrays.asList(new AllocationID(), new AllocationID(), new AllocationID());

        SlotProfile slotProfile =
                getSlotProfile(
                        EMPTY_PREFERRED_LOCATIONS_RETRIEVER,
                        Collections.emptyList(),
                        ResourceProfile.ZERO,
                        Collections.emptyList(),
                        reservedAllocationIds,
                        0);

        assertThat(
                slotProfile.getReservedAllocations(),
                containsInAnyOrder(reservedAllocationIds.toArray()));
    }

    /**
     * Convenience overload: creates one execution vertex per previous allocation ID and
     * uses the empty preferred-locations retriever.
     */
    private static SlotProfile getSlotProfile(
            ResourceProfile resourceProfile,
            List<AllocationID> prevAllocationIDs,
            int executionSlotSharingGroupSize)
            throws ExecutionException, InterruptedException {
        List<ExecutionVertexID> executions =
                prevAllocationIDs.stream()
                        .map(stub -> new ExecutionVertexID(new JobVertexID(), 0))
                        .collect(Collectors.toList());
        return getSlotProfile(
                EMPTY_PREFERRED_LOCATIONS_RETRIEVER,
                executions,
                resourceProfile,
                prevAllocationIDs,
                prevAllocationIDs,
                executionSlotSharingGroupSize);
    }

    /**
     * Builds a retriever over the given bulk of executions, puts the first
     * {@code executionSlotSharingGroupSize} executions into one sharing group, and
     * returns the slot profile computed for that group.
     */
    private static SlotProfile getSlotProfile(
            SyncPreferredLocationsRetriever preferredLocationsRetriever,
            List<ExecutionVertexID> executions,
            ResourceProfile resourceProfile,
            List<AllocationID> prevAllocationIDs,
            Collection<AllocationID> reservedAllocationIds,
            int executionSlotSharingGroupSize)
            throws ExecutionException, InterruptedException {
        SharedSlotProfileRetriever sharedSlotProfileRetriever =
                new MergingSharedSlotProfileRetrieverFactory(
                                preferredLocationsRetriever,
                                // Map each execution back to its previous allocation by index.
                                executionVertexID ->
                                        prevAllocationIDs.get(
                                                executions.indexOf(executionVertexID)),
                                () -> new HashSet<>(reservedAllocationIds))
                        .createFromBulk(new HashSet<>(executions));

        ExecutionSlotSharingGroup executionSlotSharingGroup = new ExecutionSlotSharingGroup();
        executions.stream()
                .limit(executionSlotSharingGroupSize)
                .forEach(executionSlotSharingGroup::addVertex);

        return sharedSlotProfileRetriever.getSlotProfile(
                executionSlotSharingGroup, resourceProfile);
    }

    /** Creates a location with a fixed dummy address (1.2.3.4:8888) and a fresh resource ID. */
    private static TaskManagerLocation createTaskManagerLocation() {
        try {
            return new TaskManagerLocation(
                    ResourceID.generate(), InetAddress.getByAddress(new byte[] {1, 2, 3, 4}), 8888);
        } catch (UnknownHostException e) {
            throw new FlinkRuntimeException("unexpected", e);
        }
    }
}
| apache-2.0 |