repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
Pengostores/magento1_training | js/mage/adminhtml/backup.js | 6418 | /**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Academic Free License (AFL 3.0)
* that is bundled with this package in the file LICENSE_AFL.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/afl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magento.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Mage
* @package Mage_Adminhtml
* @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com)
* @license http://opensource.org/licenses/afl-3.0.php Academic Free License (AFL 3.0)
*/
var AdminBackup = new Class.create();
// NOTE(review): `new Class.create()` yields an instance of an empty Prototype
// class; the usable API is the object assigned to AdminBackup.prototype below.
// This is legacy Magento idiom -- confirm before changing.
AdminBackup.prototype = {
// Sets initial state and attaches Prototype Validation to the two dialog
// forms. Parameters a and b are unused. backupUrl/rollbackUrl are expected
// to be assigned by the caller after construction.
initialize : function(a, b){
this.reset();
this.rollbackUrl = this.backupUrl = '';
this.rollbackValidator = new Validation($('rollback-form'));
this.backupValidator = new Validation($('backup-form'));
},
// Restores every dialog control to its default state: clears fields and
// checkboxes, hides the FTP credential inputs, drops all validation
// markers/advice and hides any previous messages.
reset: function() {
this.time = 0;
this.type = '';
$('use-ftp-checkbox-row').hide();
$('use_ftp').checked = false;
$('ftp-credentials-container').hide();
$$('#ftp-credentials-container input').each(function(item) {
item.removeClassName('required-entry');
});
$('backup_maintenance_mode').checked = false;
$('rollback_maintenance_mode').checked = false;
$('exclude_media').checked = false;
$('password').value = '';
$('backup_name').value = '';
$$('.validation-advice').invoke('remove');
$$('input').invoke('removeClassName', 'validation-failed');
$$('input').invoke('removeClassName', 'validation-passed');
$$('.backup-messages').invoke('hide');
$$('#ftp-credentials-container input').each(function(item) {
item.value = '';
});
},
// Starts a backup flow of the given type and shows the warning dialog.
// Returns false so it can be used directly as an onclick handler.
backup: function(type) {
this.reset();
this.type = type;
this.showBackupWarning();
return false;
},
// Starts a rollback flow for the backup identified by (type, time).
// Returns false so it can be used directly as an onclick handler.
rollback: function(type, time) {
this.reset();
this.time = time;
this.type = type;
this.showRollbackWarning();
return false;
},
showBackupWarning: function() {
this.showPopup('backup-warning');
},
showRollbackWarning: function() {
this.showPopup('rollback-warning');
},
// Shows the backup options dialog; the "exclude media" checkbox is only
// relevant for full snapshot backups.
requestBackupOptions: function() {
this.hidePopups();
var action = this.type != 'snapshot' ? 'hide' : 'show';
$$('#exclude-media-checkbox-container').invoke(action);
this.showPopup('backup-options');
},
// Shows the rollback password dialog; FTP is offered for all types except
// database-only rollbacks.
requestPassword: function() {
this.hidePopups();
this.type != 'db' ? $('use-ftp-checkbox-row').show() : $('use-ftp-checkbox-row').hide();
this.showPopup('rollback-request-password');
},
// Shows/hides the FTP credential inputs to match the "use FTP" checkbox,
// toggles their required-entry validation (ftp_path stays optional), and
// re-centers the dialog vertically since its height changed.
toggleFtpCredentialsForm: function() {
$('use_ftp').checked ? $('ftp-credentials-container').show()
: $('ftp-credentials-container').hide();
var divId = 'rollback-request-password';
$$('#ftp-credentials-container input').each(function(item) {
if (item.name == 'ftp_path') return;
$('use_ftp').checked ? item.addClassName('required-entry') : item.removeClassName('required-entry');
});
$(divId).show().setStyle({
'marginTop': -$(divId).getDimensions().height / 2 + 'px'
});
},
// Validates the backup form and, on success, POSTs the backup request.
// The response is handled by processResponse. Always returns false.
submitBackup: function () {
if (!!this.backupValidator && this.backupValidator.validate()) {
this.hidePopups();
var data = {
'type': this.type,
'maintenance_mode': $('backup_maintenance_mode').checked ? 1 : 0,
'backup_name': $('backup_name').value,
'exclude_media': $('exclude_media').checked ? 1 : 0
};
new Ajax.Request(this.backupUrl, {
onSuccess: function(transport) {
this.processResponse(transport, 'backup-options');
}.bind(this),
method: 'post',
parameters: data
});
}
return false;
},
// Validates the rollback form and, on success, POSTs the serialized form
// (plus type/time) to the rollback URL. Always returns false.
submitRollback: function() {
if (!!this.rollbackValidator && this.rollbackValidator.validate()) {
var data = this.getPostData();
this.hidePopups();
new Ajax.Request(this.rollbackUrl, {
onSuccess: function(transport) {
this.processResponse(transport, 'rollback-request-password');
}.bind(this),
method: 'post',
parameters: data
});
}
return false;
},
// Handles the JSON reply of either Ajax call: re-opens the originating
// popup with an error message, or redirects on success. Non-JSON replies
// are silently ignored.
processResponse: function(transport, popupId) {
if (!transport.responseText.isJSON()) {
return;
}
var json = transport.responseText.evalJSON();
if (!!json.error) {
this.displayError(popupId, json.error);
this.showPopup(popupId);
return;
}
if (!!json.redirect_url) {
setLocation(json.redirect_url);
}
},
// Renders an error message inside the given popup's message area.
displayError: function(parentContainer, message) {
var messageHtml = this.getErrorMessageHtml(message);
$$('#' + parentContainer + ' .backup-messages .messages').invoke('update', messageHtml);
$$('#' + parentContainer + ' .backup-messages').invoke('show');
},
getErrorMessageHtml: function(message) {
return '<li class="error-msg"><ul><li><span>' + message + '</span></li></ul></li>';
},
// Serializes the rollback form into a hash and adds the selected
// backup's type and timestamp.
getPostData: function() {
var data = $('rollback-form').serialize(true);
data['time'] = this.time;
data['type'] = this.type;
return data;
},
// Shows a dialog vertically centered and raises the page mask behind it.
showPopup: function(divId) {
$(divId).show().setStyle({
'marginTop': -$(divId).getDimensions().height / 2 + 'px'
});
$('popup-window-mask').setStyle({
height: $('html-body').getHeight() + 'px'
}).show();
},
// Hides every backup dialog and the page mask.
hidePopups: function() {
$$('.backup-dialog').each(Element.hide);
$('popup-window-mask').hide();
}
}
| gpl-3.0 |
elotroalex/morningside | application/models/Table/User.php | 1426 | <?php
/**
* Omeka
*
* @copyright Copyright 2007-2012 Roy Rosenzweig Center for History and New Media
* @license http://www.gnu.org/licenses/gpl-3.0.txt GNU GPLv3
*/
/**
* @package Omeka\Db\Table
*/
class Table_User extends Omeka_Db_Table
{
/**
* Find an active User given that user's ID.
*
* Returns null if the user being requested is not active.
*
* @param int $id User ID to look up.
* @return User|null
*/
public function findActiveById($id)
{
$select = $this->getSelectForFind($id);
$select->where('active = 1');
return $this->fetchObject($select);
}
/**
* Column pairs used for building id => name option lists.
*
* @return array
*/
protected function _getColumnPairs()
{
return array(
'users.id',
'users.name');
}
/**
* Find a user by their email address.
*
* The email is bound as a query parameter at fetch time, so it is
* safely escaped by the adapter.
*
* @param string $email
* @return User|null
*/
public function findByEmail($email)
{
$select = $this->getSelect();
$select->where('users.email = ?')->limit(1);
return $this->fetchObject($select, array($email));
}
/**
* Apply search filters to a user query.
*
* Supported params: 'role' (non-empty string) and 'active'
* (anything but the empty string; cast to int 0/1).
*
* @param Omeka_Db_Select $select
* @param array $params
* @return void
*/
public function applySearchFilters($select, $params)
{
// Show only users with a specific role.
if (array_key_exists('role', $params) and !empty($params['role'])) {
$select->where('users.role = ?', $params['role']);
}
// Show only users who are active
if (array_key_exists('active', $params) and $params['active'] !== '') {
$select->where('users.active = ?', (int)$params['active']);
}
}
}
| gpl-3.0 |
jamesmacwhite/Radarr | src/NzbDrone.Common/Http/Proxy/ProxyType.cs | 127 | namespace NzbDrone.Common.Http.Proxy
{
public enum ProxyType
{
Http,
Socks4,
Socks5
}
}
| gpl-3.0 |
diasEduardo/MuvucaGame01 | Assets/Scripts/Hero/HeroB.cs | 81 | using UnityEngine;
using System.Collections;
// Hero variant "B": currently inherits all behaviour from Hero unchanged.
// The empty subclass exists so this variant can be referenced/attached as
// its own component type and specialized later.
public class HeroB : Hero {
}
| gpl-3.0 |
AlexTMjugador/mtasa-blue | MTA10_Server/mods/deathmatch/logic/packets/CMapInfoPacket.cpp | 16210 | /*****************************************************************************
*
* PROJECT: Multi Theft Auto v1.0
* LICENSE: See LICENSE in the top level directory
* FILE: mods/deathmatch/logic/packets/CMapInfoPacket.h
* PURPOSE: Map/game information packet class
* DEVELOPERS: Christian Myhre Lundheim <>
* Jax <>
* lil_Toady <>
* Alberto Alonso <rydencillo@gmail.com>
* Sebas Lamers <sebasdevelopment@gmx.com>
*
* Multi Theft Auto is available from http://www.multitheftauto.com/
*
*****************************************************************************/
#include "StdInc.h"
// Constructor: takes a snapshot of every server-side world setting (weather,
// clock, gravity, water, sky, sun, wind, fog, limits, ...) so it can later be
// serialized to a joining client by Write().
// NOTE(review): only the address of `garageStates` is stored
// (m_pGarageStates) -- the caller must keep the referenced SGarageStates
// alive until the packet has been written. Confirm against call sites.
CMapInfoPacket::CMapInfoPacket ( unsigned char ucWeather,
unsigned char ucWeatherBlendingTo,
unsigned char ucBlendedWeatherHour,
unsigned char ucClockHour,
unsigned char ucClockMin,
unsigned long ulMinuteDuration,
bool bShowNametags,
bool bShowRadar,
float fGravity,
float fGameSpeed,
float fWaveHeight,
const SWorldWaterLevelInfo& worldWaterLevelInfo,
bool bHasSkyGradient,
const SGarageStates& garageStates,
unsigned char ucSkyGradientTR,
unsigned char ucSkyGradientTG,
unsigned char ucSkyGradientTB,
unsigned char ucSkyGradientBR,
unsigned char ucSkyGradientBG,
unsigned char ucSkyGradientBB,
bool bHasHeatHaze,
const SHeatHazeSettings& heatHazeSettings,
unsigned short usFPSLimit,
bool bCloudsEnabled,
float fJetpackMaxHeight,
bool bOverrideWaterColor,
unsigned char ucWaterRed,
unsigned char ucWaterGreen,
unsigned char ucWaterBlue,
unsigned char ucWaterAlpha,
bool bInteriorSoundsEnabled,
bool bOverrideRainLevel,
float fRainLevel,
bool bOverrideSunSize,
float fSunSize,
bool bOverrideSunColor,
unsigned char ucSunCoreR,
unsigned char ucSunCoreG,
unsigned char ucSunCoreB,
unsigned char ucSunCoronaR,
unsigned char ucSunCoronaG,
unsigned char ucSunCoronaB,
bool bOverrideWindVelocity,
float fWindVelX,
float fWindVelY,
float fWindVelZ,
bool bOverrideFarClipDistance,
float fFarClip,
bool bOverrideFogDistance,
float fFogDistance,
float fAircraftMaxHeight,
float fAircraftMaxVelocity,
bool bOverrideMoonSize,
int iMoonSize )
{
// Plain member-by-member copy of every parameter; no validation is done
// here -- values are assumed to be already sanitized by the callers.
m_ucWeather = ucWeather;
m_ucWeatherBlendingTo = ucWeatherBlendingTo;
m_ucBlendedWeatherHour = ucBlendedWeatherHour;
m_ucClockHour = ucClockHour;
m_ucClockMin = ucClockMin;
m_ulMinuteDuration = ulMinuteDuration;
m_bShowNametags = bShowNametags;
m_bShowRadar = bShowRadar;
m_fGravity = fGravity;
m_fGameSpeed = fGameSpeed;
m_fWaveHeight = fWaveHeight;
m_WorldWaterLevelInfo = worldWaterLevelInfo;
m_bHasSkyGradient = bHasSkyGradient;
m_pGarageStates = &garageStates;
m_ucSkyGradientTR = ucSkyGradientTR;
m_ucSkyGradientTG = ucSkyGradientTG;
m_ucSkyGradientTB = ucSkyGradientTB;
m_ucSkyGradientBR = ucSkyGradientBR;
m_ucSkyGradientBG = ucSkyGradientBG;
m_ucSkyGradientBB = ucSkyGradientBB;
m_bHasHeatHaze = bHasHeatHaze;
m_HeatHazeSettings = heatHazeSettings;
m_usFPSLimit = usFPSLimit;
m_bCloudsEnabled = bCloudsEnabled;
m_fJetpackMaxHeight = fJetpackMaxHeight;
m_bOverrideWaterColor = bOverrideWaterColor;
m_ucWaterRed = ucWaterRed;
m_ucWaterGreen = ucWaterGreen;
m_ucWaterBlue = ucWaterBlue;
m_ucWaterAlpha = ucWaterAlpha;
m_bInteriorSoundsEnabled = bInteriorSoundsEnabled;
m_bOverrideRainLevel = bOverrideRainLevel;
m_fRainLevel = fRainLevel;
m_bOverrideSunSize = bOverrideSunSize;
m_fSunSize = fSunSize;
m_bOverrideSunColor = bOverrideSunColor;
m_ucSunCoreR = ucSunCoreR;
m_ucSunCoreG = ucSunCoreG;
m_ucSunCoreB = ucSunCoreB;
m_ucSunCoronaR = ucSunCoronaR;
m_ucSunCoronaG = ucSunCoronaG;
m_ucSunCoronaB = ucSunCoronaB;
m_bOverrideWindVelocity = bOverrideWindVelocity;
m_fWindVelX = fWindVelX;
m_fWindVelY = fWindVelY;
m_fWindVelZ = fWindVelZ;
m_bOverrideFarClipDistance = bOverrideFarClipDistance;
m_fFarClip = fFarClip;
m_bOverrideFogDistance = bOverrideFogDistance;
m_fFogDistance = fFogDistance;
m_fAircraftMaxHeight = fAircraftMaxHeight;
m_fAircraftMaxVelocity = fAircraftMaxVelocity;
m_bOverrideMoonSize = bOverrideMoonSize;
m_iMoonSize = iMoonSize;
}
// Serializes the captured world state into the bit stream sent to a joining
// client. The write order here is a wire protocol: the client-side reader
// must consume fields in exactly this order, so do not reorder statements.
// Optional sections are gated either by a presence bit (WriteBit before the
// payload) or by BitStream.Version() checks for newer clients.
// Always returns true.
bool CMapInfoPacket::Write ( NetBitStreamInterface& BitStream ) const
{
// Write the map weather
BitStream.Write ( m_ucWeather );
BitStream.Write ( m_ucWeatherBlendingTo );
BitStream.Write ( m_ucBlendedWeatherHour );
BitStream.WriteBit ( m_bHasSkyGradient );
if ( m_bHasSkyGradient )
{
BitStream.Write ( m_ucSkyGradientTR );
BitStream.Write ( m_ucSkyGradientTG );
BitStream.Write ( m_ucSkyGradientTB );
BitStream.Write ( m_ucSkyGradientBR );
BitStream.Write ( m_ucSkyGradientBG );
BitStream.Write ( m_ucSkyGradientBB );
}
// Write heat haze
BitStream.WriteBit ( m_bHasHeatHaze );
if ( m_bHasHeatHaze )
{
SHeatHazeSync heatHaze ( m_HeatHazeSettings );
BitStream.Write ( &heatHaze );
}
// Write the map hour
BitStream.Write ( m_ucClockHour );
BitStream.Write ( m_ucClockMin );
BitStream.WriteCompressed ( m_ulMinuteDuration );
// Write the map flags
SMapInfoFlagsSync flags;
flags.data.bShowNametags = m_bShowNametags;
flags.data.bShowRadar = m_bShowRadar;
flags.data.bCloudsEnabled = m_bCloudsEnabled;
BitStream.Write ( &flags );
// Write any other world conditions
BitStream.Write ( m_fGravity );
// Game speed of exactly 1.0 is encoded as a single bit to save bandwidth.
if ( m_fGameSpeed == 1.0f )
BitStream.WriteBit ( true );
else
{
BitStream.WriteBit ( false );
BitStream.Write ( m_fGameSpeed );
}
BitStream.Write ( m_fWaveHeight );
// Write world water level
BitStream.Write ( m_WorldWaterLevelInfo.fSeaLevel );
BitStream.WriteBit ( m_WorldWaterLevelInfo.bNonSeaLevelSet );
if ( m_WorldWaterLevelInfo.bNonSeaLevelSet )
BitStream.Write ( m_WorldWaterLevelInfo.fNonSeaLevel );
BitStream.WriteCompressed ( m_usFPSLimit );
// Write the garage states
for ( unsigned char i = 0 ; i < MAX_GARAGES ; i++ )
{
const SGarageStates& garageStates = *m_pGarageStates;
BitStream.WriteBit( garageStates[i] );
}
// Write the fun bugs state
SFunBugsStateSync funBugs;
funBugs.data.bQuickReload = g_pGame->IsGlitchEnabled ( CGame::GLITCH_QUICKRELOAD );
funBugs.data.bFastFire = g_pGame->IsGlitchEnabled ( CGame::GLITCH_FASTFIRE );
funBugs.data.bFastMove = g_pGame->IsGlitchEnabled ( CGame::GLITCH_FASTMOVE );
funBugs.data.bCrouchBug = g_pGame->IsGlitchEnabled ( CGame::GLITCH_CROUCHBUG );
funBugs.data.bCloseRangeDamage = g_pGame->IsGlitchEnabled ( CGame::GLITCH_CLOSEDAMAGE );
funBugs.data2.bHitAnim = g_pGame->IsGlitchEnabled ( CGame::GLITCH_HITANIM );
funBugs.data3.bFastSprint = g_pGame->IsGlitchEnabled ( CGame::GLITCH_FASTSPRINT );
funBugs.data4.bBadDrivebyHitboxes = g_pGame->IsGlitchEnabled( CGame::GLITCH_BADDRIVEBYHITBOX );
funBugs.data5.bQuickStand = g_pGame->IsGlitchEnabled( CGame::GLITCH_QUICKSTAND );
BitStream.Write ( &funBugs );
BitStream.Write ( m_fJetpackMaxHeight );
BitStream.WriteBit ( m_bOverrideWaterColor );
if ( m_bOverrideWaterColor )
{
BitStream.Write ( m_ucWaterRed );
BitStream.Write ( m_ucWaterGreen );
BitStream.Write ( m_ucWaterBlue );
BitStream.Write ( m_ucWaterAlpha );
}
// Interior sounds
BitStream.WriteBit ( m_bInteriorSoundsEnabled );
// Rain level
BitStream.WriteBit ( m_bOverrideRainLevel );
if ( m_bOverrideRainLevel )
{
BitStream.Write ( m_fRainLevel );
}
// Moon size (only clients >= protocol 0x40 understand this field)
if ( BitStream.Version () >= 0x40 )
{
BitStream.WriteBit ( m_bOverrideMoonSize );
if ( m_bOverrideMoonSize )
{
BitStream.Write ( m_iMoonSize );
}
}
// Sun size
BitStream.WriteBit ( m_bOverrideSunSize );
if ( m_bOverrideSunSize )
{
BitStream.Write ( m_fSunSize );
}
// Sun color
BitStream.WriteBit ( m_bOverrideSunColor );
if ( m_bOverrideSunColor )
{
BitStream.Write ( m_ucSunCoreR );
BitStream.Write ( m_ucSunCoreG );
BitStream.Write ( m_ucSunCoreB );
BitStream.Write ( m_ucSunCoronaR );
BitStream.Write ( m_ucSunCoronaG );
BitStream.Write ( m_ucSunCoronaB );
}
// Wind velocity
BitStream.WriteBit ( m_bOverrideWindVelocity );
if ( m_bOverrideWindVelocity )
{
BitStream.Write ( m_fWindVelX );
BitStream.Write ( m_fWindVelY );
BitStream.Write ( m_fWindVelZ );
}
// Far clip distance
BitStream.WriteBit ( m_bOverrideFarClipDistance );
if ( m_bOverrideFarClipDistance )
{
BitStream.Write ( m_fFarClip );
}
// Fog distance
BitStream.WriteBit ( m_bOverrideFogDistance );
if ( m_bOverrideFogDistance )
{
BitStream.Write ( m_fFogDistance );
}
BitStream.Write ( m_fAircraftMaxHeight );
if ( BitStream.Version () >= 0x3E )
BitStream.Write ( m_fAircraftMaxVelocity );
// Jetpack-enabled bits for melee weapon types (clients >= 0x30).
if ( BitStream.Version () >= 0x30 )
{
for (int i = WEAPONTYPE_BRASSKNUCKLE; i < WEAPONTYPE_PISTOL; i++)
{
bool bEnabled;
bEnabled = g_pGame->GetJetpackWeaponEnabled ( (eWeaponType) i );
BitStream.WriteBit ( bEnabled );
}
}
// Weapon stats for skill-independent weapons (pistol .. extinguisher).
for (int i = WEAPONTYPE_PISTOL;i <= WEAPONTYPE_EXTINGUISHER;i++)
{
sWeaponPropertySync WeaponProperty;
CWeaponStat* pWeaponStat = g_pGame->GetWeaponStatManager ()->GetWeaponStats( (eWeaponType)i );
BitStream.WriteBit ( true );
WeaponProperty.data.weaponType = (int)pWeaponStat->GetWeaponType();
WeaponProperty.data.fAccuracy = pWeaponStat->GetAccuracy();
WeaponProperty.data.fMoveSpeed = pWeaponStat->GetMoveSpeed();
WeaponProperty.data.fTargetRange = pWeaponStat->GetTargetRange();
WeaponProperty.data.fWeaponRange = pWeaponStat->GetWeaponRange();
WeaponProperty.data.nAmmo = pWeaponStat->GetMaximumClipAmmo();
WeaponProperty.data.nDamage = pWeaponStat->GetDamagePerHit();
WeaponProperty.data.nFlags = pWeaponStat->GetFlags();
WeaponProperty.data.anim_loop_start = pWeaponStat->GetWeaponAnimLoopStart();
WeaponProperty.data.anim_loop_stop = pWeaponStat->GetWeaponAnimLoopStop();
WeaponProperty.data.anim_loop_bullet_fire = pWeaponStat->GetWeaponAnimLoopFireTime();
WeaponProperty.data.anim2_loop_start = pWeaponStat->GetWeaponAnim2LoopStart();
WeaponProperty.data.anim2_loop_stop = pWeaponStat->GetWeaponAnim2LoopStop();
WeaponProperty.data.anim2_loop_bullet_fire = pWeaponStat->GetWeaponAnim2LoopFireTime();
WeaponProperty.data.anim_breakout_time = pWeaponStat->GetWeaponAnimBreakoutTime();
BitStream.Write( &WeaponProperty );
if ( BitStream.Version () >= 0x30 )
{
BitStream.WriteBit ( g_pGame->GetJetpackWeaponEnabled ( (eWeaponType) i ) );
}
}
// Weapon stats for skill-dependent weapons (pistol .. tec9, 3 skill levels).
// NOTE(review): the jetpack bit here is gated on version 0x36 while the loop
// above uses 0x30 -- presumably matching different client readers; confirm
// against the client-side CMapInfoPacket reader before touching either gate.
for (int i = WEAPONTYPE_PISTOL;i <= WEAPONTYPE_TEC9;i++)
{
sWeaponPropertySync WeaponProperty;
BitStream.WriteBit ( true );
for (int j = 0; j <= 2;j++)
{
CWeaponStat* pWeaponStat = g_pGame->GetWeaponStatManager ()->GetWeaponStats( (eWeaponType)i, (eWeaponSkill)j );
WeaponProperty.data.weaponType = (int)pWeaponStat->GetWeaponType();
WeaponProperty.data.fAccuracy = pWeaponStat->GetAccuracy();
WeaponProperty.data.fMoveSpeed = pWeaponStat->GetMoveSpeed();
WeaponProperty.data.fTargetRange = pWeaponStat->GetTargetRange();
WeaponProperty.data.fWeaponRange = pWeaponStat->GetWeaponRange();
WeaponProperty.data.nAmmo = pWeaponStat->GetMaximumClipAmmo();
WeaponProperty.data.nDamage = pWeaponStat->GetDamagePerHit();
WeaponProperty.data.nFlags = pWeaponStat->GetFlags();
WeaponProperty.data.anim_loop_start = pWeaponStat->GetWeaponAnimLoopStart();
WeaponProperty.data.anim_loop_stop = pWeaponStat->GetWeaponAnimLoopStop();
WeaponProperty.data.anim_loop_bullet_fire = pWeaponStat->GetWeaponAnimLoopFireTime();
WeaponProperty.data.anim2_loop_start = pWeaponStat->GetWeaponAnim2LoopStart();
WeaponProperty.data.anim2_loop_stop = pWeaponStat->GetWeaponAnim2LoopStop();
WeaponProperty.data.anim2_loop_bullet_fire = pWeaponStat->GetWeaponAnim2LoopFireTime();
WeaponProperty.data.anim_breakout_time = pWeaponStat->GetWeaponAnimBreakoutTime();
BitStream.Write( &WeaponProperty );
}
if ( BitStream.Version () >= 0x36 )
{
BitStream.WriteBit ( g_pGame->GetJetpackWeaponEnabled ( (eWeaponType) i ) );
}
}
// Jetpack-enabled bits for the remaining weapon types (clients >= 0x30).
if ( BitStream.Version () >= 0x30 )
{
for (int i = WEAPONTYPE_CAMERA; i <= WEAPONTYPE_PARACHUTE; i++)
{
bool bEnabled;
bEnabled = g_pGame->GetJetpackWeaponEnabled ( (eWeaponType) i );
BitStream.WriteBit ( bEnabled );
}
}
// Building removals: each entry is prefixed with a `true` presence bit;
// the list is terminated by a single `false` bit below.
multimap< unsigned short, CBuildingRemoval* >::const_iterator iter = g_pGame->GetBuildingRemovalManager ( )->IterBegin();
for (; iter != g_pGame->GetBuildingRemovalManager ( )->IterEnd();++iter)
{
CBuildingRemoval * pBuildingRemoval = (*iter).second;
BitStream.WriteBit( true );
BitStream.Write( pBuildingRemoval->GetModel ( ) );
BitStream.Write( pBuildingRemoval->GetRadius ( ) );
BitStream.Write( pBuildingRemoval->GetPosition ( ).fX );
BitStream.Write( pBuildingRemoval->GetPosition ( ).fY );
BitStream.Write( pBuildingRemoval->GetPosition ( ).fZ );
if ( BitStream.Version() >= 0x039 )
{
BitStream.Write ( pBuildingRemoval->GetInterior ( ) );
}
}
BitStream.WriteBit( false );
if ( BitStream.Version () >= 0x25 )
{
bool bOcclusionsEnabled = g_pGame->GetOcclusionsEnabled ();
BitStream.WriteBit( bOcclusionsEnabled );
}
return true;
}
| gpl-3.0 |
snakeleon/YouCompleteMe-x86 | third_party/ycmd/third_party/godef/go_local/parser/interface.go | 7191 | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the exported entry points for invoking the parser.
package parser
import (
"bytes"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"../ast"
"../scanner"
"../token"
)
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
// Accepted src types: string, []byte, *bytes.Buffer and io.Reader.
// NOTE: a nil *bytes.Buffer intentionally falls through the switch and
// is treated like src == nil, i.e. the file is read from disk.
func readSource(filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
return []byte(s), nil
case []byte:
return s, nil
case *bytes.Buffer:
// is io.Reader, but src is already available in []byte form
if s != nil {
return s.Bytes(), nil
}
case io.Reader:
var buf bytes.Buffer
_, err := io.Copy(&buf, s)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
default:
return nil, errors.New("invalid source")
}
}
return ioutil.ReadFile(filename)
}
// parseEOF asserts that the parser has consumed all input and returns the
// accumulated (position-sorted) scanner errors, if any.
func (p *parser) parseEOF() error {
p.expect(token.EOF)
return p.GetError(scanner.Sorted)
}
// ParseExpr parses a Go expression and returns the corresponding
// AST node. The fset, filename, and src arguments have the same interpretation
// as for ParseFile. If there is an error, the result expression
// may be nil or contain a partial AST.
//
// if scope is non-nil, it will be used as the scope for the expression.
//
func ParseExpr(fset *token.FileSet, filename string, src interface{}, scope *ast.Scope) (ast.Expr, error) {
data, err := readSource(filename, src)
if err != nil {
return nil, err
}
var p parser
p.init(fset, filename, data, 0, scope)
x := p.parseExpr()
if p.tok == token.SEMICOLON {
p.next() // consume automatically inserted semicolon, if any
}
return x, p.parseEOF()
}
// ParseStmtList parses a list of Go statements and returns the list
// of corresponding AST nodes. The fset, filename, and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
// list may be nil or contain partial ASTs.
//
// if scope is non-nil, it will be used as the scope for the statements.
//
func ParseStmtList(fset *token.FileSet, filename string, src interface{}, scope *ast.Scope) ([]ast.Stmt, error) {
data, err := readSource(filename, src)
if err != nil {
return nil, err
}
var p parser
p.init(fset, filename, data, 0, scope)
// Errors (if any) are reported via parseEOF after all statements are read.
return p.parseStmtList(), p.parseEOF()
}
// ParseDeclList parses a list of Go declarations and returns the list
// of corresponding AST nodes. The fset, filename, and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
// list may be nil or contain partial ASTs.
//
// If scope is non-nil, it will be used for declarations.
//
func ParseDeclList(fset *token.FileSet, filename string, src interface{}, scope *ast.Scope) ([]ast.Decl, error) {
data, err := readSource(filename, src)
if err != nil {
return nil, err
}
var p parser
p.init(fset, filename, data, 0, scope)
// Top-level declarations resolve against the provided scope at both the
// package and file level.
p.pkgScope = scope
p.fileScope = scope
return p.parseDeclList(), p.parseEOF()
}
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
//
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality. Position information is recorded in the
// file set fset.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.BadX nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(fset *token.FileSet, filename string, src interface{}, mode uint, pkgScope *ast.Scope) (*ast.File, error) {
data, err := readSource(filename, src)
if err != nil {
return nil, err
}
var p parser
p.init(fset, filename, data, mode, pkgScope)
// The file scope is opened as a child of the package scope so file-local
// declarations shadow package-level ones.
p.pkgScope = p.topScope
p.openScope()
p.fileScope = p.topScope
return p.parseFile(), p.GetError(scanner.NoMultiples) // parseFile() reads to EOF
}
// parseFileInPkg parses a single file and adds it to the matching package in
// pkgs (creating the package, with a fresh scope under Universe, on first
// sight). The file is read once and parsed twice: a cheap PackageClauseOnly
// pass to learn the package name, then a full parse in that package's scope.
func parseFileInPkg(fset *token.FileSet, pkgs map[string]*ast.Package, filename string, mode uint) (err error) {
data, err := readSource(filename, nil)
if err != nil {
return err
}
// first find package name, so we can use the correct package
// scope when parsing the file.
src, err := ParseFile(fset, filename, data, PackageClauseOnly, nil)
if err != nil {
return
}
name := src.Name.Name
pkg := pkgs[name]
if pkg == nil {
pkg = &ast.Package{name, ast.NewScope(Universe), nil, make(map[string]*ast.File)}
pkgs[name] = pkg
}
src, err = ParseFile(fset, filename, data, mode, pkg.Scope)
if err != nil {
return
}
pkg.Files[filename] = src
return
}
// ParseFiles calls ParseFile for each file in the filenames list and returns
// a map of package name -> package AST with all the packages found. The mode
// bits are passed to ParseFile unchanged. Position information is recorded
// in the file set fset.
//
// Files with parse errors are ignored. In this case the map of packages may
// be incomplete (missing packages and/or incomplete packages) and the first
// error encountered is returned.
//
func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[string]*ast.Package, first error) {
pkgs = make(map[string]*ast.Package)
for _, filename := range filenames {
// Keep going after errors; only the first one is reported.
if err := parseFileInPkg(fset, pkgs, filename, mode); err != nil && first == nil {
first = err
}
}
return
}
// ParseDir calls ParseFile for the files in the directory specified by path and
// returns a map of package name -> package AST with all the packages found. If
// filter != nil, only the files with os.FileInfo entries passing through the filter
// are considered. The mode bits are passed to ParseFile unchanged. Position
// information is recorded in the file set fset.
//
// If the directory couldn't be read, a nil map and the respective error are
// returned. If a parse error occurred, a non-nil but incomplete map and the
// error are returned.
//
func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode uint) (map[string]*ast.Package, error) {
	fd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	entries, err := fd.Readdir(-1)
	if err != nil {
		return nil, err
	}

	// Collect the full paths of all directory entries accepted by the filter.
	names := make([]string, 0, len(entries))
	for _, entry := range entries {
		if filter == nil || filter(entry) {
			names = append(names, filepath.Join(path, entry.Name()))
		}
	}

	return ParseFiles(fset, names, mode)
}
| gpl-3.0 |
SlateScience/MozillaJS | js/src/jit/MIR.cpp | 80475 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MIR.h"
#include "mozilla/Casting.h"
#include "BaselineInspector.h"
#include "IonBuilder.h"
#include "LICM.h" // For LinearSum
#include "MIRGraph.h"
#include "EdgeCaseAnalysis.h"
#include "RangeAnalysis.h"
#include "IonSpewer.h"
#include "jsnum.h"
#include "jsstr.h"
#include "jsatominlines.h"
#include "jstypedarrayinlines.h"
using namespace js;
using namespace js::jit;
using mozilla::BitwiseCast;
// Prints the lowercase name of the given MIR opcode to fp. The name table is
// generated at compile time from MIR_OPCODE_LIST via the temporary NAME macro.
void
MDefinition::PrintOpcodeName(FILE *fp, MDefinition::Opcode op)
{
static const char * const names[] =
{
#define NAME(x) #x,
MIR_OPCODE_LIST(NAME)
#undef NAME
};
const char *name = names[op];
size_t len = strlen(name);
for (size_t i = 0; i < len; i++)
fprintf(fp, "%c", tolower(name[i]));
}
// If one of the inputs to any non-phi are in a block that will abort, then there is
// no point in processing this instruction, since control flow cannot reach here.
// Returns true (and marks this block early-abort) when such an operand is found;
// phis are exempt because they may merge values from unreachable predecessors.
bool
MDefinition::earlyAbortCheck()
{
if (isPhi())
return false;
for (size_t i = 0; i < numOperands(); i++) {
if (getOperand(i)->block()->earlyAbort()) {
block()->setEarlyAbort();
IonSpew(IonSpew_Range, "Ignoring value from block %d because instruction %d is in a block that aborts", block()->id(), getOperand(i)->id());
return true;
}
}
return false;
}
// Compares two definitions for equality: by GVN value number when value
// numbering is in use, otherwise by instruction id (i.e. identity).
static inline bool
EqualValues(bool useGVN, MDefinition *left, MDefinition *right)
{
if (useGVN)
return left->valueNumber() == right->valueNumber();
return left->id() == right->id();
}
// Constant-folds a binary instruction whose operands are both MConstants.
// Returns the folded MConstant, or NULL when folding is not possible: either
// an operand is non-constant, or the folded value's MIR type differs from the
// instruction's result type (in which case *ptypeChange is set when provided).
static MConstant *
EvaluateConstantOperands(MBinaryInstruction *ins, bool *ptypeChange = NULL)
{
MDefinition *left = ins->getOperand(0);
MDefinition *right = ins->getOperand(1);
if (!left->isConstant() || !right->isConstant())
return NULL;
Value lhs = left->toConstant()->value();
Value rhs = right->toConstant()->value();
Value ret = UndefinedValue();
switch (ins->op()) {
case MDefinition::Op_BitAnd:
ret = Int32Value(lhs.toInt32() & rhs.toInt32());
break;
case MDefinition::Op_BitOr:
ret = Int32Value(lhs.toInt32() | rhs.toInt32());
break;
case MDefinition::Op_BitXor:
ret = Int32Value(lhs.toInt32() ^ rhs.toInt32());
break;
case MDefinition::Op_Lsh:
// Shift counts are masked to 0..31, matching JS shift semantics.
ret = Int32Value(lhs.toInt32() << (rhs.toInt32() & 0x1F));
break;
case MDefinition::Op_Rsh:
ret = Int32Value(lhs.toInt32() >> (rhs.toInt32() & 0x1F));
break;
case MDefinition::Op_Ursh: {
// Unsigned shift: reinterpret lhs as uint32 so the result is non-negative.
uint32_t unsignedLhs = (uint32_t)lhs.toInt32();
ret.setNumber(uint32_t(unsignedLhs >> (rhs.toInt32() & 0x1F)));
break;
}
case MDefinition::Op_Add:
ret.setNumber(lhs.toNumber() + rhs.toNumber());
break;
case MDefinition::Op_Sub:
ret.setNumber(lhs.toNumber() - rhs.toNumber());
break;
case MDefinition::Op_Mul:
ret.setNumber(lhs.toNumber() * rhs.toNumber());
break;
case MDefinition::Op_Div:
ret.setNumber(NumberDiv(lhs.toNumber(), rhs.toNumber()));
break;
case MDefinition::Op_Mod:
ret.setNumber(NumberMod(lhs.toNumber(), rhs.toNumber()));
break;
default:
JS_NOT_REACHED("NYI");
return NULL;
}
// Refuse to fold when the result type would change (e.g. int32 add that
// overflows into a double); the caller may want to know via ptypeChange.
if (ins->type() != MIRTypeFromValue(ret)) {
if (ptypeChange)
*ptypeChange = true;
return NULL;
}
return MConstant::New(ret);
}
// Prints this definition's name as "<opcode><id>" plus "-vn<N>" when a
// non-zero GVN value number has been assigned.
void
MDefinition::printName(FILE *fp)
{
PrintOpcodeName(fp, op());
fprintf(fp, "%u", id());
if (valueNumber() != 0)
fprintf(fp, "-vn%u", valueNumber());
}
// Hash for GVN: combines the opcode with each operand's value number using a
// simple shift-add mixing step.
HashNumber
MDefinition::valueHash() const
{
HashNumber out = op();
for (size_t i = 0; i < numOperands(); i++) {
uint32_t valueNumber = getOperand(i)->valueNumber();
out = valueNumber + (out << 6) + (out << 16) - out;
}
return out;
}
// Generic congruence test used by GVN: two definitions are congruent when
// they have the same opcode, type and operand value numbers, and neither is
// effectful (effectful instructions must never be merged).
bool
MDefinition::congruentIfOperandsEqual(MDefinition * const &ins) const
{
if (numOperands() != ins->numOperands())
return false;
if (op() != ins->op())
return false;
if (type() != ins->type())
return false;
if (isEffectful() || ins->isEffectful())
return false;
for (size_t i = 0; i < numOperands(); i++) {
if (getOperand(i)->valueNumber() != ins->getOperand(i)->valueNumber())
return false;
}
return true;
}
// Default folding hook: subclasses override this to constant-fold or
// simplify themselves; the base implementation folds to nothing.
MDefinition *
MDefinition::foldsTo(bool useValueNumbers)
{
// In the default case, there are no constants to fold.
return this;
}
// Default (empty) forward edge-case analysis hook; overridden by
// instructions that need it.
void
MDefinition::analyzeEdgeCasesForward()
{
}
// Default (empty) backward edge-case analysis hook; overridden by
// instructions that need it.
void
MDefinition::analyzeEdgeCasesBackward()
{
}
// Conservatively determines whether op could be an object that emulates
// undefined (e.g. document.all). Without type information we must assume it
// might; with a type set we can check the EMULATES_UNDEFINED flag.
static bool
MaybeEmulatesUndefined(JSContext *cx, MDefinition *op)
{
if (!op->mightBeType(MIRType_Object))
return false;
types::StackTypeSet *types = op->resultTypeSet();
if (!types)
return true;
if (!types->maybeObject())
return false;
return types->hasObjectFlags(cx, types::OBJECT_FLAG_EMULATES_UNDEFINED);
}
// Refines this test using type information: if the tested operand provably
// cannot emulate undefined, record that so codegen can take a fast path.
void
MTest::infer(JSContext *cx)
{
JS_ASSERT(operandMightEmulateUndefined());
if (!MaybeEmulatesUndefined(cx, getOperand(0)))
markOperandCantEmulateUndefined();
}
// Folds `test(!x, a, b)` into `test(x, b, a)`, eliminating the negation by
// swapping the successors.
MDefinition *
MTest::foldsTo(bool useValueNumbers)
{
MDefinition *op = getOperand(0);
if (op->isNot())
return MTest::New(op->toNot()->operand(), ifFalse(), ifTrue());
return this;
}
// Prints this definition as its opcode name followed by the space-separated
// names of its operands.
void
MDefinition::printOpcode(FILE *fp)
{
PrintOpcodeName(fp, op());
fprintf(fp, " ");
for (size_t j = 0; j < numOperands(); j++) {
getOperand(j)->printName(fp);
if (j != numOperands() - 1)
fprintf(fp, " ");
}
}
// Counts all uses of this definition by walking the use list (O(n)).
size_t
MDefinition::useCount() const
{
size_t count = 0;
for (MUseIterator i(uses_.begin()); i != uses_.end(); i++)
count++;
return count;
}
// Counts only the uses whose consumer is another definition (excluding
// e.g. resume-point consumers). O(n) walk of the use list.
size_t
MDefinition::defUseCount() const
{
size_t count = 0;
for (MUseIterator i(uses_.begin()); i != uses_.end(); i++)
if ((*i)->consumer()->isDefinition())
count++;
return count;
}
// Unlinks the given use from this definition's use list and returns an
// iterator to the next use (so callers can keep iterating safely).
MUseIterator
MDefinition::removeUse(MUseIterator use)
{
return uses_.removeAt(use);
}
// Replaces the operand referenced by `use` with `def`, unlinking the old
// producer's use entry. Returns an iterator positioned after the removed
// use in the old producer's list; a no-op when the operand is already def.
MUseIterator
MNode::replaceOperand(MUseIterator use, MDefinition *def)
{
JS_ASSERT(def != NULL);
uint32_t index = use->index();
MDefinition *prev = use->producer();
JS_ASSERT(use->index() < numOperands());
JS_ASSERT(use->producer() == getOperand(index));
JS_ASSERT(use->consumer() == this);
if (prev == def)
return use;
MUseIterator result(prev->removeUse(use));
setOperand(index, def);
return result;
}
// Replaces operand `index` with `def`, removing this node's use entry from
// the previous producer's list. No-op when the operand is already def.
void
MNode::replaceOperand(size_t index, MDefinition *def)
{
JS_ASSERT(def != NULL);
MUse *use = getUseFor(index);
MDefinition *prev = use->producer();
JS_ASSERT(use->index() == index);
JS_ASSERT(use->index() < numOperands());
JS_ASSERT(use->producer() == getOperand(index));
JS_ASSERT(use->consumer() == this);
if (prev == def)
return;
prev->removeUse(use);
setOperand(index, def);
}
// Drops operand `index` entirely: removes the use from the producer's list
// and (in debug builds) poisons the MUse so stale lookups assert.
void
MNode::discardOperand(size_t index)
{
MUse *use = getUseFor(index);
JS_ASSERT(use->index() == index);
JS_ASSERT(use->producer() == getOperand(index));
JS_ASSERT(use->consumer() == this);
use->producer()->removeUse(use);
#ifdef DEBUG
// Causes any producer/consumer lookups to trip asserts.
use->set(NULL, NULL, index);
#endif
}
// Redirect every use of this definition to |dom|. The iterator is advanced
// by replaceOperand, which returns the next use after unlinking the current
// one, so the loop must not increment |i| itself.
void
MDefinition::replaceAllUsesWith(MDefinition *dom)
{
    JS_ASSERT(dom != NULL);
    if (dom == this)
        return;
    // Mark this definition's operands as having had a use removed, so later
    // passes know their use information may be stale.
    for (size_t i = 0; i < numOperands(); i++)
        getOperand(i)->setUseRemovedUnchecked();
    for (MUseIterator i(usesBegin()); i != usesEnd(); ) {
        JS_ASSERT(i->producer() == this);
        i = i->consumer()->replaceOperand(i, dom);
    }
}
bool
MDefinition::emptyResultTypeSet() const
{
return resultTypeSet() && resultTypeSet()->empty();
}
// A power of two has exactly one bit set, so clearing its lowest set bit
// yields zero. Zero itself is not a power of two.
static inline bool
IsPowerOfTwo(uint32_t n)
{
    if (n == 0)
        return false;
    return (n & (n - 1)) == 0;
}
// Factory for MConstant wrapping an arbitrary JS value.
MConstant *
MConstant::New(const Value &v)
{
    return new MConstant(v);
}
// Build a StackTypeSet containing exactly the given object, allocated from
// the Ion context's LifoAlloc. Returns NULL on allocation failure.
types::StackTypeSet *
jit::MakeSingletonTypeSet(JSObject *obj)
{
    LifoAlloc *alloc = GetIonContext()->temp->lifoAlloc();
    types::StackTypeSet *types = alloc->new_<types::StackTypeSet>();
    if (!types)
        return NULL;
    types::Type objectType = types::Type::ObjectType(obj);
    types->addObject(objectType.objectKey(), alloc);
    return types;
}
// Construct a constant: derive the MIR result type from the value and mark
// the instruction movable (constants have no side effects).
MConstant::MConstant(const js::Value &vp)
  : value_(vp)
{
    setResultType(MIRTypeFromValue(vp));
    if (vp.isObject()) {
        // Create a singleton type set for the object. This isn't necessary for
        // other types as the result type encodes all needed information.
        setResultTypeSet(MakeSingletonTypeSet(&vp.toObject()));
    }
    setMovable();
}
// Hash the raw 64-bit representation of the value, truncated to HashNumber.
HashNumber
MConstant::valueHash() const
{
    // This disregards some state, since values are 64 bits. But for a hash,
    // it's completely acceptable.
    return (HashNumber)JSVAL_TO_IMPL(value_).asBits;
}
// Two constants are congruent (for GVN) iff their values compare equal.
bool
MConstant::congruentTo(MDefinition * const &ins) const
{
    if (!ins->isConstant())
        return false;
    return ins->toConstant()->value() == value();
}
// Pretty-print the constant: the opcode name followed by a human-readable
// rendering of the value, dispatched on the MIR result type.
void
MConstant::printOpcode(FILE *fp)
{
    PrintOpcodeName(fp, op());
    fprintf(fp, " ");
    switch (type()) {
      case MIRType_Undefined:
        fprintf(fp, "undefined");
        break;
      case MIRType_Null:
        fprintf(fp, "null");
        break;
      case MIRType_Boolean:
        fprintf(fp, value().toBoolean() ? "true" : "false");
        break;
      case MIRType_Int32:
        fprintf(fp, "0x%x", value().toInt32());
        break;
      case MIRType_Double:
        fprintf(fp, "%f", value().toDouble());
        break;
      case MIRType_Object:
        // Functions get a richer rendering: display name, script location,
        // and address. Other objects print address and class name.
        if (value().toObject().is<JSFunction>()) {
            JSFunction *fun = &value().toObject().as<JSFunction>();
            if (fun->displayAtom()) {
                fputs("function ", fp);
                FileEscapedString(fp, fun->displayAtom(), 0);
            } else {
                fputs("unnamed function", fp);
            }
            if (fun->hasScript()) {
                JSScript *script = fun->nonLazyScript();
                fprintf(fp, " (%s:%u)",
                        script->filename() ? script->filename() : "", script->lineno);
            }
            fprintf(fp, " at %p", (void *) fun);
            break;
        }
        fprintf(fp, "object %p (%s)", (void *)&value().toObject(),
                value().toObject().getClass()->name);
        break;
      case MIRType_String:
        fprintf(fp, "string %p", (void *)value().toString());
        break;
      case MIRType_Magic:
        fprintf(fp, "magic");
        break;
      default:
        JS_NOT_REACHED("unexpected type");
        break;
    }
}
// Print the opcode name and the raw elements pointer.
void
MConstantElements::printOpcode(FILE *fp)
{
    PrintOpcodeName(fp, op());
    fprintf(fp, " %p", value());
}
// Factory for MParameter with the given argument slot and observed types.
MParameter *
MParameter::New(int32_t index, types::StackTypeSet *types)
{
    return new MParameter(index, types);
}
// Print the opcode name and the parameter's slot index.
void
MParameter::printOpcode(FILE *fp)
{
    PrintOpcodeName(fp, op());
    fprintf(fp, " %d", index());
}
// Hash a parameter by its slot index; parameters with the same index are
// candidates for congruence.
HashNumber
MParameter::valueHash() const
{
    return index_; // Why not?
}
// Two parameters are congruent iff they refer to the same argument slot.
bool
MParameter::congruentTo(MDefinition * const &ins) const
{
    return ins->isParameter() && ins->toParameter()->index() == index_;
}
// Factory for MCall. |maxArgc| sizes the operand vector (it may exceed the
// actual argument count to leave room for padding); the non-argument
// operands (e.g. the callee) are accounted for via NumNonArgumentOperands.
// Returns NULL if operand allocation fails.
MCall *
MCall::New(JSFunction *target, size_t maxArgc, size_t numActualArgs, bool construct)
{
    JS_ASSERT(maxArgc >= numActualArgs);
    MCall *ins = new MCall(target, numActualArgs, construct);
    if (!ins->init(maxArgc + NumNonArgumentOperands))
        return NULL;
    return ins;
}
// Factory for MApplyArgs (fun.apply(self, arguments)-style call).
MApplyArgs *
MApplyArgs::New(JSFunction *target, MDefinition *fun, MDefinition *argc, MDefinition *self)
{
    return new MApplyArgs(target, fun, argc, self);
}
// Constant-fold the length of a constant string into an Int32 constant.
MDefinition*
MStringLength::foldsTo(bool useValueNumbers)
{
    if ((type() == MIRType_Int32) && (string()->isConstant())) {
        Value value = string()->toConstant()->value();
        size_t length = JS_GetStringLength(value.toString());
        return MConstant::New(Int32Value(length));
    }
    return this;
}
// Factory for MTest: branch on |ins| to one of two successor blocks.
MTest *
MTest::New(MDefinition *ins, MBasicBlock *ifTrue, MBasicBlock *ifFalse)
{
    return new MTest(ins, ifTrue, ifFalse);
}
// Factory for an unspecialized MCompare.
MCompare *
MCompare::New(MDefinition *left, MDefinition *right, JSOp op)
{
    return new MCompare(left, right, op);
}
// Factory for an asm.js compare: the compare type is fixed up front
// (int32/uint32/double only), operands can never emulate undefined, and the
// result is an int32 rather than a boxed boolean.
MCompare *
MCompare::NewAsmJS(MDefinition *left, MDefinition *right, JSOp op, CompareType compareType)
{
    JS_ASSERT(compareType == Compare_Int32 || compareType == Compare_UInt32 ||
              compareType == Compare_Double);
    MCompare *comp = new MCompare(left, right, op);
    comp->compareType_ = compareType;
    comp->operandMightEmulateUndefined_ = false;
    comp->setResultType(MIRType_Int32);
    return comp;
}
// Factory for MTableSwitch over the inclusive case range [low, high].
MTableSwitch *
MTableSwitch::New(MDefinition *ins, int32_t low, int32_t high)
{
    return new MTableSwitch(ins, low, high);
}
// Factory for an unconditional jump to |target| (must be non-NULL).
MGoto *
MGoto::New(MBasicBlock *target)
{
    JS_ASSERT(target);
    return new MGoto(target);
}
// Print the unbox: opcode, input name, the target type, and the unbox mode
// (how failure is handled).
void
MUnbox::printOpcode(FILE *fp)
{
    PrintOpcodeName(fp, op());
    fprintf(fp, " ");
    getOperand(0)->printName(fp);
    fprintf(fp, " ");
    switch (type()) {
      case MIRType_Int32: fprintf(fp, "to Int32"); break;
      case MIRType_Double: fprintf(fp, "to Double"); break;
      case MIRType_Boolean: fprintf(fp, "to Boolean"); break;
      case MIRType_String: fprintf(fp, "to String"); break;
      case MIRType_Object: fprintf(fp, "to Object"); break;
      default: break;
    }
    switch (mode()) {
      case Fallible: fprintf(fp, " (fallible)"); break;
      case Infallible: fprintf(fp, " (infallible)"); break;
      case TypeBarrier: fprintf(fp, " (typebarrier)"); break;
      case TypeGuard: fprintf(fp, " (typeguard)"); break;
      default: break;
    }
}
// Factory for an MPhi occupying the given stack slot.
MPhi *
MPhi::New(uint32_t slot)
{
    return new MPhi(slot);
}
// Remove operand |index| from this phi, shifting later operands down by one
// and shrinking the input vector. Each shifted MUse must be unlinked from
// its producer before setOperand re-links it at its new index.
void
MPhi::removeOperand(size_t index)
{
    MUse *use = getUseFor(index);
    JS_ASSERT(index < inputs_.length());
    JS_ASSERT(inputs_.length() > 1);
    JS_ASSERT(use->index() == index);
    JS_ASSERT(use->producer() == getOperand(index));
    JS_ASSERT(use->consumer() == this);
    // Remove use from producer's use chain.
    use->producer()->removeUse(use);
    // If we have phi(..., a, b, c, d, ..., z) and we plan
    // on removing a, then first shift downward so that we have
    // phi(..., b, c, d, ..., z, z):
    size_t length = inputs_.length();
    for (size_t i = index; i < length - 1; i++) {
        MUse *next = MPhi::getUseFor(i + 1);
        next->producer()->removeUse(next);
        MPhi::setOperand(i, next->producer());
    }
    // truncate the inputs_ list:
    inputs_.shrinkBy(1);
}
// A phi whose inputs are all the same SSA name folds to that name.
MDefinition *
MPhi::foldsTo(bool useValueNumbers)
{
    JS_ASSERT(inputs_.length() != 0);
    MDefinition *first = getOperand(0);
    for (size_t i = 1; i < inputs_.length(); i++) {
        // Phis need dominator information to fold based on value numbers. For
        // simplicity, we only compare SSA names right now (bug 714727).
        if (!EqualValues(false, getOperand(i), first))
            return this;
    }
    return first;
}
// Phis are only congruent when they live in the same block and have
// pairwise-equal operands.
bool
MPhi::congruentTo(MDefinition *const &ins) const
{
    if (!ins->isPhi())
        return false;
    // Since we do not know which predecessor we are merging from, we must
    // assume that phi instructions in different blocks are not equal.
    // (Bug 674656)
    if (ins->block()->id() != block()->id())
        return false;
    return congruentIfOperandsEqual(ins);
}
// Pre-size the operand vector so that later calls to addInput() never need
// to reallocate. Returns false on OOM.
bool
MPhi::reserveLength(size_t length)
{
    // Initializes a new MPhi to have an Operand vector of at least the given
    // capacity. This permits use of addInput() instead of addInputSlow(), the
    // latter of which may call realloc_().
    JS_ASSERT(numOperands() == 0);
#ifdef DEBUG
    // capacity_ only exists in debug builds; it backs the assertion in
    // addInput() that the reservation is never exceeded. Note: this was
    // previously "#if DEBUG", which silently skips recording the capacity
    // when DEBUG is undefined and breaks when DEBUG is defined to nothing;
    // "#ifdef" matches the guards used elsewhere in this file.
    capacity_ = length;
#endif
    return inputs_.reserve(length);
}
// Build a StackTypeSet describing a single concrete MIR type (not Value).
// Objects map to AnyObject; primitives map to their TI primitive type.
// Returns NULL on allocation failure (allocator returns NULL on OOM).
static inline types::StackTypeSet *
MakeMIRTypeSet(MIRType type)
{
    JS_ASSERT(type != MIRType_Value);
    types::Type ntype = type == MIRType_Object
                        ? types::Type::AnyObjectType()
                        : types::Type::PrimitiveType(ValueTypeFromMIRType(type));
    return GetIonContext()->temp->lifoAlloc()->new_<types::StackTypeSet>(ntype);
}
// Merge (newType, newTypeSet) into the accumulated (*ptype, *ptypeSet) pair,
// widening the MIR type (int32+double -> double, otherwise -> Value) and
// unioning the type sets. An empty newTypeSet contributes nothing. On exit,
// *ptypeSet may be NULL, meaning "no useful set information".
void
jit::MergeTypes(MIRType *ptype, types::StackTypeSet **ptypeSet,
                MIRType newType, types::StackTypeSet *newTypeSet)
{
    if (newTypeSet && newTypeSet->empty())
        return;
    if (newType != *ptype) {
        if (IsNumberType(newType) && IsNumberType(*ptype)) {
            *ptype = MIRType_Double;
        } else if (*ptype != MIRType_Value) {
            // Falling back to Value: materialize a set for the old concrete
            // type so the union below stays precise.
            if (!*ptypeSet)
                *ptypeSet = MakeMIRTypeSet(*ptype);
            *ptype = MIRType_Value;
        }
    }
    if (*ptypeSet) {
        LifoAlloc *alloc = GetIonContext()->temp->lifoAlloc();
        if (!newTypeSet && newType != MIRType_Value)
            newTypeSet = MakeMIRTypeSet(newType);
        if (newTypeSet) {
            if (!newTypeSet->isSubset(*ptypeSet))
                *ptypeSet = types::TypeSet::unionSets(*ptypeSet, newTypeSet, alloc);
        } else {
            // No set for the incoming side: drop set information entirely.
            *ptypeSet = NULL;
        }
    }
}
// Compute this phi's result type/type set by merging all of its inputs.
// If backedge types were pre-populated via addBackedgeType, start merging
// from operand 0; otherwise seed with operand 0 and merge the rest.
void
MPhi::specializeType()
{
#ifdef DEBUG
    JS_ASSERT(!specialized_);
    specialized_ = true;
#endif
    JS_ASSERT(!inputs_.empty());
    size_t start;
    if (hasBackedgeType_) {
        // The type of this phi has already been populated with potential types
        // that could come in via loop backedges.
        start = 0;
    } else {
        setResultType(getOperand(0)->type());
        setResultTypeSet(getOperand(0)->resultTypeSet());
        start = 1;
    }
    MIRType resultType = this->type();
    types::StackTypeSet *resultTypeSet = this->resultTypeSet();
    for (size_t i = start; i < inputs_.length(); i++) {
        MDefinition *def = getOperand(i);
        MergeTypes(&resultType, &resultTypeSet, def->type(), def->resultTypeSet());
    }
    setResultType(resultType);
    setResultTypeSet(resultTypeSet);
}
// Record a type that may flow into this phi via a loop backedge, before
// specializeType runs. The first call seeds the type; later calls merge.
void
MPhi::addBackedgeType(MIRType type, types::StackTypeSet *typeSet)
{
    JS_ASSERT(!specialized_);
    if (hasBackedgeType_) {
        MIRType resultType = this->type();
        types::StackTypeSet *resultTypeSet = this->resultTypeSet();
        MergeTypes(&resultType, &resultTypeSet, type, typeSet);
        setResultType(resultType);
        setResultTypeSet(resultTypeSet);
    } else {
        setResultType(type);
        setResultTypeSet(typeSet);
        hasBackedgeType_ = true;
    }
}
// Can every value |def| may produce also flow through this phi without a
// type change? Used to decide whether adding an input requires respecializing.
bool
MPhi::typeIncludes(MDefinition *def)
{
    // Int32 inputs are subsumed by a double phi.
    if (def->type() == MIRType_Int32 && this->type() == MIRType_Double)
        return true;
    if (types::StackTypeSet *types = def->resultTypeSet()) {
        if (this->resultTypeSet())
            return types->isSubset(this->resultTypeSet());
        if (this->type() == MIRType_Value || types->empty())
            return true;
        return this->type() == MIRTypeFromValueType(types->getKnownTypeTag());
    }
    if (def->type() == MIRType_Value) {
        // This phi must be able to be any value.
        return this->type() == MIRType_Value
            && (!this->resultTypeSet() || this->resultTypeSet()->unknown());
    }
    return this->mightBeType(def->type());
}
// Append an input to this phi without any possibility of reallocation.
// Requires a prior reserveLength() call covering this input.
void
MPhi::addInput(MDefinition *ins)
{
    // This can only been done if the length was reserved through reserveLength,
    // else the slower addInputSlow need to get called.
    JS_ASSERT(inputs_.length() < capacity_);
    uint32_t index = inputs_.length();
    inputs_.append(MUse());
    MPhi::setOperand(index, ins);
}
// Append an input to this phi, tolerating vector reallocation. If the append
// will move the vector, every existing MUse is first unlinked from its
// producer's chain and re-linked afterwards, since the chains point into the
// vector's storage. Optionally reports (via *ptypeChange) whether the new
// input widened this phi's type. Returns false on OOM.
bool
MPhi::addInputSlow(MDefinition *ins, bool *ptypeChange)
{
    // The list of inputs to an MPhi is given as a vector of MUse nodes,
    // each of which is in the list of the producer MDefinition.
    // Because appending to a vector may reallocate the vector, it is possible
    // that this operation may cause the producers' linked lists to reference
    // invalid memory. Therefore, in the event of moving reallocation, each
    // MUse must be removed and reinserted from/into its producer's use chain.
    uint32_t index = inputs_.length();
    bool performingRealloc = !inputs_.canAppendWithoutRealloc(1);
    // Remove all MUses from all use lists, in case realloc_() moves.
    if (performingRealloc) {
        for (uint32_t i = 0; i < index; i++) {
            MUse *use = &inputs_[i];
            use->producer()->removeUse(use);
        }
    }
    // Insert the new input.
    if (!inputs_.append(MUse()))
        return false;
    MPhi::setOperand(index, ins);
    if (ptypeChange) {
        MIRType resultType = this->type();
        types::StackTypeSet *resultTypeSet = this->resultTypeSet();
        MergeTypes(&resultType, &resultTypeSet, ins->type(), ins->resultTypeSet());
        if (resultType != this->type() || resultTypeSet != this->resultTypeSet()) {
            *ptypeChange = true;
            setResultType(resultType);
            setResultTypeSet(resultTypeSet);
        }
    }
    // Add all previously-removed MUses back.
    if (performingRealloc) {
        for (uint32_t i = 0; i < index; i++) {
            MUse *use = &inputs_[i];
            use->producer()->addUse(use);
        }
    }
    return true;
}
// Number of stack arguments of the single MCall that consumes this
// MPrepareCall. Asserts there is exactly one use.
uint32_t
MPrepareCall::argc() const
{
    JS_ASSERT(useCount() == 1);
    MCall *call = usesBegin()->consumer()->toDefinition()->toCall();
    return call->numStackArgs();
}
// Print the opcode, the argument number, and the operand names.
void
MPassArg::printOpcode(FILE *fp)
{
    PrintOpcodeName(fp, op());
    fprintf(fp, " %d ", argnum_);
    for (size_t j = 0; j < numOperands(); j++) {
        getOperand(j)->printName(fp);
        if (j != numOperands() - 1)
            fprintf(fp, " ");
    }
}
// Attach argument |argnum| to this call, after the fixed non-argument
// operands.
void
MCall::addArg(size_t argnum, MPassArg *arg)
{
    // The operand vector is initialized in reverse order by the IonBuilder.
    // It cannot be checked for consistency until all arguments are added.
    arg->setArgnum(argnum);
    setOperand(argnum + NumNonArgumentOperands, arg->toDefinition());
}
// Specialize to int32 unless the operand could be an object (which would
// require a valueOf/toString conversion).
void
MBitNot::infer()
{
    bool mayBeObject = getOperand(0)->mightBeType(MIRType_Object);
    specialization_ = mayBeObject ? MIRType_None : MIRType_Int32;
}
// True if |def| is a numeric constant bit-identical to |v|. Comparing raw
// bits deliberately distinguishes -0 from +0.
static inline bool
IsConstant(MDefinition *def, double v)
{
    if (!def->isConstant())
        return false;
    // Compare the underlying bits to not equate -0 and +0.
    uint64_t lhs = BitwiseCast<uint64_t>(def->toConstant()->value().toNumber());
    uint64_t rhs = BitwiseCast<uint64_t>(v);
    return lhs == rhs;
}
// Constant-fold an int32-specialized bitwise op when both operands are
// constants.
MDefinition *
MBinaryBitwiseInstruction::foldsTo(bool useValueNumbers)
{
    if (specialization_ != MIRType_Int32)
        return this;
    if (MDefinition *folded = EvaluateConstantOperands(this))
        return folded;
    return this;
}
// Fold algebraic identities of int32 bitwise ops: the subclass hooks decide
// the result for a 0 operand (foldIfZero), a -1 operand (foldIfNegOne), or
// identical operands (foldIfEqual).
MDefinition *
MBinaryBitwiseInstruction::foldUnnecessaryBitop()
{
    if (specialization_ != MIRType_Int32)
        return this;
    // Eliminate bitwise operations that are no-ops when used on integer
    // inputs, such as (x | 0).
    MDefinition *lhs = getOperand(0);
    MDefinition *rhs = getOperand(1);
    if (IsConstant(lhs, 0))
        return foldIfZero(0);
    if (IsConstant(rhs, 0))
        return foldIfZero(1);
    if (IsConstant(lhs, -1))
        return foldIfNegOne(0);
    if (IsConstant(rhs, -1))
        return foldIfNegOne(1);
    if (EqualValues(false, lhs, rhs))
        return foldIfEqual();
    return this;
}
// Specialize to int32 (and mark commutative) unless either operand might
// be an object.
void
MBinaryBitwiseInstruction::infer(BaselineInspector *, jsbytecode *)
{
    if (getOperand(0)->mightBeType(MIRType_Object) || getOperand(1)->mightBeType(MIRType_Object)) {
        specialization_ = MIRType_None;
    } else {
        specialization_ = MIRType_Int32;
        setCommutative();
    }
}
// asm.js bitwise ops are always int32; assert the result type agrees.
void
MBinaryBitwiseInstruction::specializeForAsmJS()
{
    specialization_ = MIRType_Int32;
    JS_ASSERT(type() == MIRType_Int32);
    setCommutative();
}
// Specialize to int32 unless either operand might be an object. Unlike
// other bitwise ops, shifts are not commutative, so no setCommutative().
void
MShiftInstruction::infer(BaselineInspector *, jsbytecode *)
{
    bool objectInput = getOperand(0)->mightBeType(MIRType_Object) ||
                       getOperand(1)->mightBeType(MIRType_Object);
    specialization_ = objectInput ? MIRType_None : MIRType_Int32;
}
// Unsigned right shift: unspecialized for object inputs; double when
// baseline has observed a double result (>>> of a value whose unsigned
// result exceeds INT32_MAX); otherwise int32.
void
MUrsh::infer(BaselineInspector *inspector, jsbytecode *pc)
{
    if (getOperand(0)->mightBeType(MIRType_Object) || getOperand(1)->mightBeType(MIRType_Object)) {
        specialization_ = MIRType_None;
        setResultType(MIRType_Value);
        return;
    }
    if (inspector->hasSeenDoubleResult(pc)) {
        specialization_ = MIRType_Double;
        setResultType(MIRType_Double);
        return;
    }
    specialization_ = MIRType_Int32;
    setResultType(MIRType_Int32);
}
// Decide whether |def| still needs a negative-zero check: returns true if
// any use of |def| could observe a difference between -0 and +0. Returns
// false only when every use provably treats them identically.
static inline bool
NeedNegativeZeroCheck(MDefinition *def)
{
    // Test if all uses have the same semantics for -0 and 0
    for (MUseIterator use = def->usesBegin(); use != def->usesEnd(); use++) {
        if (use->consumer()->isResumePoint())
            continue;
        MDefinition *use_def = use->consumer()->toDefinition();
        switch (use_def->op()) {
          case MDefinition::Op_Add: {
            // If add is truncating -0 and 0 are observed as the same.
            if (use_def->toAdd()->isTruncated())
                break;
            // x + y gives -0, when both x and y are -0
            // Figure out the order in which the addition's operands will
            // execute. EdgeCaseAnalysis::analyzeLate has renumbered the MIR
            // definitions for us so that this just requires comparing ids.
            MDefinition *first = use_def->getOperand(0);
            MDefinition *second = use_def->getOperand(1);
            if (first->id() > second->id()) {
                MDefinition *temp = first;
                first = second;
                second = temp;
            }
            if (def == first) {
                // Negative zero checks can be removed on the first executed
                // operand only if it is guaranteed the second executed operand
                // will produce a value other than -0. While the second is
                // typed as an int32, a bailout taken between execution of the
                // operands may change that type and cause a -0 to flow to the
                // second.
                //
                // There is no way to test whether there are any bailouts
                // between execution of the operands, so remove negative
                // zero checks from the first only if the second's type is
                // independent from type changes that may occur after bailing.
                switch (second->op()) {
                  case MDefinition::Op_Constant:
                  case MDefinition::Op_BitAnd:
                  case MDefinition::Op_BitOr:
                  case MDefinition::Op_BitXor:
                  case MDefinition::Op_BitNot:
                  case MDefinition::Op_Lsh:
                  case MDefinition::Op_Rsh:
                    break;
                  default:
                    return true;
                }
            }
            // The negative zero check can always be removed on the second
            // executed operand; by the time this executes the first will have
            // been evaluated as int32 and the addition's result cannot be -0.
            break;
          }
          case MDefinition::Op_Sub:
            // If sub is truncating -0 and 0 are observed as the same
            if (use_def->toSub()->isTruncated())
                break;
            /* Fall through... */
          case MDefinition::Op_StoreElement:
          case MDefinition::Op_StoreElementHole:
          case MDefinition::Op_LoadElement:
          case MDefinition::Op_LoadElementHole:
          case MDefinition::Op_LoadTypedArrayElement:
          case MDefinition::Op_LoadTypedArrayElementHole:
          case MDefinition::Op_CharCodeAt:
          case MDefinition::Op_Mod:
            // Only allowed to remove check when definition is the second operand
            if (use_def->getOperand(0) == def)
                return true;
            if (use_def->numOperands() > 2) {
                for (size_t i = 2; i < use_def->numOperands(); i++) {
                    if (use_def->getOperand(i) == def)
                        return true;
                }
            }
            break;
          case MDefinition::Op_BoundsCheck:
            // Only allowed to remove check when definition is the first operand
            if (use_def->getOperand(1) == def)
                return true;
            break;
          case MDefinition::Op_ToString:
          case MDefinition::Op_FromCharCode:
          case MDefinition::Op_TableSwitch:
          case MDefinition::Op_Compare:
          case MDefinition::Op_BitAnd:
          case MDefinition::Op_BitOr:
          case MDefinition::Op_BitXor:
          case MDefinition::Op_Abs:
          case MDefinition::Op_TruncateToInt32:
            // Always allowed to remove check. No matter which operand.
            break;
          default:
            return true;
        }
    }
    return false;
}
// Constant-fold, then try identity folding (x op identity => x). Identity
// folds are skipped where -0 or non-commutativity makes them unsound.
MDefinition *
MBinaryArithInstruction::foldsTo(bool useValueNumbers)
{
    if (specialization_ == MIRType_None)
        return this;
    MDefinition *lhs = getOperand(0);
    MDefinition *rhs = getOperand(1);
    if (MDefinition *folded = EvaluateConstantOperands(this))
        return folded;
    // 0 + -0 = 0. So we can't remove addition
    if (isAdd() && specialization_ != MIRType_Int32)
        return this;
    if (IsConstant(rhs, getIdentity()))
        return lhs;
    // subtraction isn't commutative. So we can't remove subtraction when lhs equals 0
    if (isSub())
        return this;
    if (IsConstant(lhs, getIdentity()))
        return rhs; // x op id => x
    return this;
}
// Abs can fail (overflow on INT32_MIN) unless the result is truncated or
// range analysis proved it fits in an int32.
bool
MAbs::fallible() const
{
    return !implicitTruncate_ && (!range() || !range()->isInt32());
}
// Constant-fold a specialized division.
MDefinition *
MDiv::foldsTo(bool useValueNumbers)
{
    if (specialization_ == MIRType_None)
        return this;
    if (MDefinition *folded = EvaluateConstantOperands(this))
        return folded;
    return this;
}
// Use constant operands to prove integer-division edge cases (divide by
// zero, INT32_MIN / -1 overflow, negative zero) cannot occur, so their
// runtime checks can be dropped.
void
MDiv::analyzeEdgeCasesForward()
{
    // This is only meaningful when doing integer division.
    if (specialization_ != MIRType_Int32)
        return;
    // Try removing divide by zero check
    if (rhs()->isConstant() && !rhs()->toConstant()->value().isInt32(0))
        canBeDivideByZero_ = false;
    // If lhs is a constant int != INT32_MIN, then
    // negative overflow check can be skipped.
    if (lhs()->isConstant() && !lhs()->toConstant()->value().isInt32(INT32_MIN))
        canBeNegativeOverflow_ = false;
    // If rhs is a constant int != -1, likewise.
    if (rhs()->isConstant() && !rhs()->toConstant()->value().isInt32(-1))
        canBeNegativeOverflow_ = false;
    // If lhs is != 0, then negative zero check can be skipped.
    if (lhs()->isConstant() && !lhs()->toConstant()->value().isInt32(0))
        setCanBeNegativeZero(false);
    // If rhs is >= 0, likewise.
    if (rhs()->isConstant()) {
        const js::Value &val = rhs()->toConstant()->value();
        if (val.isInt32() && val.toInt32() >= 0)
            setCanBeNegativeZero(false);
    }
}
// Drop the negative-zero check when no use distinguishes -0 from +0.
void
MDiv::analyzeEdgeCasesBackward()
{
    if (canBeNegativeZero() && !NeedNegativeZeroCheck(this))
        setCanBeNegativeZero(false);
}
// Division can bail out unless every use truncates the result.
bool
MDiv::fallible()
{
    return !isTruncated();
}
// Return |replacement| only when it preserves |original|'s MIR type;
// otherwise keep the original.
static inline MDefinition *
TryFold(MDefinition *original, MDefinition *replacement)
{
    bool typesMatch = original->type() == replacement->type();
    return typesMatch ? replacement : original;
}
// Constant-fold a specialized modulus.
MDefinition *
MMod::foldsTo(bool useValueNumbers)
{
    if (specialization_ == MIRType_None)
        return this;
    if (MDefinition *folded = EvaluateConstantOperands(this))
        return folded;
    return this;
}
// Modulus can bail out unless every use truncates the result.
bool
MMod::fallible()
{
    return !isTruncated();
}
// Addition is infallible when every use truncates the result, or when
// range analysis proved the result always fits in an int32.
bool
MAdd::fallible()
{
    if (isTruncated())
        return false;
    bool provenInt32 = range() && range()->isInt32();
    return !provenInt32;
}
// Subtraction is infallible under the same conditions as MAdd::fallible():
// all uses truncate, or range analysis proved an int32 result.
bool
MSub::fallible()
{
    if (isTruncated())
        return false;
    bool provenInt32 = range() && range()->isInt32();
    return !provenInt32;
}
// Delegate to the generic arith folding; if nothing folded, note that x*x
// can never be -0 (a square is non-negative) so the check can be dropped.
MDefinition *
MMul::foldsTo(bool useValueNumbers)
{
    MDefinition *out = MBinaryArithInstruction::foldsTo(useValueNumbers);
    if (out != this)
        return out;
    if (specialization() != MIRType_Int32)
        return this;
    if (EqualValues(useValueNumbers, lhs(), rhs()))
        setCanBeNegativeZero(false);
    return this;
}
// A product can only be -0 when one factor is 0 and the other negative;
// a strictly positive constant factor therefore rules it out.
void
MMul::analyzeEdgeCasesForward()
{
    // Try to remove the check for negative zero
    // This only makes sense when using the integer multiplication
    if (specialization() != MIRType_Int32)
        return;
    // If lhs is > 0, no need for negative zero check.
    if (lhs()->isConstant()) {
        const js::Value &val = lhs()->toConstant()->value();
        if (val.isInt32() && val.toInt32() > 0)
            setCanBeNegativeZero(false);
    }
    // If rhs is > 0, likewise.
    if (rhs()->isConstant()) {
        const js::Value &val = rhs()->toConstant()->value();
        if (val.isInt32() && val.toInt32() > 0)
            setCanBeNegativeZero(false);
    }
}
// Drop the negative-zero check when no use distinguishes -0 from +0.
void
MMul::analyzeEdgeCasesBackward()
{
    if (canBeNegativeZero() && !NeedNegativeZeroCheck(this))
        setCanBeNegativeZero(false);
}
// When GVN merges this multiply with |ins_|, keep the union of their
// negative-zero possibilities and demote Integer (imul) mode if the other
// instruction is a normal multiplication.
bool
MMul::updateForReplacement(MDefinition *ins_)
{
    MMul *ins = ins_->toMul();
    bool negativeZero = canBeNegativeZero() || ins->canBeNegativeZero();
    setCanBeNegativeZero(negativeZero);
    // Remove the imul annotation when merging imul and normal multiplication.
    if (mode_ == Integer && ins->mode() != Integer)
        mode_ = Normal;
    return true;
}
// Overflow is impossible when the result is truncated or range analysis
// proved an int32 result.
bool
MMul::canOverflow()
{
    if (isTruncated())
        return false;
    return !range() || !range()->isInt32();
}
// True when |op| is known to be a primitive other than a string (and not
// the magic sentinel), i.e. arithmetic can treat it as a plain number-ish
// value without conversion hooks.
static inline bool
KnownNonStringPrimitive(MDefinition *op)
{
    return !op->mightBeType(MIRType_Object)
        && !op->mightBeType(MIRType_String)
        && !op->mightBeType(MIRType_Magic);
}
// Choose a specialization (int32/double/none) for this arithmetic op from
// its operand types, baseline feedback, and constant-folding outcomes.
// Falls back to inferFallback when operands may be strings/objects or the
// type combination isn't profitable.
void
MBinaryArithInstruction::infer(BaselineInspector *inspector,
                               jsbytecode *pc,
                               bool overflowed)
{
    JS_ASSERT(this->type() == MIRType_Value);
    specialization_ = MIRType_None;
    // Retrieve type information of lhs and rhs.
    MIRType lhs = getOperand(0)->type();
    MIRType rhs = getOperand(1)->type();
    // Anything complex - strings and objects - are not specialized
    // unless baseline type hints suggest it might be profitable
    if (!KnownNonStringPrimitive(getOperand(0)) || !KnownNonStringPrimitive(getOperand(1)))
        return inferFallback(inspector, pc);
    // Guess a result type based on the inputs.
    // Don't specialize for neither-integer-nor-double results.
    if (lhs == MIRType_Int32 && rhs == MIRType_Int32)
        setResultType(MIRType_Int32);
    else if (lhs == MIRType_Double || rhs == MIRType_Double)
        setResultType(MIRType_Double);
    else
        return inferFallback(inspector, pc);
    // If the operation has ever overflowed, use a double specialization.
    if (inspector->hasSeenDoubleResult(pc))
        setResultType(MIRType_Double);
    // If the operation will always overflow on its constant operands, use a
    // double specialization so that it can be constant folded later.
    if ((isMul() || isDiv()) && lhs == MIRType_Int32 && rhs == MIRType_Int32) {
        bool typeChange = false;
        EvaluateConstantOperands(this, &typeChange);
        if (typeChange)
            setResultType(MIRType_Double);
    }
    JS_ASSERT(lhs < MIRType_String || lhs == MIRType_Value);
    JS_ASSERT(rhs < MIRType_String || rhs == MIRType_Value);
    MIRType rval = this->type();
    // Don't specialize values when result isn't double
    if (lhs == MIRType_Value || rhs == MIRType_Value) {
        if (rval != MIRType_Double) {
            specialization_ = MIRType_None;
            return;
        }
    }
    // Don't specialize as int32 if one of the operands is undefined,
    // since ToNumber(undefined) is NaN.
    if (rval == MIRType_Int32 && (lhs == MIRType_Undefined || rhs == MIRType_Undefined)) {
        specialization_ = MIRType_None;
        return;
    }
    specialization_ = rval;
    if (isAdd() || isMul())
        setCommutative();
    setResultType(rval);
}
// Last-resort specialization: use baseline-observed behavior, force double
// in parallel execution, and propagate an empty type set when an operand's
// type set is empty so later analysis isn't degraded.
void
MBinaryArithInstruction::inferFallback(BaselineInspector *inspector,
                                       jsbytecode *pc)
{
    // Try to specialize based on what baseline observed in practice.
    specialization_ = inspector->expectedBinaryArithSpecialization(pc);
    if (specialization_ != MIRType_None) {
        setResultType(specialization_);
        return;
    }
    // In parallel execution, for now anyhow, we *only* support adding
    // and manipulating numbers (not strings or objects). So no
    // matter what we can specialize to double...if the result ought
    // to have been something else, we'll fail in the various type
    // guards that get inserted later.
    if (block()->info().executionMode() == ParallelExecution) {
        specialization_ = MIRType_Double;
        setResultType(MIRType_Double);
        return;
    }
    // If we can't specialize because we have no type information at all for
    // the lhs or rhs, mark the binary instruction as having no possible types
    // either to avoid degrading subsequent analysis.
    if (getOperand(0)->emptyResultTypeSet() || getOperand(1)->emptyResultTypeSet()) {
        LifoAlloc *alloc = GetIonContext()->temp->lifoAlloc();
        types::StackTypeSet *types = alloc->new_<types::StackTypeSet>();
        if (types)
            setResultTypeSet(types);
    }
}
// True if |op| can be coerced to a double for comparison purposes without
// changing observable semantics.
static bool
SafelyCoercesToDouble(MDefinition *op)
{
    // Strings are unhandled -- visitToDouble() doesn't support them yet.
    // Null is unhandled -- ToDouble(null) == 0, but (0 == null) is false.
    return KnownNonStringPrimitive(op) && !op->mightBeType(MIRType_Null);
}
// True if op is either undefined/null/boolean/int32 or an object
// (i.e. not a string, double, or magic value).
static bool
ObjectOrSimplePrimitive(MDefinition *op)
{
    return !op->mightBeType(MIRType_String)
        && !op->mightBeType(MIRType_Double)
        && !op->mightBeType(MIRType_Magic);
}
// Can lhs == rhs be implemented as a raw bitwise comparison of the boxed
// values? Only sound for simple primitives and objects, and only when no
// loose-equality coercion (undefined==null, 1==true, object valueOf) could
// make differently-tagged values compare equal.
static bool
CanDoValueBitwiseCmp(JSContext *cx, MDefinition *lhs, MDefinition *rhs, bool looseEq)
{
    // Only primitive (not double/string) or objects are supported.
    // I.e. Undefined/Null/Boolean/Int32 and Object
    if (!ObjectOrSimplePrimitive(lhs) || !ObjectOrSimplePrimitive(rhs))
        return false;
    // Objects that emulate undefined are not supported.
    if (MaybeEmulatesUndefined(cx, lhs) || MaybeEmulatesUndefined(cx, rhs))
        return false;
    // In loose comparison more values can compare equal even though a raw
    // value comparison would report otherwise.
    if (looseEq) {
        // Undefined compared loosely to Null is not supported,
        // because the tags differ but the comparison is true (undefined == null).
        if ((lhs->mightBeType(MIRType_Undefined) && rhs->mightBeType(MIRType_Null)) ||
            (lhs->mightBeType(MIRType_Null) && rhs->mightBeType(MIRType_Undefined)))
        {
            return false;
        }
        // Int32 compared loosely to Boolean is not supported,
        // because the tags differ but the values can be equal (1 == true).
        if ((lhs->mightBeType(MIRType_Int32) && rhs->mightBeType(MIRType_Boolean)) ||
            (lhs->mightBeType(MIRType_Boolean) && rhs->mightBeType(MIRType_Int32)))
        {
            return false;
        }
        // For loose comparison of an object with a Boolean/Number/String,
        // the valueOf of the object is taken. Therefore not supported.
        bool simpleLHS = lhs->mightBeType(MIRType_Boolean) || lhs->mightBeType(MIRType_Int32);
        bool simpleRHS = rhs->mightBeType(MIRType_Boolean) || rhs->mightBeType(MIRType_Int32);
        if ((lhs->mightBeType(MIRType_Object) && simpleRHS) ||
            (rhs->mightBeType(MIRType_Object) && simpleLHS))
        {
            return false;
        }
    }
    return true;
}
// Map the chosen compare specialization to the MIR type its inputs must
// be unboxed to by the type policy.
MIRType
MCompare::inputType()
{
    switch(compareType_) {
      case Compare_Undefined:
        return MIRType_Undefined;
      case Compare_Null:
        return MIRType_Null;
      case Compare_Boolean:
        return MIRType_Boolean;
      case Compare_UInt32:
      case Compare_Int32:
        return MIRType_Int32;
      case Compare_Double:
      case Compare_DoubleMaybeCoerceLHS:
      case Compare_DoubleMaybeCoerceRHS:
        return MIRType_Double;
      case Compare_String:
      case Compare_StrictString:
        return MIRType_String;
      case Compare_Object:
        return MIRType_Object;
      case Compare_Unknown:
      case Compare_Value:
        return MIRType_Value;
      default:
        JS_NOT_REACHED("No known conversion");
        return MIRType_None;
    }
}
// Is |def| guaranteed to be a uint32? True for (x >>> 0) — in which case
// *pwrapped is set to the unwrapped x — and for non-negative int32
// constants, for which *pwrapped is the constant itself.
static inline bool
MustBeUInt32(MDefinition *def, MDefinition **pwrapped)
{
    if (def->isUrsh()) {
        *pwrapped = def->toUrsh()->getOperand(0);
        MDefinition *rhs = def->toUrsh()->getOperand(1);
        return rhs->isConstant()
            && rhs->toConstant()->value().isInt32()
            && rhs->toConstant()->value().toInt32() == 0;
    }
    if (def->isConstant()) {
        *pwrapped = def;
        return def->toConstant()->value().isInt32()
            && def->toConstant()->value().toInt32() >= 0;
    }
    return false;
}
// Pick a compare specialization (compareType_) from the operand types and
// the comparison opcode. The cases are ordered from most to least specific;
// some swap the operands so lowering can rely on a canonical side. If type
// information is insufficient, consult the baseline inspector (non-strict
// comparisons only).
void
MCompare::infer(JSContext *cx, BaselineInspector *inspector, jsbytecode *pc)
{
    JS_ASSERT(operandMightEmulateUndefined());
    if (!MaybeEmulatesUndefined(cx, getOperand(0)) && !MaybeEmulatesUndefined(cx, getOperand(1)))
        markNoOperandEmulatesUndefined();
    MIRType lhs = getOperand(0)->type();
    MIRType rhs = getOperand(1)->type();
    bool looseEq = jsop() == JSOP_EQ || jsop() == JSOP_NE;
    bool strictEq = jsop() == JSOP_STRICTEQ || jsop() == JSOP_STRICTNE;
    bool relationalEq = !(looseEq || strictEq);
    // Comparisons on unsigned integers may be treated as UInt32. Skip any (x >>> 0)
    // operation coercing the operands to uint32. The type policy will make sure the
    // now unwrapped operand is an int32.
    MDefinition *newlhs, *newrhs;
    if (MustBeUInt32(getOperand(0), &newlhs) && MustBeUInt32(getOperand(1), &newrhs)) {
        if (newlhs != getOperand(0))
            replaceOperand(0, newlhs);
        if (newrhs != getOperand(1))
            replaceOperand(1, newrhs);
        compareType_ = Compare_UInt32;
        return;
    }
    // Integer to integer or boolean to boolean comparisons may be treated as Int32.
    if ((lhs == MIRType_Int32 && rhs == MIRType_Int32) ||
        (lhs == MIRType_Boolean && rhs == MIRType_Boolean))
    {
        compareType_ = Compare_Int32;
        return;
    }
    // Loose/relational cross-integer/boolean comparisons may be treated as Int32.
    if (!strictEq &&
        (lhs == MIRType_Int32 || lhs == MIRType_Boolean) &&
        (rhs == MIRType_Int32 || rhs == MIRType_Boolean))
    {
        compareType_ = Compare_Int32;
        return;
    }
    // Numeric comparisons against a double coerce to double.
    if (IsNumberType(lhs) && IsNumberType(rhs)) {
        compareType_ = Compare_Double;
        return;
    }
    // Any comparison is allowed except strict eq.
    if (!strictEq && lhs == MIRType_Double && SafelyCoercesToDouble(getOperand(1))) {
        compareType_ = Compare_DoubleMaybeCoerceRHS;
        return;
    }
    if (!strictEq && rhs == MIRType_Double && SafelyCoercesToDouble(getOperand(0))) {
        compareType_ = Compare_DoubleMaybeCoerceLHS;
        return;
    }
    // Handle object comparison.
    if (!relationalEq && lhs == MIRType_Object && rhs == MIRType_Object) {
        compareType_ = Compare_Object;
        return;
    }
    // Handle string comparisons. (Relational string compares are still unsupported).
    if (!relationalEq && lhs == MIRType_String && rhs == MIRType_String) {
        compareType_ = Compare_String;
        return;
    }
    if (strictEq && lhs == MIRType_String) {
        // Lowering expects the rhs to be definitely string.
        compareType_ = Compare_StrictString;
        swapOperands();
        return;
    }
    if (strictEq && rhs == MIRType_String) {
        compareType_ = Compare_StrictString;
        return;
    }
    // Handle compare with lhs being Undefined or Null.
    if (!relationalEq && IsNullOrUndefined(lhs)) {
        // Lowering expects the rhs to be null/undefined, so we have to
        // swap the operands. This is necessary since we may not know which
        // operand was null/undefined during lowering (both operands may have
        // MIRType_Value).
        compareType_ = (lhs == MIRType_Null) ? Compare_Null : Compare_Undefined;
        swapOperands();
        return;
    }
    // Handle compare with rhs being Undefined or Null.
    if (!relationalEq && IsNullOrUndefined(rhs)) {
        compareType_ = (rhs == MIRType_Null) ? Compare_Null : Compare_Undefined;
        return;
    }
    // Handle strict comparison with lhs/rhs being typed Boolean.
    if (strictEq && (lhs == MIRType_Boolean || rhs == MIRType_Boolean)) {
        // bool/bool case got an int32 specialization earlier.
        JS_ASSERT(!(lhs == MIRType_Boolean && rhs == MIRType_Boolean));
        // Ensure the boolean is on the right so that the type policy knows
        // which side to unbox.
        if (lhs == MIRType_Boolean)
            swapOperands();
        compareType_ = Compare_Boolean;
        return;
    }
    // Determine if we can do the compare based on a quick value check.
    if (!relationalEq && CanDoValueBitwiseCmp(cx, getOperand(0), getOperand(1), looseEq)) {
        compareType_ = Compare_Value;
        return;
    }
    // Type information is not good enough to pick out a particular type of
    // comparison we can do here. Try to specialize based on any baseline
    // caches that have been generated for the opcode. These will cause the
    // instruction's type policy to insert fallible unboxes to the appropriate
    // input types.
    if (!strictEq)
        compareType_ = inspector->expectedCompareType(pc);
}
// Create a bitwise-NOT node for normal JS code; its specialization is
// decided later by type analysis.
MBitNot *
MBitNot::New(MDefinition *input)
{
    return new MBitNot(input);
}

// Create a bitwise-NOT node for asm.js, where the operand is statically
// known to be int32, so the node is specialized immediately.
MBitNot *
MBitNot::NewAsmJS(MDefinition *input)
{
    MBitNot *ins = new MBitNot(input);
    ins->specialization_ = MIRType_Int32;
    JS_ASSERT(ins->type() == MIRType_Int32);
    return ins;
}
// Constant-fold bitwise NOT: ~c folds to a constant, and ~~x folds back
// to x. Only int32-specialized nodes are eligible.
MDefinition *
MBitNot::foldsTo(bool useValueNumbers)
{
    if (specialization_ != MIRType_Int32)
        return this;

    MDefinition *in = getOperand(0);

    // ~constant => constant.
    if (in->isConstant()) {
        int32_t folded = ~in->toConstant()->value().toInt32();
        return MConstant::New(Int32Value(folded));
    }

    // ~~x => x, provided the inner NOT is also int32-specialized.
    if (in->isBitNot() && in->toBitNot()->specialization_ == MIRType_Int32) {
        JS_ASSERT(in->getOperand(0)->type() == MIRType_Int32);
        return in->getOperand(0);
    }

    return this;
}
// Fold typeof into a constant string when the pre-boxing input type is
// statically known; objects and unknown types cannot be folded.
MDefinition *
MTypeOf::foldsTo(bool useValueNumbers)
{
    // Note: we can't use input->type() here, type analysis has
    // boxed the input.
    JS_ASSERT(input()->type() == MIRType_Value);

    JSType type;

    switch (inputType()) {
      case MIRType_Double:
      case MIRType_Int32:
        type = JSTYPE_NUMBER;
        break;
      case MIRType_String:
        type = JSTYPE_STRING;
        break;
      case MIRType_Null:
        // typeof null is "object" per the language specification.
        type = JSTYPE_OBJECT;
        break;
      case MIRType_Undefined:
        type = JSTYPE_VOID;
        break;
      case MIRType_Boolean:
        type = JSTYPE_BOOLEAN;
        break;
      default:
        // Object or unknown input: the answer is not statically known.
        return this;
    }

    JSRuntime *rt = GetIonContext()->runtime;
    return MConstant::New(StringValue(TypeName(type, rt)));
}
// Boilerplate factories for the bitwise and shift operations. The plain
// New() variants are used for normal JS and are specialized later by type
// analysis; the NewAsmJS() variants specialize for int32 immediately.

MBitAnd *
MBitAnd::New(MDefinition *left, MDefinition *right)
{
    return new MBitAnd(left, right);
}

MBitAnd *
MBitAnd::NewAsmJS(MDefinition *left, MDefinition *right)
{
    MBitAnd *ins = new MBitAnd(left, right);
    ins->specializeForAsmJS();
    return ins;
}

MBitOr *
MBitOr::New(MDefinition *left, MDefinition *right)
{
    return new MBitOr(left, right);
}

MBitOr *
MBitOr::NewAsmJS(MDefinition *left, MDefinition *right)
{
    MBitOr *ins = new MBitOr(left, right);
    ins->specializeForAsmJS();
    return ins;
}

MBitXor *
MBitXor::New(MDefinition *left, MDefinition *right)
{
    return new MBitXor(left, right);
}

MBitXor *
MBitXor::NewAsmJS(MDefinition *left, MDefinition *right)
{
    MBitXor *ins = new MBitXor(left, right);
    ins->specializeForAsmJS();
    return ins;
}

MLsh *
MLsh::New(MDefinition *left, MDefinition *right)
{
    return new MLsh(left, right);
}

MLsh *
MLsh::NewAsmJS(MDefinition *left, MDefinition *right)
{
    MLsh *ins = new MLsh(left, right);
    ins->specializeForAsmJS();
    return ins;
}

MRsh *
MRsh::New(MDefinition *left, MDefinition *right)
{
    return new MRsh(left, right);
}

MRsh *
MRsh::NewAsmJS(MDefinition *left, MDefinition *right)
{
    MRsh *ins = new MRsh(left, right);
    ins->specializeForAsmJS();
    return ins;
}

MUrsh *
MUrsh::New(MDefinition *left, MDefinition *right)
{
    return new MUrsh(left, right);
}

MUrsh *
MUrsh::NewAsmJS(MDefinition *left, MDefinition *right)
{
    MUrsh *ins = new MUrsh(left, right);
    ins->specializeForAsmJS();
    // asm.js >>> results are consumed as int32, so the uint32-overflow
    // path is not needed.
    ins->canOverflow_ = false;
    return ins;
}
// Allocate and initialize a resume point capturing the interpreter stack
// state of |block| at |pc|. Returns NULL on allocation failure.
MResumePoint *
MResumePoint::New(MBasicBlock *block, jsbytecode *pc, MResumePoint *parent, Mode mode)
{
    MResumePoint *resume = new MResumePoint(block, pc, parent, mode);
    if (!resume->init())
        return NULL;
    resume->inherit(block);
    return resume;
}

MResumePoint::MResumePoint(MBasicBlock *block, jsbytecode *pc, MResumePoint *caller,
                           Mode mode)
  : MNode(block),
    stackDepth_(block->stackDepth()),
    pc_(pc),
    caller_(caller),
    instruction_(NULL),
    mode_(mode)
{
    // Register this resume point with its owning block.
    block->addResumePoint(this);
}

// Copy the block's current stack slots into this resume point's operands.
void
MResumePoint::inherit(MBasicBlock *block)
{
    for (size_t i = 0; i < stackDepth(); i++) {
        MDefinition *def = block->getSlot(i);
        // We have to unwrap MPassArg: it's removed when inlining calls
        // and LStackArg does not define a value.
        if (def->isPassArg())
            def = def->toPassArg()->getArgument();
        setOperand(i, def);
    }
}
// If the operand is already an int32, the conversion is a no-op and the
// node folds to its operand.
MDefinition *
MToInt32::foldsTo(bool useValueNumbers)
{
    MDefinition *in = getOperand(0);
    return (in->type() == MIRType_Int32) ? in : this;
}
// Backward edge-case analysis: drop the negative-zero check when no use
// of this node can observe the difference between -0 and +0.
void
MToInt32::analyzeEdgeCasesBackward()
{
    if (!NeedNegativeZeroCheck(this))
        setCanBeNegativeZero(false);
}
// Fold truncation: an int32 input needs no work, and a double constant
// truncates at compile time using the same ToInt32 conversion the
// runtime would perform.
MDefinition *
MTruncateToInt32::foldsTo(bool useValueNumbers)
{
    MDefinition *input = getOperand(0);
    if (input->type() == MIRType_Int32)
        return input;

    if (input->type() == MIRType_Double && input->isConstant()) {
        const Value &v = input->toConstant()->value();
        int32_t ret = ToInt32(v.toDouble());
        return MConstant::New(Int32Value(ret));
    }

    return this;
}
// A numeric constant operand converts to a double constant at compile
// time; anything else is left for runtime conversion.
MDefinition *
MToDouble::foldsTo(bool useValueNumbers)
{
    if (!input()->isConstant())
        return this;

    const Value &v = input()->toConstant()->value();
    if (!v.isNumber())
        return this;

    return MConstant::New(DoubleValue(v.toNumber()));
}
// No compile-time folding is currently performed for ToString.
MDefinition *
MToString::foldsTo(bool useValueNumbers)
{
    return this;
}
// Fold clamping of a constant number into the resulting uint8 constant;
// non-constant and non-numeric inputs are clamped at runtime.
MDefinition *
MClampToUint8::foldsTo(bool useValueNumbers)
{
    if (!input()->isConstant())
        return this;

    const Value &v = input()->toConstant()->value();
    if (v.isDouble())
        return MConstant::New(Int32Value(ClampDoubleToUint8(v.toDouble())));
    if (v.isInt32())
        return MConstant::New(Int32Value(ClampIntForUint8Array(v.toInt32())));

    return this;
}
// Try to determine the comparison's result purely from the operand types
// (no constant values needed). Returns true and sets |*result| when the
// answer is statically known; returns false when it is not.
bool
MCompare::tryFold(bool *result)
{
    JSOp op = jsop();

    if (compareType_ == Compare_Null || compareType_ == Compare_Undefined) {
        JS_ASSERT(op == JSOP_EQ || op == JSOP_STRICTEQ ||
                  op == JSOP_NE || op == JSOP_STRICTNE);

        // The LHS is the value we want to test against null or undefined.
        switch (lhs()->type()) {
          case MIRType_Value:
            return false;
          case MIRType_Undefined:
          case MIRType_Null:
            if (lhs()->type() == inputType()) {
                // Both sides have the same type, null or undefined.
                *result = (op == JSOP_EQ || op == JSOP_STRICTEQ);
            } else {
                // One side is null, the other side is undefined. The result is only
                // true for loose equality.
                *result = (op == JSOP_EQ || op == JSOP_STRICTNE);
            }
            return true;
          case MIRType_Object:
            // Objects emulating undefined compare loosely equal to
            // null/undefined, so we cannot fold in that case.
            if ((op == JSOP_EQ || op == JSOP_NE) && operandMightEmulateUndefined())
                return false;
            /* FALL THROUGH */
          case MIRType_Int32:
          case MIRType_Double:
          case MIRType_String:
          case MIRType_Boolean:
            // A definitely-non-nullish value is never equal to
            // null/undefined.
            *result = (op == JSOP_NE || op == JSOP_STRICTNE);
            return true;
          default:
            JS_NOT_REACHED("Unexpected type");
            return false;
        }
    }

    if (compareType_ == Compare_Boolean) {
        JS_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
        JS_ASSERT(rhs()->type() == MIRType_Boolean);

        switch (lhs()->type()) {
          case MIRType_Value:
            return false;
          case MIRType_Int32:
          case MIRType_Double:
          case MIRType_String:
          case MIRType_Object:
          case MIRType_Null:
          case MIRType_Undefined:
            // Strict comparison against a boolean of any other type is
            // always unequal.
            *result = (op == JSOP_STRICTNE);
            return true;
          case MIRType_Boolean:
            // Int32 specialization should handle this.
            JS_NOT_REACHED("Wrong specialization");
            return false;
          default:
            JS_NOT_REACHED("Unexpected type");
            return false;
        }
    }

    if (compareType_ == Compare_StrictString) {
        JS_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
        JS_ASSERT(rhs()->type() == MIRType_String);

        switch (lhs()->type()) {
          case MIRType_Value:
            return false;
          case MIRType_Boolean:
          case MIRType_Int32:
          case MIRType_Double:
          case MIRType_Object:
          case MIRType_Null:
          case MIRType_Undefined:
            // Strict comparison against a string of any other type is
            // always unequal.
            *result = (op == JSOP_STRICTNE);
            return true;
          case MIRType_String:
            // Compare_String specialization should handle this.
            JS_NOT_REACHED("Wrong specialization");
            return false;
          default:
            JS_NOT_REACHED("Unexpected type");
            return false;
        }
    }

    return false;
}
// Try to evaluate the comparison when both operands are constants.
// Returns true and sets |*result| on success; returns false when the
// operands are not constants or the combination cannot be folded.
bool
MCompare::evaluateConstantOperands(bool *result)
{
    if (type() != MIRType_Boolean && type() != MIRType_Int32)
        return false;

    MDefinition *left = getOperand(0);
    MDefinition *right = getOperand(1);
    if (!left->isConstant() || !right->isConstant())
        return false;

    Value lhs = left->toConstant()->value();
    Value rhs = right->toConstant()->value();

    // Fold away some String equality comparisons.
    if (lhs.isString() && rhs.isString()) {
        int32_t comp = 0; // Default to equal.
        if (left != right) {
            // CompareStrings may fail (e.g. on OOM); propagate failure
            // by not folding.
            if (!CompareStrings(GetIonContext()->cx, lhs.toString(), rhs.toString(), &comp))
                return false;
        }

        switch (jsop_) {
          case JSOP_LT:
            *result = (comp < 0);
            break;
          case JSOP_LE:
            *result = (comp <= 0);
            break;
          case JSOP_GT:
            *result = (comp > 0);
            break;
          case JSOP_GE:
            *result = (comp >= 0);
            break;
          case JSOP_STRICTEQ: // Fall through.
          case JSOP_EQ:
            *result = (comp == 0);
            break;
          case JSOP_STRICTNE: // Fall through.
          case JSOP_NE:
            *result = (comp != 0);
            break;
          default:
            JS_NOT_REACHED("Unexpected op.");
            return false;
        }

        return true;
    }

    // UInt32 comparisons reinterpret the int32 payloads as unsigned.
    if (compareType_ == Compare_UInt32) {
        uint32_t lhsUint = uint32_t(lhs.toInt32());
        uint32_t rhsUint = uint32_t(rhs.toInt32());

        switch (jsop_) {
          case JSOP_LT:
            *result = (lhsUint < rhsUint);
            break;
          case JSOP_LE:
            *result = (lhsUint <= rhsUint);
            break;
          case JSOP_GT:
            *result = (lhsUint > rhsUint);
            break;
          case JSOP_GE:
            *result = (lhsUint >= rhsUint);
            break;
          case JSOP_EQ:
          case JSOP_STRICTEQ:
            *result = (lhsUint == rhsUint);
            break;
          case JSOP_NE:
          case JSOP_STRICTNE:
            *result = (lhsUint != rhsUint);
            break;
          default:
            JS_NOT_REACHED("Unexpected op.");
            return false;
        }

        return true;
    }

    // Remaining foldable cases are numeric comparisons.
    if (!lhs.isNumber() || !rhs.isNumber())
        return false;

    switch (jsop_) {
      case JSOP_LT:
        *result = (lhs.toNumber() < rhs.toNumber());
        break;
      case JSOP_LE:
        *result = (lhs.toNumber() <= rhs.toNumber());
        break;
      case JSOP_GT:
        *result = (lhs.toNumber() > rhs.toNumber());
        break;
      case JSOP_GE:
        *result = (lhs.toNumber() >= rhs.toNumber());
        break;
      case JSOP_EQ:
        *result = (lhs.toNumber() == rhs.toNumber());
        break;
      case JSOP_NE:
        *result = (lhs.toNumber() != rhs.toNumber());
        break;
      default:
        // Strict numeric ops and anything else are not folded here.
        return false;
    }

    return true;
}
// Replace the comparison with a constant when either the operand types
// (tryFold) or constant operands (evaluateConstantOperands) determine
// the result statically.
MDefinition *
MCompare::foldsTo(bool useValueNumbers)
{
    bool result;

    if (tryFold(&result) || evaluateConstantOperands(&result)) {
        if (type() == MIRType_Int32)
            return MConstant::New(Int32Value(result));

        JS_ASSERT(type() == MIRType_Boolean);
        return MConstant::New(BooleanValue(result));
    }

    return this;
}
// Refine the "might emulate undefined" flag with type-inference data: if
// the operand provably cannot be an undefined-emulating object, record
// that so lowering can use a cheaper test.
void
MNot::infer(JSContext *cx)
{
    JS_ASSERT(operandMightEmulateUndefined());

    if (!MaybeEmulatesUndefined(cx, getOperand(0)))
        markOperandCantEmulateUndefined();
}
// Fold logical NOT when the operand's value or type determines the result.
MDefinition *
MNot::foldsTo(bool useValueNumbers)
{
    // Fold if the input is constant.
    if (operand()->isConstant()) {
        const Value &v = operand()->toConstant()->value();
        // The result keeps this node's type (int32 or boolean).
        if (type() == MIRType_Int32)
            return MConstant::New(Int32Value(!ToBoolean(v)));

        // ToBoolean can cause no side effects, so this is safe.
        return MConstant::New(BooleanValue(!ToBoolean(v)));
    }

    // NOT of an undefined or null value is always true.
    if (operand()->type() == MIRType_Undefined || operand()->type() == MIRType_Null)
        return MConstant::New(BooleanValue(true));

    // NOT of an object that can't emulate undefined is always false.
    if (operand()->type() == MIRType_Object && !operandMightEmulateUndefined())
        return MConstant::New(BooleanValue(false));

    return this;
}
bool
MBoundsCheckLower::fallible()
{
return !range() || range()->lower() < minimum_;
}
// Debug printing: emit "beta <operand> <comparison-range>" to |fp|.
void
MBeta::printOpcode(FILE *fp)
{
    PrintOpcodeName(fp, op());
    fprintf(fp, " ");
    getOperand(0)->printName(fp);
    fprintf(fp, " ");

    Sprinter sp(GetIonContext()->cx);
    // NOTE(review): init() return value is ignored here; debug-only
    // output, but confirm a failed init cannot leave sp unusable.
    sp.init();
    comparison_->print(sp);
    fprintf(fp, "%s", sp.string());
}
// Narrow the value's range by intersecting it with the beta node's
// comparison range. An empty intersection means the guarded branch can
// never be taken, so the block is marked for early abort.
void
MBeta::computeRange()
{
    bool emptyRange = false;

    Range *range = Range::intersect(val_->range(), comparison_, &emptyRange);
    if (emptyRange) {
        IonSpew(IonSpew_Range, "Marking block for inst %d unexitable", id());
        block()->setEarlyAbort();
    } else {
        setRange(range);
    }
}
// Decide whether object allocation must go through a VM call instead of
// an inline allocation path.
bool
MNewObject::shouldUseVM() const
{
    // Singleton-typed or dynamically-slotted objects cannot be allocated
    // inline.
    return templateObject()->hasSingletonType() ||
           templateObject()->hasDynamicSlots();
}

bool
MNewArray::shouldUseVM() const
{
    JS_ASSERT(count() < JSObject::NELEMENTS_LIMIT);

    // Maximum number of elements that fit inline in a GC thing.
    size_t maxArraySlots =
        gc::GetGCKindSlots(gc::FINALIZE_OBJECT_LAST) - ObjectElements::VALUES_PER_HEADER;

    // Allocate space using the VMCall
    // when mir hints it needs to get allocated immediately,
    // but only when data doesn't fit the available array slots.
    bool allocating = isAllocating() && count() > maxArraySlots;

    return templateObject()->hasSingletonType() || allocating;
}
// A fixed-slot load only provably avoids aliasing a store when the store
// is also a fixed-slot store to a *different* slot index; everything
// else is treated conservatively as a possible alias.
bool
MLoadFixedSlot::mightAlias(MDefinition *store)
{
    bool provablyDisjoint = store->isStoreFixedSlot() &&
                            store->toStoreFixedSlot()->slot() != slot();
    return !provablyDisjoint;
}

// Same reasoning for dynamic-slot loads versus dynamic-slot stores.
bool
MLoadSlot::mightAlias(MDefinition *store)
{
    bool provablyDisjoint = store->isStoreSlot() &&
                            store->toStoreSlot()->slot() != slot();
    return !provablyDisjoint;
}
// Remove from the table every entry whose target was vetoed for inlining
// (i.e. whose slot in |choiceSet| is false).
void
InlinePropertyTable::trimTo(AutoObjectVector &targets, Vector<bool> &choiceSet)
{
    for (size_t i = 0; i < targets.length(); i++) {
        // If the target was inlined, don't erase the entry.
        if (choiceSet[i])
            continue;

        JSFunction *target = &targets[i]->as<JSFunction>();

        // Eliminate all entries containing the vetoed function from the map.
        // Note: only advance |j| when nothing was erased, since erase()
        // shifts later entries down.
        size_t j = 0;
        while (j < numEntries()) {
            if (entries_[j]->func == target)
                entries_.erase(&entries_[j]);
            else
                j++;
        }
    }
}
// Drop table entries whose function is not among |originals|, and patch
// surviving entries from the original function to its (possibly cloned)
// counterpart in |targets|.
void
InlinePropertyTable::trimToAndMaybePatchTargets(AutoObjectVector &targets,
                                                AutoObjectVector &originals)
{
    IonSpew(IonSpew_Inlining, "Got inlineable property cache with %d cases",
            (int)numEntries());

    // Only advance |i| when nothing was erased, since erase() shifts
    // later entries down.
    size_t i = 0;
    while (i < numEntries()) {
        bool foundFunc = false;
        // Compare using originals, but if we find a matching function,
        // patch it to the target, which might be a clone.
        for (size_t j = 0; j < originals.length(); j++) {
            if (entries_[i]->func == originals[j]) {
                if (entries_[i]->func != targets[j])
                    entries_[i] = new Entry(entries_[i]->typeObj, &targets[j]->as<JSFunction>());
                foundFunc = true;
                break;
            }
        }
        if (!foundFunc)
            entries_.erase(&(entries_[i]));
        else
            i++;
    }

    IonSpew(IonSpew_Inlining, "%d inlineable cases left after trimming to %d targets",
            (int)numEntries(), (int)targets.length());
}
// Return whether any entry in the table maps to |func|.
bool
InlinePropertyTable::hasFunction(JSFunction *func) const
{
    size_t count = numEntries();
    for (size_t i = 0; i < count; i++) {
        if (entries_[i]->func == func)
            return true;
    }
    return false;
}
// Build a type set containing the type objects of every entry mapped to
// |func|. Returns NULL on allocation failure.
types::StackTypeSet *
InlinePropertyTable::buildTypeSetForFunction(JSFunction *func) const
{
    LifoAlloc *alloc = GetIonContext()->temp->lifoAlloc();
    types::StackTypeSet *types = alloc->new_<types::StackTypeSet>();
    if (!types)
        return NULL;
    for (size_t i = 0; i < numEntries(); i++) {
        if (entries_[i]->func == func) {
            if (!types->addObject(types::Type::ObjectType(entries_[i]->typeObj).objectKey(), alloc))
                return NULL;
        }
    }
    return types;
}
bool
MInArray::needsNegativeIntCheck() const
{
return !index()->range() || index()->range()->lower() < 0;
}
bool
MLoadElementHole::needsNegativeIntCheck() const
{
return !index()->range() || index()->range()->lower() < 0;
}
// Accessors for statically-known typed arrays: the raw data pointer and
// byte length baked into the load/store instructions.

void *
MLoadTypedArrayElementStatic::base() const
{
    return TypedArray::viewData(typedArray_);
}

size_t
MLoadTypedArrayElementStatic::length() const
{
    return TypedArray::byteLength(typedArray_);
}

void *
MStoreTypedArrayElementStatic::base() const
{
    return TypedArray::viewData(typedArray_);
}

size_t
MStoreTypedArrayElementStatic::length() const
{
    return TypedArray::byteLength(typedArray_);
}
// Return whether |store| could write a slot this polymorphic load reads.
bool
MGetPropertyPolymorphic::mightAlias(MDefinition *store)
{
    // Allow hoisting this instruction if the store does not write to a
    // slot read by this instruction.
    if (!store->isStoreFixedSlot() && !store->isStoreSlot())
        return true;

    for (size_t i = 0; i < numShapes(); i++) {
        Shape *shape = this->shape(i);
        if (shape->slot() < shape->numFixedSlots()) {
            // Fixed slot.
            uint32_t slot = shape->slot();
            if (store->isStoreFixedSlot() && store->toStoreFixedSlot()->slot() != slot)
                continue;
            if (store->isStoreSlot())
                continue;
        } else {
            // Dynamic slot.
            uint32_t slot = shape->slot() - shape->numFixedSlots();
            if (store->isStoreSlot() && store->toStoreSlot()->slot() != slot)
                continue;
            if (store->isStoreFixedSlot())
                continue;
        }
        // This shape's slot overlaps the store: a possible alias.
        return true;
    }

    // No shape in the polymorphic set overlaps the store.
    return false;
}
// Fold an unsigned-to-double conversion of an int32 constant by
// reinterpreting its bits as uint32 at compile time.
MDefinition *
MAsmJSUnsignedToDouble::foldsTo(bool useValueNumbers)
{
    if (input()->isConstant()) {
        const Value &v = input()->toConstant()->value();
        if (v.isInt32())
            return MConstant::New(DoubleValue(uint32_t(v.toInt32())));
    }
    return this;
}
// Build an asm.js call node: record the callee, argument registers and
// operand definitions. A dynamic callee adds one extra operand for the
// callee definition itself. Returns NULL on allocation failure.
MAsmJSCall *
MAsmJSCall::New(Callee callee, const Args &args, MIRType resultType, size_t spIncrement)
{
    MAsmJSCall *call = new MAsmJSCall;
    call->spIncrement_ = spIncrement;
    call->callee_ = callee;
    call->setResultType(resultType);

    call->numArgs_ = args.length();
    call->argRegs_ = (AnyRegister *)GetIonContext()->temp->allocate(call->numArgs_ * sizeof(AnyRegister));
    if (!call->argRegs_)
        return NULL;
    for (size_t i = 0; i < call->numArgs_; i++)
        call->argRegs_[i] = args[i].reg;

    call->numOperands_ = call->numArgs_ + (callee.which() == Callee::Dynamic ? 1 : 0);
    call->operands_ = (MUse *)GetIonContext()->temp->allocate(call->numOperands_ * sizeof(MUse));
    if (!call->operands_)
        return NULL;
    for (size_t i = 0; i < call->numArgs_; i++)
        call->setOperand(i, args[i].def);
    if (callee.which() == Callee::Dynamic)
        call->setOperand(call->numArgs_, callee.dynamic());

    return call;
}
// Return whether obj[id] can be compiled as a dense native element access:
// a numeric index on objects known to share a native class.
bool
jit::ElementAccessIsDenseNative(MDefinition *obj, MDefinition *id)
{
    if (obj->mightBeType(MIRType_String))
        return false;

    if (id->type() != MIRType_Int32 && id->type() != MIRType_Double)
        return false;

    types::StackTypeSet *types = obj->resultTypeSet();
    if (!types)
        return false;

    Class *clasp = types->getKnownClass();
    return clasp && clasp->isNative();
}

// Return whether obj[id] can be compiled as a typed-array element access;
// on success |*arrayType| receives the element type of the array.
bool
jit::ElementAccessIsTypedArray(MDefinition *obj, MDefinition *id, int *arrayType)
{
    if (obj->mightBeType(MIRType_String))
        return false;

    if (id->type() != MIRType_Int32 && id->type() != MIRType_Double)
        return false;

    types::StackTypeSet *types = obj->resultTypeSet();
    if (!types)
        return false;

    // TYPE_MAX means the objects are not all typed arrays of one kind.
    *arrayType = types->getTypedArrayType();
    return *arrayType != TypedArray::TYPE_MAX;
}
// Return whether all objects obj may refer to are known to have packed
// (hole-free) dense elements.
bool
jit::ElementAccessIsPacked(JSContext *cx, MDefinition *obj)
{
    types::StackTypeSet *types = obj->resultTypeSet();
    return types && !types->hasObjectFlags(cx, types::OBJECT_FLAG_NON_PACKED);
}

// Conservatively return whether obj may carry indexed properties outside
// its dense elements (e.g. on the prototype chain or sparse indexes).
bool
jit::ElementAccessHasExtraIndexedProperty(JSContext *cx, MDefinition *obj)
{
    types::StackTypeSet *types = obj->resultTypeSet();

    if (!types || types->hasObjectFlags(cx, types::OBJECT_FLAG_LENGTH_OVERFLOW))
        return true;

    return types::TypeCanHaveExtraIndexedProperties(cx, types);
}
// Compute the single MIRType stored in the dense elements of every object
// |obj| may refer to, or MIRType_None when the element type is unknown or
// mixed across objects.
MIRType
jit::DenseNativeElementType(JSContext *cx, MDefinition *obj)
{
    types::StackTypeSet *types = obj->resultTypeSet();

    // Guard against a missing type set, consistent with the other
    // element-access helpers above; previously this was dereferenced
    // unconditionally.
    if (!types)
        return MIRType_None;

    MIRType elementType = MIRType_None;
    unsigned count = types->getObjectCount();

    for (unsigned i = 0; i < count; i++) {
        if (types::TypeObject *object = types->getTypeOrSingleObject(cx, i)) {
            if (object->unknownProperties())
                return MIRType_None;

            // JSID_VOID names the indexed (element) property of the object.
            types::HeapTypeSet *elementTypes = object->getProperty(cx, JSID_VOID, false);
            if (!elementTypes)
                return MIRType_None;

            MIRType type = MIRTypeFromValueType(elementTypes->getKnownTypeTag(cx));
            if (type == MIRType_None)
                return MIRType_None;

            // All objects must agree on a single element type.
            if (elementType == MIRType_None)
                elementType = type;
            else if (elementType != type)
                return MIRType_None;
        }
    }

    return elementType;
}
// Return whether reading |name| (or elements, when name is NULL) from an
// object of type |object| needs a type barrier to protect |observed|.
bool
jit::PropertyReadNeedsTypeBarrier(JSContext *cx, types::TypeObject *object, PropertyName *name,
                                  types::StackTypeSet *observed, bool updateObserved)
{
    // If the object being read from has types for the property which haven't
    // been observed at this access site, the read could produce a new type and
    // a barrier is needed. Note that this only covers reads from properties
    // which are accounted for by type information, i.e. native data properties
    // and elements.

    if (object->unknownProperties())
        return true;

    jsid id = name ? types::IdToTypeId(NameToId(name)) : JSID_VOID;

    // If this access has never executed, try to add types to the observed set
    // according to any property which exists on the object or its prototype.
    if (updateObserved && observed->empty() && observed->noConstraints() && !JSID_IS_VOID(id)) {
        JSObject *obj = object->singleton ? object->singleton : object->proto;

        while (obj) {
            if (!obj->isNative())
                break;

            Value v;
            if (HasDataProperty(cx, obj, id, &v)) {
                if (v.isUndefined())
                    break;
                observed->addType(cx, types::GetValueType(cx, v));
            }

            obj = obj->getProto();
        }
    }

    types::HeapTypeSet *property = object->getProperty(cx, id, false);
    if (!property)
        return true;

    // We need to consider possible types for the property both as an 'own'
    // property on the object and as inherited from any prototype. Type sets
    // for a property do not, however, reflect inherited types until a
    // getFromPrototypes() call has been performed.
    if (!property->hasPropagatedProperty())
        object->getFromPrototypes(cx, id, property);

    if (!TypeSetIncludes(observed, MIRType_Value, property))
        return true;

    // Type information for singleton objects is not required to reflect the
    // initial 'undefined' value for native properties, in particular global
    // variables declared with 'var'. Until the property is assigned a value
    // other than undefined, a barrier is required.
    if (name && object->singleton && object->singleton->isNative()) {
        Shape *shape = object->singleton->nativeLookup(cx, name);
        if (shape &&
            shape->hasDefaultGetter() &&
            object->singleton->nativeGetSlot(shape->slot()).isUndefined())
        {
            return true;
        }
    }

    // Freeze so the compilation is invalidated if the property types grow.
    property->addFreeze(cx);
    return false;
}
// Overload taking an MDefinition: a barrier is needed if any type object
// |obj| may refer to needs one.
bool
jit::PropertyReadNeedsTypeBarrier(JSContext *cx, MDefinition *obj, PropertyName *name,
                                  types::StackTypeSet *observed)
{
    if (observed->unknown())
        return false;

    types::TypeSet *types = obj->resultTypeSet();
    if (!types || types->unknownObject())
        return true;

    // Only seed the observed set when obj refers to a single type object;
    // otherwise the seeded types could be wrong for the other objects.
    bool updateObserved = types->getObjectCount() == 1;
    for (size_t i = 0; i < types->getObjectCount(); i++) {
        types::TypeObject *object = types->getTypeOrSingleObject(cx, i);
        if (object && PropertyReadNeedsTypeBarrier(cx, object, name, observed, updateObserved))
            return true;
    }

    return false;
}
// Return whether reading |name| from obj is likely idempotent: no type
// object may have the property reconfigured or defined as a getter.
bool
jit::PropertyReadIsIdempotent(JSContext *cx, MDefinition *obj, PropertyName *name)
{
    // Determine if reading a property from obj is likely to be idempotent.
    jsid id = types::IdToTypeId(NameToId(name));

    types::TypeSet *types = obj->resultTypeSet();
    if (!types || types->unknownObject())
        return false;

    for (size_t i = 0; i < types->getObjectCount(); i++) {
        if (types::TypeObject *object = types->getTypeOrSingleObject(cx, i)) {
            if (object->unknownProperties())
                return false;

            // Check if the property has been reconfigured or is a getter.
            types::HeapTypeSet *property = object->getProperty(cx, id, false);
            if (!property || property->isOwnProperty(cx, object, true))
                return false;
        }
    }

    return true;
}
// Seed |observed| with every object type that could be produced by reading
// |name| (or elements, when name is NULL) from obj, to hopefully avoid
// unnecessary type barriers and code invalidations. Falls back to the
// conservative AnyObject type whenever precise information is unavailable.
void
jit::AddObjectsForPropertyRead(JSContext *cx, MDefinition *obj, PropertyName *name,
                               types::StackTypeSet *observed)
{
    JS_ASSERT(observed->noConstraints());

    types::StackTypeSet *types = obj->resultTypeSet();
    if (!types || types->unknownObject()) {
        observed->addType(cx, types::Type::AnyObjectType());
        return;
    }

    jsid id = name ? types::IdToTypeId(NameToId(name)) : JSID_VOID;

    for (size_t i = 0; i < types->getObjectCount(); i++) {
        types::TypeObject *object = types->getTypeOrSingleObject(cx, i);
        if (!object)
            continue;

        if (object->unknownProperties()) {
            observed->addType(cx, types::Type::AnyObjectType());
            return;
        }

        // getProperty can fail (e.g. under OOM); fall back to the
        // conservative answer rather than dereferencing NULL, matching
        // the NULL checks in the other property helpers in this file.
        types::HeapTypeSet *property = object->getProperty(cx, id, false);
        if (!property || property->unknownObject()) {
            observed->addType(cx, types::Type::AnyObjectType());
            return;
        }

        // Use a distinct index and names here: the original code shadowed
        // both the outer loop variable |i| and |object|.
        for (size_t j = 0; j < property->getObjectCount(); j++) {
            if (types::TypeObject *propObject = property->getTypeObject(j))
                observed->addType(cx, types::Type::ObjectType(propObject));
            else if (JSObject *propSingleton = property->getSingleObject(j))
                observed->addType(cx, types::Type::ObjectType(propSingleton));
        }
    }
}
// Return whether pvalue was modified to include a type barrier ensuring
// that writing the value to objTypes/id will not require changing type
// information.
static bool
TryAddTypeBarrierForWrite(JSContext *cx, MBasicBlock *current, types::StackTypeSet *objTypes,
                          jsid id, MDefinition **pvalue)
{
    // All objects in the set must have the same types for id. Otherwise, we
    // could bail out without subsequently triggering a type change that
    // invalidates the compiled code.
    types::HeapTypeSet *aggregateProperty = NULL;

    for (size_t i = 0; i < objTypes->getObjectCount(); i++) {
        types::TypeObject *object = objTypes->getTypeOrSingleObject(cx, i);
        if (!object)
            continue;

        if (object->unknownProperties())
            return false;

        types::HeapTypeSet *property = object->getProperty(cx, id, false);
        if (!property)
            return false;

        // If any object already accepts the value, no barrier would be
        // useful here.
        if (TypeSetIncludes(property, (*pvalue)->type(), (*pvalue)->resultTypeSet()))
            return false;

        // This freeze is not required for correctness, but ensures that we
        // will recompile if the property types change and the barrier can
        // potentially be removed.
        property->addFreeze(cx);

        if (aggregateProperty) {
            // Sets must be equal (mutual subsets) across all objects.
            if (!aggregateProperty->isSubset(property) || !property->isSubset(aggregateProperty))
                return false;
        } else {
            aggregateProperty = property;
        }
    }

    JS_ASSERT(aggregateProperty);

    MIRType propertyType = MIRTypeFromValueType(aggregateProperty->getKnownTypeTag(cx));
    switch (propertyType) {
      case MIRType_Boolean:
      case MIRType_Int32:
      case MIRType_Double:
      case MIRType_String: {
        // The property is a particular primitive type, guard by unboxing the
        // value before the write.
        if ((*pvalue)->type() != MIRType_Value) {
            // The value is a different primitive, just do a VM call as it will
            // always trigger invalidation of the compiled code.
            JS_ASSERT((*pvalue)->type() != propertyType);
            return false;
        }
        MInstruction *ins = MUnbox::New(*pvalue, propertyType, MUnbox::Fallible);
        current->add(ins);
        *pvalue = ins;
        return true;
      }
      default:;
    }

    // Non-primitive property type: monitor the boxed value against a
    // clone of the property's type set instead of unboxing.
    if ((*pvalue)->type() != MIRType_Value)
        return false;

    types::StackTypeSet *types = aggregateProperty->clone(GetIonContext()->temp->lifoAlloc());
    if (!types)
        return false;

    MInstruction *ins = MMonitorTypes::New(*pvalue, types);
    current->add(ins);
    return true;
}
// Insert a guard into |current| that |obj| has (or, with bailOnEquality,
// does not have) type |typeObject|; returns the guard instruction.
static MInstruction *
AddTypeGuard(MBasicBlock *current, MDefinition *obj, types::TypeObject *typeObject,
             bool bailOnEquality)
{
    MGuardObjectType *guard = MGuardObjectType::New(obj, typeObject, bailOnEquality);
    current->add(guard);

    // For now, never move type object guards.
    guard->setNotMovable();

    return guard;
}
// Return whether writing |*pvalue| to property |name| (or elements, when
// name is NULL) of |*pobj| needs a type barrier. May rewrite *pobj or
// *pvalue (when canModify) to insert guards that avoid the barrier.
bool
jit::PropertyWriteNeedsTypeBarrier(JSContext *cx, MBasicBlock *current, MDefinition **pobj,
                                   PropertyName *name, MDefinition **pvalue, bool canModify)
{
    // If any value being written is not reflected in the type information for
    // objects which obj could represent, a type barrier is needed when writing
    // the value. As for propertyReadNeedsTypeBarrier, this only applies for
    // properties that are accounted for by type information, i.e. normal data
    // properties and elements.

    types::StackTypeSet *types = (*pobj)->resultTypeSet();
    if (!types || types->unknownObject())
        return true;

    jsid id = name ? types::IdToTypeId(NameToId(name)) : JSID_VOID;

    // If all of the objects being written to have property types which already
    // reflect the value, no barrier at all is needed. Additionally, if all
    // objects being written to have the same types for the property, and those
    // types do *not* reflect the value, add a type barrier for the value.
    bool success = true;
    for (size_t i = 0; i < types->getObjectCount(); i++) {
        types::TypeObject *object = types->getTypeOrSingleObject(cx, i);
        if (!object || object->unknownProperties())
            continue;

        types::HeapTypeSet *property = object->getProperty(cx, id, false);
        if (!property) {
            success = false;
            break;
        }
        if (!TypeSetIncludes(property, (*pvalue)->type(), (*pvalue)->resultTypeSet())) {
            // Either pobj or pvalue needs to be modified to filter out the
            // types which the value could have but are not in the property,
            // or a VM call is required. A VM call is always required if pobj
            // and pvalue cannot be modified.
            if (!canModify)
                return true;
            success = TryAddTypeBarrierForWrite(cx, current, types, id, pvalue);
            break;
        }
    }

    if (success)
        return false;

    // If all of the objects except one have property types which reflect the
    // value, and the remaining object has no types at all for the property,
    // add a guard that the object does not have that remaining object's type.

    if (types->getObjectCount() <= 1)
        return true;

    types::TypeObject *excluded = NULL;
    for (size_t i = 0; i < types->getObjectCount(); i++) {
        types::TypeObject *object = types->getTypeOrSingleObject(cx, i);
        if (!object || object->unknownProperties())
            continue;

        types::HeapTypeSet *property = object->getProperty(cx, id, false);
        if (!property)
            return true;

        if (TypeSetIncludes(property, (*pvalue)->type(), (*pvalue)->resultTypeSet()))
            continue;

        // More than one mismatching object, or one with existing property
        // types: the guard strategy does not apply.
        if (!property->empty() || excluded)
            return true;
        excluded = object;
    }

    JS_ASSERT(excluded);

    *pobj = AddTypeGuard(current, *pobj, excluded, /* bailOnEquality = */ true);
    return false;
}
| mpl-2.0 |
bytekast/cdf | core-js/src/test/javascript/cdf/queries/CpkQuery-spec.js | 1657 | /*!
* Copyright 2002 - 2015 Webdetails, a Pentaho company. All rights reserved.
*
* This software was developed by Webdetails and is provided under the terms
* of the Mozilla Public License, Version 2.0, or any later version. You may not use
* this file except in compliance with the license. If you need a copy of the license,
* please go to http://mozilla.org/MPL/2.0/. The Initial Developer is Webdetails.
*
* Software distributed under the Mozilla Public License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. Please refer to
* the license for the specific language governing your rights and limitations.
*/
define(["cdf/Dashboard.Clean"], function(Dashboard) {

  // Fixture data: the raw payload handed to the success handler and the
  // value the post-fetch callback is expected to produce from it.
  var unprocessedData = {data: 0},
      processedData = {data: [1, 2, 3]},
      dashboard,
      cpkQuery;

  // Fresh dashboard and CPK query instance before each spec.
  beforeEach(function() {
    dashboard = new Dashboard();
    dashboard.init();
    cpkQuery = dashboard.getQuery("cpk", {});
  });

  /**
   * ## CPK query #
   */
  describe("CPK query #", function() {

    /**
     * ## CPK query # getSuccessHandler
     */
    describe("CPK query # getSuccessHandler", function() {

      /**
       * ## CPK query # getSuccessHandler persists the last result and the post fetch processed result
       */
      it("persists the last result and the post fetch processed result", function() {
        // Invoke the wrapped handler with the raw payload; the query must
        // store both the raw and the callback-processed result sets.
        cpkQuery.getSuccessHandler(function(data) { return processedData; })(unprocessedData);
        expect(cpkQuery.getOption("lastResultSet")).toEqual(unprocessedData);
        expect(cpkQuery.getOption("lastProcessedResultSet")).toEqual(processedData);
      });
    });
  });
});
| mpl-2.0 |
talwai/consul | consul/config.go | 11405 | package consul
import (
"fmt"
"io"
"net"
"os"
"time"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/memberlist"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
// Default datacenter name and Serf port assignments for a Consul server.
const (
	DefaultDC          = "dc1"
	DefaultLANSerfPort = 8301
	DefaultWANSerfPort = 8302
)

// DefaultRPCAddr binds the RPC endpoint on all interfaces at port 8300.
var (
	DefaultRPCAddr = &net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 8300}
)

// ProtocolVersionMap is the mapping of Consul protocol versions
// to Serf protocol versions. We mask the Serf protocols using
// our own protocol version.
var protocolVersionMap map[uint8]uint8

func init() {
	// Every Consul protocol version listed here (1-3) currently maps to
	// Serf protocol version 4.
	protocolVersionMap = map[uint8]uint8{
		1: 4,
		2: 4,
		3: 4,
	}
}
// Config is used to configure the server
type Config struct {
	// Bootstrap mode is used to bring up the first Consul server.
	// It is required so that it can elect a leader without any
	// other nodes being present
	Bootstrap bool

	// BootstrapExpect mode is used to automatically bring up a collection of
	// Consul servers. This can be used to automatically bring up a collection
	// of nodes.
	BootstrapExpect int

	// Datacenter is the datacenter this Consul server represents
	Datacenter string

	// DataDir is the directory to store our state in
	DataDir string

	// Node name is the name we use to advertise. Defaults to hostname.
	NodeName string

	// Domain is the DNS domain for the records. Defaults to "consul."
	Domain string

	// RaftConfig is the configuration used for Raft in the local DC
	RaftConfig *raft.Config

	// RPCAddr is the RPC address used by Consul. This should be reachable
	// by the WAN and LAN
	RPCAddr *net.TCPAddr

	// RPCAdvertise is the address that is advertised to other nodes for
	// the RPC endpoint. This can differ from the RPC address, if for example
	// the RPCAddr is unspecified "0.0.0.0:8300", but this address must be
	// reachable
	RPCAdvertise *net.TCPAddr

	// SerfLANConfig is the configuration for the intra-dc serf
	SerfLANConfig *serf.Config

	// SerfWANConfig is the configuration for the cross-dc serf
	SerfWANConfig *serf.Config

	// ReconcileInterval controls how often we reconcile the strongly
	// consistent store with the Serf info. This is used to handle nodes
	// that are force removed, as well as intermittent unavailability during
	// leader election.
	ReconcileInterval time.Duration

	// LogOutput is the location to write logs to. If this is not set,
	// logs will go to stderr.
	LogOutput io.Writer

	// ProtocolVersion is the protocol version to speak. This must be between
	// ProtocolVersionMin and ProtocolVersionMax.
	ProtocolVersion uint8

	// VerifyIncoming is used to verify the authenticity of incoming connections.
	// This means that TCP requests are forbidden, only allowing for TLS. TLS connections
	// must match a provided certificate authority. This can be used to force client auth.
	VerifyIncoming bool

	// VerifyOutgoing is used to verify the authenticity of outgoing connections.
	// This means that TLS requests are used, and TCP requests are not made. TLS connections
	// must match a provided certificate authority. This is used to verify authenticity of
	// server nodes.
	VerifyOutgoing bool

	// VerifyServerHostname is used to enable hostname verification of servers. This
	// ensures that the certificate presented is valid for server.<datacenter>.<domain>.
	// This prevents a compromised client from being restarted as a server, and then
	// intercepting request traffic as well as being added as a raft peer. This should be
	// enabled by default with VerifyOutgoing, but for legacy reasons we cannot break
	// existing clients.
	VerifyServerHostname bool

	// CAFile is a path to a certificate authority file. This is used with VerifyIncoming
	// or VerifyOutgoing to verify the TLS connection.
	CAFile string

	// CertFile is used to provide a TLS certificate that is used for serving TLS connections.
	// Must be provided to serve TLS connections.
	CertFile string

	// KeyFile is used to provide a TLS key that is used for serving TLS connections.
	// Must be provided to serve TLS connections.
	KeyFile string

	// ServerName is used with the TLS certificate to ensure the name we
	// provide matches the certificate
	ServerName string

	// RejoinAfterLeave controls our interaction with Serf.
	// When set to false (default), a leave causes a Consul to not rejoin
	// the cluster until an explicit join is received. If this is set to
	// true, we ignore the leave, and rejoin the cluster on start.
	RejoinAfterLeave bool

	// Build is a string that is gossiped around, and can be used to help
	// operators track which versions are actively deployed
	Build string

	// ACLToken is the default token to use when making a request.
	// If not provided, the anonymous token is used. This enables
	// backwards compatibility as well.
	ACLToken string

	// ACLMasterToken is used to bootstrap the ACL system. It should be specified
	// on the servers in the ACLDatacenter. When the leader comes online, it ensures
	// that the Master token is available. This provides the initial token.
	ACLMasterToken string

	// ACLDatacenter provides the authoritative datacenter for ACL
	// tokens. If not provided, ACL verification is disabled.
	ACLDatacenter string

	// ACLTTL controls the time-to-live of cached ACL policies.
	// It can be set to zero to disable caching, but this adds
	// a substantial cost.
	ACLTTL time.Duration

	// ACLDefaultPolicy is used to control the ACL interaction when
	// there is no defined policy. This can be "allow" which means
	// ACLs are used to black-list, or "deny" which means ACLs are
	// white-lists.
	ACLDefaultPolicy string

	// ACLDownPolicy controls the behavior of ACLs if the ACLDatacenter
	// cannot be contacted. It can be either "deny" to deny all requests,
	// or "extend-cache" which ignores the ACLCacheInterval and uses
	// cached policies. If a policy is not in the cache, it acts like deny.
	// "allow" can be used to allow all requests. This is not recommended.
	ACLDownPolicy string

	// TombstoneTTL is used to control how long KV tombstones are retained.
	// This provides a window of time where the X-Consul-Index is monotonic.
	// Outside this window, the index may not be monotonic. This is a result
	// of a few trade offs:
	// 1) The index is defined by the data view and not globally. This is a
	// performance optimization that prevents any write from incrementing the
	// index for all data views.
	// 2) Tombstones are not kept indefinitely, since otherwise storage required
	// is also monotonic. This prevents deletes from reducing the disk space
	// used.
	// In theory, neither of these are intrinsic limitations, however for the
	// purposes of building a practical system, they are reasonable trade offs.
	//
	// It is also possible to set this to an incredibly long time, thereby
	// simulating infinite retention. This is not recommended however.
	//
	TombstoneTTL time.Duration

	// TombstoneTTLGranularity is used to control how granular the timers are
	// for the Tombstone GC. This is used to batch the GC of many keys together
	// to reduce overhead. It is unlikely a user would ever need to tune this.
	TombstoneTTLGranularity time.Duration

	// Minimum Session TTL enforced by this server.
	SessionTTLMin time.Duration

	// ServerUp callback can be used to trigger a notification that
	// a Consul server is now up and known about.
	ServerUp func()

	// UserEventHandler callback can be used to handle incoming
	// user events. This function should not block.
	UserEventHandler func(serf.UserEvent)

	// DisableCoordinates controls features related to network coordinates.
	DisableCoordinates bool

	// CoordinateUpdatePeriod controls how long a server batches coordinate
	// updates before applying them in a Raft transaction. A larger period
	// leads to fewer Raft transactions, but also the stored coordinates
	// being more stale.
	CoordinateUpdatePeriod time.Duration

	// CoordinateUpdateBatchSize controls the maximum number of updates a
	// server batches before applying them in a Raft transaction.
	CoordinateUpdateBatchSize int

	// CoordinateUpdateMaxBatches controls the maximum number of batches we
	// are willing to apply in one period. After this limit we will issue a
	// warning and discard the remaining updates.
	CoordinateUpdateMaxBatches int
}
// CheckVersion verifies that the configured ProtocolVersion falls within
// the supported range [ProtocolVersionMin, ProtocolVersionMax].
func (c *Config) CheckVersion() error {
	pv := c.ProtocolVersion
	switch {
	case pv < ProtocolVersionMin:
		return fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
			pv, ProtocolVersionMin, ProtocolVersionMax)
	case pv > ProtocolVersionMax:
		return fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
			pv, ProtocolVersionMin, ProtocolVersionMax)
	default:
		return nil
	}
}
// CheckACL sanity checks the ACL policy configuration, rejecting any
// default or down policy value outside the supported set.
func (c *Config) CheckACL() error {
	if c.ACLDefaultPolicy != "allow" && c.ACLDefaultPolicy != "deny" {
		return fmt.Errorf("Unsupported default ACL policy: %s", c.ACLDefaultPolicy)
	}
	validDown := map[string]bool{
		"allow":        true,
		"deny":         true,
		"extend-cache": true,
	}
	if !validDown[c.ACLDownPolicy] {
		return fmt.Errorf("Unsupported down ACL policy: %s", c.ACLDownPolicy)
	}
	return nil
}
// DefaultConfig is used to return a sane default configuration.
// Note: panics if the local hostname cannot be determined.
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	conf := &Config{
		Datacenter:              DefaultDC,
		NodeName:                hostname,
		RPCAddr:                 DefaultRPCAddr,
		RaftConfig:              raft.DefaultConfig(),
		SerfLANConfig:           serf.DefaultConfig(),
		SerfWANConfig:           serf.DefaultConfig(),
		ReconcileInterval:       60 * time.Second,
		ProtocolVersion:         ProtocolVersion2Compatible,
		ACLTTL:                  30 * time.Second,
		ACLDefaultPolicy:        "allow",
		ACLDownPolicy:           "extend-cache",
		TombstoneTTL:            15 * time.Minute,
		TombstoneTTLGranularity: 30 * time.Second,
		SessionTTLMin:           10 * time.Second,
		DisableCoordinates:      false,

		// These are tuned to provide a total throughput of 128 updates
		// per second. If you update these, you should update the client-
		// side SyncCoordinateRateTarget parameter accordingly.
		CoordinateUpdatePeriod:     5 * time.Second,
		CoordinateUpdateBatchSize:  128,
		CoordinateUpdateMaxBatches: 5,
	}

	// Increase our reap interval to 3 days instead of 24h.
	conf.SerfLANConfig.ReconnectTimeout = 3 * 24 * time.Hour
	conf.SerfWANConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// WAN Serf should use the WAN timing, since we are using it
	// to communicate between DC's
	conf.SerfWANConfig.MemberlistConfig = memberlist.DefaultWANConfig()

	// Ensure we don't have port conflicts
	conf.SerfLANConfig.MemberlistConfig.BindPort = DefaultLANSerfPort
	conf.SerfWANConfig.MemberlistConfig.BindPort = DefaultWANSerfPort

	// Disable shutdown on removal
	conf.RaftConfig.ShutdownOnRemove = false
	return conf
}
// tlsConfig projects the server's TLS-related settings onto a
// tlsutil.Config for use by the TLS wrappers.
func (c *Config) tlsConfig() *tlsutil.Config {
	return &tlsutil.Config{
		VerifyIncoming:       c.VerifyIncoming,
		VerifyOutgoing:       c.VerifyOutgoing,
		VerifyServerHostname: c.VerifyServerHostname,
		CAFile:               c.CAFile,
		CertFile:             c.CertFile,
		KeyFile:              c.KeyFile,
		NodeName:             c.NodeName,
		ServerName:           c.ServerName,
		Domain:               c.Domain,
	}
}
| mpl-2.0 |
kalwar/openelisglobal-core | app/src/us/mn/state/health/lims/patient/saving/PatientEntryAfterSampleEntry.java | 1716 | /*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations under
* the License.
*
* The Original Code is OpenELIS code.
*
* Copyright (C) The Minnesota Department of Health. All Rights Reserved.
*
* Contributor(s): CIRG, University of Washington, Seattle WA.
*/
/**
* Côte d'Ivoire
* @author pahill
* @since 2010-06-15
**/
package us.mn.state.health.lims.patient.saving;
import static us.mn.state.health.lims.common.services.StatusService.RecordStatus.NotRegistered;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.beanutils.DynaBean;
import us.mn.state.health.lims.common.services.StatusService.RecordStatus;
public class PatientEntryAfterSampleEntry extends PatientEntry {

    /**
     * Builds a patient-entry accessioner for the case where the sample was
     * entered before the patient: the patient record moves to
     * InitialRegistration while the sample status is intentionally left as-is.
     */
    public PatientEntryAfterSampleEntry(DynaBean dynaBean, String sysUserId, HttpServletRequest request) throws Exception {
        super(dynaBean, sysUserId, request);
        this.newPatientStatus = RecordStatus.InitialRegistration;
        this.newSampleStatus = null; // leave it be
    }

    /**
     * Applicable only when the patient is still not registered but the
     * sample has already progressed past the not-registered state.
     */
    @Override
    public boolean canAccession() {
        boolean patientUnregistered = NotRegistered == statusSet.getPatientRecordStatus();
        boolean sampleAlreadyEntered = NotRegistered != statusSet.getSampleRecordStatus();
        return patientUnregistered && sampleAlreadyEntered;
    }
}
| mpl-2.0 |
DarkPrince304/MozDef | rest/plugins/cymon.py | 3350 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jeff Bryner jbryner@mozilla.com
import requests
import json
import os
import sys
from configlib import getConfig, OptionParser
class message(object):
    def __init__(self):
        '''register our criteria for being passed a message
        as a list of lower case strings to match with an rest endpoint
        (i.e. blockip matches /blockip)
        set the priority if you have a preference for order of plugins
        0 goes first, 100 is assumed/default if not sent

        Plugins will register in Meteor with attributes:
        name: (as below)
        description: (as below)
        priority: (as below)
        file: "plugins.filename" where filename.py is the plugin code.

        Plugin gets sent main rest options as:
        self.restoptions
        self.restoptions['configfile'] will be the .conf file
        used by the restapi's index.py file.
        '''
        self.registration = ['ipintel']
        self.priority = 5
        self.name = "cymon"
        self.description = "IP intel from the cymon.io api"

        # set my own conf file
        # relative path to the rest index.py file
        self.configfile = './plugins/cymon.conf'
        self.options = None
        if os.path.exists(self.configfile):
            sys.stdout.write('found conf file {0}\n'.format(self.configfile))
            self.initConfiguration()

    def onMessage(self, request, response):
        '''
        request: http://bottlepy.org/docs/dev/api.html#the-request-object
        response: http://bottlepy.org/docs/dev/api.html#the-response-object

        Looks up the posted 'ipaddress' against the cymon.io timeline API and
        proxies the JSON result back to the caller. Responds 500 when the body
        is missing, is not valid JSON, or lacks an 'ipaddress' key.
        '''
        requestDict = None
        if request.body:
            arequest = request.body.read()
            request.body.close()
            try:
                requestDict = json.loads(arequest)
            except ValueError:
                # Malformed JSON: fail fast instead of falling through to a
                # NameError on the unbound requestDict (bug in prior version).
                response.status = 500
                return (request, response)

        if isinstance(requestDict, dict) and 'ipaddress' in requestDict:
            url = "https://cymon.io/api/nexus/v1/ip/{0}/timeline?combined=true&format=json".format(requestDict['ipaddress'])
            # add the cymon api key if one was configured
            if self.options is not None:
                headers = {'Authorization': 'Token {0}'.format(self.options.cymonapikey)}
            else:
                headers = None
            dresponse = requests.get(url, headers=headers)
            if dresponse.status_code == 200:
                response.content_type = "application/json"
                response.body = dresponse.content
            else:
                # propagate the upstream failure status
                response.status = dresponse.status_code
        else:
            # no usable request body / no ipaddress supplied
            response.status = 500

        return (request, response)

    def initConfiguration(self):
        '''Populate self.options with plugin-specific settings from the conf file.'''
        myparser = OptionParser()
        # setup self.options by sending empty list [] to parse_args
        (self.options, args) = myparser.parse_args([])

        # fill self.options with plugin-specific options
        # cymon options
        self.options.cymonapikey = getConfig('cymonapikey',
                                             '',
                                             self.configfile)
| mpl-2.0 |
akhan7/servo | tests/unit/layout/size_of.rs | 1837 | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use layout::Fragment;
use layout::ServoThreadSafeLayoutNode;
use std::mem::size_of;
#[test]
// Guards the stack size of layout::fragment::Fragment against accidental
// growth; the expected value must be updated by hand when it changes.
fn test_size_of_fragment() {
    let expected = 168;
    let actual = size_of::<Fragment>();

    if actual < expected {
        panic!("Your changes have decreased the stack size of layout::fragment::Fragment \
                from {} to {}. Good work! Please update the size in tests/unit/layout/size_of.rs",
                expected, actual);
    }

    if actual > expected {
        panic!("Your changes have increased the stack size of layout::fragment::Fragment \
                from {} to {}. Please consider choosing a design which avoids this increase. \
                If you feel that the increase is necessary, update the size in \
                tests/unit/layout/size_of.rs.",
                expected, actual);
    }
}
#[test]
// Guards the stack size of ServoThreadSafeLayoutNode against accidental
// growth; the expected value must be updated by hand when it changes.
// Fix: the "decreased" message pointed at the wrong path
// ("tests/layout/unit/size_of.rs"); every other message in this file uses
// "tests/unit/layout/size_of.rs".
fn test_size_of_layout_node() {
    let expected = 16;
    let actual = size_of::<ServoThreadSafeLayoutNode>();

    if actual < expected {
        panic!("Your changes have decreased the stack size of layout::wrapper::ServoThreadSafeLayoutNode \
                from {} to {}. Good work! Please update the size in tests/unit/layout/size_of.rs",
                expected, actual);
    }

    if actual > expected {
        panic!("Your changes have increased the stack size of layout::wrapper::ServoThreadSafeLayoutNode \
                from {} to {}. Please consider choosing a design which avoids this increase. \
                If you feel that the increase is necessary, update the size in \
                tests/unit/layout/size_of.rs.",
                expected, actual);
    }
}
| mpl-2.0 |
kuali/kc | coeus-impl/src/main/java/org/kuali/kra/iacuc/procedures/IacucProtocolProcedureServiceImpl.java | 77837 | /*
* Kuali Coeus, a comprehensive research administration system for higher education.
*
* Copyright 2005-2016 Kuali, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.kuali.kra.iacuc.procedures;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.kuali.kra.iacuc.IacucPersonTraining;
import org.kuali.kra.iacuc.IacucProcedureCategoryCustomData;
import org.kuali.kra.iacuc.IacucProtocol;
import org.kuali.kra.iacuc.IacucProtocolForm;
import org.kuali.kra.iacuc.IacucSpecies;
import org.kuali.kra.iacuc.personnel.IacucProtocolPerson;
import org.kuali.kra.iacuc.personnel.IacucProtocolPersonTrainingService;
import org.kuali.kra.iacuc.species.IacucProtocolSpecies;
import org.kuali.kra.iacuc.species.IacucProtocolSpeciesService;
import org.kuali.kra.infrastructure.Constants;
import org.kuali.kra.protocol.personnel.ProtocolPersonBase;
import org.kuali.rice.coreservice.framework.parameter.ParameterConstants;
import org.kuali.rice.coreservice.framework.parameter.ParameterService;
import org.kuali.rice.kns.util.KNSGlobalVariables;
import org.kuali.rice.krad.service.BusinessObjectService;
import org.kuali.rice.krad.service.SequenceAccessorService;
import org.kuali.rice.krad.util.ObjectUtils;
public class IacucProtocolProcedureServiceImpl implements IacucProtocolProcedureService {
// Injected Kuali Rice / KC collaborator services.
private BusinessObjectService businessObjectService;
private ParameterService parameterService;
private IacucProtocolPersonTrainingService iacucProtocolPersonTrainingService;

// Database sequence used to generate primary keys for study group headers.
private static final String PROTOCOL_STUDY_GROUP_HEADER_SEQUENCE_ID = "SEQ_IACUC_PROT_STUD_GRP_HDR_ID";
// System parameter name controlling how procedures are grouped in the UI.
private static final String PROCEDURE_VIEW_MODE = "PROCEDURE_VIEW_MODE";
// Parameter value meaning "group procedures by species".
private static final String PROCEDURE_VIEW_MODE_SPECIES = "S";

private IacucProtocolSpeciesService iacucProtocolSpeciesService;
private SequenceAccessorService sequenceAccessorService;
/**
 * Returns every IACUC procedure, ordered ascending by procedure category code.
 */
@Override
public List<IacucProcedure> getAllProcedures() {
    @SuppressWarnings("unchecked")
    List<IacucProcedure> procedures = (List<IacucProcedure>)
            getBusinessObjectService().findAllOrderBy(IacucProcedure.class, "procedureCategoryCode", true);
    return procedures;
}
/**
 * Returns every IACUC procedure category, ordered ascending by category code.
 */
@Override
public List<IacucProcedureCategory> getAllProcedureCategories() {
    @SuppressWarnings("unchecked")
    List<IacucProcedureCategory> categories = (List<IacucProcedureCategory>)
            getBusinessObjectService().findAllOrderBy(IacucProcedureCategory.class, "procedureCategoryCode", true);
    return categories;
}
/**
 * Returns the species records attached to the protocol currently held by
 * the active IACUC protocol form (read from the global Kuali form holder).
 */
@Override
@SuppressWarnings("deprecation")
public List<IacucProtocolSpecies> getProtocolSpecies() {
    IacucProtocolForm activeForm = (IacucProtocolForm) KNSGlobalVariables.getKualiForm();
    Long protocolId = activeForm.getIacucProtocolDocument().getProtocol().getProtocolId();
    Map<String, Object> criteria = new HashMap<String, Object>();
    criteria.put("protocolId", protocolId);
    return (List<IacucProtocolSpecies>) getBusinessObjectService().findMatching(IacucProtocolSpecies.class, criteria);
}
/**
 * Returns the protocol's study group beans, creating a fresh bean for every
 * procedure when none exist yet. When the "view by species" parameter is on,
 * also regroups the protocol's study groups by species as a side effect.
 */
@Override
public List<IacucProtocolStudyGroupBean> getRevisedStudyGroupBeans(IacucProtocol iacucProtocol, List<IacucProcedure> allProcedures) {
    List<IacucProtocolStudyGroupBean> studyGroupBeans = iacucProtocol.getIacucProtocolStudyGroupBeans();
    if(studyGroupBeans.isEmpty()) {
        studyGroupBeans = getNewListOfStudyGroupBeans(iacucProtocol, allProcedures);
    }
    if(isProcedureViewedBySpecies()) {
        // NOTE(review): this regroups iacucProtocol.getIacucProtocolStudyGroups(),
        // not the freshly built list above — confirm this asymmetry is intended.
        groupProcedureStudyBySpecies(iacucProtocol);
    }
    return studyGroupBeans;
}
/**
 * Marks the procedure whose code matches {@code selectedProcedureCode} as
 * selected, i.e. already in use by a persisted study group.
 */
private void selectUsedProcedureCategory(List<IacucProcedure> allProcedures, Integer selectedProcedureCode) {
    for (IacucProcedure candidate : allProcedures) {
        if (candidate.getProcedureCode().equals(selectedProcedureCode)) {
            candidate.setProcedureSelected(true);
            break; // procedure codes are unique; stop at the first match
        }
    }
}
/**
 * Returns the IACUC training records for the given person, delegating to
 * the person-training service.
 */
@Override
public List<IacucPersonTraining> getIacucPersonTrainingDetails(String personId) {
    return getIacucProtocolPersonTrainingService().getIacucPersonTrainingDetails(personId);
}
/**
 * Loads and attaches the IACUC training records for every person listed on
 * the protocol.
 */
@Override
public void setTrainingDetails(IacucProtocol protocol) {
    for (ProtocolPersonBase person : protocol.getProtocolPersons()) {
        IacucProtocolPerson iacucPerson = (IacucProtocolPerson) person;
        iacucPerson.setIacucPersonTrainings(getIacucPersonTrainingDetails(iacucPerson.getPersonId()));
    }
}
/**
 * Builds the full display list of study group beans: one bean per procedure,
 * reusing the protocol's existing bean for a procedure when one exists and
 * creating an empty bean otherwise.
 */
private List<IacucProtocolStudyGroupBean> getNewListOfStudyGroupBeans(IacucProtocol protocol, List<IacucProcedure> allProcedures) {
    List<IacucProtocolStudyGroupBean> studyGroupBeans = new ArrayList<IacucProtocolStudyGroupBean>();
    // NOTE: iterates a fresh fetch of all procedures, not the allProcedures
    // argument (which is only used to flag persisted procedures as selected).
    for(IacucProcedure iacucProcedure : getAllProcedures()) {
        IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean = getCurrentStudyGroupForProcedure(allProcedures, protocol, iacucProcedure);
        if(ObjectUtils.isNull(iacucProtocolStudyGroupBean)) {
            // no persisted group for this procedure yet: show an empty bean
            iacucProtocolStudyGroupBean = new IacucProtocolStudyGroupBean();
            iacucProtocolStudyGroupBean.setProcedureCategoryCode(iacucProcedure.getProcedureCategoryCode());
            iacucProtocolStudyGroupBean.setProcedureCode(iacucProcedure.getProcedureCode());
        }
        studyGroupBeans.add(iacucProtocolStudyGroupBean);
    }
    return studyGroupBeans;
}
/**
 * Finds the protocol's existing study group bean matching the given
 * procedure (by category and procedure code), or null when none exists.
 * Side effect: when a match is found, the corresponding entry in
 * {@code allProcedures} is flagged as selected.
 */
private IacucProtocolStudyGroupBean getCurrentStudyGroupForProcedure(List<IacucProcedure> allProcedures, IacucProtocol protocol, IacucProcedure iacucProcedure) {
    List<IacucProtocolStudyGroupBean> iacucProtocolStudyGroups = protocol.getIacucProtocolStudyGroups();
    IacucProtocolStudyGroupBean currentStudyGroup = null;
    for(IacucProtocolStudyGroupBean iacucProtocolStudyGroup : iacucProtocolStudyGroups) {
        if(iacucProtocolStudyGroup.getProcedureCategoryCode().equals(iacucProcedure.getProcedureCategoryCode()) &&
                iacucProtocolStudyGroup.getProcedureCode().equals(iacucProcedure.getProcedureCode())) {
            currentStudyGroup = iacucProtocolStudyGroup;
            selectUsedProcedureCategory(allProcedures, iacucProtocolStudyGroup.getProcedureCode());
            break;
        }
    }
    return currentStudyGroup;
}
/**
 * Returns the custom data attributes configured for the given procedure
 * category, ordered ascending by sort id.
 */
private List<IacucProcedureCategoryCustomData> getIacucProcedureCustomData(Integer procedureCategoryCode) {
    Map<String, Integer> criteria = new HashMap<String, Integer>();
    criteria.put("procedureCategoryCode", procedureCategoryCode);
    @SuppressWarnings("unchecked")
    List<IacucProcedureCategoryCustomData> customData = (List<IacucProcedureCategoryCustomData>)
            getBusinessObjectService().findMatchingOrderBy(IacucProcedureCategoryCustomData.class, criteria, "sortId", true);
    return customData;
}
/**
 * Returns the next value of the named database sequence; callers use it as
 * the primary key for new records of the given persistent class.
 *
 * @param sequenceKey name of the database sequence to advance
 * @param clazz       the persistent class the key is generated for
 * @return the next available sequence value
 */
private Integer getNextSequenceNumber(String sequenceKey, Class<?> clazz) {
    // Class<?> instead of the raw Class type; callers passing X.class are unaffected.
    return getSequenceAccessorService().getNextAvailableSequenceNumber(sequenceKey, clazz).intValue();
}
// Standard dependency-injection accessors for collaborating services.

public SequenceAccessorService getSequenceAccessorService() {
    return sequenceAccessorService;
}

public void setSequenceAccessorService(SequenceAccessorService sequenceAccessorService) {
    this.sequenceAccessorService = sequenceAccessorService;
}

public BusinessObjectService getBusinessObjectService() {
    return businessObjectService;
}

public void setBusinessObjectService(BusinessObjectService businessObjectService) {
    this.businessObjectService = businessObjectService;
}

public IacucProtocolPersonTrainingService getIacucProtocolPersonTrainingService() {
    return iacucProtocolPersonTrainingService;
}

public void setIacucProtocolPersonTrainingService(IacucProtocolPersonTrainingService iacucProtocolPersonTrainingService) {
    this.iacucProtocolPersonTrainingService = iacucProtocolPersonTrainingService;
}

public IacucProtocolSpeciesService getIacucProtocolSpeciesService() {
    return iacucProtocolSpeciesService;
}

public void setIacucProtocolSpeciesService(IacucProtocolSpeciesService iacucProtocolSpeciesService) {
    this.iacucProtocolSpeciesService = iacucProtocolSpeciesService;
}
/**
 * Adds the selected species/groups to a study group bean. When the bean has
 * never been persisted (no header id), it is first initialized and attached
 * to the protocol. New study groups get their procedure custom data, are
 * appended to the bean, and the bean's species-grouped view is rebuilt.
 */
@Override
public void addProtocolStudyGroup(IacucProtocolStudyGroupBean selectedProtocolStudyGroupBean, IacucProtocol iacucProtocol) {
    // a missing header id means this bean was never saved before
    boolean isNewCategoryBean = ObjectUtils.isNull(selectedProtocolStudyGroupBean.getIacucProtocolStudyGroupHeaderId());
    List<String> protocolSpeciesAndGroups = selectedProtocolStudyGroupBean.getProtocolSpeciesAndGroups();
    if(isNewCategoryBean) {
        setAttributesForNewStudyGroupBean(selectedProtocolStudyGroupBean, iacucProtocol);
        iacucProtocol.getIacucProtocolStudyGroups().add(selectedProtocolStudyGroupBean);
    }
    List<IacucProtocolStudyGroup> newStudyGroups = getNewProtocolStudyGroups(protocolSpeciesAndGroups, iacucProtocol);
    addProcedureCustomData(selectedProtocolStudyGroupBean, newStudyGroups, iacucProtocol);
    selectedProtocolStudyGroupBean.getIacucProtocolStudyGroups().addAll(newStudyGroups);
    groupProcedureStudyBySpecies(selectedProtocolStudyGroupBean);
}
/**
 * Rebuilds the species-grouped view for every study group bean on the protocol.
 */
private void groupProcedureStudyBySpecies(IacucProtocol iacucProtocol) {
    for (IacucProtocolStudyGroupBean studyGroupBean : iacucProtocol.getIacucProtocolStudyGroups()) {
        groupProcedureStudyBySpecies(studyGroupBean);
    }
}
/**
 * Rebuilds the given bean's study details grouped by species.
 */
private void groupProcedureStudyBySpecies(IacucProtocolStudyGroupBean selectedProtocolStudyGroupBean) {
    selectedProtocolStudyGroupBean.setIacucProtocolSpeciesStudyGroups(
            getListOfProcedureStudyBySpecies(selectedProtocolStudyGroupBean.getIacucProtocolStudyGroups()));
}
/**
 * Groups the given study groups by species: one IacucProtocolSpeciesStudyGroup
 * per distinct species, collecting all study groups for that species.
 * The first study group seen for a species seeds the group's representative
 * IacucProtocolStudyGroup.
 */
private List<IacucProtocolSpeciesStudyGroup> getListOfProcedureStudyBySpecies(List<IacucProtocolStudyGroup> iacucProtocolStudyGroups) {
    // NOTE(review): IacucSpecies is used as a HashMap key — assumes it has a
    // value-based equals/hashCode (or that instances are shared); confirm.
    Map<IacucSpecies,IacucProtocolSpeciesStudyGroup> protocolSpeciesStudyGroups = new HashMap<IacucSpecies,IacucProtocolSpeciesStudyGroup>();
    List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups = new ArrayList<IacucProtocolSpeciesStudyGroup>();
    for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolStudyGroups) {
        IacucSpecies iacucSpecies = iacucProtocolStudyGroup.getIacucProtocolSpecies().getIacucSpecies();
        IacucProtocolSpeciesStudyGroup iacucProtocolSpeciesStudyGroup = protocolSpeciesStudyGroups.get(iacucSpecies);
        if(ObjectUtils.isNull(iacucProtocolSpeciesStudyGroup)) {
            // first time this species is seen: create its grouping bucket
            iacucProtocolSpeciesStudyGroup = new IacucProtocolSpeciesStudyGroup();
            iacucProtocolSpeciesStudyGroup.setSpeciesCode(iacucSpecies.getSpeciesCode());
            iacucProtocolSpeciesStudyGroup.setIacucSpecies(iacucSpecies);
            iacucProtocolSpeciesStudyGroup.setIacucProtocolStudyGroup(iacucProtocolStudyGroup);
            iacucProtocolSpeciesStudyGroups.add(iacucProtocolSpeciesStudyGroup);
            protocolSpeciesStudyGroups.put(iacucSpecies, iacucProtocolSpeciesStudyGroup);
        }
        iacucProtocolSpeciesStudyGroup.getIacucProtocolStudyGroups().add(iacucProtocolStudyGroup);
    }
    return iacucProtocolSpeciesStudyGroups;
}
/**
 * Groups the given study groups by protocol species record (a finer grain
 * than species): one IacucProtocolSpeciesStudyGroup per distinct
 * IacucProtocolSpecies, carrying that record's species info and head count.
 */
private List<IacucProtocolSpeciesStudyGroup> getListOfProcedureStudyBySpeciesGroup(List<IacucProtocolStudyGroup> iacucProtocolStudyGroups) {
    // NOTE(review): IacucProtocolSpecies is used as a HashMap key — assumes a
    // value-based equals/hashCode (or shared instances); confirm.
    Map<IacucProtocolSpecies,IacucProtocolSpeciesStudyGroup> protocolSpeciesStudyGroups = new HashMap<IacucProtocolSpecies,IacucProtocolSpeciesStudyGroup>();
    List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups = new ArrayList<IacucProtocolSpeciesStudyGroup>();
    for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolStudyGroups) {
        IacucProtocolSpecies iacucProtocolSpecies = iacucProtocolStudyGroup.getIacucProtocolSpecies();
        IacucProtocolSpeciesStudyGroup iacucProtocolSpeciesStudyGroup = protocolSpeciesStudyGroups.get(iacucProtocolSpecies);
        if(ObjectUtils.isNull(iacucProtocolSpeciesStudyGroup)) {
            // first time this protocol species is seen: create its bucket
            iacucProtocolSpeciesStudyGroup = new IacucProtocolSpeciesStudyGroup();
            iacucProtocolSpeciesStudyGroup.setIacucProtocolSpeciesId(iacucProtocolSpecies.getIacucProtocolSpeciesId());
            iacucProtocolSpeciesStudyGroup.setIacucProtocolSpecies(iacucProtocolSpecies);
            iacucProtocolSpeciesStudyGroup.setSpeciesCode(iacucProtocolSpecies.getIacucSpecies().getSpeciesCode());
            iacucProtocolSpeciesStudyGroup.setIacucSpecies(iacucProtocolSpecies.getIacucSpecies());
            iacucProtocolSpeciesStudyGroup.setIacucProtocolStudyGroup(iacucProtocolStudyGroup);
            iacucProtocolSpeciesStudyGroup.setTotalSpeciesCount(iacucProtocolSpecies.getSpeciesCount());
            iacucProtocolSpeciesStudyGroups.add(iacucProtocolSpeciesStudyGroup);
            protocolSpeciesStudyGroups.put(iacucProtocolSpecies, iacucProtocolSpeciesStudyGroup);
        }
        iacucProtocolSpeciesStudyGroup.getIacucProtocolStudyGroups().add(iacucProtocolStudyGroup);
    }
    return iacucProtocolSpeciesStudyGroups;
}
/**
 * Builds each protocol person's procedure responsibilities, grouped either
 * by species or by protocol species group depending on the view-mode parameter.
 */
@Override
public void populateIacucSpeciesPersonProcedures(IacucProtocol iacucProtocol) {
    List<IacucProtocolStudyGroup> allStudyGroups = getAllProcedureStudyGroups(iacucProtocol);
    if (!isProcedureViewedBySpecies()) {
        setPersonProceduresByGroups(iacucProtocol, allStudyGroups);
    } else {
        setPersonProceduresBySpecies(iacucProtocol, allStudyGroups);
    }
}
/**
 * For every protocol person, builds the species-grouped procedure view and
 * records which procedures that person is responsible for. A fresh grouping
 * is built per person because the result objects are mutated per person.
 */
private void setPersonProceduresBySpecies(IacucProtocol iacucProtocol, List<IacucProtocolStudyGroup> iacucProtocolStudyGroups) {
    for(ProtocolPersonBase protocolPerson : iacucProtocol.getProtocolPersons()) {
        List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups = getListOfProcedureStudyBySpecies(iacucProtocolStudyGroups);
        IacucProtocolPerson iacucProtocolPerson = (IacucProtocolPerson)protocolPerson;
        iacucProtocolPerson.setProcedureDetails(getPersonProcedureDetails(iacucProtocolSpeciesStudyGroups, iacucProtocolPerson));
        iacucProtocolPerson.setAllProceduresSelected(isAllProceduresChecked(iacucProtocolPerson.getProcedureDetails()));
    }
}
/**
 * For every protocol person, builds the procedure view grouped by protocol
 * species group and records which procedures that person is responsible for.
 * Mirrors setPersonProceduresBySpecies but at species-group granularity.
 */
private void setPersonProceduresByGroups(IacucProtocol iacucProtocol, List<IacucProtocolStudyGroup> iacucProtocolStudyGroups) {
    for(ProtocolPersonBase protocolPerson : iacucProtocol.getProtocolPersons()) {
        List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups = getListOfProcedureStudyBySpeciesGroup(iacucProtocolStudyGroups);
        IacucProtocolPerson iacucProtocolPerson = (IacucProtocolPerson)protocolPerson;
        iacucProtocolPerson.setProcedureDetails(getPersonProcedureDetails(iacucProtocolSpeciesStudyGroups, iacucProtocolPerson));
        iacucProtocolPerson.setAllProceduresSelected(isAllProceduresChecked(iacucProtocolPerson.getProcedureDetails()));
    }
}
/**
 * Marks each species group's allProceduresSelected flag and returns whether
 * every procedure in every group is selected. Note: returns true for an
 * empty list (vacuously all-selected).
 */
private boolean isAllProceduresChecked(List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups) {
    boolean allProceduresSelected = true;
    for(IacucProtocolSpeciesStudyGroup iacucProtocolSpeciesStudyGroup : iacucProtocolSpeciesStudyGroups) {
        iacucProtocolSpeciesStudyGroup.setAllProceduresSelected(true);
        for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : iacucProtocolSpeciesStudyGroup.getResponsibleProcedures()) {
            if(!iacucProtocolStudyGroupBean.isProcedureSelected()) {
                // one unselected procedure clears both the group flag and
                // the overall result; keep scanning remaining groups so
                // every group's flag is still set correctly
                allProceduresSelected = false;
                iacucProtocolSpeciesStudyGroup.setAllProceduresSelected(false);
                break;
            }
        }
    }
    return allProceduresSelected;
}
/**
 * For each species group, builds the list of distinct procedures
 * (de-duplicated by study group header id) and flags the ones the given
 * person is responsible for. The species groups are mutated in place and
 * returned as the person's procedure details.
 */
private List<IacucProtocolSpeciesStudyGroup> getPersonProcedureDetails(List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups, IacucProtocolPerson iacucProtocolPerson) {
    List<IacucProtocolSpeciesStudyGroup> personProcedureDetails = new ArrayList<IacucProtocolSpeciesStudyGroup>();
    for(IacucProtocolSpeciesStudyGroup iacucProtocolSpeciesStudyGroup : iacucProtocolSpeciesStudyGroups) {
        // header ids already represented for this species group
        HashSet<Integer> studyGroupProcedures = new HashSet<Integer>();
        List<IacucProtocolStudyGroupBean> responsibleProcedures = new ArrayList<IacucProtocolStudyGroupBean>();
        for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolSpeciesStudyGroup.getIacucProtocolStudyGroups()) {
            // add() is false for duplicates, so each header id yields one bean
            if(studyGroupProcedures.add(iacucProtocolStudyGroup.getIacucProtocolStudyGroupHeaderId())) {
                IacucProtocolStudyGroupBean newIacucProtocolStudyGroupBean = getNewCopyOfStudyGroupBean(iacucProtocolStudyGroup.getIacucProtocolStudyGroupBean());
                if(isPersonResponsibleForProcedure(iacucProtocolPerson, iacucProtocolStudyGroup)) {
                    newIacucProtocolStudyGroupBean.setProcedureSelected(true);
                    newIacucProtocolStudyGroupBean.setNewProcedure(false);
                }
                responsibleProcedures.add(newIacucProtocolStudyGroupBean);
            }
        }
        iacucProtocolSpeciesStudyGroup.setResponsibleProcedures(responsibleProcedures);
        personProcedureDetails.add(iacucProtocolSpeciesStudyGroup);
    }
    return personProcedureDetails;
}
    @Override
    public void populateIacucSpeciesLocationProcedures(IacucProtocol iacucProtocol) {
        // Order matters: first rebuild the distinct location list from all study groups,
        // then attach per-location procedure details computed against those locations.
        populateProcedureStudyGroupLocations(iacucProtocol);
        populateSpeciesLocationProcedures(iacucProtocol);
    }
private void populateSpeciesLocationProcedures(IacucProtocol iacucProtocol) {
List<IacucProtocolStudyGroup> iacucProtocolStudyGroups = getAllProcedureStudyGroups(iacucProtocol);
if(isProcedureViewedBySpecies()) {
setLocationProceduresBySpecies(iacucProtocol, iacucProtocolStudyGroups);
}else {
setLocationProceduresByGroups(iacucProtocol, iacucProtocolStudyGroups);
}
}
/**
* This method is to set procedures handled in each location arranged by species
* @param iacucProtocol
* @param iacucProtocolStudyGroups
*/
private void setLocationProceduresBySpecies(IacucProtocol iacucProtocol, List<IacucProtocolStudyGroup> iacucProtocolStudyGroups) {
for(IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation : iacucProtocol.getIacucProtocolStudyGroupLocations()) {
List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups = getListOfProcedureStudyBySpecies(iacucProtocolStudyGroups);
iacucProtocolStudyGroupLocation.setProcedureDetails(getLocationProcedureDetails(iacucProtocolSpeciesStudyGroups, iacucProtocolStudyGroupLocation));
iacucProtocolStudyGroupLocation.setAllProceduresSelected(isAllProceduresChecked(iacucProtocolStudyGroupLocation.getProcedureDetails()));
}
}
/**
* This method is to set procedures handled in each location arranged by groups
* @param iacucProtocol
* @param iacucProtocolStudyGroups
*/
private void setLocationProceduresByGroups(IacucProtocol iacucProtocol, List<IacucProtocolStudyGroup> iacucProtocolStudyGroups) {
for(IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation : iacucProtocol.getIacucProtocolStudyGroupLocations()) {
List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups = getListOfProcedureStudyBySpeciesGroup(iacucProtocolStudyGroups);
iacucProtocolStudyGroupLocation.setProcedureDetails(getLocationProcedureDetails(iacucProtocolSpeciesStudyGroups, iacucProtocolStudyGroupLocation));
iacucProtocolStudyGroupLocation.setAllProceduresSelected(isAllProceduresChecked(iacucProtocolStudyGroupLocation.getProcedureDetails()));
}
}
    /**
     * Builds, for one location, the per-species view of procedures with a selection state.
     * For each species study group, one display bean is created per distinct study group
     * header; the bean is marked selected when the location is already assigned to that
     * procedure. Note: the incoming species study groups are mutated (their
     * responsibleProcedures list is replaced) and the same instances are returned.
     * @param iacucProtocolSpeciesStudyGroups species-level groupings of the study groups
     * @param iacucProtocolStudyGroupLocation location whose assignments drive the selected flags
     * @return the same species study groups, now carrying the location's procedure beans
     */
    private List<IacucProtocolSpeciesStudyGroup> getLocationProcedureDetails(List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups,
            IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation) {
        List<IacucProtocolSpeciesStudyGroup> locationProcedureDetails = new ArrayList<IacucProtocolSpeciesStudyGroup>();
        for(IacucProtocolSpeciesStudyGroup iacucProtocolSpeciesStudyGroup : iacucProtocolSpeciesStudyGroups) {
            // Track header ids already handled so each procedure appears only once per group;
            // only the first study group carrying a header id is checked for assignment.
            HashSet<Integer> studyGroupProcedures = new HashSet<Integer>();
            List<IacucProtocolStudyGroupBean> responsibleProcedures = new ArrayList<IacucProtocolStudyGroupBean>();
            for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolSpeciesStudyGroup.getIacucProtocolStudyGroups()) {
                if(studyGroupProcedures.add(iacucProtocolStudyGroup.getIacucProtocolStudyGroupHeaderId())) {
                    IacucProtocolStudyGroupBean newIacucProtocolStudyGroupBean = getNewCopyOfStudyGroupBean(iacucProtocolStudyGroup.getIacucProtocolStudyGroupBean());
                    if(isLocationResponsibleForProcedure(iacucProtocolStudyGroupLocation, iacucProtocolStudyGroup)) {
                        // Existing assignment: display as checked and not new.
                        newIacucProtocolStudyGroupBean.setProcedureSelected(true);
                        newIacucProtocolStudyGroupBean.setNewProcedure(false);
                    }
                    responsibleProcedures.add(newIacucProtocolStudyGroupBean);
                }
            }
            iacucProtocolSpeciesStudyGroup.setResponsibleProcedures(responsibleProcedures);
            locationProcedureDetails.add(iacucProtocolSpeciesStudyGroup);
        }
        return locationProcedureDetails;
    }
/**
* This method is to populate distinct study group locations
* @param iacucProtocol
*/
private void populateProcedureStudyGroupLocations(IacucProtocol iacucProtocol) {
List<IacucProtocolStudyGroup> iacucProtocolStudyGroups = getAllProcedureStudyGroups(iacucProtocol);
List<IacucProtocolStudyGroupLocation> iacucProtocolStudyGroupLocations = new ArrayList<IacucProtocolStudyGroupLocation>();
HashSet<Integer> studyLocations = new HashSet<Integer>();
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolStudyGroups) {
for(IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation : iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList()) {
if(studyLocations.add(iacucProtocolStudyGroupLocation.getStudyGroupLocationId())) {
iacucProtocolStudyGroupLocations.add(iacucProtocolStudyGroupLocation);
}
}
}
iacucProtocol.setIacucProtocolStudyGroupLocations(iacucProtocolStudyGroupLocations);
}
/**
* This method is to get all procedure study groups
* @param iacucProtocol
* @return
*/
private List<IacucProtocolStudyGroup> getAllProcedureStudyGroups(IacucProtocol iacucProtocol) {
List<IacucProtocolStudyGroup> iacucProtocolStudyGroup = new ArrayList<IacucProtocolStudyGroup>();
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : iacucProtocol.getIacucProtocolStudyGroups()) {
iacucProtocolStudyGroup.addAll(iacucProtocolStudyGroupBean.getIacucProtocolStudyGroups());
}
return iacucProtocolStudyGroup;
}
/**
* This method is to get a new copy of current study group bean.
* This is used to identify the procedure performed for species
* @param iacucProtocolStudyGroupBean
* @return
*/
private IacucProtocolStudyGroupBean getNewCopyOfStudyGroupBean(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean) {
IacucProtocolStudyGroupBean newIacucProtocolStudyGroupBean = new IacucProtocolStudyGroupBean();
newIacucProtocolStudyGroupBean.setProcedureCode(iacucProtocolStudyGroupBean.getProcedureCode());
newIacucProtocolStudyGroupBean.setProcedureCategoryCode(iacucProtocolStudyGroupBean.getProcedureCategoryCode());
newIacucProtocolStudyGroupBean.setIacucProcedure(iacucProtocolStudyGroupBean.getIacucProcedure());
newIacucProtocolStudyGroupBean.setIacucProcedureCategory(iacucProtocolStudyGroupBean.getIacucProcedureCategory());
newIacucProtocolStudyGroupBean.setProcedureSelected(false);
newIacucProtocolStudyGroupBean.setNewProcedure(true);
return newIacucProtocolStudyGroupBean;
}
/**
* This method is to verify whether person is responsible for a procedure
* @param iacucProtocolPerson
* @param iacucProtocolStudyGroup
* @return
*/
private boolean isPersonResponsibleForProcedure(IacucProtocolPerson iacucProtocolPerson, IacucProtocolStudyGroup iacucProtocolStudyGroup) {
boolean personResponsibleForProcedure = false;
for(IacucProcedurePersonResponsible IacucProcedurePersonResponsible : iacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList()) {
if(IacucProcedurePersonResponsible.getProtocolPersonId().equals(iacucProtocolPerson.getProtocolPersonId())) {
personResponsibleForProcedure = true;
break;
}
}
return personResponsibleForProcedure;
}
/**
* This method is to verify whether a procedure is assigned to a location
* @param iacucProtocolStudyGroupLocation
* @param iacucProtocolStudyGroup
* @return
*/
private boolean isLocationResponsibleForProcedure(IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation, IacucProtocolStudyGroup iacucProtocolStudyGroup) {
boolean locationResponsibleForProcedure = false;
for(IacucProtocolStudyGroupLocation iacucProcedureLocationResponsible : iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList()) {
if(iacucProcedureLocationResponsible.getStudyGroupLocationId().equals(iacucProtocolStudyGroupLocation.getStudyGroupLocationId())) {
locationResponsibleForProcedure = true;
break;
}
}
return locationResponsibleForProcedure;
}
@Override
public void synchronizeProtocolStudyGroups(IacucProtocol iacucProtocol) {
if(isProcedureViewedBySpecies()) {
List<IacucProtocolStudyGroupLocation> newProtocolStudyLocationList = iacucProtocol.getIacucProtocolStudyGroupLocations();
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : iacucProtocol.getIacucProtocolStudyGroups()) {
synchronizeProtocolStudyGroups(iacucProtocolStudyGroupBean);
synchronizeProcedureLocationList(iacucProtocolStudyGroupBean, newProtocolStudyLocationList);
}
}
}
/**
* This method is to update study details collection that are grouped by species
* @param selectedProtocolStudyGroupBean
*/
private void synchronizeProtocolStudyGroups(IacucProtocolStudyGroupBean selectedProtocolStudyGroupBean) {
for(IacucProtocolSpeciesStudyGroup iacucProtocolSpeciesStudyGroup : selectedProtocolStudyGroupBean.getIacucProtocolSpeciesStudyGroups()) {
IacucProtocolStudyGroup newIacucProtocolStudyGroup = iacucProtocolSpeciesStudyGroup.getIacucProtocolStudyGroup();
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolSpeciesStudyGroup.getIacucProtocolStudyGroups()) {
iacucProtocolStudyGroup.setPainCategoryCode(newIacucProtocolStudyGroup.getPainCategoryCode());
iacucProtocolStudyGroup.setCount(newIacucProtocolStudyGroup.getCount());
synchronizeProcedureCustomDataList(newIacucProtocolStudyGroup.getIacucProtocolStudyCustomDataList(), iacucProtocolStudyGroup);
}
}
}
/**
* This method is to update custom data list grouped by species
*/
private void synchronizeProcedureCustomDataList(List<IacucProtocolStudyCustomData> newProtocolStudyCustomDataList, IacucProtocolStudyGroup iacucProtocolStudyGroup) {
for(IacucProtocolStudyCustomData newIacucProtocolStudyCustomData : newProtocolStudyCustomDataList) {
for(IacucProtocolStudyCustomData iacucProtocolStudyCustomData : iacucProtocolStudyGroup.getIacucProtocolStudyCustomDataList()) {
if(iacucProtocolStudyCustomData.getProcedureCustomAttributeId().equals(newIacucProtocolStudyCustomData.getProcedureCustomAttributeId())) {
iacucProtocolStudyCustomData.setValue(newIacucProtocolStudyCustomData.getValue());
}
}
}
}
    /**
     * Copies the edited location attributes (type, id, description, room) onto every
     * matching location entry — matched by study group location id — across all study
     * groups of the given bean.
     * @param iacucProtocolStudyGroupBean bean whose study group locations are updated in place
     * @param newProtocolStudyLocationList edited locations carrying the new values
     */
    private void synchronizeProcedureLocationList(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean , List<IacucProtocolStudyGroupLocation> newProtocolStudyLocationList) {
        for(IacucProtocolStudyGroupLocation newIacucProtocolStudyGroupLocation : newProtocolStudyLocationList) {
            for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolStudyGroupBean.getIacucProtocolStudyGroups()) {
                for(IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation : iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList()) {
                    // Same location id may appear in several study groups; update each copy.
                    if(iacucProtocolStudyGroupLocation.getStudyGroupLocationId().equals(newIacucProtocolStudyGroupLocation.getStudyGroupLocationId())) {
                        iacucProtocolStudyGroupLocation.setLocationTypeCode(newIacucProtocolStudyGroupLocation.getLocationTypeCode());
                        iacucProtocolStudyGroupLocation.setLocationId(newIacucProtocolStudyGroupLocation.getLocationId());
                        iacucProtocolStudyGroupLocation.setStudyGroupLocationDescription(newIacucProtocolStudyGroupLocation.getStudyGroupLocationDescription());
                        iacucProtocolStudyGroupLocation.setLocationRoom(newIacucProtocolStudyGroupLocation.getLocationRoom());
                    }
                }
            }
        }
    }
    /**
     * Builds one new study group per selected protocol species id, seeding the head
     * count and pain category from the referenced species.
     * @param protocolSpeciesAndGroups selected iacuc protocol species ids, as strings
     * @param protocol current protocol (not read in this method — TODO confirm it can be dropped)
     * @return a new set of protocol study groups based on selected group and species
     */
    private List<IacucProtocolStudyGroup> getNewProtocolStudyGroups(List<String> protocolSpeciesAndGroups,
            IacucProtocol protocol) {
        List<IacucProtocolStudyGroup> protocolStudyGroups = new ArrayList<IacucProtocolStudyGroup>();
        for(String iacucProtocolSpeciesId : protocolSpeciesAndGroups) {
            IacucProtocolStudyGroup iacucProtocolStudyGroup = new IacucProtocolStudyGroup();
            iacucProtocolStudyGroup.setIacucProtocolSpeciesId(Integer.parseInt(iacucProtocolSpeciesId));
            // getIacucProtocolSpecies() presumably resolves the reference once the species id
            // is set (ORM refresh) — TODO confirm against the business object.
            iacucProtocolStudyGroup.setCount(iacucProtocolStudyGroup.getIacucProtocolSpecies().getSpeciesCount());
            iacucProtocolStudyGroup.setPainCategoryCode(iacucProtocolStudyGroup.getIacucProtocolSpecies().getPainCategoryCode());
            // NOTE(review): this assigns the group's own pain category back to itself — looks
            // like a no-op; was the species' pain category object intended? Verify.
            iacucProtocolStudyGroup.setIacucPainCategory(iacucProtocolStudyGroup.getIacucPainCategory());
            protocolStudyGroups.add(iacucProtocolStudyGroup);
        }
        return protocolStudyGroups;
    }
    /**
     * Attaches one protocol study custom-data row per active custom attribute configured
     * for the selected procedure category to each new study group.
     * @param selectedIacucProtocolStudyGroupBean bean supplying the procedure category code
     * @param newStudyGroups study groups to receive the custom-data rows
     * @param protocol current protocol (not read in this method — TODO confirm it can be dropped)
     */
    private void addProcedureCustomData(IacucProtocolStudyGroupBean selectedIacucProtocolStudyGroupBean,
            List<IacucProtocolStudyGroup> newStudyGroups, IacucProtocol protocol) {
        List<IacucProcedureCategoryCustomData> procedureCustomDataList = getIacucProcedureCustomData(selectedIacucProtocolStudyGroupBean.getProcedureCategoryCode());
        for(IacucProtocolStudyGroup iacucProtocolStudyGroup : newStudyGroups) {
            List<IacucProtocolStudyCustomData> protocolStudyCustomDataList = new ArrayList<IacucProtocolStudyCustomData>();
            for(IacucProcedureCategoryCustomData procedureCategoryCustomData : procedureCustomDataList) {
                // Only active attributes produce rows; inactive ones are skipped entirely.
                if(procedureCategoryCustomData.isActive()) {
                    IacucProtocolStudyCustomData newIacucProtocolStudyCustomData = new IacucProtocolStudyCustomData();
                    newIacucProtocolStudyCustomData.setProcedureCustomAttributeId(procedureCategoryCustomData.getId());
                    newIacucProtocolStudyCustomData.setIacucProcedureCategoryCustomData(procedureCategoryCustomData);
                    protocolStudyCustomDataList.add(newIacucProtocolStudyCustomData);
                }
            }
            iacucProtocolStudyGroup.getIacucProtocolStudyCustomDataList().addAll(protocolStudyCustomDataList);
        }
    }
    /**
     * Stamps a new study group bean (header) with its generated header id and the
     * protocol's identifying attributes (id, number, sequence number).
     * @param selectedIacucProtocolStudyGroupBean bean to initialize
     * @param protocol protocol supplying the identifying attributes
     */
    private void setAttributesForNewStudyGroupBean(IacucProtocolStudyGroupBean selectedIacucProtocolStudyGroupBean, IacucProtocol protocol) {
        // Header id comes from the shared sequence so it is unique across the table.
        selectedIacucProtocolStudyGroupBean.setIacucProtocolStudyGroupHeaderId(getNextSequenceNumber(PROTOCOL_STUDY_GROUP_HEADER_SEQUENCE_ID, selectedIacucProtocolStudyGroupBean.getClass()));
        selectedIacucProtocolStudyGroupBean.setProtocolId(protocol.getProtocolId());
        selectedIacucProtocolStudyGroupBean.setProtocolNumber(protocol.getProtocolNumber());
        selectedIacucProtocolStudyGroupBean.setSequenceNumber(protocol.getSequenceNumber());
    }
@Override
public void deleteProtocolStudyGroup(IacucProtocolStudyGroupBean selectedProtocolStudyGroupBean,
IacucProtocolStudyGroup deletedIacucProtocolStudyGroup, IacucProtocol iacucProtocol) {
selectedProtocolStudyGroupBean.getIacucProtocolStudyGroups().remove(deletedIacucProtocolStudyGroup);
if(selectedProtocolStudyGroupBean.getIacucProtocolStudyGroups().size() == 0) {
iacucProtocol.getIacucProtocolStudyGroups().remove(selectedProtocolStudyGroupBean);
}
}
    @Override
    public void deleteProcedureGroupPersonResponsible(IacucProtocolStudyGroup selectedProtocolStudyGroup, IacucProcedurePersonResponsible deletedProcedurePersonResponsible,
            IacucProtocol iacucProtocol) {
        // Removes the person-responsible entry from the study group. The iacucProtocol
        // parameter is not read here; it is kept for the service interface signature.
        selectedProtocolStudyGroup.getIacucProcedurePersonResponsibleList().remove(deletedProcedurePersonResponsible);
    }
    @Override
    public void deleteProcedureGroupLocation(IacucProtocolStudyGroup selectedProtocolStudyGroup, IacucProtocolStudyGroupLocation deletedProtocolStudyGroupLocation,
            IacucProtocol iacucProtocol) {
        // Removes the location entry from the study group. The iacucProtocol parameter
        // is not read here; it is kept for the service interface signature.
        selectedProtocolStudyGroup.getIacucProcedureLocationResponsibleList().remove(deletedProtocolStudyGroupLocation);
    }
    @Override
    public void deleteProtocolStudyGroup(IacucProtocolStudyGroupBean selectedProtocolStudyGroupBean, IacucProtocolSpeciesStudyGroup deletedIacucProtocolStudyGroup) {
        // Remove every underlying study group belonging to the deleted species grouping,
        // then drop the grouping itself from the bean.
        selectedProtocolStudyGroupBean.getIacucProtocolStudyGroups().removeAll(deletedIacucProtocolStudyGroup.getIacucProtocolStudyGroups());
        selectedProtocolStudyGroupBean.getIacucProtocolSpeciesStudyGroups().remove(deletedIacucProtocolStudyGroup);
    }
    @Override
    public void addProcedureLocation(IacucProtocolStudyGroupLocation newStudyGroupLocation, IacucProtocol protocol) {
        // Order matters: the serial location id must be assigned before the location is
        // added (the id generator scans the existing list), and the per-location procedure
        // details are rebuilt afterwards so the new location is included.
        updateAttributesForNewProcedureLocation(newStudyGroupLocation, protocol);
        protocol.getIacucProtocolStudyGroupLocations().add(newStudyGroupLocation);
        populateSpeciesLocationProcedures(protocol);
    }
    @Override
    public void addProcedureGroupLocation(IacucProtocolStudyGroupLocation newStudyGroupLocation, IacucProtocolStudyGroup selectedStudyGroup, IacucProtocol protocol) {
        // Refresh the distinct location list first so the next serial id is computed
        // against current data; then register the new location both protocol-wide and on
        // the selected study group, and reset the group's entry form bean.
        populateProcedureStudyGroupLocations(protocol);
        updateAttributesForNewProcedureLocation(newStudyGroupLocation, protocol);
        protocol.getIacucProtocolStudyGroupLocations().add(newStudyGroupLocation);
        selectedStudyGroup.getIacucProcedureLocationResponsibleList().add(newStudyGroupLocation);
        selectedStudyGroup.setNewIacucProtocolStudyGroupLocation(new IacucProtocolStudyGroupLocation());
    }
    @Override
    public void deleteProcedureLocation(IacucProtocolStudyGroupLocation deletedIacucProtocolStudyGroupLocation, IacucProtocol iacucProtocol) {
        // Scrub the location from every study group first, then drop it from the
        // protocol-level distinct location list.
        deleteProcedureLocationList(iacucProtocol, deletedIacucProtocolStudyGroupLocation);
        iacucProtocol.getIacucProtocolStudyGroupLocations().remove(deletedIacucProtocolStudyGroupLocation);
    }
/**
* This method is to remove deleted location from study group list
* @param protocol
* @param deletedProtocolStudyGroupLocation
*/
private void deleteProcedureLocationList(IacucProtocol protocol, IacucProtocolStudyGroupLocation deletedProtocolStudyGroupLocation) {
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : protocol.getIacucProtocolStudyGroups()) {
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolStudyGroupBean.getIacucProtocolStudyGroups()) {
List<IacucProtocolStudyGroupLocation> deletedProtocolStudyGroupLocations = new ArrayList<IacucProtocolStudyGroupLocation>();
for(IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation : iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList()) {
if(iacucProtocolStudyGroupLocation.getStudyGroupLocationId().equals(deletedProtocolStudyGroupLocation.getStudyGroupLocationId())) {
deletedProtocolStudyGroupLocations.add(iacucProtocolStudyGroupLocation);
}
}
iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList().removeAll(deletedProtocolStudyGroupLocations);
}
}
}
    /**
     * Assigns the next serial study group location id to a newly added location.
     * @param newIacucProtocolStudyGroupLocation location to stamp with the generated id
     * @param protocol protocol whose existing locations determine the next id
     */
    private void updateAttributesForNewProcedureLocation(IacucProtocolStudyGroupLocation newIacucProtocolStudyGroupLocation,
            IacucProtocol protocol) {
        newIacucProtocolStudyGroupLocation.setStudyGroupLocationId(getNextStudyGroupLocationId(protocol));
    }
/**
* This method is to get the next study group location id.
* generate a serial number generated based on the list of study group locations
* Use this to identify distinct locations
* @param iacucProtocol
* @return
*/
private Integer getNextStudyGroupLocationId(IacucProtocol iacucProtocol) {
Integer nextStudyGroupLocationId = 1;
if(!iacucProtocol.getIacucProtocolStudyGroupLocations().isEmpty()) {
List<IacucProtocolStudyGroupLocation> sortedStudyGroupLocations = getSortedStudyGroupLocations(iacucProtocol);
int totalStudyGroupLocs = sortedStudyGroupLocations.size();
nextStudyGroupLocationId = sortedStudyGroupLocations.get(totalStudyGroupLocs - 1).getStudyGroupLocationId() + 1;
}
return nextStudyGroupLocationId;
}
    /**
     * Returns the protocol's study group locations sorted ascending by location id.
     * NOTE(review): Collections.sort operates on the list returned by the protocol — if
     * that is the live backing list, its order is changed in place; confirm callers do
     * not rely on the previous ordering.
     * @param iacucProtocol protocol whose locations are sorted
     * @return the (same) list, now sorted by study group location id
     */
    private List<IacucProtocolStudyGroupLocation> getSortedStudyGroupLocations(IacucProtocol iacucProtocol) {
        List<IacucProtocolStudyGroupLocation> protocolStudyGroupLocations = iacucProtocol.getIacucProtocolStudyGroupLocations();
        Collections.sort(protocolStudyGroupLocations, new Comparator<IacucProtocolStudyGroupLocation>() {
            public int compare(IacucProtocolStudyGroupLocation location1, IacucProtocolStudyGroupLocation location2) {
                return location1.getStudyGroupLocationId().compareTo(location2.getStudyGroupLocationId());
            }
        });
        return protocolStudyGroupLocations;
    }
@Override
public void addLocationResponsibleProcedures(IacucProtocol protocol) {
for(IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation : protocol.getIacucProtocolStudyGroupLocations()) {
for(IacucProtocolSpeciesStudyGroup protocolSpeciesStudyGroup : iacucProtocolStudyGroupLocation.getProcedureDetails()) {
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : protocolSpeciesStudyGroup.getResponsibleProcedures()) {
if(iacucProtocolStudyGroupBean.isProcedureSelected()) {
addLocationResponsibleProcedures(iacucProtocolStudyGroupBean, iacucProtocolStudyGroupLocation, protocolSpeciesStudyGroup);
}else {
deleteLocationResponsibleProcedures(iacucProtocolStudyGroupBean, iacucProtocolStudyGroupLocation, protocolSpeciesStudyGroup);
}
}
}
}
populateSpeciesLocationProcedures(protocol);
}
    /**
     * Assigns the given location to every study group of the species grouping whose
     * procedure code matches the checked procedure bean. Only acts on newly checked
     * procedures (isNewProcedure() true); already-assigned procedures are left untouched.
     * @param iacucProtocolStudyGroupBean checked procedure bean from the location's detail view
     * @param iacucProtocolStudyGroupLocation location being assigned
     * @param protocolSpeciesStudyGroup species grouping whose study groups are updated
     */
    private void addLocationResponsibleProcedures(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean, IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation,
            IacucProtocolSpeciesStudyGroup protocolSpeciesStudyGroup) {
        if(iacucProtocolStudyGroupBean.isNewProcedure()) {
            for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolSpeciesStudyGroup.getIacucProtocolStudyGroups()) {
                if(iacucProtocolStudyGroup.getIacucProtocolStudyGroupBean().getProcedureCode().equals(iacucProtocolStudyGroupBean.getProcedureCode())) {
                    // Deep-copy so each study group owns its own location instance;
                    // resetPersistenceState presumably clears the ORM identity so the copy
                    // is persisted as a new row — TODO confirm.
                    IacucProtocolStudyGroupLocation newIacucProtocolStudyGroupLocation = (IacucProtocolStudyGroupLocation)deepCopy(iacucProtocolStudyGroupLocation);
                    newIacucProtocolStudyGroupLocation.resetPersistenceState();
                    iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList().add(newIacucProtocolStudyGroupLocation);
                }
            }
        }
    }
    /**
     * Removes the given location's entries from every study group of the species
     * grouping whose procedure code matches the unchecked procedure bean. Only acts on
     * procedures that were previously assigned (isNewProcedure() false).
     * @param iacucProtocolStudyGroupBean unchecked procedure bean from the location's detail view
     * @param iacucProtocolStudyGroupLocation location being unassigned (matched by location id)
     * @param protocolSpeciesStudyGroup species grouping whose study groups are scrubbed
     */
    private void deleteLocationResponsibleProcedures(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean, IacucProtocolStudyGroupLocation iacucProtocolStudyGroupLocation,
            IacucProtocolSpeciesStudyGroup protocolSpeciesStudyGroup) {
        if(!iacucProtocolStudyGroupBean.isNewProcedure()) {
            for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolSpeciesStudyGroup.getIacucProtocolStudyGroups()) {
                // Collect matches first, then bulk-remove, so the list is never modified
                // while it is being iterated.
                List<IacucProtocolStudyGroupLocation> deletedProcedureLocationResponsible = new ArrayList<IacucProtocolStudyGroupLocation>();
                if(iacucProtocolStudyGroup.getIacucProtocolStudyGroupBean().getProcedureCode().equals(iacucProtocolStudyGroupBean.getProcedureCode())) {
                    for(IacucProtocolStudyGroupLocation iacucProcedureLocationResponsible : iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList()) {
                        if(iacucProcedureLocationResponsible.getStudyGroupLocationId().equals(iacucProtocolStudyGroupLocation.getStudyGroupLocationId())) {
                            deletedProcedureLocationResponsible.add(iacucProcedureLocationResponsible);
                        }
                    }
                }
                iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList().removeAll(deletedProcedureLocationResponsible);
            }
        }
    }
@Override
public void addPersonResponsibleProcedures(IacucProtocol protocol) {
for(ProtocolPersonBase protocolPerson : protocol.getProtocolPersons()) {
IacucProtocolPerson iacucProtocolPerson = (IacucProtocolPerson)protocolPerson;
for(IacucProtocolSpeciesStudyGroup protocolSpeciesStudyGroup : iacucProtocolPerson.getProcedureDetails()) {
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : protocolSpeciesStudyGroup.getResponsibleProcedures()) {
if(iacucProtocolStudyGroupBean.isProcedureSelected()) {
addPersonResponsibleProcedures(iacucProtocolStudyGroupBean, iacucProtocolPerson, protocolSpeciesStudyGroup);
}else {
deletePersonResponsibleProcedures(iacucProtocolStudyGroupBean, iacucProtocolPerson, protocolSpeciesStudyGroup);
}
}
}
}
populateIacucSpeciesPersonProcedures(protocol);
}
/**
* This method is to add person responsible procedures to study groups
* Add in invoked based on checked procedures
* @param iacucProtocolStudyGroupBean
* @param protocolPerson
* @param protocolSpeciesStudyGroup
*/
private void addPersonResponsibleProcedures(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean, IacucProtocolPerson protocolPerson,
IacucProtocolSpeciesStudyGroup protocolSpeciesStudyGroup) {
if(iacucProtocolStudyGroupBean.isNewProcedure()) {
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolSpeciesStudyGroup.getIacucProtocolStudyGroups()) {
if(iacucProtocolStudyGroup.getIacucProtocolStudyGroupBean().getProcedureCode().equals(iacucProtocolStudyGroupBean.getProcedureCode())) {
IacucProcedurePersonResponsible newIacucProcedurePersonResponsible = getNewPersonResponsibleProcedure(protocolPerson, iacucProtocolStudyGroup);
iacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList().add(newIacucProcedurePersonResponsible);
}
}
}
}
    /**
     * Removes the given person's responsibility entries from every study group of the
     * species grouping whose procedure code matches the unchecked procedure bean.
     * Only acts on procedures that were previously assigned (isNewProcedure() false).
     * @param iacucProtocolStudyGroupBean unchecked procedure bean from the person's detail view
     * @param protocolPerson person losing the responsibility (matched by protocol person id)
     * @param protocolSpeciesStudyGroup species grouping whose study groups are scrubbed
     */
    private void deletePersonResponsibleProcedures(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean, IacucProtocolPerson protocolPerson,
            IacucProtocolSpeciesStudyGroup protocolSpeciesStudyGroup) {
        if(!iacucProtocolStudyGroupBean.isNewProcedure()) {
            for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolSpeciesStudyGroup.getIacucProtocolStudyGroups()) {
                // Collect matches first, then bulk-remove, so the list is never modified
                // while it is being iterated.
                List<IacucProcedurePersonResponsible> deletedProcedurePersonResponsible = new ArrayList<IacucProcedurePersonResponsible>();
                if(iacucProtocolStudyGroup.getIacucProtocolStudyGroupBean().getProcedureCode().equals(iacucProtocolStudyGroupBean.getProcedureCode())) {
                    for(IacucProcedurePersonResponsible iacucProcedurePersonResponsible : iacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList()) {
                        if(iacucProcedurePersonResponsible.getProtocolPersonId().equals(protocolPerson.getProtocolPersonId())) {
                            deletedProcedurePersonResponsible.add(iacucProcedurePersonResponsible);
                        }
                    }
                }
                iacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList().removeAll(deletedProcedurePersonResponsible);
            }
        }
    }
/**
* This method is to map iacuc protocol persons based on person id
* @param iacucProtocol
* @return
*/
private HashMap<String, IacucProtocolPerson> getProtocolPersons(IacucProtocol iacucProtocol) {
HashMap<String, IacucProtocolPerson> protocolPersons = new HashMap<String, IacucProtocolPerson>();
for(ProtocolPersonBase protocolPersonBase : iacucProtocol.getProtocolPersons()) {
IacucProtocolPerson iacucProtocolPerson = (IacucProtocolPerson)protocolPersonBase;
protocolPersons.put(iacucProtocolPerson.getPersonId(), iacucProtocolPerson);
}
return protocolPersons;
}
/**
* This method is to get a new person responsible procedure information
* Person responsibility for a procedure is recorded based on species
* @param protocolPerson
* @param iacucProtocolStudyGroup
* @return
*/
private IacucProcedurePersonResponsible getNewPersonResponsibleProcedure(IacucProtocolPerson protocolPerson, IacucProtocolStudyGroup iacucProtocolStudyGroup) {
IacucProcedurePersonResponsible resposibleProcedure = new IacucProcedurePersonResponsible();
setAttributesForPersonResponsibleProcedure(resposibleProcedure, protocolPerson, iacucProtocolStudyGroup);
return resposibleProcedure;
}
/**
* This method is to set protocol and person attributes for a new person responsible procedure
* @param resposibleProcedure
* @param protocolPerson
* @param iacucProtocolStudyGroup
*/
private void setAttributesForPersonResponsibleProcedure(IacucProcedurePersonResponsible resposibleProcedure, IacucProtocolPerson protocolPerson,
IacucProtocolStudyGroup iacucProtocolStudyGroup) {
resposibleProcedure.setProtocolPersonId(protocolPerson.getProtocolPersonId());
resposibleProcedure.setProtocolPerson(protocolPerson);
resposibleProcedure.setIacucProtocolStudyGroupId(iacucProtocolStudyGroup.getIacucProtocolStudyGroupId());
}
    @Override
    public void setProcedureSummaryGroupedBySpecies(IacucProtocol protocol) {
        // Build the species-level grouping of all study groups, fold in total head
        // counts, then attach it to the protocol BEFORE deriving procedures and details
        // (the add* helpers read the list set here).
        List<IacucProtocolStudyGroup> iacucProtocolStudyGroups = getAllProcedureStudyGroups(protocol);
        List<IacucProtocolSpeciesStudyGroup> iacucProtocolStudyGroupSpeciesList = getListOfProcedureStudyBySpecies(iacucProtocolStudyGroups);
        updateSpeciesCount(iacucProtocolStudyGroupSpeciesList, protocol);
        protocol.setIacucProtocolStudyGroupSpeciesList(iacucProtocolStudyGroupSpeciesList);
        addStudyGroupProceduresForSpecies(protocol);
        addStudyGroupProcedureDetailsForSpecies(protocol);
    }
private void updateSpeciesCount(List<IacucProtocolSpeciesStudyGroup> iacucProtocolSpeciesStudyGroups, IacucProtocol protocol) {
for(IacucProtocolSpeciesStudyGroup iacucProtocolSpeciesStudyGroup : iacucProtocolSpeciesStudyGroups) {
for(IacucProtocolSpecies iacucProtocolSpecies : protocol.getIacucProtocolSpeciesList()) {
if(iacucProtocolSpecies.getSpeciesCode().equals(iacucProtocolSpeciesStudyGroup.getSpeciesCode())) {
iacucProtocolSpeciesStudyGroup.addSpeciesCount(iacucProtocolSpecies.getSpeciesCount());
}
}
}
}
    @Override
    public void setProcedureSummaryBySpeciesGroup(IacucProtocol protocol) {
        // Build the species-group-level grouping and attach it to the protocol BEFORE
        // deriving procedures and details (the add* helpers read the list set here).
        List<IacucProtocolStudyGroup> iacucProtocolStudyGroups = getAllProcedureStudyGroups(protocol);
        List<IacucProtocolSpeciesStudyGroup> iacucProtocolStudyGroupSpeciesList = getListOfProcedureStudyBySpeciesGroup(iacucProtocolStudyGroups);
        protocol.setIacucProtocolStudyGroupSpeciesList(iacucProtocolStudyGroupSpeciesList);
        addProceduresForSpeciesGroups(protocol);
        addProcedureDetailsForSpeciesGroups(protocol);
    }
/**
* This method is to identify study group details for species group used in the study
* This grouping is used for summary display (studies grouped by species group)
* @param protocol
*/
private void addProceduresForSpeciesGroups(IacucProtocol protocol) {
for(IacucProtocolSpeciesStudyGroup protocolStudyGroupSpecies : protocol.getIacucProtocolStudyGroupSpeciesList()) {
protocolStudyGroupSpecies.setResponsibleProcedures(new ArrayList<IacucProtocolStudyGroupBean>());
protocolStudyGroupSpecies.getResponsibleProcedures().addAll(getStudyGroupProceduresForSpeciesGroup(protocol, protocolStudyGroupSpecies.getIacucProtocolSpecies()));
}
}
/**
* This method is to get all study procedures based on species group
* @param protocol
* @param iacucProtocolSpecies
* @return
*/
private List<IacucProtocolStudyGroupBean> getStudyGroupProceduresForSpeciesGroup(IacucProtocol protocol, IacucProtocolSpecies iacucProtocolSpecies) {
List<IacucProtocolStudyGroupBean> protocolStudyGroups = new ArrayList<IacucProtocolStudyGroupBean>();
for(IacucProtocolStudyGroupBean protocolStudyGroupBean : protocol.getIacucProtocolStudyGroups()) {
List<IacucProtocolStudyGroup> iacucProtocolStudyGroups = new ArrayList<IacucProtocolStudyGroup>();
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolStudyGroupBean.getIacucProtocolStudyGroups()) {
if(iacucProtocolStudyGroup.getIacucProtocolSpeciesId().equals(iacucProtocolSpecies.getIacucProtocolSpeciesId())) {
iacucProtocolStudyGroups.add(iacucProtocolStudyGroup);
}
}
addProceduresDetails(iacucProtocolStudyGroups, protocolStudyGroups, protocolStudyGroupBean);
}
return protocolStudyGroups;
}
/**
* This method is to set procedure related studies
* @param iacucProtocolStudyGroups
* @param protocolStudyGroups
* @param protocolStudyGroupBean
*/
private void addProceduresDetails(List<IacucProtocolStudyGroup> iacucProtocolStudyGroups, List<IacucProtocolStudyGroupBean> protocolStudyGroups,
IacucProtocolStudyGroupBean protocolStudyGroupBean) {
if(!iacucProtocolStudyGroups.isEmpty()) {
IacucProtocolStudyGroupBean newProtocolStudyGroupBean = getNewProtocolStudyGroupBean(protocolStudyGroupBean);
newProtocolStudyGroupBean.getIacucProtocolStudyGroups().addAll(iacucProtocolStudyGroups);
protocolStudyGroups.add(newProtocolStudyGroupBean);
}
}
/**
* This method is to identify study procedures for species used in the study
* This grouping is used for summary display
* @param protocol
*/
private void addStudyGroupProceduresForSpecies(IacucProtocol protocol) {
for(IacucProtocolSpeciesStudyGroup protocolStudyGroupSpecies : protocol.getIacucProtocolStudyGroupSpeciesList()) {
protocolStudyGroupSpecies.setResponsibleProcedures(new ArrayList<IacucProtocolStudyGroupBean>());
Integer speciesCode = protocolStudyGroupSpecies.getSpeciesCode();
protocolStudyGroupSpecies.getResponsibleProcedures().addAll(getStudyGroupProceduresForSpecies(protocol, speciesCode));
}
}
/**
* This method is to collect study details based on species code
* @param protocol
* @param speciesCode
* @return
*/
private List<IacucProtocolStudyGroupBean> getStudyGroupProceduresForSpecies(IacucProtocol protocol, Integer speciesCode) {
List<IacucProtocolStudyGroupBean> protocolStudyGroups = new ArrayList<IacucProtocolStudyGroupBean>();
for(IacucProtocolStudyGroupBean protocolStudyGroupBean : protocol.getIacucProtocolStudyGroups()) {
List<IacucProtocolStudyGroup> iacucProtocolStudyGroups = new ArrayList<IacucProtocolStudyGroup>();
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolStudyGroupBean.getIacucProtocolStudyGroups()) {
if(iacucProtocolStudyGroup.getIacucProtocolSpecies().getSpeciesCode().equals(speciesCode)) {
iacucProtocolStudyGroups.add(iacucProtocolStudyGroup);
}
}
addProceduresDetails(iacucProtocolStudyGroups, protocolStudyGroups, protocolStudyGroupBean);
}
return protocolStudyGroups;
}
/**
* This method is to get a new copy of an existing study group header bean
* @param protocolStudyGroupBean
* @return
*/
private IacucProtocolStudyGroupBean getNewProtocolStudyGroupBean(IacucProtocolStudyGroupBean protocolStudyGroupBean) {
IacucProtocolStudyGroupBean newProtocolStudyGroupBean = new IacucProtocolStudyGroupBean();
newProtocolStudyGroupBean.setIacucProtocolStudyGroupHeaderId(protocolStudyGroupBean.getIacucProtocolStudyGroupHeaderId());
newProtocolStudyGroupBean.setIacucProcedureCategory(protocolStudyGroupBean.getIacucProcedureCategory());
newProtocolStudyGroupBean.setIacucProcedure(protocolStudyGroupBean.getIacucProcedure());
return newProtocolStudyGroupBean;
}
/**
* This method is to add related procedure details for each species
* This includes person responsibilities, location and custom data.
* This grouping is used for summary display
* @param protocol
*/
private void addStudyGroupProcedureDetailsForSpecies(IacucProtocol protocol) {
for(IacucProtocolSpeciesStudyGroup protocolStudyGroupSpecies : protocol.getIacucProtocolStudyGroupSpeciesList()) {
for(IacucProtocolStudyGroupBean protocolStudyGroupBean : protocolStudyGroupSpecies.getResponsibleProcedures()) {
setAllProcedureDetailsForSpecies(protocolStudyGroupSpecies, protocolStudyGroupBean);
Integer totalProcSpeciesCount = 0;
for(IacucProtocolStudyGroup studyGroup : protocolStudyGroupBean.getIacucProtocolStudyGroups()) {
if(protocolStudyGroupBean.getIacucProtocolStudyGroupHeaderId().equals(studyGroup.getIacucProtocolStudyGroupHeaderId()) &&
studyGroup.getIacucProtocolSpecies().getSpeciesCode().equals(protocolStudyGroupSpecies.getSpeciesCode())) {
totalProcSpeciesCount = totalProcSpeciesCount + studyGroup.getCount();
}
}
protocolStudyGroupBean.setSpeciesCount(totalProcSpeciesCount);
}
}
}
/**
* This method is to set all related collections for a procedure
* say Person responsible, Location and Custom data list
* @param protocolStudyGroupSpecies
* @param protocolStudyGroupBean
*/
private void setAllProcedureDetails(IacucProtocolSpeciesStudyGroup protocolStudyGroupSpecies, IacucProtocolStudyGroupBean protocolStudyGroupBean) {
List<IacucProtocolStudyGroupLocation> allIacucProtocolStudyGroupLocations = new ArrayList<IacucProtocolStudyGroupLocation>();
List<IacucProcedurePersonResponsible> allIacucProtocolStudyGroupPersons = new ArrayList<IacucProcedurePersonResponsible>();
List<IacucProtocolStudyCustomData> allIacucProtocolStudyCustomDataList = new ArrayList<IacucProtocolStudyCustomData>();
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolStudyGroupSpecies.getIacucProtocolStudyGroups()) {
if(iacucProtocolStudyGroup.getIacucProtocolStudyGroupHeaderId().equals(protocolStudyGroupBean.getIacucProtocolStudyGroupHeaderId())) {
allIacucProtocolStudyGroupLocations.addAll(iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList());
allIacucProtocolStudyGroupPersons.addAll(iacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList());
allIacucProtocolStudyCustomDataList.addAll(iacucProtocolStudyGroup.getIacucProtocolStudyCustomDataList());
}
}
protocolStudyGroupBean.setIacucProtocolStudyGroupLocations(allIacucProtocolStudyGroupLocations);
protocolStudyGroupBean.setIacucProtocolStudyGroupPersons(allIacucProtocolStudyGroupPersons);
protocolStudyGroupBean.setIacucProtocolStudyCustomDataList(allIacucProtocolStudyCustomDataList);
}
/**
* This method is to set all related collections for a procedure
* say Person responsible, Location and Custom data list
* We need to consider the distinct case where procedures are grouped by species
* @param protocolStudyGroupSpecies
* @param protocolStudyGroupBean
*/
private void setAllProcedureDetailsForSpecies(IacucProtocolSpeciesStudyGroup protocolStudyGroupSpecies, IacucProtocolStudyGroupBean protocolStudyGroupBean) {
List<IacucProtocolStudyGroupLocation> allIacucProtocolStudyGroupLocations = new ArrayList<IacucProtocolStudyGroupLocation>();
List<IacucProcedurePersonResponsible> allIacucProtocolStudyGroupPersons = new ArrayList<IacucProcedurePersonResponsible>();
List<IacucProtocolStudyCustomData> allIacucProtocolStudyCustomDataList = new ArrayList<IacucProtocolStudyCustomData>();
Map<IacucSpecies,IacucProtocolStudyGroup> protocolSpeciesStudyGroups = new HashMap<IacucSpecies,IacucProtocolStudyGroup>();
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : protocolStudyGroupSpecies.getIacucProtocolStudyGroups()) {
if(iacucProtocolStudyGroup.getIacucProtocolStudyGroupHeaderId().equals(protocolStudyGroupBean.getIacucProtocolStudyGroupHeaderId())) {
IacucSpecies iacucSpecies = iacucProtocolStudyGroup.getIacucProtocolSpecies().getIacucSpecies();
IacucProtocolStudyGroup groupedProtocolStudyGroup = protocolSpeciesStudyGroups.get(iacucSpecies);
if(ObjectUtils.isNull(groupedProtocolStudyGroup)) {
allIacucProtocolStudyGroupLocations.addAll(iacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList());
allIacucProtocolStudyGroupPersons.addAll(iacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList());
allIacucProtocolStudyCustomDataList.addAll(iacucProtocolStudyGroup.getIacucProtocolStudyCustomDataList());
protocolSpeciesStudyGroups.put(iacucSpecies, iacucProtocolStudyGroup);
}
}
}
protocolStudyGroupBean.setIacucProtocolStudyGroupLocations(allIacucProtocolStudyGroupLocations);
protocolStudyGroupBean.setIacucProtocolStudyGroupPersons(allIacucProtocolStudyGroupPersons);
protocolStudyGroupBean.setIacucProtocolStudyCustomDataList(allIacucProtocolStudyCustomDataList);
}
/**
* This method is to add related procedure details for each species group
* This includes person responsibilities, location and custom data.
* This grouping is used for summary display
* @param protocol
*/
private void addProcedureDetailsForSpeciesGroups(IacucProtocol protocol) {
for(IacucProtocolSpeciesStudyGroup protocolStudyGroupSpecies : protocol.getIacucProtocolStudyGroupSpeciesList()) {
for(IacucProtocolStudyGroupBean protocolStudyGroupBean : protocolStudyGroupSpecies.getResponsibleProcedures()) {
setAllProcedureDetails(protocolStudyGroupSpecies, protocolStudyGroupBean);
Integer totalProcSpeciesCount = 0;
IacucProtocolSpecies iacucProtocolSpecies = protocolStudyGroupSpecies.getIacucProtocolSpecies();
for(IacucProtocolStudyGroup studyGroup : protocolStudyGroupBean.getIacucProtocolStudyGroups()) {
if(protocolStudyGroupBean.getIacucProtocolStudyGroupHeaderId().equals(studyGroup.getIacucProtocolStudyGroupHeaderId()) &&
studyGroup.getIacucProtocolSpeciesId().equals(iacucProtocolSpecies.getIacucProtocolSpeciesId())) {
totalProcSpeciesCount = totalProcSpeciesCount + studyGroup.getCount();
}
}
protocolStudyGroupBean.setSpeciesCount(totalProcSpeciesCount);
}
}
}
    /**
     * Copies all study procedures from the source protocol into the destination
     * protocol. Thin delegate to {@link #createNewStudyProcedures}.
     */
    @Override
    public void createNewProtocolStudyProcedures(IacucProtocol sourceProtocol, IacucProtocol destProtocol) {
        createNewStudyProcedures(sourceProtocol, destProtocol);
    }
    /**
     * Merges the species list from the source protocol into the destination:
     * deep-copies the list, resets persistence state / reassigns keys, then
     * repoints all procedure study groups at the new species rows.
     * Order matters: the species must exist on the destination before the
     * procedure references can be synchronized.
     */
    @Override
    @SuppressWarnings("unchecked")
    public void mergeProtocolSpecies(IacucProtocol sourceProtocol, IacucProtocol destProtocol) {
        destProtocol.setIacucProtocolSpeciesList((List<IacucProtocolSpecies>) deepCopy(sourceProtocol.getIacucProtocolSpeciesList()));
        setAttributesForIacucProtocolSpecies(destProtocol);
        synchronizeProcedureSpecies(destProtocol);
    }
/**
* This method is to update reference - procedure details where species in use
* @param destProtocol
*/
private void synchronizeProcedureSpecies(IacucProtocol destProtocol) {
HashMap<String, IacucProtocolSpecies> newIacucProtocolSpeciesMapping = getIacucProtocolSpeciesMapping(destProtocol.getIacucProtocolSpeciesList());
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : destProtocol.getIacucProtocolStudyGroups()) {
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolStudyGroupBean.getIacucProtocolStudyGroups()) {
IacucProtocolSpecies destIacucProtocolSpecies = newIacucProtocolSpeciesMapping.get(iacucProtocolStudyGroup.getIacucProtocolSpecies().getGroupAndSpecies());
iacucProtocolStudyGroup.setIacucProtocolSpeciesId(destIacucProtocolSpecies.getIacucProtocolSpeciesId());
iacucProtocolStudyGroup.setIacucProtocolSpecies(destIacucProtocolSpecies);
}
}
}
    /**
     * Resets the persistence state of every species row on the destination
     * protocol and reinitializes its attributes via the species service, so the
     * rows are saved as new records linked to this protocol.
     * @param destProtocol protocol whose species rows are reset
     */
    private void setAttributesForIacucProtocolSpecies(IacucProtocol destProtocol) {
        for(IacucProtocolSpecies iacucProtocolSpecies : destProtocol.getIacucProtocolSpeciesList()) {
            // Clear the persisted identity so the ORM treats the row as new.
            iacucProtocolSpecies.resetPersistenceState();
            getIacucProtocolSpeciesService().getNewProtocolSpecies(destProtocol, iacucProtocolSpecies);
        }
    }
    /**
     * Merges procedures from the source protocol (e.g. an amendment/renewal)
     * into the destination: recreates the study procedures, then carries over
     * each person's procedure qualification description.
     */
    @Override
    public void mergeProtocolProcedures(IacucProtocol sourceProtocol, IacucProtocol destProtocol) {
        createNewStudyProcedures(sourceProtocol, destProtocol);
        synchronizeProcedurePersonnel(sourceProtocol, destProtocol);
    }
    /**
     * Copies each person's procedure qualification description from the source
     * protocol onto the matching person (by person id) of the destination.
     * Used when merging amendment/renewal changes.
     * @param sourceProtocol protocol supplying the qualification descriptions
     * @param destProtocol protocol whose persons are updated in place
     */
    private void synchronizeProcedurePersonnel(IacucProtocol sourceProtocol, IacucProtocol destProtocol) {
        HashMap<String, IacucProtocolPerson> newProtocolPersons = getProtocolPersons(sourceProtocol);
        for(ProtocolPersonBase protocolPersonBase : destProtocol.getProtocolPersons()) {
            IacucProtocolPerson destIacucProtocolPerson = (IacucProtocolPerson)protocolPersonBase;
            // NOTE(review): assumes every destination person also exists on the
            // source protocol; a person present only on the destination would
            // make the following get() return null and NPE — confirm upstream
            // guarantees this invariant.
            IacucProtocolPerson sourceIacucProtocolPerson = newProtocolPersons.get(destIacucProtocolPerson.getPersonId());
            destIacucProtocolPerson.setProcedureQualificationDescription(sourceIacucProtocolPerson.getProcedureQualificationDescription());
        }
    }
@Override
public void mergeProtocolProcedurePersonnel(IacucProtocol destProtocol) {
HashMap<String, IacucProtocolPerson> newProtocolPersons = getProtocolPersons(destProtocol);
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : destProtocol.getIacucProtocolStudyGroups()) {
for(IacucProtocolStudyGroup iacucProtocolStudyGroup : iacucProtocolStudyGroupBean.getIacucProtocolStudyGroups()) {
for(IacucProcedurePersonResponsible iacucProcedurePersonResponsible : iacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList()) {
IacucProtocolPerson newIacucProtocolPerson = newProtocolPersons.get(iacucProcedurePersonResponsible.getPersonId());
iacucProcedurePersonResponsible.setProtocolPersonId(newIacucProtocolPerson.getProtocolPersonId());
iacucProcedurePersonResponsible.setProtocolPerson(newIacucProtocolPerson);
}
}
}
}
    /**
     * Resets the persistence state of all species and study procedures on the
     * given protocol so everything is saved as new records (e.g. after
     * versioning), relinking study groups to the refreshed species rows.
     */
    @Override
    public void resetAllProtocolStudyProcedures(IacucProtocol iacucProtocol) {
        setAttributesForIacucProtocolSpecies(iacucProtocol);
        HashMap<String, IacucProtocolSpecies> newIacucProtocolSpeciesMapping = getIacucProtocolSpeciesMapping(iacucProtocol.getIacucProtocolSpeciesList());
        for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : iacucProtocol.getIacucProtocolStudyGroups()) {
            setAttributesForStudyProcedures(iacucProtocolStudyGroupBean, iacucProtocol, newIacucProtocolSpeciesMapping);
        }
    }
/**
* This method is to create a new set of study details in procedures tab.
* invoked during copy protocol
* @param sourceProtocol
* @param destProtocol
*/
private void createNewStudyProcedures(IacucProtocol sourceProtocol, IacucProtocol destProtocol) {
destProtocol.setIacucProtocolStudyGroups(new ArrayList<IacucProtocolStudyGroupBean>());
HashMap<String, IacucProtocolSpecies> newIacucProtocolSpeciesMapping = getIacucProtocolSpeciesMapping(destProtocol.getIacucProtocolSpeciesList());
for(IacucProtocolStudyGroupBean iacucProtocolStudyGroupBean : sourceProtocol.getIacucProtocolStudyGroups()) {
IacucProtocolStudyGroupBean newIacucProtocolStudyGroupBean = (IacucProtocolStudyGroupBean)deepCopy(iacucProtocolStudyGroupBean);
setAttributesForStudyProcedures(newIacucProtocolStudyGroupBean, destProtocol, newIacucProtocolSpeciesMapping);
destProtocol.getIacucProtocolStudyGroups().add(newIacucProtocolStudyGroupBean);
}
}
    /**
     * Prepares one copied study group bean for persistence on the destination
     * protocol: clears its persisted identity, reassigns header attributes and
     * then rewires all nested study group collections.
     * @param newIacucProtocolStudyGroupBean the copied procedure bean
     * @param destProtocol protocol the bean now belongs to
     * @param newIacucProtocolSpeciesMapping destination species keyed by group-and-species
     */
    private void setAttributesForStudyProcedures(IacucProtocolStudyGroupBean newIacucProtocolStudyGroupBean, IacucProtocol destProtocol,
            HashMap<String, IacucProtocolSpecies> newIacucProtocolSpeciesMapping) {
        newIacucProtocolStudyGroupBean.resetPersistenceState();
        setAttributesForNewStudyGroupBean(newIacucProtocolStudyGroupBean, destProtocol);
        updateNewStudyProcedures(newIacucProtocolStudyGroupBean, destProtocol, newIacucProtocolSpeciesMapping);
    }
    /**
     * Rewires every study group under a freshly copied procedure bean: resets
     * persistence state, relinks the group to its new header and destination
     * species, and resets the nested custom data, person-responsible and
     * location rows so they persist as new records pointing at the new group.
     * The reset/relink ordering is significant: parent identities must be
     * reassigned before child rows read them.
     * @param newIacucProtocolStudyGroupBean the copied procedure bean
     * @param destProtocol protocol supplying the new person records
     * @param newIacucProtocolSpeciesMapping destination species keyed by group-and-species
     */
    private void updateNewStudyProcedures(IacucProtocolStudyGroupBean newIacucProtocolStudyGroupBean, IacucProtocol destProtocol,
            HashMap<String, IacucProtocolSpecies> newIacucProtocolSpeciesMapping) {
        HashMap<String, IacucProtocolPerson> newProtocolPersons = getProtocolPersons(destProtocol);
        for(IacucProtocolStudyGroup newIacucProtocolStudyGroup : newIacucProtocolStudyGroupBean.getIacucProtocolStudyGroups()) {
            // New identity + link back to the copied header bean.
            newIacucProtocolStudyGroup.resetPersistenceState();
            newIacucProtocolStudyGroup.setIacucProtocolStudyGroupHeaderId(newIacucProtocolStudyGroupBean.getIacucProtocolStudyGroupHeaderId());
            newIacucProtocolStudyGroup.setIacucProtocolStudyGroupBean(newIacucProtocolStudyGroupBean);
            // Relink to the destination protocol's species row.
            IacucProtocolSpecies destIacucProtocolSpecies = newIacucProtocolSpeciesMapping.get(newIacucProtocolStudyGroup.getIacucProtocolSpecies().getGroupAndSpecies());
            newIacucProtocolStudyGroup.setIacucProtocolSpeciesId(destIacucProtocolSpecies.getIacucProtocolSpeciesId());
            newIacucProtocolStudyGroup.setIacucProtocolSpecies(destIacucProtocolSpecies);
            // Child rows: reset identity and point at the new study group.
            for(IacucProtocolStudyCustomData newIacucProtocolStudyCustomData : newIacucProtocolStudyGroup.getIacucProtocolStudyCustomDataList()) {
                newIacucProtocolStudyCustomData.resetPersistenceState();
                newIacucProtocolStudyCustomData.setIacucProtocolStudyGroupId(newIacucProtocolStudyGroup.getIacucProtocolStudyGroupId());
            }
            for(IacucProcedurePersonResponsible newIacucProcedurePersonResponsible : newIacucProtocolStudyGroup.getIacucProcedurePersonResponsibleList()) {
                newIacucProcedurePersonResponsible.resetPersistenceState();
                // Swap in the destination protocol's person record (matched by person id).
                IacucProtocolPerson newIacucProtocolPerson = newProtocolPersons.get(newIacucProcedurePersonResponsible.getPersonId());
                newIacucProcedurePersonResponsible.setProtocolPersonId(newIacucProtocolPerson.getProtocolPersonId());
                newIacucProcedurePersonResponsible.setProtocolPerson(newIacucProtocolPerson);
                newIacucProcedurePersonResponsible.setIacucProtocolStudyGroupId(newIacucProtocolStudyGroup.getIacucProtocolStudyGroupId());
            }
            for(IacucProtocolStudyGroupLocation newIacucProtocolStudyGroupLocation : newIacucProtocolStudyGroup.getIacucProcedureLocationResponsibleList()) {
                newIacucProtocolStudyGroupLocation.resetPersistenceState();
                newIacucProtocolStudyGroupLocation.setIacucProtocolStudyGroupId(newIacucProtocolStudyGroup.getIacucProtocolStudyGroupId());
            }
        }
    }
protected Object deepCopy(Object obj) {
if (obj instanceof Serializable) {
return ObjectUtils.deepCopy((Serializable) obj);
}
return obj;
}
/**
* This method is to get a map of list of protocol species
* Map protocol species to get the right protocol species for procedures during copy
* @param iacucProtocolSpeciesList
* @return
*/
private HashMap<String, IacucProtocolSpecies> getIacucProtocolSpeciesMapping(List<IacucProtocolSpecies> iacucProtocolSpeciesList) {
HashMap<String, IacucProtocolSpecies> protocolSpeciesList = new HashMap<String, IacucProtocolSpecies>();
for(IacucProtocolSpecies iacucProtocolSpecies : iacucProtocolSpeciesList) {
protocolSpeciesList.put(iacucProtocolSpecies.getGroupAndSpecies(), iacucProtocolSpecies);
}
return protocolSpeciesList;
}
public boolean isProcedureViewedBySpecies() {
String procedureViewModeParam = getProcedureViewModeParameter();
if(ObjectUtils.isNull(procedureViewModeParam)) {
procedureViewModeParam = PROCEDURE_VIEW_MODE_SPECIES;
}
return procedureViewModeParam.equals(PROCEDURE_VIEW_MODE_SPECIES);
}
    /**
     * Reads the procedure view mode system parameter from the IACUC module
     * namespace; may return null when the parameter is not configured.
     */
    protected String getProcedureViewModeParameter() {
        return getParameterService().getParameterValueAsString(Constants.MODULE_NAMESPACE_IACUC, ParameterConstants.DOCUMENT_COMPONENT, PROCEDURE_VIEW_MODE);
    }
    /** @return the injected Rice parameter service */
    public ParameterService getParameterService() {
        return parameterService;
    }
    /** @param parameterService the Rice parameter service to inject */
    public void setParameterService(ParameterService parameterService) {
        this.parameterService = parameterService;
    }
}
| agpl-3.0 |
hospace/ToughRADIUS | toughradius/console/admin/__init__.py | 436 | #!/usr/bin/env python
# coding=utf-8
"""
管理模块列表,__all__中的模块将被加载
"""
# Admin-console sub-modules; per the module docstring, every name listed in
# __all__ is loaded by the admin application. List order is preserved as-is
# in case module load order matters.
__all__ = [
    'param',
    'passwd',
    'ops',
    'business',
    'card',
    'product',
    'cmanager',
    'issues',
    'backup',
    'node',
    'opr',
    'bas',
    'roster',
    'order',
    'acceptlog',
    'billing',
    'member',
    'account',
    'online',
    'ticket',
    'online_stat',
    'flow_stat',
    'apiv1'
]
| agpl-3.0 |
Seldaiendil/meyeOS | eyeos/apps/register/lang/ro/ro.js | 415 | lang['Register'] = 'Înregistrare';
// Romanian (ro) locale strings for the eyeOS "register" app.
// Keys are the English source strings; values are the translations
// (runtime data — do not alter when editing).
lang['Name'] = 'Numele';
lang['Surname: '] = 'Prenumele';
lang['Username'] = 'Username';
lang['Password'] = 'Parola';
lang['Repeat password'] = 'Repetă parola';
lang['Email'] = 'Email';
lang['Please fill in all fields.'] = 'Trebuie să completezi toate câmpurile.';
lang['Given passwords do not match, please try again'] = 'Parolele introduse nu se potrivesc, mai încearcă';
| agpl-3.0 |
witxo/bonos | modules/Bonos_Bonos/metadata/searchdefs.php | 1817 | <?php
// SuiteCRM/SugarCRM search definitions for the custom Bonos_Bonos module.
// Declares which fields appear in the basic and advanced search panels.
$module_name = 'Bonos_Bonos';
$searchdefs [$module_name] =
array (
  'layout' =>
  array (
    // Fields shown in the basic (quick) search form.
    'basic_search' =>
    array (
      // Related account (relate field resolved through the ACCOUNTS id column).
      'bonos_bonos_accounts_name' =>
      array (
        'type' => 'relate',
        'link' => true,
        'label' => 'LBL_BONOS_BONOS_ACCOUNTS_FROM_ACCOUNTS_TITLE',
        'id' => 'BONOS_BONOS_ACCOUNTSACCOUNTS_IDA',
        'width' => '10%',
        'default' => true,
        'name' => 'bonos_bonos_accounts_name',
      ),
      // Expiry date of the voucher.
      'fechacaducidad' =>
      array (
        'type' => 'date',
        'label' => 'LBL_FECHACADUCIDAD',
        'width' => '10%',
        'default' => true,
        'name' => 'fechacaducidad',
      ),
    ),
    // Fields shown in the advanced search form.
    'advanced_search' =>
    array (
      'bonos_bonos_accounts_name' =>
      array (
        'type' => 'relate',
        'link' => true,
        'label' => 'LBL_BONOS_BONOS_ACCOUNTS_FROM_ACCOUNTS_TITLE',
        'width' => '10%',
        'default' => true,
        'id' => 'BONOS_BONOS_ACCOUNTSACCOUNTS_IDA',
        'name' => 'bonos_bonos_accounts_name',
      ),
      // Assigned user dropdown, populated by get_user_array(false).
      'assigned_user_id' =>
      array (
        'name' => 'assigned_user_id',
        'label' => 'LBL_ASSIGNED_TO',
        'type' => 'enum',
        'function' =>
        array (
          'name' => 'get_user_array',
          'params' =>
          array (
            0 => false,
          ),
        ),
        'default' => true,
        'width' => '10%',
      ),
      // Record creation date.
      'date_entered' =>
      array (
        'type' => 'datetime',
        'label' => 'LBL_DATE_ENTERED',
        'width' => '10%',
        'default' => true,
        'name' => 'date_entered',
      ),
    ),
  ),
  // Layout geometry of the search forms.
  'templateMeta' =>
  array (
    'maxColumns' => '3',
    'maxColumnsBasic' => '4',
    'widths' =>
    array (
      'label' => '10',
      'field' => '30',
    ),
  ),
);
| agpl-3.0 |
geothomasp/kcmit | coeus-impl/src/main/java/org/kuali/coeus/common/questionnaire/impl/core/QuestionLookupAction.java | 6542 | /*
* Kuali Coeus, a comprehensive research administration system for higher education.
*
* Copyright 2005-2015 Kuali, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.kuali.coeus.common.questionnaire.impl.core;
import org.apache.commons.lang3.StringUtils;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import org.kuali.coeus.sys.framework.service.KcServiceLocator;
import org.kuali.kra.infrastructure.Constants;
import org.kuali.coeus.common.questionnaire.framework.question.Question;
import org.kuali.rice.kns.lookup.LookupResultsService;
import org.kuali.rice.kns.web.struts.action.KualiAction;
import org.kuali.rice.krad.bo.BusinessObject;
import org.kuali.rice.krad.util.GlobalVariables;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.Collection;
import java.util.Iterator;
/*
* This class is used for question look in Questionnaire maintenance
*/
public class QuestionLookupAction extends KualiAction {
private static final String PFP = "#f#";
private static final String PQP = "#q#";
private static final String SINGLE_LOOKUP = "singleLookup";
private static final String MULTI_LOOKUP = "multiLookup";
private static final String REPLACE_LOOKUP = "replaceLookup";
@Override
public ActionForward refresh(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response)
throws Exception {
ActionForward forward = super.refresh(mapping, form, request, response);
QuestionLookupForm questionLookupForm = (QuestionLookupForm) form;
String questions = Constants.EMPTY_STRING;
if (questionLookupForm.getLookupResultsBOClassName() != null && questionLookupForm.getLookupResultsSequenceNumber() != null) {
String lookupResultsSequenceNumber = questionLookupForm.getLookupResultsSequenceNumber();
@SuppressWarnings("unchecked")
Class<BusinessObject> lookupResultsBOClass = (Class<BusinessObject>) Class.forName(questionLookupForm.getLookupResultsBOClassName());
Collection<BusinessObject> rawValues = KcServiceLocator.getService(LookupResultsService.class)
.retrieveSelectedResultBOs(lookupResultsSequenceNumber, lookupResultsBOClass,
GlobalVariables.getUserSession().getPerson().getPrincipalId());
int idx = 0;
String idxString = StringUtils.substringBetween(questionLookupForm.getLookedUpCollectionName(), "[", "]");
if (StringUtils.isNotBlank(idxString)) {
idx = Integer.parseInt(idxString);
}
questionLookupForm.setSelectedQuestions(Constants.EMPTY_STRING);
for (Iterator iter = rawValues.iterator(); iter.hasNext();) {
Question question = (Question) iter.next();
String desc = question.getQuestion();
// need to deal with '"' in questio's description
// This '"' caused trouble for document.getElementById("selectedQuestions").value;
// It only getvalue up to '"', so not the whole string is returned
if (desc.indexOf("\"") > 0) {
desc = desc.replace("\"", """);
}
if (StringUtils.isBlank(questions)) {
questions = question.getId() + PFP + desc + PFP + question.getQuestionTypeId() + PFP
+ question.getSequenceNumber();
}
else {
questions = questions + PQP + question.getId() + PFP + desc + PFP
+ question.getQuestionTypeId() + PFP + question.getSequenceNumber();
}
questions = questions + PFP + getQuestionResponse(question);
}
questionLookupForm.setLookupResultsSequenceNumber(null);
}
questionLookupForm.setSelectedQuestions(questions);
if (questionLookupForm.getNodeIndex() >= 0) {
// when single lookup return, this refresh will be called too
forward = mapping.findForward(SINGLE_LOOKUP);
}
else if (questionLookupForm.getNodeIndex() == -2) {
forward = mapping.findForward(REPLACE_LOOKUP);
}
else {
forward = mapping.findForward(MULTI_LOOKUP);
}
return forward;
}
private String getQuestionResponse(Question question) {
String retString = "";
if (question.getQuestionTypeId().equals(new Integer(6))) {
String className = question.getLookupClass();
className = className.substring(className.lastIndexOf(".") + 1);
retString = className + PFP + question.getMaxAnswers() + PFP + question.getLookupReturn();
}
else {
retString = question.getDisplayedAnswers() + PFP + question.getMaxAnswers() + PFP + question.getAnswerMaxLength();
}
return retString;
}
@Override
public ActionForward execute(ActionMapping mapping, ActionForm form, HttpServletRequest request, HttpServletResponse response)
throws Exception {
ActionForward forward = super.execute(mapping, form, request, response);
String lookupType = request.getParameter("lookupType");
if (StringUtils.isNotBlank(lookupType)) {
if (lookupType.equals("single")) {
forward = mapping.findForward(SINGLE_LOOKUP);
}
else if (lookupType.equals("multivalue")) {
forward = mapping.findForward(MULTI_LOOKUP);
}
else if (lookupType.equals("replace")) {
forward = mapping.findForward(REPLACE_LOOKUP);
}
}
return forward;
}
}
| agpl-3.0 |
akva2/GoTools | gotools-core/src/utils/BoundingBox.C | 8001 | /*
* Copyright (C) 1998, 2000-2007, 2010, 2011, 2012, 2013 SINTEF ICT,
* Applied Mathematics, Norway.
*
* Contact information: E-mail: tor.dokken@sintef.no
* SINTEF ICT, Department of Applied Mathematics,
* P.O. Box 124 Blindern,
* 0314 Oslo, Norway.
*
* This file is part of GoTools.
*
* GoTools is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* GoTools is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with GoTools. If not, see
* <http://www.gnu.org/licenses/>.
*
* In accordance with Section 7(b) of the GNU Affero General Public
* License, a covered work must retain the producer line in every data
* file that is created or manipulated using GoTools.
*
* Other Usage
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the GoTools library without
* disclosing the source code of your own applications.
*
* This file may be used in accordance with the terms contained in a
* written agreement between you and SINTEF ICT.
*/
#include "GoTools/utils/BoundingBox.h"
#include <iostream>
using namespace Go;
using namespace std;
//===========================================================================
BoundingBox::~BoundingBox()
//===========================================================================
{
    // Nothing to release; members clean up themselves.
}
//===========================================================================
// Initializes the box from its low and high corners; check() validates
// the corner ordering/consistency.
void BoundingBox::setFromPoints(const Point& low, const Point& high)
//===========================================================================
{
    low_ = low;
    high_ = high;
    check();
}
//===========================================================================
// Reads the two corner points from the stream. The box must already have a
// dimension (Point reading needs it), hence the precondition below.
void BoundingBox::read(std::istream& is)
//===========================================================================
{
    ALWAYS_ERROR_IF(low_.dimension() == 0,
		    "Boundingbox has no set dimension yet - cannot read.");
    is >> low_ >> high_;
    check();
}
//===========================================================================
// Writes the two corner points at 15-digit precision, restoring the stream's
// previous precision afterwards.
void BoundingBox::write(std::ostream& os) const
//===========================================================================
{
    ALWAYS_ERROR_IF(!valid_, "Not initialized - cannot write.");
    streamsize prev = os.precision(15);
    os << low_ << endl << high_;
    os.precision(prev); // Reset precision to its previous value
}
//===========================================================================
//===========================================================================
// True iff pt lies within [low - tol, high + tol] in every coordinate.
bool BoundingBox::containsPoint(const Point& pt, double tol) const
//===========================================================================
{
    ALWAYS_ERROR_IF(!valid_, "Not initialized - cannot call.");
    const int dim = low_.dimension();
    for (int d = 0; d < dim; ++d) {
	bool outside = (pt[d] < low_[d] - tol) || (pt[d] > high_[d] + tol);
	if (outside)
	    return false;
    }
    return true;
}
//===========================================================================
// Convenience wrapper: overlap test without reporting the overlap extent.
bool BoundingBox::overlaps(const BoundingBox& box, double tol) const
//===========================================================================
{
    double overlap;
    return getOverlap(box, overlap, tol);
}
//===========================================================================
//===========================================================================
// Tests overlap with 'box' within tolerance 'tol'. On return, 'overlap'
// holds the smallest per-dimension overlap extent computed so far; note
// that on an early false return it reflects only the dimensions examined
// up to that point (matching the historical behavior).
bool BoundingBox::getOverlap(const BoundingBox& box, double& overlap,
			     double tol) const
//===========================================================================
{
    overlap = 1.0e10; // Large sentinel, reduced per dimension.
    Point other_high = box.high();
    Point other_low = box.low();
    for (int d = 0; d < low_.dimension(); ++d)
    {
	// Extent of the overlap interval in this dimension.
	double lo, hi;
	if (high_[d] > other_high[d])
	{
	    lo = low_[d];
	    hi = other_high[d];
	}
	else
	{
	    lo = other_low[d];
	    hi = high_[d];
	}
	overlap = std::min(overlap, hi - lo);
	if (hi < lo - tol)
	    return false; // Separated in this dimension.
    }
    return true;
}
//===========================================================================
// A box is contained iff both of its corners are contained (within tol).
bool BoundingBox::containsBox(const BoundingBox& box, double tol) const
//===========================================================================
{
    return (containsPoint(box.low(), tol) && containsPoint(box.high(), tol));
}
//===========================================================================
//===========================================================================
// Grows the box to include pt. An uninitialized box collapses to the single
// point. Dimensions must agree.
void BoundingBox::addUnionWith(const Point& pt)
//===========================================================================
{
    ALWAYS_ERROR_IF (low_.dimension() != pt.dimension(),
		     "Dimension mismatch.");
    if (!valid_) {
	low_ = pt;
	high_ = pt;
	valid_ = true;
	return;
    }
    const int dim = low_.dimension();
    for (int d = 0; d < dim; ++d) {
	if (pt[d] < low_[d]) {
	    low_[d] = pt[d];
	}
	else if (pt[d] > high_[d]) {
	    high_[d] = pt[d];
	}
    }
}
//===========================================================================
void BoundingBox::addUnionWith(const BoundingBox& box)
//===========================================================================
{
    // Including both extreme corners of an axis-aligned box is enough to
    // include the whole box.
    // NOTE(review): box.valid_ is not checked here; a default-constructed
    // 'box' would contribute uninitialized corners — confirm callers
    // guarantee validity.
    addUnionWith(box.low_);
    addUnionWith(box.high_);
}
//===========================================================================
vector<Point> BoundingBox::lineIntersect(const Point& p1, const Point& dir) const
//===========================================================================
{
    // Intersect the half-line {p1 + t*dir, t >= 0} with the six planes
    // supporting the box sides, and keep every intersection point that lies
    // on the box itself (within a fixed tolerance).  Assumes dimension 3.
    //
    // For a side plane through corner point 'corner' with normal n = +/-e_axis,
    // the line parameter of the intersection is
    //     t = ((corner - p1)*n) / (dir*n) = (corner[axis] - p1[axis]) / dir[axis],
    // which is independent of the sign of the normal.  This lets the six
    // near-identical side blocks of the original implementation collapse
    // into a single loop over the three axes.
    vector<Point> result;
    const double tol = 1.0e-6;
    // Axis order {z, x, y} reproduces the traversal order of the original
    // code: bottom/top, left/right, front/back.
    const int axes[3] = { 2, 0, 1 };
    for (int i = 0; i < 3; ++i) {
        const int axis = axes[i];
        const double denom = dir[axis];
        if (fabs(denom) <= tol)
            continue;  // Line (nearly) parallel to this pair of sides.
        // Low-side plane first, then high-side plane (matches original order).
        const double plane_coord[2] = { low_[axis], high_[axis] };
        for (int j = 0; j < 2; ++j) {
            const double t = (plane_coord[j] - p1[axis]) / denom;
            if (t < 0.0)
                continue;  // Intersection lies behind the line's start point.
            Point pos = p1 + t*dir;
            if (containsPoint(pos, tol))
                result.push_back(pos);
        }
    }
    return result;
}
//===========================================================================
void BoundingBox::check() const
//===========================================================================
{
    // Verify internal consistency: matching dimensions and a non-inverted
    // box (low_ componentwise no greater than high_).
    const int dim = low_.dimension();
    if (dim != high_.dimension()) {
        THROW("Dimension mismatch.");
    }
    for (int d = 0; d < dim; ++d) {
        if (low_[d] > high_[d]) {
            THROW("Low point is higher than high point.");
        }
    }
    // NOTE(review): valid_ is assigned in a const member function, so it is
    // presumably declared mutable — confirm in the class declaration.
    valid_ = true;
}
| agpl-3.0 |
ColostateResearchServices/kc | coeus-it/src/test/java/org/kuali/kra/test/StateServiceTest.java | 2356 | /*
* Kuali Coeus, a comprehensive research administration system for higher education.
*
* Copyright 2005-2016 Kuali, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.kuali.kra.test;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.kuali.coeus.sys.framework.service.KcServiceLocator;
import org.kuali.kra.test.infrastructure.KcIntegrationTestBase;
import org.kuali.rice.location.api.state.State;
import org.kuali.rice.location.api.state.StateService;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class StateServiceTest extends KcIntegrationTestBase {

    private static final String POSTAL_CNTRY_CD_UNITED_STATES = "US";
    private static final String ALT_POSTAL_CNTRY_CD_UNITED_STATES = "USA";

    private StateService stateService;

    @Before
    public void setUp() throws Exception {
        stateService = KcServiceLocator.getService(StateService.class);
    }

    @After
    public void tearDown() throws Exception {
        stateService = null;
    }

    /**
     * States looked up via the alternate country code ("USA") must match,
     * name for name and in the same order, those looked up via the postal
     * country code ("US").
     */
    @Test
    public void testFindAllStatesByAltCountryCode() throws Exception {
        List<State> statesByAltCode =
                stateService.findAllStatesInCountryByAltCode(ALT_POSTAL_CNTRY_CD_UNITED_STATES);
        List<State> statesByPostalCode =
                stateService.findAllStatesInCountry(POSTAL_CNTRY_CD_UNITED_STATES);
        assertNotNull(statesByAltCode);
        assertNotNull(statesByPostalCode);
        assertEquals(statesByAltCode.size(), statesByPostalCode.size());
        for (int i = 0; i < statesByAltCode.size(); i++) {
            assertEquals(statesByAltCode.get(i).getName(), statesByPostalCode.get(i).getName());
        }
    }
}
| agpl-3.0 |
Godin/checkstyle | src/main/java/com/puppycrawl/tools/checkstyle/checks/whitespace/OperatorWrapCheck.java | 8550 | ////////////////////////////////////////////////////////////////////////////////
// checkstyle: Checks Java source code for adherence to a set of rules.
// Copyright (C) 2001-2015 the original author or authors.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
////////////////////////////////////////////////////////////////////////////////
package com.puppycrawl.tools.checkstyle.checks.whitespace;
import org.apache.commons.lang3.StringUtils;
import com.puppycrawl.tools.checkstyle.Utils;
import com.puppycrawl.tools.checkstyle.api.DetailAST;
import com.puppycrawl.tools.checkstyle.api.TokenTypes;
import com.puppycrawl.tools.checkstyle.checks.AbstractOptionCheck;
/**
* <p>
* Checks line wrapping for operators.
* The policy to verify is specified using the {@link WrapOption} class
* and defaults to {@link WrapOption#NL}.
* </p>
* <p> By default the check will check the following operators:
* {@link TokenTypes#BAND BAND},
* {@link TokenTypes#BOR BOR},
* {@link TokenTypes#BSR BSR},
* {@link TokenTypes#BXOR BXOR},
* {@link TokenTypes#COLON COLON},
* {@link TokenTypes#DIV DIV},
* {@link TokenTypes#EQUAL EQUAL},
* {@link TokenTypes#GE GE},
* {@link TokenTypes#GT GT},
* {@link TokenTypes#LAND LAND},
* {@link TokenTypes#LE LE},
* {@link TokenTypes#LITERAL_INSTANCEOF LITERAL_INSTANCEOF},
* {@link TokenTypes#LOR LOR},
* {@link TokenTypes#LT LT},
* {@link TokenTypes#MINUS MINUS},
* {@link TokenTypes#MOD MOD},
* {@link TokenTypes#NOT_EQUAL NOT_EQUAL},
* {@link TokenTypes#PLUS PLUS},
* {@link TokenTypes#QUESTION QUESTION},
* {@link TokenTypes#SL SL},
* {@link TokenTypes#SR SR},
* {@link TokenTypes#STAR STAR}.
* Other acceptable tokens are
* {@link TokenTypes#ASSIGN ASSIGN},
* {@link TokenTypes#BAND_ASSIGN BAND_ASSIGN},
* {@link TokenTypes#BOR_ASSIGN BOR_ASSIGN},
* {@link TokenTypes#BSR_ASSIGN BSR_ASSIGN},
* {@link TokenTypes#BXOR_ASSIGN BXOR_ASSIGN},
* {@link TokenTypes#DIV_ASSIGN DIV_ASSIGN},
* {@link TokenTypes#MINUS_ASSIGN MINUS_ASSIGN},
* {@link TokenTypes#MOD_ASSIGN MOD_ASSIGN},
* {@link TokenTypes#PLUS_ASSIGN PLUS_ASSIGN},
* {@link TokenTypes#SL_ASSIGN SL_ASSIGN},
* {@link TokenTypes#SR_ASSIGN SR_ASSIGN},
* {@link TokenTypes#STAR_ASSIGN STAR_ASSIGN}.
* </p>
* <p>
* An example of how to configure the check is:
* </p>
* <pre>
* <module name="OperatorWrap"/>
* </pre>
* <p> An example of how to configure the check for assignment operators at the
* end of a line is:
* </p>
* <pre>
* <module name="OperatorWrap">
* <property name="tokens"
* value="ASSIGN,DIV_ASSIGN,PLUS_ASSIGN,MINUS_ASSIGN,STAR_ASSIGN,MOD_ASSIGN,SR_ASSIGN,BSR_ASSIGN,SL_ASSIGN,BXOR_ASSIGN,BOR_ASSIGN,BAND_ASSIGN"/>
* <property name="option" value="eol"/>
* </module>
* </pre>
*
* @author Rick Giles
*/
public class OperatorWrapCheck
    extends AbstractOptionCheck<WrapOption>
{
    /**
     * A key is pointing to the warning message text in "messages.properties"
     * file.
     */
    public static final String LINE_NEW = "line.new";

    /**
     * A key is pointing to the warning message text in "messages.properties"
     * file.
     */
    public static final String LINE_PREVIOUS = "line.previous";

    /**
     * Sets the operator wrap option to new line.
     */
    public OperatorWrapCheck() {
        super(WrapOption.NL, WrapOption.class);
    }

    /** Default tokens: the binary operators (assignment operators excluded). */
    @Override
    public int[] getDefaultTokens() {
        return new int[] {
            TokenTypes.QUESTION,          // '?'
            TokenTypes.COLON,             // ':' (not reported for a case)
            TokenTypes.EQUAL,             // "=="
            TokenTypes.NOT_EQUAL,         // "!="
            TokenTypes.DIV,               // '/'
            TokenTypes.PLUS,              //' +' (unary plus is UNARY_PLUS)
            TokenTypes.MINUS,             // '-' (unary minus is UNARY_MINUS)
            TokenTypes.STAR,              // '*'
            TokenTypes.MOD,               // '%'
            TokenTypes.SR,                // ">>"
            TokenTypes.BSR,               // ">>>"
            TokenTypes.GE,                // ">="
            TokenTypes.GT,                // ">"
            TokenTypes.SL,                // "<<"
            TokenTypes.LE,                // "<="
            TokenTypes.LT,                // '<'
            TokenTypes.BXOR,              // '^'
            TokenTypes.BOR,               // '|'
            TokenTypes.LOR,               // "||"
            TokenTypes.BAND,              // '&'
            TokenTypes.LAND,              // "&&"
            TokenTypes.TYPE_EXTENSION_AND,
            TokenTypes.LITERAL_INSTANCEOF,
        };
    }

    /** Acceptable tokens: the defaults plus all assignment operators. */
    @Override
    public int[] getAcceptableTokens() {
        return new int[] {
            TokenTypes.QUESTION,          // '?'
            TokenTypes.COLON,             // ':' (not reported for a case)
            TokenTypes.EQUAL,             // "=="
            TokenTypes.NOT_EQUAL,         // "!="
            TokenTypes.DIV,               // '/'
            TokenTypes.PLUS,              //' +' (unary plus is UNARY_PLUS)
            TokenTypes.MINUS,             // '-' (unary minus is UNARY_MINUS)
            TokenTypes.STAR,              // '*'
            TokenTypes.MOD,               // '%'
            TokenTypes.SR,                // ">>"
            TokenTypes.BSR,               // ">>>"
            TokenTypes.GE,                // ">="
            TokenTypes.GT,                // ">"
            TokenTypes.SL,                // "<<"
            TokenTypes.LE,                // "<="
            TokenTypes.LT,                // '<'
            TokenTypes.BXOR,              // '^'
            TokenTypes.BOR,               // '|'
            TokenTypes.LOR,               // "||"
            TokenTypes.BAND,              // '&'
            TokenTypes.LAND,              // "&&"
            TokenTypes.LITERAL_INSTANCEOF,
            TokenTypes.TYPE_EXTENSION_AND,
            TokenTypes.ASSIGN,            // '='
            TokenTypes.DIV_ASSIGN,        // "/="
            TokenTypes.PLUS_ASSIGN,       // "+="
            TokenTypes.MINUS_ASSIGN,      //"-="
            TokenTypes.STAR_ASSIGN,       // "*="
            TokenTypes.MOD_ASSIGN,        // "%="
            TokenTypes.SR_ASSIGN,         // ">>="
            TokenTypes.BSR_ASSIGN,        // ">>>="
            TokenTypes.SL_ASSIGN,         // "<<="
            TokenTypes.BXOR_ASSIGN,       // "^="
            TokenTypes.BOR_ASSIGN,        // "|="
            TokenTypes.BAND_ASSIGN,       // "&="
        };
    }

    @Override
    public void visitToken(DetailAST ast) {
        if (ast.getType() == TokenTypes.COLON) {
            final DetailAST parent = ast.getParent();
            if (parent.getType() == TokenTypes.LITERAL_DEFAULT
                || parent.getType() == TokenTypes.LITERAL_CASE) {
                //we do not want to check colon for cases and defaults
                return;
            }
        }
        final WrapOption wOp = getAbstractOption();

        final String text = ast.getText();
        final int colNo = ast.getColumnNo();
        final int lineNo = ast.getLineNo();
        // AST line numbers are 1-based; getLine() is 0-based.
        final String currentLine = getLine(lineNo - 1);

        // Check if rest of line is whitespace, and not just the operator
        // by itself. This last bit is to handle the operator on a line by
        // itself.
        if (wOp == WrapOption.NL
            && !text.equals(currentLine.trim())
            && StringUtils.isBlank(currentLine.substring(colNo + text.length()))) {
            // NL policy violated: the operator ends its line instead of
            // starting the next one.
            log(lineNo, colNo, LINE_NEW, text);
        }
        else if (wOp == WrapOption.EOL
                && Utils.whitespaceBefore(colNo - 1, currentLine)) {
            // EOL policy violated: only whitespace precedes the operator,
            // i.e. it starts a wrapped line instead of ending the previous one.
            log(lineNo, colNo, LINE_PREVIOUS, text);
        }
    }
}
| lgpl-2.1 |
rajsingh8220/abixen-platform | abixen-platform-core/src/main/java/com/abixen/platform/core/util/RoleBuilder.java | 875 | /**
* Copyright (c) 2010-present Abixen Systems. All rights reserved.
*
* This library is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
* Software Foundation; either version 2.1 of the License, or (at your option)
* any later version.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*/
package com.abixen.platform.core.util;
import com.abixen.platform.common.model.enumtype.RoleType;
import com.abixen.platform.core.model.impl.Role;
/**
 * Builder for {@link Role} instances.
 */
public interface RoleBuilder {

    /** Creates the {@link Role} from the values configured so far. */
    Role build();

    /** Sets the role name and returns this builder for chaining. */
    RoleBuilder name(String name);

    /** Sets the role type and returns this builder for chaining. */
    RoleBuilder type(RoleType roleType);
}
| lgpl-2.1 |
plast-lab/soot | src/main/generated/sablecc/soot/jimple/parser/node/ALengthofUnop.java | 1900 | /* This file was generated by SableCC (http://www.sablecc.org/). */
package soot.jimple.parser.node;
import soot.jimple.parser.analysis.*;
@SuppressWarnings("nls")
// NOTE: generated by SableCC (see the file header); keep manual edits to
// comments only — code changes will be lost on regeneration.
public final class ALengthofUnop extends PUnop
{
    // The single child token of this production.
    private TLengthof _lengthof_;

    public ALengthofUnop()
    {
        // Constructor
    }

    public ALengthofUnop(
        @SuppressWarnings("hiding") TLengthof _lengthof_)
    {
        // Constructor
        setLengthof(_lengthof_);
    }

    /** Deep-copies this node, cloning its child token. */
    @Override
    public Object clone()
    {
        return new ALengthofUnop(
            cloneNode(this._lengthof_));
    }

    /** Dispatches to the visitor's case method for this production. */
    @Override
    public void apply(Switch sw)
    {
        ((Analysis) sw).caseALengthofUnop(this);
    }

    public TLengthof getLengthof()
    {
        return this._lengthof_;
    }

    /**
     * Sets the child token, detaching the previous child and re-parenting
     * the new one (removing it from any former parent first).
     */
    public void setLengthof(TLengthof node)
    {
        if(this._lengthof_ != null)
        {
            this._lengthof_.parent(null);
        }
        if(node != null)
        {
            if(node.parent() != null)
            {
                node.parent().removeChild(node);
            }
            node.parent(this);
        }
        this._lengthof_ = node;
    }

    @Override
    public String toString()
    {
        return ""
            + toString(this._lengthof_);
    }

    @Override
    void removeChild(@SuppressWarnings("unused") Node child)
    {
        // Remove child
        if(this._lengthof_ == child)
        {
            this._lengthof_ = null;
            return;
        }
        throw new RuntimeException("Not a child.");
    }

    @Override
    void replaceChild(@SuppressWarnings("unused") Node oldChild, @SuppressWarnings("unused") Node newChild)
    {
        // Replace child
        if(this._lengthof_ == oldChild)
        {
            setLengthof((TLengthof) newChild);
            return;
        }
        throw new RuntimeException("Not a child.");
    }
}
| lgpl-2.1 |
golovnin/wildfly | messaging-activemq/src/main/java/org/wildfly/extension/messaging/activemq/BroadcastGroupWriteAttributeHandler.java | 2346 | /*
* JBoss, Home of Professional Open Source.
* Copyright 2011, Red Hat, Inc., and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.wildfly.extension.messaging.activemq;
import org.jboss.as.controller.OperationContext;
import org.jboss.as.controller.OperationFailedException;
import org.jboss.as.controller.ReloadRequiredWriteAttributeHandler;
import org.jboss.as.controller.registry.Resource;
import org.jboss.dmr.ModelNode;
/**
* Write attribute handler for attributes that update a broadcast group resource.
*
* @author Brian Stansberry (c) 2011 Red Hat Inc.
*/
public class BroadcastGroupWriteAttributeHandler extends ReloadRequiredWriteAttributeHandler {

    public static final BroadcastGroupWriteAttributeHandler INSTANCE = new BroadcastGroupWriteAttributeHandler();

    private BroadcastGroupWriteAttributeHandler() {
        super(BroadcastGroupDefinition.ATTRIBUTES);
    }

    @Override
    protected void finishModelStage(final OperationContext context, final ModelNode operation,
            final String attributeName, final ModelNode newValue, final ModelNode oldValue,
            final Resource model) throws OperationFailedException {
        // A change to the connector references must be validated before the
        // generic reload-required handling is applied.
        final String connectorRefsName = BroadcastGroupDefinition.CONNECTOR_REFS.getName();
        if (attributeName.equals(connectorRefsName)) {
            BroadcastGroupDefinition.validateConnectors(context, operation, newValue);
        }
        super.finishModelStage(context, operation, attributeName, newValue, oldValue, model);
    }
}
| lgpl-2.1 |
luck3y/wildfly-core | elytron/src/main/java/org/wildfly/extension/elytron/VirtualSecurityDomainProcessor.java | 2692 | /*
* Copyright 2019 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wildfly.extension.elytron;
import static org.jboss.as.server.security.VirtualDomainMarkerUtility.isVirtualDomainRequired;
import static org.jboss.as.server.security.VirtualDomainMarkerUtility.virtualDomainName;
import java.util.function.Consumer;
import org.jboss.as.server.deployment.DeploymentPhaseContext;
import org.jboss.as.server.deployment.DeploymentUnit;
import org.jboss.as.server.deployment.DeploymentUnitProcessingException;
import org.jboss.as.server.deployment.DeploymentUnitProcessor;
import org.jboss.msc.Service;
import org.jboss.msc.service.ServiceBuilder;
import org.jboss.msc.service.ServiceController.Mode;
import org.jboss.msc.service.ServiceName;
import org.jboss.msc.service.ServiceTarget;
import org.wildfly.security.auth.server.SecurityDomain;
/**
* A {@link DeploymentUnitProcessor} to install a virtual {@link SecurityDomain} if required.
*
* @author <a href="mailto:darran.lofthouse@jboss.com">Darran Lofthouse</a>
*/
class VirtualSecurityDomainProcessor implements DeploymentUnitProcessor {

    @Override
    public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
        final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
        // Only the root deployment installs the virtual domain, and only when
        // one has been marked as required.
        final boolean isRootDeployment = deploymentUnit.getParent() == null;
        if (!isRootDeployment || !isVirtualDomainRequired(deploymentUnit)) {
            return;
        }

        final ServiceName domainServiceName = virtualDomainName(deploymentUnit);
        final SecurityDomain virtualDomain = SecurityDomain.builder().build();

        // Register an on-demand service exposing the empty virtual domain.
        final ServiceTarget serviceTarget = phaseContext.getServiceTarget();
        final ServiceBuilder<?> builder = serviceTarget.addService(domainServiceName);
        final Consumer<SecurityDomain> domainConsumer = builder.provides(domainServiceName);
        builder.setInstance(Service.newInstance(domainConsumer, virtualDomain));
        builder.setInitialMode(Mode.ON_DEMAND);
        builder.install();
    }

    @Override
    public void undeploy(DeploymentUnit deploymentUnit) {
        // Nothing to undo; the installed service goes away with the deployment.
    }
}
| lgpl-2.1 |
JoeCarlson/intermine | intermine/api/main/src/org/intermine/api/bag/operations/RelativeComplement.java | 3099 | package org.intermine.api.bag.operations;
/*
* Copyright (C) 2002-2016 FlyMine
*
* This code may be freely distributed and modified under the
* terms of the GNU Lesser General Public Licence. This should
* be distributed with the code. See the LICENSE file for more
* information or http://www.gnu.org/copyleft/lesser.html.
*
*/
import static java.util.Arrays.asList;
import static org.intermine.metadata.DescriptorUtils.findIntersectionType;
import static org.intermine.metadata.DescriptorUtils.findSumType;
import java.util.Collection;
import org.intermine.api.profile.InterMineBag;
import org.intermine.api.profile.Profile;
import org.intermine.metadata.ClassDescriptor;
import org.intermine.metadata.MetaDataException;
import org.intermine.metadata.Model;
import org.intermine.objectstore.query.ObjectStoreBagCombination;
/**
*
* @author Alex
*
*/
public class RelativeComplement extends BagOperation
{
    /** The lists whose members are removed from the base lists. */
    private Collection<InterMineBag> excluded;

    /**
     * @param model data model
     * @param profile userprofile
     * @param froms base lists to use in operation
     * @param exclude lists that contain objects to exclude from product list
     */
    public RelativeComplement(
            Model model, Profile profile, Collection<InterMineBag> froms,
            Collection<InterMineBag> exclude) {
        super(model, profile, froms);
        this.excluded = exclude;
    }

    @Override
    public String getNewBagType() throws IncompatibleTypes {
        try {
            // The result always carries the common type of the left-hand lists.
            ClassDescriptor leftType = findSumType(getClasses());
            // Check each excluded list individually: with multiple inheritance
            // on the left it is legal to subtract, say, Things and HasAddresses
            // from Employees even though Thing and HasAddress share no common
            // type with each other.
            for (InterMineBag excludedBag : excluded) {
                ClassDescriptor rightType =
                        model.getClassDescriptorByName(excludedBag.getType());
                // Throws if subtracting this list makes no sense at all.
                findIntersectionType(asList(leftType, rightType));
            }
            return leftType.getUnqualifiedName();
        } catch (MetaDataException e) {
            throw new IncompatibleTypes(e);
        }
    }

    @Override
    protected ObjectStoreBagCombination combineBags() {
        // Build (union of base lists) EXCEPT (each excluded list).
        ObjectStoreBagCombination baseUnion =
                new ObjectStoreBagCombination(ObjectStoreBagCombination.UNION);
        for (InterMineBag baseBag : getBags()) {
            baseUnion.addBag(baseBag.getOsb());
        }
        ObjectStoreBagCombination complement =
                new ObjectStoreBagCombination(getOperationCode());
        complement.addBagCombination(baseUnion);
        for (InterMineBag excludedBag : excluded) {
            complement.addBag(excludedBag.getOsb());
        }
        return complement;
    }

    @Override
    protected int getOperationCode() {
        return ObjectStoreBagCombination.EXCEPT;
    }
}
| lgpl-2.1 |
xph906/SootNew | src/soot/jimple/toolkits/infoflow/AbstractDataSource.java | 1746 | package soot.jimple.toolkits.infoflow;
import soot.*;
import soot.util.*;
import java.util.*;
/**
 * Wraps any object as a {@link Value}.  Equality (both {@code equals} and
 * {@code equivTo}) is delegated to the wrapped {@code sourcename} object.
 */
public class AbstractDataSource implements Value
{
    /** Identifier of this data source; all equality is based on it. */
    Object sourcename;

    public AbstractDataSource(Object sourcename)
    {
        this.sourcename = sourcename;
    }

    /** A data source uses no other values, so there are no use boxes. */
    @Override
    public List<ValueBox> getUseBoxes()
    {
        return Collections.emptyList();
    }

    /** Clones the object by wrapping the same sourcename. */
    @Override
    public Object clone()
    {
        return new AbstractDataSource(sourcename);
    }

    /** Returns true if this object is structurally equivalent to c.
     * AbstractDataSources are equal and equivalent if their sourcename is the same */
    public boolean equivTo(Object c)
    {
        if(sourcename instanceof Value)
            return (c instanceof AbstractDataSource && ((Value) sourcename).equivTo( ((AbstractDataSource)c).sourcename ));
        return (c instanceof AbstractDataSource && ((AbstractDataSource)c).sourcename.equals(sourcename));
    }

    @Override
    public boolean equals(Object c)
    {
        return (c instanceof AbstractDataSource && ((AbstractDataSource)c).sourcename.equals(sourcename));
    }

    /** Consistent with equals(Object): equal data sources share the
     * sourcename's hash code.  (Previously equals was overridden without
     * hashCode, breaking the Object contract for hash-based collections.) */
    @Override
    public int hashCode()
    {
        return sourcename.hashCode();
    }

    /** Returns a hash code consistent with structural equality for this object. */
    public int equivHashCode()
    {
        if(sourcename instanceof Value)
            return ((Value) sourcename).equivHashCode();
        return sourcename.hashCode();
    }

    public void toString( UnitPrinter up ) {}

    public Type getType()
    {
        return NullType.v();
    }

    public void apply(Switch sw)
    {
        throw new RuntimeException("Not Implemented");
    }

    @Override
    public String toString()
    {
        return "sourceof<" + sourcename.toString() + ">";
    }
}
| lgpl-2.1 |
fevangelista/psi4 | psi4/src/psi4/libiwl/buf_fetch.cc | 2165 | /*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2021 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/
/*!
\file
\ingroup IWL
*/
#include <cstdio>
#include "psi4/libpsio/psio.h"
#include "iwl.h"
#include "iwl.hpp"
namespace psi {
// Reads the next buffer of two-electron integrals from the IWL file:
// first the last-buffer flag, then the count of integrals actually stored,
// then the label and value arrays.  Each read advances bufpos_, so
// successive calls walk forward through the file.
void IWL::fetch() {
    psio_->read(itap_, IWL_KEY_BUF, (char *)&(lastbuf_), sizeof(int), bufpos_, &bufpos_);
    psio_->read(itap_, IWL_KEY_BUF, (char *)&(inbuf_), sizeof(int), bufpos_, &bufpos_);
    psio_->read(itap_, IWL_KEY_BUF, (char *)labels_, ints_per_buf_ * 4 * sizeof(Label), bufpos_, &bufpos_);
    psio_->read(itap_, IWL_KEY_BUF, (char *)values_, ints_per_buf_ * sizeof(Value), bufpos_, &bufpos_);
    // Restart consumption at the first integral of the freshly read buffer.
    idx_ = 0;
}
/*!
** iwl_buf_fetch()
**
** Fetch an IWL buffer from disk
** David Sherrill, 26 June 1996
** \ingroup IWL
*/
void PSI_API iwl_buf_fetch(struct iwlbuf *Buf) {
    // C-style counterpart of IWL::fetch(): read the last-buffer flag, the
    // stored-integral count, and the label/value arrays, advancing
    // Buf->bufpos with each read.
    psio_read(Buf->itap, IWL_KEY_BUF, (char *)&(Buf->lastbuf), sizeof(int), Buf->bufpos, &Buf->bufpos);
    psio_read(Buf->itap, IWL_KEY_BUF, (char *)&(Buf->inbuf), sizeof(int), Buf->bufpos, &Buf->bufpos);
    psio_read(Buf->itap, IWL_KEY_BUF, (char *)Buf->labels, Buf->ints_per_buf * 4 * sizeof(Label), Buf->bufpos,
              &Buf->bufpos);
    psio_read(Buf->itap, IWL_KEY_BUF, (char *)Buf->values, Buf->ints_per_buf * sizeof(Value), Buf->bufpos,
              &Buf->bufpos);
    // Restart consumption at the first integral of the new buffer.
    Buf->idx = 0;
}
}
| lgpl-3.0 |
fommil/matrix-toolkits-java | src/main/java/no/uib/cipr/matrix/LowerSymmDenseMatrix.java | 2993 | /*
* Copyright (C) 2003-2006 Bjørn-Ove Heimsund
*
* This file is part of MTJ.
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation; either version 2.1 of the License, or (at your
* option) any later version.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package no.uib.cipr.matrix;
/**
* Lower symmetric dense matrix. It has the same storage layout as the
* {@link no.uib.cipr.matrix.DenseMatrix DenseMatrix}, but only refers to
* elements below or on the main diagonal. The remaining elements are never
* accessed nor changed, and is known only by symmetry.
*/
public class LowerSymmDenseMatrix extends AbstractSymmDenseMatrix {

    /**
     * Constructor for LowerSymmDenseMatrix
     *
     * @param n
     *            Size of the matrix. Since the matrix must be square, this
     *            equals both the number of rows and columns
     */
    public LowerSymmDenseMatrix(int n) {
        super(n, UpLo.Lower);
    }

    /**
     * Constructor for LowerSymmDenseMatrix
     *
     * @param A
     *            Matrix to copy. It must be a square matrix, and only the
     *            lower triangular part is copied
     */
    public LowerSymmDenseMatrix(Matrix A) {
        this(A, true);
    }

    /**
     * Constructor for LowerSymmDenseMatrix
     *
     * @param A
     *            Matrix to copy. It must be a square matrix, and only the
     *            lower triangular part is copied
     * @param deep
     *            If false, a shallow copy is made. In that case,
     *            <code>A</code> must be a dense matrix
     */
    public LowerSymmDenseMatrix(Matrix A, boolean deep) {
        super(A, deep, UpLo.Lower);
    }

    @Override
    public void add(int row, int column, double value) {
        // Only entries on or below the diagonal are stored.
        if (row >= column)
            super.add(row, column, value);
    }

    @Override
    public double get(int row, int column) {
        // Entries above the diagonal are known by symmetry: mirror the indices.
        return row >= column ? super.get(row, column) : super.get(column, row);
    }

    @Override
    public void set(int row, int column, double value) {
        // Only entries on or below the diagonal are stored.
        if (row >= column)
            super.set(row, column, value);
    }

    @Override
    public LowerSymmDenseMatrix copy() {
        return new LowerSymmDenseMatrix(this);
    }

    @Override
    void copy(Matrix A) {
        // Copy only the lower triangle (including the diagonal).
        for (MatrixEntry e : A) {
            if (e.column() <= e.row())
                set(e.row(), e.column(), e.get());
        }
    }
}
| lgpl-3.0 |
tibnor/acado | examples/multi_objective/catalyst_mixing_ennc.cpp | 4223 | /*
* This file is part of ACADO Toolkit.
*
* ACADO Toolkit -- A Toolkit for Automatic Control and Dynamic Optimization.
* Copyright (C) 2008-2014 by Boris Houska, Hans Joachim Ferreau,
* Milan Vukov, Rien Quirynen, KU Leuven.
* Developed within the Optimization in Engineering Center (OPTEC)
* under supervision of Moritz Diehl. All rights reserved.
*
* ACADO Toolkit is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* ACADO Toolkit is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with ACADO Toolkit; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/**
* \file examples/multi_objective/catalyst_mixing_ennc.cpp
* \author Filip Logist, Boris Houska, Hans Joachim Ferreau
* \date 2009
*
* Objectives:
* Maximize desired product
* Minimize catalyst A
*
* Reference:
* Adapted from
* Gunn and W.J. Thomas, 1965.
* Mass transport and chemical reaction in multifunctional catalysts.
* Chem. Eng. Sci. 20, 89.
*
*/
// IMPLEMENTATION:
// ---------------
#include <acado_optimal_control.hpp>
#include <acado_gnuplot.hpp>
/* >>> start tutorial code >>> */
int main( ){

    USING_NAMESPACE_ACADO

    // INTRODUCE THE VARIABLES:
    // -------------------------
    DifferentialState x1,x2,x3;   // x1, x2: species fractions; x3: catalyst usage integral
    Control u;                    // catalyst blending fraction, 0 <= u <= 1
    DifferentialEquation f(0.0,1.0);

    // DEFINE A DIFFERENTIAL EQUATION:
    // -------------------------------
    f << dot(x1) == -u*(x1-10.0*x2);
    f << dot(x2) == u*(x1-10.0*x2)-(1.0-u)*x2;
    f << dot(x3) == u/10.0;

    // DEFINE AN OPTIMAL CONTROL PROBLEM:
    // ----------------------------------
    OCP ocp(0.0,1.0,25);
    // Objective 0: maximize conversion (minimize its negative).
    ocp.minimizeMayerTerm( 0, -(1.0-x1-x2));
    // Objective 1: minimize catalyst usage.
    ocp.minimizeMayerTerm( 1, x3 );
    ocp.subjectTo( f );

    ocp.subjectTo( AT_START, x1 == 1.0 );
    ocp.subjectTo( AT_START, x2 == 0.0 );
    ocp.subjectTo( AT_START, x3 == 0.0 );

    ocp.subjectTo( 0.0 <= x1 <= 1.0 );
    ocp.subjectTo( 0.0 <= x2 <= 1.0 );
    ocp.subjectTo( 0.0 <= x3 <= 1.0 );
    ocp.subjectTo( 0.0 <= u <= 1.0 );

    // DEFINE A MULTI-OBJECTIVE ALGORITHM AND SOLVE THE OCP:
    // -----------------------------------------------------
    MultiObjectiveAlgorithm algorithm(ocp);
    // Enhanced Normalized Normal Constraint method with 11 Pareto points.
    algorithm.set( PARETO_FRONT_GENERATION    , PFG_ENHANCED_NORMALIZED_NORMAL_CONSTRAINT );
    algorithm.set( PARETO_FRONT_DISCRETIZATION, 11 );
    algorithm.set( HESSIAN_APPROXIMATION, EXACT_HESSIAN );
    //algorithm.set( PARETO_FRONT_HOTSTART, BT_FALSE );
    //algorithm.set( DISCRETIZATION_TYPE, SINGLE_SHOOTING );

    // Minimize individual objective function
    algorithm.solveSingleObjective(0);

    // Minimize individual objective function
    algorithm.solveSingleObjective(1);

    // Generate Pareto set
    algorithm.solve();

    algorithm.getWeights("catatlyst_mixing_ennc_weights.txt");
    algorithm.getAllDifferentialStates("catalyst_mixing_ennc_states.txt");
    algorithm.getAllControls("catalyst_mixing_ennc_controls.txt");

    // GET THE RESULT FOR THE PARETO FRONT AND PLOT IT:
    // ------------------------------------------------
    VariablesGrid paretoFront;
    algorithm.getParetoFront( paretoFront );

    GnuplotWindow window1;
    window1.addSubplot( paretoFront, "Pareto Front", "Conversion","Catalyst", PM_POINTS );
    window1.plot( );

    // PRINT INFORMATION ABOUT THE ALGORITHM:
    // --------------------------------------
    algorithm.printInfo();

    // SAVE INFORMATION:
    // -----------------
    paretoFront.print( "catalyst_mixing_ennc_pareto.txt" );

    return 0;
}
| lgpl-3.0 |
ethersphere/go-ethereum | vendor/github.com/ethereum/go-ethereum/p2p/enode/localnode.go | 7467 | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package enode
import (
"crypto/ecdsa"
"fmt"
"net"
"reflect"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
const (
	// IP tracker configuration
	iptrackMinStatements = 10               // endpoint statements required before a prediction is made
	iptrackWindow        = 5 * time.Minute  // sliding window over which endpoint statements count
	iptrackContactWindow = 10 * time.Minute // sliding window over which outbound contacts are remembered
)
// LocalNode produces the signed node record of a local node, i.e. a node run in the
// current process. Setting ENR entries via the Set method updates the record. A new version
// of the record is signed on demand when the Node method is called.
type LocalNode struct {
	cur atomic.Value // holds a non-nil node pointer while the record is up-to-date.
	id  ID
	key *ecdsa.PrivateKey
	db  *DB

	// everything below is protected by a lock
	mu        sync.Mutex
	seq       uint64               // record sequence number, persisted in db
	entries   map[string]enr.Entry // current ENR key/value pairs, keyed by ENRKey()
	endpoint4 lnEndpoint           // IPv4 endpoint state (static/fallback/predicted)
	endpoint6 lnEndpoint           // IPv6 endpoint state (static/fallback/predicted)
}
// lnEndpoint holds the inputs used to determine one IP family's endpoint:
// a statement-based predictor plus statically configured and fallback values.
type lnEndpoint struct {
	track                *netutil.IPTracker
	staticIP, fallbackIP net.IP
	fallbackUDP          int // last-resort UDP port
}
// NewLocalNode creates a local node.
func NewLocalNode(db *DB, key *ecdsa.PrivateKey) *LocalNode {
	ln := &LocalNode{
		id:      PubkeyToIDV4(&key.PublicKey),
		db:      db,
		key:     key,
		entries: make(map[string]enr.Entry),
		endpoint4: lnEndpoint{
			track: netutil.NewIPTracker(iptrackWindow, iptrackContactWindow, iptrackMinStatements),
		},
		endpoint6: lnEndpoint{
			track: netutil.NewIPTracker(iptrackWindow, iptrackContactWindow, iptrackMinStatements),
		},
	}
	// Resume from the persisted sequence number so restarts keep incrementing it.
	ln.seq = db.localSeq(ln.id)
	// Force the first Node() call to sign a fresh record.
	ln.invalidate()
	return ln
}
// Database returns the node database associated with the local node.
func (ln *LocalNode) Database() *DB {
	return ln.db
}

// Node returns the current version of the local node record.
func (ln *LocalNode) Node() *Node {
	// Fast path: the cached record is still valid (lock-free atomic load).
	n := ln.cur.Load().(*Node)
	if n != nil {
		return n
	}
	// Record was invalidated, sign a new copy.
	ln.mu.Lock()
	defer ln.mu.Unlock()
	ln.sign()
	return ln.cur.Load().(*Node)
}

// Seq returns the current sequence number of the local node record.
func (ln *LocalNode) Seq() uint64 {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	return ln.seq
}

// ID returns the local node ID.
func (ln *LocalNode) ID() ID {
	return ln.id
}
// Set puts the given entry into the local record, overwriting any existing value.
// Use Set*IP and SetFallbackUDP to set IP addresses and UDP port, otherwise they'll
// be overwritten by the endpoint predictor.
// Safe for concurrent use; acquires ln.mu around the unexported set.
func (ln *LocalNode) Set(e enr.Entry) {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	ln.set(e)
}
// set stores e under its ENR key and invalidates the cached record, but only
// when the value actually changed. Callers must hold ln.mu.
func (ln *LocalNode) set(e enr.Entry) {
	key := e.ENRKey()
	if old, ok := ln.entries[key]; ok && reflect.DeepEqual(old, e) {
		return // unchanged — keep the currently signed record
	}
	ln.entries[key] = e
	ln.invalidate()
}
// Delete removes the given entry from the local record.
// Safe for concurrent use; acquires ln.mu around the unexported delete.
func (ln *LocalNode) Delete(e enr.Entry) {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	ln.delete(e)
}
// delete removes the entry with e's ENR key, invalidating the cached record
// if the entry was present. Callers must hold ln.mu.
func (ln *LocalNode) delete(e enr.Entry) {
	key := e.ENRKey()
	if _, ok := ln.entries[key]; !ok {
		return // nothing to remove
	}
	delete(ln.entries, key)
	ln.invalidate()
}
// endpointForIP selects the endpoint tracker matching ip's address family.
func (ln *LocalNode) endpointForIP(ip net.IP) *lnEndpoint {
	if ip.To4() == nil {
		return &ln.endpoint6
	}
	return &ln.endpoint4
}
// SetStaticIP sets the local IP to the given one unconditionally.
// This disables endpoint prediction.
func (ln *LocalNode) SetStaticIP(ip net.IP) {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	ln.endpointForIP(ip).staticIP = ip
	ln.updateEndpoints()
}

// SetFallbackIP sets the last-resort IP address. This address is used
// if no endpoint prediction can be made and no static IP is set.
func (ln *LocalNode) SetFallbackIP(ip net.IP) {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	ln.endpointForIP(ip).fallbackIP = ip
	ln.updateEndpoints()
}

// SetFallbackUDP sets the last-resort UDP-on-IPv4 port. This port is used
// if no endpoint prediction can be made.
func (ln *LocalNode) SetFallbackUDP(port int) {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	// The same fallback port applies to both address families.
	ln.endpoint4.fallbackUDP = port
	ln.endpoint6.fallbackUDP = port
	ln.updateEndpoints()
}

// UDPEndpointStatement should be called whenever a statement about the local node's
// UDP endpoint is received. It feeds the local endpoint predictor.
func (ln *LocalNode) UDPEndpointStatement(fromaddr, endpoint *net.UDPAddr) {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	ln.endpointForIP(endpoint.IP).track.AddStatement(fromaddr.String(), endpoint.String())
	ln.updateEndpoints()
}

// UDPContact should be called whenever the local node has announced itself to another node
// via UDP. It feeds the local endpoint predictor.
func (ln *LocalNode) UDPContact(toaddr *net.UDPAddr) {
	ln.mu.Lock()
	defer ln.mu.Unlock()
	ln.endpointForIP(toaddr.IP).track.AddContact(toaddr.String())
	ln.updateEndpoints()
}
// updateEndpoints updates the record with predicted endpoints.
// Callers must hold ln.mu.
func (ln *LocalNode) updateEndpoints() {
	ip4, udp4 := ln.endpoint4.get()
	ip6, udp6 := ln.endpoint6.get()
	// Set or remove the IP entries depending on whether a usable address is
	// known; unspecified addresses (0.0.0.0, ::) are treated as unknown.
	if ip4 != nil && !ip4.IsUnspecified() {
		ln.set(enr.IPv4(ip4))
	} else {
		ln.delete(enr.IPv4{})
	}
	if ip6 != nil && !ip6.IsUnspecified() {
		ln.set(enr.IPv6(ip6))
	} else {
		ln.delete(enr.IPv6{})
	}
	// "udp" carries the IPv4 port; "udp6" is only recorded when it differs.
	if udp4 != 0 {
		ln.set(enr.UDP(udp4))
	} else {
		ln.delete(enr.UDP(0))
	}
	if udp6 != 0 && udp6 != udp4 {
		ln.set(enr.UDP6(udp6))
	} else {
		ln.delete(enr.UDP6(0))
	}
}
// get returns the endpoint with highest precedence: a static IP always wins,
// otherwise a successful prediction overrides the fallback IP and port.
func (e *lnEndpoint) get() (newIP net.IP, newPort int) {
	newIP, newPort = e.fallbackIP, e.fallbackUDP
	if e.staticIP != nil {
		return e.staticIP, newPort
	}
	if ip, port := predictAddr(e.track); ip != nil {
		return ip, port
	}
	return newIP, newPort
}
// predictAddr wraps IPTracker.PredictEndpoint, converting from its string-based
// endpoint representation to IP and port types. It returns (nil, 0) when no
// prediction is available or the predicted endpoint cannot be parsed.
// Previously, parse errors from SplitHostPort/ParseIP/Atoi were silently
// ignored, which could yield a partially-valid result (e.g. an IP with port 0).
func predictAddr(t *netutil.IPTracker) (net.IP, int) {
	ep := t.PredictEndpoint()
	if ep == "" {
		return nil, 0
	}
	ipString, portString, err := net.SplitHostPort(ep)
	if err != nil {
		return nil, 0
	}
	ip := net.ParseIP(ipString)
	if ip == nil {
		return nil, 0
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, 0
	}
	return ip, port
}
// invalidate clears the cached record so the next Node() call re-signs it.
func (ln *LocalNode) invalidate() {
	ln.cur.Store((*Node)(nil))
}
// sign builds and signs a fresh record from the current entries and stores it
// in ln.cur. A no-op when the cached record is still valid. Callers must hold
// ln.mu. Panics if signing or record verification fails, since both indicate
// an unrecoverable programming error.
func (ln *LocalNode) sign() {
	if n := ln.cur.Load().(*Node); n != nil {
		return // no changes
	}

	var r enr.Record
	for _, e := range ln.entries {
		r.Set(e)
	}
	// Every new record version gets a higher sequence number, persisted in the DB.
	ln.bumpSeq()
	r.SetSeq(ln.seq)
	if err := SignV4(&r, ln.key); err != nil {
		panic(fmt.Errorf("enode: can't sign record: %v", err))
	}
	n, err := New(ValidSchemes, &r)
	if err != nil {
		panic(fmt.Errorf("enode: can't verify local record: %v", err))
	}
	ln.cur.Store(n)
	log.Info("New local node record", "seq", ln.seq, "id", n.ID(), "ip", n.IP(), "udp", n.UDP(), "tcp", n.TCP())
}
// bumpSeq increments the record sequence number and persists it in the
// database. Callers must hold ln.mu.
func (ln *LocalNode) bumpSeq() {
	ln.seq++
	ln.db.storeLocalSeq(ln.id, ln.seq)
}
| lgpl-3.0 |
ssangkong/NVRAM_KWU | qt-everywhere-opensource-src-4.7.4/src/3rdparty/webkit/WebCore/generated/JSHTMLSelectElement.cpp | 23613 | /*
This file is part of the WebKit open source project.
This file has been generated by generate-bindings.pl. DO NOT MODIFY!
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public License
along with this library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "JSHTMLSelectElement.h"
#include "ExceptionCode.h"
#include "HTMLFormElement.h"
#include "HTMLOptionsCollection.h"
#include "HTMLSelectElement.h"
#include "JSHTMLElement.h"
#include "JSHTMLFormElement.h"
#include "JSHTMLOptionsCollection.h"
#include "JSNode.h"
#include "JSValidityState.h"
#include "KURL.h"
#include "Node.h"
#include "ValidityState.h"
#include <runtime/Error.h>
#include <runtime/JSNumberCell.h>
#include <runtime/JSString.h>
#include <runtime/PropertyNameArray.h>
#include <wtf/GetPtr.h>
using namespace JSC;
namespace WebCore {
ASSERT_CLASS_FITS_IN_CELL(JSHTMLSelectElement);
/* Hash table */
// Generated property lookup tables (generate-bindings.pl output — do not hand-edit
// beyond comments). Each row maps a JS property name to its attributes and
// getter/setter function pointers; a ReadOnly entry has a null setter slot.
static const HashTableValue JSHTMLSelectElementTableValues[16] =
{
    { "type", DontDelete|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementType), (intptr_t)0 },
    { "selectedIndex", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementSelectedIndex), (intptr_t)setJSHTMLSelectElementSelectedIndex },
    { "value", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementValue), (intptr_t)setJSHTMLSelectElementValue },
    { "length", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementLength), (intptr_t)setJSHTMLSelectElementLength },
    { "form", DontDelete|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementForm), (intptr_t)0 },
    { "validity", DontDelete|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementValidity), (intptr_t)0 },
    { "willValidate", DontDelete|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementWillValidate), (intptr_t)0 },
    { "validationMessage", DontDelete|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementValidationMessage), (intptr_t)0 },
    { "options", DontDelete|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementOptions), (intptr_t)0 },
    { "disabled", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementDisabled), (intptr_t)setJSHTMLSelectElementDisabled },
    { "autofocus", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementAutofocus), (intptr_t)setJSHTMLSelectElementAutofocus },
    { "multiple", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementMultiple), (intptr_t)setJSHTMLSelectElementMultiple },
    { "name", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementName), (intptr_t)setJSHTMLSelectElementName },
    { "size", DontDelete, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementSize), (intptr_t)setJSHTMLSelectElementSize },
    { "constructor", DontEnum|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsHTMLSelectElementConstructor), (intptr_t)0 },
    { 0, 0, 0, 0 }
};

static JSC_CONST_HASHTABLE HashTable JSHTMLSelectElementTable =
#if ENABLE(PERFECT_HASH_SIZE)
    { 127, JSHTMLSelectElementTableValues, 0 };
#else
    { 35, 31, JSHTMLSelectElementTableValues, 0 };
#endif

/* Hash table for constructor */

// The constructor object exposes no own properties; the table holds only the
// terminating sentinel row.
static const HashTableValue JSHTMLSelectElementConstructorTableValues[1] =
{
    { 0, 0, 0, 0 }
};

static JSC_CONST_HASHTABLE HashTable JSHTMLSelectElementConstructorTable =
#if ENABLE(PERFECT_HASH_SIZE)
    { 0, JSHTMLSelectElementConstructorTableValues, 0 };
#else
    { 1, 0, JSHTMLSelectElementConstructorTableValues, 0 };
#endif
// JS wrapper for the HTMLSelectElement interface-object ("constructor").
// Generated code: it wires the prototype property and delegates property
// lookup to the static constructor hash table.
class JSHTMLSelectElementConstructor : public DOMConstructorObject {
public:
    JSHTMLSelectElementConstructor(ExecState* exec, JSDOMGlobalObject* globalObject)
        : DOMConstructorObject(JSHTMLSelectElementConstructor::createStructure(globalObject->objectPrototype()), globalObject)
    {
        putDirect(exec->propertyNames().prototype, JSHTMLSelectElementPrototype::self(exec, globalObject), None);
    }
    virtual bool getOwnPropertySlot(ExecState*, const Identifier&, PropertySlot&);
    virtual bool getOwnPropertyDescriptor(ExecState*, const Identifier&, PropertyDescriptor&);
    virtual const ClassInfo* classInfo() const { return &s_info; }
    static const ClassInfo s_info;
    static PassRefPtr<Structure> createStructure(JSValue proto)
    {
        return Structure::create(proto, TypeInfo(ObjectType, StructureFlags), AnonymousSlotCount);
    }

protected:
    static const unsigned StructureFlags = OverridesGetOwnPropertySlot | ImplementsHasInstance | DOMConstructorObject::StructureFlags;
};

const ClassInfo JSHTMLSelectElementConstructor::s_info = { "HTMLSelectElementConstructor", 0, &JSHTMLSelectElementConstructorTable, 0 };

// Property lookups on the constructor consult only the static hash table.
bool JSHTMLSelectElementConstructor::getOwnPropertySlot(ExecState* exec, const Identifier& propertyName, PropertySlot& slot)
{
    return getStaticValueSlot<JSHTMLSelectElementConstructor, DOMObject>(exec, &JSHTMLSelectElementConstructorTable, this, propertyName, slot);
}

bool JSHTMLSelectElementConstructor::getOwnPropertyDescriptor(ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor)
{
    return getStaticValueDescriptor<JSHTMLSelectElementConstructor, DOMObject>(exec, &JSHTMLSelectElementConstructorTable, this, propertyName, descriptor);
}
/* Hash table for prototype */

// Prototype method table: name, attributes, native function pointer, and the
// declared argument count (the value JS sees as fn.length).
static const HashTableValue JSHTMLSelectElementPrototypeTableValues[7] =
{
    { "checkValidity", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsHTMLSelectElementPrototypeFunctionCheckValidity), (intptr_t)0 },
    { "setCustomValidity", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsHTMLSelectElementPrototypeFunctionSetCustomValidity), (intptr_t)1 },
    { "add", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsHTMLSelectElementPrototypeFunctionAdd), (intptr_t)2 },
    { "remove", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsHTMLSelectElementPrototypeFunctionRemove), (intptr_t)0 },
    { "item", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsHTMLSelectElementPrototypeFunctionItem), (intptr_t)1 },
    { "namedItem", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsHTMLSelectElementPrototypeFunctionNamedItem), (intptr_t)1 },
    { 0, 0, 0, 0 }
};

static JSC_CONST_HASHTABLE HashTable JSHTMLSelectElementPrototypeTable =
#if ENABLE(PERFECT_HASH_SIZE)
    { 15, JSHTMLSelectElementPrototypeTableValues, 0 };
#else
    { 16, 15, JSHTMLSelectElementPrototypeTableValues, 0 };
#endif

const ClassInfo JSHTMLSelectElementPrototype::s_info = { "HTMLSelectElementPrototype", 0, &JSHTMLSelectElementPrototypeTable, 0 };

// self returns (creating on first use) the shared prototype object for this
// global object.
JSObject* JSHTMLSelectElementPrototype::self(ExecState* exec, JSGlobalObject* globalObject)
{
    return getDOMPrototype<JSHTMLSelectElement>(exec, globalObject);
}

bool JSHTMLSelectElementPrototype::getOwnPropertySlot(ExecState* exec, const Identifier& propertyName, PropertySlot& slot)
{
    return getStaticFunctionSlot<JSObject>(exec, &JSHTMLSelectElementPrototypeTable, this, propertyName, slot);
}

bool JSHTMLSelectElementPrototype::getOwnPropertyDescriptor(ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor)
{
    return getStaticFunctionDescriptor<JSObject>(exec, &JSHTMLSelectElementPrototypeTable, this, propertyName, descriptor);
}
const ClassInfo JSHTMLSelectElement::s_info = { "HTMLSelectElement", &JSHTMLElement::s_info, &JSHTMLSelectElementTable, 0 };

JSHTMLSelectElement::JSHTMLSelectElement(NonNullPassRefPtr<Structure> structure, JSDOMGlobalObject* globalObject, PassRefPtr<HTMLSelectElement> impl)
    : JSHTMLElement(structure, globalObject, impl)
{
}

JSObject* JSHTMLSelectElement::createPrototype(ExecState* exec, JSGlobalObject* globalObject)
{
    return new (exec) JSHTMLSelectElementPrototype(JSHTMLSelectElementPrototype::createStructure(JSHTMLElementPrototype::self(exec, globalObject)));
}

// Named property lookup: static table entries win, then numeric indices below
// the option count (select[i] access), then the base class chain.
bool JSHTMLSelectElement::getOwnPropertySlot(ExecState* exec, const Identifier& propertyName, PropertySlot& slot)
{
    const HashEntry* entry = JSHTMLSelectElementTable.entry(exec, propertyName);
    if (entry) {
        slot.setCustom(this, entry->propertyGetter());
        return true;
    }
    bool ok;
    unsigned index = propertyName.toUInt32(&ok, false);
    if (ok && index < static_cast<HTMLSelectElement*>(impl())->length()) {
        slot.setCustomIndex(this, index, indexGetter);
        return true;
    }
    return getStaticValueSlot<JSHTMLSelectElement, Base>(exec, &JSHTMLSelectElementTable, this, propertyName, slot);
}

// Same lookup order as getOwnPropertySlot, but filling a PropertyDescriptor.
bool JSHTMLSelectElement::getOwnPropertyDescriptor(ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor)
{
    const HashEntry* entry = JSHTMLSelectElementTable.entry(exec, propertyName);
    if (entry) {
        PropertySlot slot;
        slot.setCustom(this, entry->propertyGetter());
        descriptor.setDescriptor(slot.getValue(exec, propertyName), entry->attributes());
        return true;
    }
    bool ok;
    unsigned index = propertyName.toUInt32(&ok, false);
    if (ok && index < static_cast<HTMLSelectElement*>(impl())->length()) {
        PropertySlot slot;
        slot.setCustomIndex(this, index, indexGetter);
        descriptor.setDescriptor(slot.getValue(exec, propertyName), DontDelete);
        return true;
    }
    return getStaticValueDescriptor<JSHTMLSelectElement, Base>(exec, &JSHTMLSelectElementTable, this, propertyName, descriptor);
}

// Indexed overload: in-range indices resolve directly; out-of-range falls back
// to the named-property path.
bool JSHTMLSelectElement::getOwnPropertySlot(ExecState* exec, unsigned propertyName, PropertySlot& slot)
{
    if (propertyName < static_cast<HTMLSelectElement*>(impl())->length()) {
        slot.setCustomIndex(this, propertyName, indexGetter);
        return true;
    }
    return getOwnPropertySlot(exec, Identifier::from(exec, propertyName), slot);
}
// Attribute getters. Each unwraps the JS wrapper, forwards to the C++
// HTMLSelectElement implementation, and converts the result back to a JSValue.
// Bodies are mechanically generated and intentionally uniform.

JSValue jsHTMLSelectElementType(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsString(exec, imp->type());
    return result;
}

JSValue jsHTMLSelectElementSelectedIndex(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsNumber(exec, imp->selectedIndex());
    return result;
}

JSValue jsHTMLSelectElementValue(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsString(exec, imp->value());
    return result;
}

JSValue jsHTMLSelectElementLength(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsNumber(exec, imp->length());
    return result;
}

JSValue jsHTMLSelectElementForm(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = toJS(exec, castedThis->globalObject(), WTF::getPtr(imp->form()));
    return result;
}

JSValue jsHTMLSelectElementValidity(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = toJS(exec, castedThis->globalObject(), WTF::getPtr(imp->validity()));
    return result;
}

JSValue jsHTMLSelectElementWillValidate(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsBoolean(imp->willValidate());
    return result;
}

JSValue jsHTMLSelectElementValidationMessage(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsString(exec, imp->validationMessage());
    return result;
}

JSValue jsHTMLSelectElementOptions(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = toJS(exec, castedThis->globalObject(), WTF::getPtr(imp->options()));
    return result;
}

JSValue jsHTMLSelectElementDisabled(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsBoolean(imp->disabled());
    return result;
}

JSValue jsHTMLSelectElementAutofocus(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsBoolean(imp->autofocus());
    return result;
}

JSValue jsHTMLSelectElementMultiple(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsBoolean(imp->multiple());
    return result;
}

JSValue jsHTMLSelectElementName(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsString(exec, imp->name());
    return result;
}

JSValue jsHTMLSelectElementSize(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* castedThis = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    UNUSED_PARAM(exec);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThis->impl());
    JSValue result = jsNumber(exec, imp->size());
    return result;
}

// Lazily creates and returns the cached constructor object.
JSValue jsHTMLSelectElementConstructor(ExecState* exec, JSValue slotBase, const Identifier&)
{
    JSHTMLSelectElement* domObject = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    return JSHTMLSelectElement::getConstructor(exec, domObject->globalObject());
}
// Named property write: numeric names are routed to the indexed setter
// (select[i] = option), everything else to the static-table setters.
void JSHTMLSelectElement::put(ExecState* exec, const Identifier& propertyName, JSValue value, PutPropertySlot& slot)
{
    bool ok;
    unsigned index = propertyName.toUInt32(&ok, false);
    if (ok) {
        indexSetter(exec, index, value);
        return;
    }
    lookupPut<JSHTMLSelectElement, Base>(exec, propertyName, value, &JSHTMLSelectElementTable, this, slot);
}

void JSHTMLSelectElement::put(ExecState* exec, unsigned propertyName, JSValue value)
{
    indexSetter(exec, propertyName, value);
    return;
}

// Attribute setters: convert the JSValue and forward to the C++ implementation.

void setJSHTMLSelectElementSelectedIndex(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    imp->setSelectedIndex(value.toInt32(exec));
}

void setJSHTMLSelectElementValue(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    imp->setValue(valueToStringWithNullCheck(exec, value));
}

// length assignment can raise a DOM exception, propagated via setDOMException.
void setJSHTMLSelectElementLength(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    ExceptionCode ec = 0;
    imp->setLength(value.toInt32(exec), ec);
    setDOMException(exec, ec);
}

void setJSHTMLSelectElementDisabled(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    imp->setDisabled(value.toBoolean(exec));
}

void setJSHTMLSelectElementAutofocus(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    imp->setAutofocus(value.toBoolean(exec));
}

void setJSHTMLSelectElementMultiple(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    imp->setMultiple(value.toBoolean(exec));
}

void setJSHTMLSelectElementName(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    imp->setName(valueToStringWithNullCheck(exec, value));
}

void setJSHTMLSelectElementSize(ExecState* exec, JSObject* thisObject, JSValue value)
{
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(thisObject);
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    imp->setSize(value.toInt32(exec));
}

// Enumeration includes one numeric name per option, plus inherited names.
void JSHTMLSelectElement::getOwnPropertyNames(ExecState* exec, PropertyNameArray& propertyNames, EnumerationMode mode)
{
    for (unsigned i = 0; i < static_cast<HTMLSelectElement*>(impl())->length(); ++i)
        propertyNames.add(Identifier::from(exec, i));
     Base::getOwnPropertyNames(exec, propertyNames, mode);
}
JSValue JSHTMLSelectElement::getConstructor(ExecState* exec, JSGlobalObject* globalObject)
{
    return getDOMConstructor<JSHTMLSelectElementConstructor>(exec, static_cast<JSDOMGlobalObject*>(globalObject));
}

// Prototype method implementations. Each validates the |this| value against
// the wrapper class (throwing TypeError otherwise) before forwarding to the
// C++ implementation.

JSValue JSC_HOST_CALL jsHTMLSelectElementPrototypeFunctionCheckValidity(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSHTMLSelectElement::s_info))
        return throwError(exec, TypeError);
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(asObject(thisValue));
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());

    JSC::JSValue result = jsBoolean(imp->checkValidity());
    return result;
}

JSValue JSC_HOST_CALL jsHTMLSelectElementPrototypeFunctionSetCustomValidity(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSHTMLSelectElement::s_info))
        return throwError(exec, TypeError);
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(asObject(thisValue));
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    const UString& error = valueToStringWithUndefinedOrNullCheck(exec, args.at(0));

    imp->setCustomValidity(error);
    return jsUndefined();
}

JSValue JSC_HOST_CALL jsHTMLSelectElementPrototypeFunctionAdd(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSHTMLSelectElement::s_info))
        return throwError(exec, TypeError);
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(asObject(thisValue));
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    ExceptionCode ec = 0;
    HTMLElement* element = toHTMLElement(args.at(0));
    HTMLElement* before = toHTMLElement(args.at(1));

    imp->add(element, before, ec);
    setDOMException(exec, ec);
    return jsUndefined();
}

// remove() has custom behavior; it is delegated to the hand-written wrapper.
JSValue JSC_HOST_CALL jsHTMLSelectElementPrototypeFunctionRemove(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSHTMLSelectElement::s_info))
        return throwError(exec, TypeError);
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(asObject(thisValue));
    return castedThisObj->remove(exec, args);
}

JSValue JSC_HOST_CALL jsHTMLSelectElementPrototypeFunctionItem(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSHTMLSelectElement::s_info))
        return throwError(exec, TypeError);
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(asObject(thisValue));
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    // Negative indices are rejected with INDEX_SIZE_ERR per the IDL's
    // [IsIndex] conversion rules.
    int index = args.at(0).toInt32(exec);
    if (index < 0) {
        setDOMException(exec, INDEX_SIZE_ERR);
        return jsUndefined();
    }

    JSC::JSValue result = toJS(exec, castedThisObj->globalObject(), WTF::getPtr(imp->item(index)));
    return result;
}

JSValue JSC_HOST_CALL jsHTMLSelectElementPrototypeFunctionNamedItem(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSHTMLSelectElement::s_info))
        return throwError(exec, TypeError);
    JSHTMLSelectElement* castedThisObj = static_cast<JSHTMLSelectElement*>(asObject(thisValue));
    HTMLSelectElement* imp = static_cast<HTMLSelectElement*>(castedThisObj->impl());
    const UString& name = args.at(0).toString(exec);

    JSC::JSValue result = toJS(exec, castedThisObj->globalObject(), WTF::getPtr(imp->namedItem(name)));
    return result;
}

// Returns the wrapper for the option element at the given index.
JSValue JSHTMLSelectElement::indexGetter(ExecState* exec, JSValue slotBase, unsigned index)
{
    JSHTMLSelectElement* thisObj = static_cast<JSHTMLSelectElement*>(asObject(slotBase));
    return toJS(exec, thisObj->globalObject(), static_cast<HTMLSelectElement*>(thisObj->impl())->item(index));
}

}
| lgpl-3.0 |
chunlinyao/fop | fop-core/src/main/java/org/apache/fop/render/pdf/PDFImageHandlerSVG.java | 11775 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* $Id$ */
package org.apache.fop.render.pdf;
import java.awt.Color;
import java.awt.Rectangle;
import java.awt.geom.AffineTransform;
import java.io.IOException;
import org.w3c.dom.Document;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.batik.anim.dom.SVGDOMImplementation;
import org.apache.batik.bridge.BridgeContext;
import org.apache.batik.bridge.GVTBuilder;
import org.apache.batik.gvt.GraphicsNode;
import org.apache.batik.util.SVGConstants;
import org.apache.xmlgraphics.image.loader.Image;
import org.apache.xmlgraphics.image.loader.ImageFlavor;
import org.apache.xmlgraphics.image.loader.impl.ImageXMLDOM;
import org.apache.xmlgraphics.util.UnitConv;
import org.apache.fop.apps.FOUserAgent;
import org.apache.fop.events.EventBroadcaster;
import org.apache.fop.image.loader.batik.BatikImageFlavors;
import org.apache.fop.image.loader.batik.BatikUtil;
import org.apache.fop.pdf.TransparencyDisallowedException;
import org.apache.fop.render.ImageHandler;
import org.apache.fop.render.ImageHandlerUtil;
import org.apache.fop.render.RenderingContext;
import org.apache.fop.render.pdf.PDFLogicalStructureHandler.MarkedContentInfo;
import org.apache.fop.render.ps.PSImageHandlerSVG;
import org.apache.fop.svg.PDFAElementBridge;
import org.apache.fop.svg.PDFBridgeContext;
import org.apache.fop.svg.PDFGraphics2D;
import org.apache.fop.svg.SVGEventProducer;
import org.apache.fop.svg.SVGUserAgent;
import org.apache.fop.svg.font.FOPFontFamilyResolverImpl;
/**
* Image Handler implementation which handles SVG images.
*/
public class PDFImageHandlerSVG implements ImageHandler {

    /** logging instance */
    private static Log log = LogFactory.getLog(PDFImageHandlerSVG.class);

    /** {@inheritDoc} */
    public void handleImage(RenderingContext context,
                Image image, Rectangle pos)
            throws IOException {
        PDFRenderingContext pdfContext = (PDFRenderingContext)context;
        PDFContentGenerator generator = pdfContext.getGenerator();
        ImageXMLDOM imageSVG = (ImageXMLDOM)image;

        FOUserAgent userAgent = context.getUserAgent();
        final float deviceResolution = userAgent.getTargetResolution();
        if (log.isDebugEnabled()) {
            log.debug("Generating SVG at " + deviceResolution + "dpi.");
        }

        final float uaResolution = userAgent.getSourceResolution();
        SVGUserAgent ua = new SVGUserAgent(userAgent, new FOPFontFamilyResolverImpl(pdfContext.getFontInfo()),
                new AffineTransform());

        GVTBuilder builder = new GVTBuilder();

        //Controls whether text painted by Batik is generated using text or path operations
        boolean strokeText = PSImageHandlerSVG.shouldStrokeText(imageSVG.getDocument().getChildNodes());
        //TODO connect with configuration elsewhere.

        BridgeContext ctx = new PDFBridgeContext(ua,
                (strokeText ? null : pdfContext.getFontInfo()),
                userAgent.getImageManager(),
                userAgent.getImageSessionContext(),
                new AffineTransform());

        //Cloning SVG DOM as Batik attaches non-thread-safe facilities (like the CSS engine)
        //to it.
        Document clonedDoc = BatikUtil.cloneSVGDocument(imageSVG.getDocument());

        //Build the GVT tree; on failure report through the event system and bail out
        //rather than propagating the exception (the page is still rendered without
        //the image).
        GraphicsNode root;
        try {
            root = builder.build(ctx, clonedDoc);
        } catch (Exception e) {
            SVGEventProducer eventProducer = SVGEventProducer.Provider.get(
                    context.getUserAgent().getEventBroadcaster());
            eventProducer.svgNotBuilt(this, e, image.getInfo().getOriginalURI());
            return;
        }
        // get the 'width' and 'height' attributes of the SVG document
        float w = image.getSize().getWidthMpt();
        float h = image.getSize().getHeightMpt();

        float sx = pos.width / w;
        float sy = pos.height / h;

        //Scaling and translation for the bounding box of the image
        AffineTransform scaling = new AffineTransform(
                sx, 0, 0, sy, pos.x / 1000f, pos.y / 1000f);
        double sourceScale = UnitConv.IN2PT / uaResolution;
        scaling.scale(sourceScale, sourceScale);

        //Scale for higher resolution on-the-fly images from Batik
        AffineTransform resolutionScaling = new AffineTransform();
        double targetScale = uaResolution / deviceResolution;
        resolutionScaling.scale(targetScale, targetScale);
        resolutionScaling.scale(1.0 / sx, 1.0 / sy);

        //Transformation matrix that establishes the local coordinate system for the SVG graphic
        //in relation to the current coordinate system
        AffineTransform imageTransform = new AffineTransform();
        imageTransform.concatenate(scaling);
        imageTransform.concatenate(resolutionScaling);

        if (log.isTraceEnabled()) {
            log.trace("nat size: " + w + "/" + h);
            log.trace("req size: " + pos.width + "/" + pos.height);
            log.trace("source res: " + uaResolution + ", targetRes: " + deviceResolution
                    + " --> target scaling: " + targetScale);
            log.trace(image.getSize());
            log.trace("sx: " + sx + ", sy: " + sy);
            log.trace("scaling: " + scaling);
            log.trace("resolution scaling: " + resolutionScaling);
            //Fixed: previously logged resolutionScaling a second time instead of
            //the combined matrix.
            log.trace("image transform: " + imageTransform);
        }

        /*
         * Clip to the svg area.
         * Note: To have the svg overlay (under) a text area then use
         * an fo:block-container
         */
        if (log.isTraceEnabled()) {
            generator.comment("SVG setup");
        }
        generator.saveGraphicsState();
        if (context.getUserAgent().isAccessibilityEnabled()) {
            MarkedContentInfo mci = pdfContext.getMarkedContentInfo();
            generator.beginMarkedContentSequence(mci.tag, mci.mcid);
        }
        generator.updateColor(Color.black, false, null);
        generator.updateColor(Color.black, true, null);

        if (!scaling.isIdentity()) {
            if (log.isTraceEnabled()) {
                generator.comment("viewbox");
            }
            generator.add(CTMHelper.toPDFString(scaling, false) + " cm\n");
        }

        //SVGSVGElement svg = ((SVGDocument)doc).getRootElement();

        PDFGraphics2D graphics = new PDFGraphics2D(true, pdfContext.getFontInfo(),
                generator.getDocument(),
                generator.getResourceContext(), pdfContext.getPage().makeReference(),
                "", 0, new TransparencyIgnoredEventListener(pdfContext, imageSVG));
        graphics.setGraphicContext(new org.apache.xmlgraphics.java2d.GraphicContext());

        if (!resolutionScaling.isIdentity()) {
            if (log.isTraceEnabled()) {
                generator.comment("resolution scaling for " + uaResolution
                        + " -> " + deviceResolution);
            }
            generator.add(
                    CTMHelper.toPDFString(resolutionScaling, false) + " cm\n");
            graphics.scale(
                    1.0 / resolutionScaling.getScaleX(),
                    1.0 / resolutionScaling.getScaleY());
        }

        if (log.isTraceEnabled()) {
            generator.comment("SVG start");
        }

        //Save state and update coordinate system for the SVG image
        generator.getState().save();
        generator.getState().concatenate(imageTransform);

        //Now that we have the complete transformation matrix for the image, we can update the
        //transformation matrix for the AElementBridge.
        PDFAElementBridge aBridge = (PDFAElementBridge)ctx.getBridge(
                SVGDOMImplementation.SVG_NAMESPACE_URI, SVGConstants.SVG_A_TAG);
        aBridge.getCurrentTransform().setTransform(generator.getState().getTransform());

        graphics.setPaintingState(generator.getState());
        graphics.setOutputStream(generator.getOutputStream());
        try {
            root.paint(graphics);
            ctx.dispose();
            generator.add(graphics.getString());
        } catch (TransparencyDisallowedException e) {
            SVGEventProducer eventProducer = SVGEventProducer.Provider.get(
                    context.getUserAgent().getEventBroadcaster());
            eventProducer.bitmapWithTransparency(this, e.getProfile(), image.getInfo().getOriginalURI());
        } catch (Exception e) {
            SVGEventProducer eventProducer = SVGEventProducer.Provider.get(
                    context.getUserAgent().getEventBroadcaster());
            eventProducer.svgRenderingError(this, e, image.getInfo().getOriginalURI());
        }
        generator.getState().restore();
        if (context.getUserAgent().isAccessibilityEnabled()) {
            generator.restoreGraphicsStateAccess();
        } else {
            generator.restoreGraphicsState();
        }
        if (log.isTraceEnabled()) {
            generator.comment("SVG end");
        }
    }

    /**
     * Reports (once per image) that transparency in the SVG had to be ignored,
     * e.g. for PDF/A-1 output profiles.
     */
    private static class TransparencyIgnoredEventListener
            implements PDFGraphics2D.TransparencyIgnoredEventListener {

        private final RenderingContext context;
        private final Image image;

        public TransparencyIgnoredEventListener(RenderingContext context, Image image) {
            this.context = context;
            this.image = image;
        }

        //Guards against emitting the same warning repeatedly for one image.
        private boolean warningIssued;

        public void transparencyIgnored(Object pdfProfile) {
            if (!warningIssued) {
                EventBroadcaster broadcaster = context.getUserAgent().getEventBroadcaster();
                SVGEventProducer producer = SVGEventProducer.Provider.get(broadcaster);
                producer.transparencyIgnored(this, pdfProfile, image.getInfo().getOriginalURI());
                warningIssued = true;
            }
        }
    }

    /** {@inheritDoc} */
    public int getPriority() {
        return 400;
    }

    /** {@inheritDoc} */
    public Class getSupportedImageClass() {
        return ImageXMLDOM.class;
    }

    /** {@inheritDoc} */
    public ImageFlavor[] getSupportedImageFlavors() {
        return new ImageFlavor[] {
            BatikImageFlavors.SVG_DOM
        };
    }

    /** {@inheritDoc} */
    public boolean isCompatible(RenderingContext targetContext, Image image) {
        boolean supported = (image == null
                || (image instanceof ImageXMLDOM
                        && image.getFlavor().isCompatible(BatikImageFlavors.SVG_DOM)))
                && targetContext instanceof PDFRenderingContext;
        if (supported) {
            String mode = (String)targetContext.getHint(ImageHandlerUtil.CONVERSION_MODE);
            if (ImageHandlerUtil.isConversionModeBitmap(mode)) {
                //Disabling this image handler automatically causes a bitmap to be generated
                return false;
            }
        }
        return supported;
    }

}
| apache-2.0 |
11xor6/presto | core/trino-main/src/main/java/io/trino/sql/planner/planprinter/OperatorHashCollisionsStats.java | 2275 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.sql.planner.planprinter;
class OperatorHashCollisionsStats
{
private final double weightedHashCollisions;
private final double weightedSumSquaredHashCollisions;
private final double weightedExpectedHashCollisions;
private final long inputPositions;
public OperatorHashCollisionsStats(
double weightedHashCollisions,
double weightedSumSquaredHashCollisions,
double weightedExpectedHashCollisions,
long inputPositions)
{
this.weightedHashCollisions = weightedHashCollisions;
this.weightedSumSquaredHashCollisions = weightedSumSquaredHashCollisions;
this.weightedExpectedHashCollisions = weightedExpectedHashCollisions;
this.inputPositions = inputPositions;
}
public double getWeightedHashCollisions()
{
return weightedHashCollisions;
}
public double getWeightedSumSquaredHashCollisions()
{
return weightedSumSquaredHashCollisions;
}
public double getWeightedExpectedHashCollisions()
{
return weightedExpectedHashCollisions;
}
public long getInputPositions()
{
return inputPositions;
}
public static OperatorHashCollisionsStats merge(OperatorHashCollisionsStats first, OperatorHashCollisionsStats second)
{
return new OperatorHashCollisionsStats(
first.weightedHashCollisions + second.weightedHashCollisions,
first.weightedSumSquaredHashCollisions + second.weightedSumSquaredHashCollisions,
first.weightedExpectedHashCollisions + second.weightedExpectedHashCollisions,
first.inputPositions + second.inputPositions);
}
}
| apache-2.0 |
tillrohrmann/flink | flink-runtime/src/test/java/org/apache/flink/runtime/dispatcher/runner/TestingDispatcherGatewayService.java | 4974 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.dispatcher.runner;
import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.clusterframework.ApplicationStatus;
import org.apache.flink.runtime.dispatcher.DispatcherGateway;
import org.apache.flink.runtime.webmonitor.TestingDispatcherGateway;
import org.apache.flink.util.concurrent.FutureUtils;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
/**
 * Test implementation of {@code DispatcherGatewayService} whose gateway, futures
 * and job-graph-removal behavior are all injectable through the {@link Builder}.
 */
class TestingDispatcherGatewayService
        implements AbstractDispatcherLeaderProcess.DispatcherGatewayService {

    private final DispatcherGateway dispatcherGateway;

    private final Function<JobID, CompletableFuture<Void>> onRemovedJobGraphFunction;

    private final CompletableFuture<ApplicationStatus> shutDownFuture;

    private final CompletableFuture<Void> terminationFuture;

    // When false, the caller is expected to complete terminationFuture manually.
    private final boolean completeTerminationFutureOnClose;

    private TestingDispatcherGatewayService(
            CompletableFuture<Void> terminationFuture,
            Function<JobID, CompletableFuture<Void>> onRemovedJobGraphFunction,
            DispatcherGateway dispatcherGateway,
            CompletableFuture<ApplicationStatus> shutDownFuture,
            boolean completeTerminationFutureOnClose) {
        this.terminationFuture = terminationFuture;
        this.onRemovedJobGraphFunction = onRemovedJobGraphFunction;
        this.dispatcherGateway = dispatcherGateway;
        this.shutDownFuture = shutDownFuture;
        this.completeTerminationFutureOnClose = completeTerminationFutureOnClose;
    }

    public static Builder newBuilder() {
        return new Builder();
    }

    @Override
    public DispatcherGateway getGateway() {
        return dispatcherGateway;
    }

    @Override
    public CompletableFuture<Void> onRemovedJobGraph(JobID jobId) {
        return onRemovedJobGraphFunction.apply(jobId);
    }

    @Override
    public CompletableFuture<ApplicationStatus> getShutDownFuture() {
        return shutDownFuture;
    }

    public CompletableFuture<Void> getTerminationFuture() {
        return terminationFuture;
    }

    @Override
    public CompletableFuture<Void> closeAsync() {
        if (completeTerminationFutureOnClose) {
            terminationFuture.complete(null);
        }
        return terminationFuture;
    }

    /** Fluent builder; every collaborator has a sensible testing default. */
    public static class Builder {

        private CompletableFuture<Void> terminationFuture = new CompletableFuture<>();

        private Function<JobID, CompletableFuture<Void>> onRemovedJobGraphFunction =
                ignored -> FutureUtils.completedVoidFuture();

        private DispatcherGateway dispatcherGateway =
                new TestingDispatcherGateway.Builder().build();

        private CompletableFuture<ApplicationStatus> shutDownFuture = new CompletableFuture<>();

        private boolean completeTerminationFutureOnClose = true;

        private Builder() {}

        public Builder setTerminationFuture(CompletableFuture<Void> terminationFuture) {
            this.terminationFuture = terminationFuture;
            return this;
        }

        public Builder setDispatcherGateway(DispatcherGateway dispatcherGateway) {
            this.dispatcherGateway = dispatcherGateway;
            return this;
        }

        public Builder setOnRemovedJobGraphFunction(
                Function<JobID, CompletableFuture<Void>> onRemovedJobGraphFunction) {
            this.onRemovedJobGraphFunction = onRemovedJobGraphFunction;
            return this;
        }

        public Builder setShutDownFuture(CompletableFuture<ApplicationStatus> shutDownFuture) {
            this.shutDownFuture = shutDownFuture;
            return this;
        }

        /** Disables automatic completion of the termination future in closeAsync(). */
        public Builder withManualTerminationFutureCompletion() {
            completeTerminationFutureOnClose = false;
            return this;
        }

        public TestingDispatcherGatewayService build() {
            return new TestingDispatcherGatewayService(
                    terminationFuture,
                    onRemovedJobGraphFunction,
                    dispatcherGateway,
                    shutDownFuture,
                    completeTerminationFutureOnClose);
        }
    }
}
| apache-2.0 |
ymn/lorsource | src/test/java/ru/org/linux/util/StringUtilTest.java | 1507 | /*
* Copyright 1998-2012 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.util;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
 * Tests for {@link StringUtil}.
 */
public class StringUtilTest {
  @Test
  public void processTitle() {
    // given
    //when
    String actualResult = StringUtil.processTitle("one -- two --- three -- four-- five --six --");
    // then
    // "--" surrounded by spaces becomes an em dash; "--" attached to a word
    // (or a longer dash run) is left untouched.
    assertEquals("one — two --- three — four-- five --six --", actualResult);
  }

  @Test
  public void makeTitle() {
    // given
    //when
    String actualResult = StringUtil.makeTitle("\"Test of \"quotes '' \"in quotes\" in title\"\"");
    // then
    // Straight quotes are converted to typographic « » / „ “ pairs; closing
    // quotes come out HTML-escaped as “ entities.
    assertEquals("«Test of „quotes " „in quotes" in title""»", actualResult);
  }

  @Test
  public void escapeXml() {
    // Already-escaped entities must survive unchanged (no double escaping).
    assertEquals("test test&", StringUtil.escapeXml("test test&"));
  }
}
| apache-2.0 |
tebeka/arrow | rust/parquet/src/util/memory.rs | 16016 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Utility methods and structs for working with memory.
use std::{
cell::Cell,
fmt::{Debug, Display, Formatter, Result as FmtResult},
io::{Result as IoResult, Write},
mem,
ops::{Index, IndexMut},
rc::{Rc, Weak},
};
// ----------------------------------------------------------------------
// Memory Tracker classes
/// Reference counted pointer for [`MemTracker`].
pub type MemTrackerPtr = Rc<MemTracker>;

/// Non-owning reference for [`MemTracker`].
pub type WeakMemTrackerPtr = Weak<MemTracker>;

/// Tracks the current and peak memory consumption, both in bytes.
#[derive(Debug)]
pub struct MemTracker {
    // (current allocation, high-water mark), both in bytes.
    memory_usage: Cell<(i64, i64)>,
}

impl MemTracker {
    /// Creates new memory tracker.
    #[inline]
    pub fn new() -> MemTracker {
        MemTracker {
            memory_usage: Cell::new((0, 0)),
        }
    }

    /// Returns the current memory consumption, in bytes.
    pub fn memory_usage(&self) -> i64 {
        self.memory_usage.get().0
    }

    /// Returns the maximum memory consumption so far, in bytes.
    pub fn max_memory_usage(&self) -> i64 {
        self.memory_usage.get().1
    }

    /// Adds `num_bytes` (may be negative for releases) to the tracked
    /// consumption, raising the high-water mark when the new total exceeds it.
    #[inline]
    pub fn alloc(&self, num_bytes: i64) {
        let (current, maximum) = self.memory_usage.get();
        let updated = current + num_bytes;
        self.memory_usage.set((updated, maximum.max(updated)));
    }
}
// ----------------------------------------------------------------------
// Buffer classes
/// Type alias for [`Buffer`].
pub type ByteBuffer = Buffer<u8>;

/// Type alias for [`BufferPtr`].
pub type ByteBufferPtr = BufferPtr<u8>;

/// A resize-able buffer class with generic member, with optional memory tracker.
///
/// Note that a buffer has two attributes:
/// `capacity` and `size`: the former is the total number of space reserved for
/// the buffer, while the latter is the actual number of elements.
/// Invariant: `capacity` >= `size`.
/// The total allocated bytes for a buffer equals to `capacity * sizeof<T>()`.
pub struct Buffer<T: Clone> {
    // Backing storage; its `capacity()` (not `len()`) drives memory accounting.
    data: Vec<T>,
    // Optional tracker charged/credited as `capacity * type_length` changes.
    mem_tracker: Option<MemTrackerPtr>,
    // Size in bytes of `T`, cached at construction time.
    type_length: usize,
}
impl<T: Clone> Buffer<T> {
    /// Creates new empty buffer.
    pub fn new() -> Self {
        Buffer {
            data: Vec::new(),
            mem_tracker: None,
            type_length: ::std::mem::size_of::<T>(),
        }
    }

    /// Registers `mc` as this buffer's memory tracker and immediately charges
    /// it with the bytes already reserved by the backing vector.
    #[inline]
    pub fn with_mem_tracker(mut self, mc: MemTrackerPtr) -> Self {
        mc.alloc((self.data.capacity() * self.type_length) as i64);
        self.mem_tracker = Some(mc);
        self
    }

    /// Returns slice of data in this buffer.
    #[inline]
    pub fn data(&self) -> &[T] {
        &self.data
    }

    /// Replaces the underlying data, charging the tracker (if any) with the
    /// resulting change in reserved capacity.
    #[inline]
    pub fn set_data(&mut self, new_data: Vec<T>) {
        let old_capacity = self.data.capacity();
        self.data = new_data;
        self.track_capacity_change(old_capacity);
    }

    /// Resizes underlying data in place to a new length `new_size`.
    ///
    /// If `new_size` is less than current length, data is truncated, otherwise, it is
    /// extended to `new_size` with provided default value `init_value`.
    ///
    /// Memory tracker is also updated, if available.
    #[inline]
    pub fn resize(&mut self, new_size: usize, init_value: T) {
        let old_capacity = self.data.capacity();
        self.data.resize(new_size, init_value);
        self.track_capacity_change(old_capacity);
    }

    /// Clears underlying data.
    #[inline]
    pub fn clear(&mut self) {
        self.data.clear()
    }

    /// Reserves capacity `additional_capacity` for underlying data vector.
    ///
    /// Memory tracker is also updated, if available.
    #[inline]
    pub fn reserve(&mut self, additional_capacity: usize) {
        let old_capacity = self.data.capacity();
        self.data.reserve(additional_capacity);
        self.track_capacity_change(old_capacity);
    }

    /// Hands the current contents off as an immutable [`BufferPtr`], leaving
    /// this buffer empty. The tracker (if any) is carried over to the pointer.
    #[inline]
    pub fn consume(&mut self) -> BufferPtr<T> {
        let contents = mem::replace(&mut self.data, vec![]);
        match self.mem_tracker {
            Some(ref mc) => BufferPtr::new(contents).with_mem_tracker(mc.clone()),
            None => BufferPtr::new(contents),
        }
    }

    /// Adds `value` to the buffer.
    #[inline]
    pub fn push(&mut self, value: T) {
        self.data.push(value)
    }

    /// Returns current capacity for the buffer.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.data.capacity()
    }

    /// Returns current size for the buffer.
    #[inline]
    pub fn size(&self) -> usize {
        self.data.len()
    }

    /// Returns `true` if memory tracker is added to buffer, `false` otherwise.
    #[inline]
    pub fn is_mem_tracked(&self) -> bool {
        self.mem_tracker.is_some()
    }

    /// Returns memory tracker associated with this buffer.
    /// This may panic, if memory tracker is not set, use method above to check if
    /// memory tracker is available.
    #[inline]
    pub fn mem_tracker(&self) -> &MemTrackerPtr {
        self.mem_tracker.as_ref().unwrap()
    }

    /// Charges the tracker (if present) with the difference between the current
    /// backing capacity and `old_capacity`, expressed in bytes. A zero delta is
    /// a no-op for the tracker's state.
    #[inline]
    fn track_capacity_change(&self, old_capacity: usize) {
        if let Some(ref mc) = self.mem_tracker {
            let capacity_diff = self.data.capacity() as i64 - old_capacity as i64;
            mc.alloc(capacity_diff * self.type_length as i64);
        }
    }
}
impl<T: Sized + Clone> Index<usize> for Buffer<T> {
    type Output = T;

    // Direct positional access; panics (like `Vec`) when `index >= size()`.
    fn index(&self, index: usize) -> &T {
        &self.data[index]
    }
}

impl<T: Sized + Clone> IndexMut<usize> for Buffer<T> {
    // Mutable positional access; panics (like `Vec`) when `index >= size()`.
    fn index_mut(&mut self, index: usize) -> &mut T {
        &mut self.data[index]
    }
}
// TODO: implement this for other types
impl Write for Buffer<u8> {
    /// Appends `buf` to the underlying vector, charging the memory tracker
    /// with any growth in reserved capacity.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
        let old_capacity = self.data.capacity();
        let bytes_written = self.data.write(buf)?;
        if let Some(ref mc) = self.mem_tracker {
            let capacity_diff = self.data.capacity() - old_capacity;
            if capacity_diff > 0 {
                // Keep the accounting formula consistent with `set_data`/
                // `resize`/`reserve`: capacity delta times the element size.
                // For `u8` the element size is 1, so the tracked value is
                // unchanged from the previous code.
                mc.alloc(capacity_diff as i64 * self.type_length as i64);
            }
        }
        Ok(bytes_written)
    }

    /// No-op: the data already lives in memory, nothing to flush.
    fn flush(&mut self) -> IoResult<()> {
        // No-op
        self.data.flush()
    }
}
impl AsRef<[u8]> for Buffer<u8> {
    // Exposes the buffer's occupied region (`size()` bytes) as a byte slice.
    fn as_ref(&self) -> &[u8] {
        self.data.as_slice()
    }
}
impl<T: Clone> Drop for Buffer<T> {
    // Credits back all bytes this buffer had charged to its tracker: the full
    // reserved capacity in bytes, mirroring the charge in `with_mem_tracker`.
    #[inline]
    fn drop(&mut self) {
        if let Some(ref mc) = self.mem_tracker {
            mc.alloc(-((self.data.capacity() * self.type_length) as i64));
        }
    }
}
// ----------------------------------------------------------------------
// Immutable Buffer (BufferPtr) classes
/// An representation of a slice on a reference-counting and read-only byte array.
/// Sub-slices can be further created from this. The byte array will be released
/// when all slices are dropped.
#[derive(Clone, Debug)]
pub struct BufferPtr<T> {
    // Shared, immutable backing storage; refcounted so sub-slices stay cheap.
    data: Rc<Vec<T>>,
    // Offset of this view into `data`.
    start: usize,
    // Number of elements visible through this view.
    len: usize,
    // TODO: will this create too many references? rethink about this.
    mem_tracker: Option<MemTrackerPtr>,
}
impl<T> BufferPtr<T> {
    /// Creates new buffer from a vector, viewing the entire contents.
    pub fn new(v: Vec<T>) -> Self {
        let len = v.len();
        Self {
            data: Rc::new(v),
            start: 0,
            len,
            mem_tracker: None,
        }
    }

    /// Returns slice of data in this buffer.
    pub fn data(&self) -> &[T] {
        &self.data[self.start..self.start + self.len]
    }

    /// Updates this buffer with new `start` position and length `len`.
    ///
    /// Range should be within current start position and length.
    pub fn with_range(mut self, start: usize, len: usize) -> Self {
        assert!(start <= self.len);
        assert!(start + len <= self.len);
        self.start = start;
        self.len = len;
        self
    }

    /// Adds memory tracker to this buffer.
    pub fn with_mem_tracker(mut self, mc: MemTrackerPtr) -> Self {
        self.mem_tracker = Some(mc);
        self
    }

    /// Returns start position of this buffer.
    pub fn start(&self) -> usize {
        self.start
    }

    /// Returns length of this buffer
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if this buffer views zero elements.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns `true` if this buffer has memory tracker, `false` otherwise.
    pub fn is_mem_tracked(&self) -> bool {
        self.mem_tracker.is_some()
    }

    /// Returns a shallow copy of the buffer.
    /// Reference counted pointer to the data is copied.
    pub fn all(&self) -> BufferPtr<T> {
        BufferPtr {
            data: self.data.clone(),
            start: self.start,
            len: self.len,
            // `Option<Rc<_>>::clone` bumps the refcount; clearer than the
            // previous `as_ref().map(|p| p.clone())`.
            mem_tracker: self.mem_tracker.clone(),
        }
    }

    /// Returns a shallow copy of the buffer that starts with `start` position.
    pub fn start_from(&self, start: usize) -> BufferPtr<T> {
        assert!(start <= self.len);
        BufferPtr {
            data: self.data.clone(),
            start: self.start + start,
            len: self.len - start,
            mem_tracker: self.mem_tracker.clone(),
        }
    }

    /// Returns a shallow copy that is a range slice within this buffer.
    pub fn range(&self, start: usize, len: usize) -> BufferPtr<T> {
        assert!(start + len <= self.len);
        BufferPtr {
            data: self.data.clone(),
            start: self.start + start,
            len,
            mem_tracker: self.mem_tracker.clone(),
        }
    }
}
impl<T: Sized> Index<usize> for BufferPtr<T> {
    type Output = T;

    // Index is relative to this view's window; explicit bounds check keeps the
    // panic within `len` rather than the backing vector's length.
    fn index(&self, index: usize) -> &T {
        assert!(index < self.len);
        &self.data[self.start + index]
    }
}
impl<T: Debug> Display for BufferPtr<T> {
    // NOTE(review): this formats the ENTIRE underlying vector, not just the
    // `[start, start + len)` window this pointer represents — confirm whether
    // that is intended before relying on the output for slices.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "{:?}", self.data)
    }
}
impl<T> Drop for BufferPtr<T> {
    // Only the LAST owner of the shared vector credits the tracked bytes back:
    // strong_count == 1 means this is the final strong reference, and the
    // weak_count check ensures no outstanding Weak handles remain either.
    fn drop(&mut self) {
        if self.is_mem_tracked()
            && Rc::strong_count(&self.data) == 1
            && Rc::weak_count(&self.data) == 0
        {
            let mc = self.mem_tracker.as_ref().unwrap();
            mc.alloc(-(self.data.capacity() as i64));
        }
    }
}
impl AsRef<[u8]> for BufferPtr<u8> {
    // Unlike `Display`, this correctly exposes only this view's window.
    fn as_ref(&self) -> &[u8] {
        &self.data[self.start..self.start + self.len]
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Verifies that `ByteBuffer` charges/credits its tracker as the backing
    // capacity changes, and that the high-water mark survives buffer drops.
    #[test]
    fn test_byte_buffer_mem_tracker() {
        let mem_tracker = Rc::new(MemTracker::new());

        let mut buffer = ByteBuffer::new().with_mem_tracker(mem_tracker.clone());
        buffer.set_data(vec![0; 10]);
        assert_eq!(mem_tracker.memory_usage(), buffer.capacity() as i64);
        buffer.set_data(vec![0; 20]);
        let capacity = buffer.capacity() as i64;
        assert_eq!(mem_tracker.memory_usage(), capacity);

        let max_capacity = {
            // Second buffer shares the tracker; usage is the sum of both.
            let mut buffer2 = ByteBuffer::new().with_mem_tracker(mem_tracker.clone());
            buffer2.reserve(30);
            assert_eq!(
                mem_tracker.memory_usage(),
                buffer2.capacity() as i64 + capacity
            );
            buffer2.set_data(vec![0; 100]);
            assert_eq!(
                mem_tracker.memory_usage(),
                buffer2.capacity() as i64 + capacity
            );
            buffer2.capacity() as i64 + capacity
        };

        // `buffer2` dropped: its bytes are credited back, but the peak remains.
        assert_eq!(mem_tracker.memory_usage(), capacity);
        assert_eq!(mem_tracker.max_memory_usage(), max_capacity);

        buffer.reserve(40);
        assert_eq!(mem_tracker.memory_usage(), buffer.capacity() as i64);

        // `consume` transfers the bytes to the returned `BufferPtr`, which is
        // dropped immediately here, so usage falls back to the empty buffer's.
        buffer.consume();
        assert_eq!(mem_tracker.memory_usage(), buffer.capacity() as i64);
    }

    // Shallow `BufferPtr` copies share one allocation, so tracked usage must
    // stay constant while clones and sub-slices are created and dropped.
    #[test]
    fn test_byte_ptr_mem_tracker() {
        let mem_tracker = Rc::new(MemTracker::new());

        let mut buffer = ByteBuffer::new().with_mem_tracker(mem_tracker.clone());
        buffer.set_data(vec![0; 60]);

        {
            let buffer_capacity = buffer.capacity() as i64;
            let buf_ptr = buffer.consume();
            assert_eq!(mem_tracker.memory_usage(), buffer_capacity);
            {
                let buf_ptr1 = buf_ptr.all();
                {
                    let _ = buf_ptr.start_from(20);
                    assert_eq!(mem_tracker.memory_usage(), buffer_capacity);
                }
                assert_eq!(mem_tracker.memory_usage(), buffer_capacity);
                let _ = buf_ptr1.range(30, 20);
                assert_eq!(mem_tracker.memory_usage(), buffer_capacity);
            }
            assert_eq!(mem_tracker.memory_usage(), buffer_capacity);
        }
        // Last `BufferPtr` owner dropped: bytes credited back to the tracker.
        assert_eq!(mem_tracker.memory_usage(), buffer.capacity() as i64);
    }

    // Exercises the untracked `ByteBuffer` API surface: size/capacity,
    // indexing, set_data, reserve, consume and the `Write` impl.
    #[test]
    fn test_byte_buffer() {
        let mut buffer = ByteBuffer::new();
        assert_eq!(buffer.size(), 0);
        assert_eq!(buffer.capacity(), 0);

        let mut buffer2 = ByteBuffer::new();
        buffer2.reserve(40);
        assert_eq!(buffer2.size(), 0);
        assert_eq!(buffer2.capacity(), 40);

        buffer.set_data((0..5).collect());
        assert_eq!(buffer.size(), 5);
        assert_eq!(buffer[4], 4);

        buffer.set_data((0..20).collect());
        assert_eq!(buffer.size(), 20);
        assert_eq!(buffer[10], 10);

        let expected: Vec<u8> = (0..20).collect();
        {
            let data = buffer.data();
            assert_eq!(data, expected.as_slice());
        }

        buffer.reserve(40);
        assert!(buffer.capacity() >= 40);

        let byte_ptr = buffer.consume();
        assert_eq!(buffer.size(), 0);
        assert_eq!(byte_ptr.as_ref(), expected.as_slice());

        let values: Vec<u8> = (0..30).collect();
        let _ = buffer.write(values.as_slice());
        let _ = buffer.flush();
        assert_eq!(buffer.data(), values.as_slice());
    }

    // Exercises `BufferPtr` views: full clone, suffix, and nested sub-range,
    // checking that start/len/indexing stay relative to each view.
    #[test]
    fn test_byte_ptr() {
        let values = (0..50).collect();
        let ptr = ByteBufferPtr::new(values);
        assert_eq!(ptr.len(), 50);
        assert_eq!(ptr.start(), 0);
        assert_eq!(ptr[40], 40);

        let ptr2 = ptr.all();
        assert_eq!(ptr2.len(), 50);
        assert_eq!(ptr2.start(), 0);
        assert_eq!(ptr2[40], 40);

        let ptr3 = ptr.start_from(20);
        assert_eq!(ptr3.len(), 30);
        assert_eq!(ptr3.start(), 20);
        assert_eq!(ptr3[0], 20);

        let ptr4 = ptr3.range(10, 10);
        assert_eq!(ptr4.len(), 10);
        assert_eq!(ptr4.start(), 30);
        assert_eq!(ptr4[0], 30);

        let expected: Vec<u8> = (30..40).collect();
        assert_eq!(ptr4.as_ref(), expected.as_slice());
    }
}
| apache-2.0 |
snnn/bazel | third_party/java/jarjar/jarjar-ant/src/main/java/com/tonicsystems/jarjar/PatternElement.java | 1014 | /**
* Copyright 2007 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tonicsystems.jarjar;
import javax.annotation.Nonnull;
/**
 * This object and its subclasses are also exposed to ant, so need setters for XML.
 *
 * @author shevek
 */
public abstract class PatternElement {

    // Wildcard pattern injected by ant from the rule element's XML attribute.
    // NOTE(review): the getter is declared @Nonnull, but this field stays null
    // until setPattern() runs — confirm ant always sets the attribute first.
    private String pattern;

    /** Returns the wildcard pattern; only meaningful after {@link #setPattern}. */
    @Nonnull
    public String getPattern() {
        return pattern;
    }

    /** Sets the wildcard pattern (called by ant while parsing the build file). */
    public void setPattern(@Nonnull String pattern) {
        this.pattern = pattern;
    }
}
| apache-2.0 |
lshain-android-source/tools-idea | java/java-impl/src/com/intellij/codeInsight/completion/methodChains/completion/lookup/sub/GetterLookupSubLookupElement.java | 945 | package com.intellij.codeInsight.completion.methodChains.completion.lookup.sub;
import com.intellij.psi.PsiJavaFile;
import org.jetbrains.annotations.Nullable;
/**
 * Sub-lookup element representing a getter call, optionally qualified by a
 * variable name (e.g. {@code foo.getBar()} vs {@code getBar()}).
 *
 * @author Dmitry Batkovich
 */
public class GetterLookupSubLookupElement implements SubLookupElement {
    private final String myVariableName;
    private final String myMethodName;

    public GetterLookupSubLookupElement(final String methodName) {
        this(null, methodName);
    }

    public GetterLookupSubLookupElement(@Nullable final String variableName, final String methodName) {
        myVariableName = variableName;
        myMethodName = methodName;
    }

    @Override
    public void doImport(final PsiJavaFile javaFile) {
        // A plain getter chain never requires additional imports.
    }

    @Override
    public String getInsertString() {
        final String call = myMethodName + "()";
        return myVariableName == null ? call : myVariableName + "." + call;
    }
}
| apache-2.0 |
joserabal/sakai | basiclti/basiclti-impl/src/java/org/sakaiproject/basiclti/impl/BasicLTISecurityServiceImpl.java | 20460 | /**
* $URL$
* $Id$
*
* Copyright (c) 2009 The Sakai Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sakaiproject.basiclti.impl;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;
import java.util.Properties;
import java.util.Enumeration;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.ServletOutputStream;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.tsugi.basiclti.BasicLTIUtil;
import org.sakaiproject.authz.cover.SecurityService;
import org.sakaiproject.entity.api.Entity;
import org.sakaiproject.entity.api.EntityAccessOverloadException;
import org.sakaiproject.entity.api.EntityCopyrightException;
import org.sakaiproject.entity.cover.EntityManager;
import org.sakaiproject.entity.api.EntityNotDefinedException;
import org.sakaiproject.entity.api.EntityPermissionException;
import org.sakaiproject.entity.api.EntityProducer;
import org.sakaiproject.entity.api.HttpAccess;
import org.sakaiproject.entity.api.Reference;
import org.sakaiproject.entity.api.ResourceProperties;
import org.sakaiproject.tool.cover.SessionManager;
import org.sakaiproject.tool.api.Session;
import org.sakaiproject.tool.cover.ToolManager;
import org.sakaiproject.site.api.Site;
import org.sakaiproject.site.cover.SiteService;
import org.sakaiproject.component.cover.ComponentManager;
import org.sakaiproject.component.cover.ServerConfigurationService;
import org.sakaiproject.util.StringUtil;
import org.sakaiproject.util.FormattedText;
import org.sakaiproject.exception.IdUnusedException;
import org.sakaiproject.exception.PermissionException;
import org.sakaiproject.util.ResourceLoader;
import org.sakaiproject.event.api.Event;
import org.sakaiproject.event.api.NotificationService;
import org.sakaiproject.lti.api.LTIService;
//import org.sakaiproject.event.cover.EventTrackingService;
import org.sakaiproject.component.cover.ComponentManager;
import org.sakaiproject.util.Validator;
import org.sakaiproject.util.Web;
import org.sakaiproject.site.api.SitePage;
import org.sakaiproject.site.api.ToolConfiguration;
import org.sakaiproject.util.foorm.SakaiFoorm;
import org.sakaiproject.basiclti.LocalEventTrackingService;
import org.sakaiproject.basiclti.util.SakaiBLTIUtil;
import org.sakaiproject.basiclti.impl.BasicLTIArchiveBean;
/**
 * Sakai {@link EntityProducer} that serves Basic LTI launch pages through the
 * /access servlet under {@link #REFERENCE_ROOT}, enforcing site-visit security,
 * rendering optional "splash" confirmation pages, and supporting site
 * archive/merge of Basic LTI tool placements.
 *
 * NOTE(review): several cover (static) Sakai services are used, hence the
 * deprecation suppression below.
 */
@SuppressWarnings("deprecation")
public class BasicLTISecurityServiceImpl implements EntityProducer {
	/** Fully-qualified service name used for registration/lookup. */
	public static final String SERVICE_NAME = BasicLTISecurityServiceImpl.class.getName();

	/** Localized strings for splash/launch pages (bundle "basicltisvc"). */
	private static ResourceLoader rb = new ResourceLoader("basicltisvc");

	public static final String MIME_TYPE_BLTI="ims/basiclti";
	/** Root of entity references handled by this producer (e.g. /basiclti/site/...). */
	public static final String REFERENCE_ROOT="/basiclti";
	public static final String APPLICATION_ID = "sakai:basiclti";
	/** Tool registration id of the Basic LTI portlet. */
	public static final String TOOL_REGISTRATION = "sakai.basiclti";
	/** Event type posted on each successful launch. */
	public static final String EVENT_BASICLTI_LAUNCH = "basiclti.launch";

	/** Form/key utility used to parse numeric keys out of reference ids. */
	protected static SakaiFoorm foorm = new SakaiFoorm();

	// Note: security needs a proper Resource reference

	/*******************************************************************************
	 * Dependencies and their setter methods
	 *******************************************************************************/

	/** Dependency: a logger component. */
	private Logger logger = LoggerFactory.getLogger(BasicLTISecurityServiceImpl.class);

	/**
	 * Check security for this entity.
	 *
	 * @param ref
	 *        The Reference to the entity.
	 * @return true if allowed, false if not.
	 */
	protected boolean checkSecurity(Reference ref)
	{
		String contextId = ref.getContext();
		try
		{
			// getSiteVisit throws if the current user may not visit the site
			Site site = SiteService.getSiteVisit(contextId);
			if ( site != null ) return true;
		}
		catch(IdUnusedException ex)
		{
			return false;
		}
		catch(PermissionException ex)
		{
			return false;
		}
		// System.out.println("ID="+ref.getId());
		// System.out.println("Type="+ref.getType());
		// System.out.println("SubType="+ref.getSubType());
		return false;
	}
	/*******************************************************************************
	 * Init and Destroy
	 *******************************************************************************/

	/** A service */
	protected static LTIService ltiService = null;

	/**
	 * Final initialization, once all dependencies are set.
	 * Registers this producer with the EntityManager and resolves the LTIService.
	 */
	public void init()
	{
		logger.info(this +".init()");

		// Warn the admin when LTI secrets are stored unencrypted
		if (ServerConfigurationService.getString(SakaiBLTIUtil.BASICLTI_ENCRYPTION_KEY, null) == null) {
			logger.error("BasicLTI secrets in database unencrypted, please set "+ SakaiBLTIUtil.BASICLTI_ENCRYPTION_KEY);
		}

		try
		{
			// register as an entity producer
			EntityManager.registerEntityProducer(this,REFERENCE_ROOT);
		}
		catch (Throwable t)
		{
			logger.warn("init(): ", t);
		}

		if ( ltiService == null ) ltiService = (LTIService) ComponentManager.get("org.sakaiproject.lti.api.LTIService");
	}

	/**
	 * Final cleanup.
	 */
	public void destroy()
	{
		logger.info(this +".destroy()");
	}

	/**
	 * Default constructor; all wiring happens in {@link #init()}.
	 */
	public BasicLTISecurityServiceImpl() {
		super();
	}

	/** Delegates the super-user check to the SecurityService cover. */
	public boolean isSuperUser(String userId)
	{
		return SecurityService.isSuperUser(userId);
	}

	/*******************************************************************************************************************************
	 * EntityProducer
	 ******************************************************************************************************************************/

	/**
	 * {@inheritDoc}

	 /access/basiclti/site/12-siteid-456/98-placement-id

	 /access/basiclti/content/ --- content path ---- (Future)

	 */
	public boolean parseEntityReference(String reference, Reference ref)
	{
		if (reference.startsWith(REFERENCE_ROOT))
		{
			// we will get null, simplelti, site, <context>, <placement>
			// we will store the context, and the ContentHosting reference in our id field.
			String id = null;
			String context = null;
			String[] parts = StringUtil.split(reference, Entity.SEPARATOR);

			// only /basiclti/site/<context>/<placement> is recognized here
			if ( parts.length == 5 && parts[2].equals("site") )
			{
				context = parts[3];
				id = parts[4];
				//Should the slashes below be entityseparator
				// id = "/" + StringUtil.unsplit(parts, 2, parts.length - 2, "/");
			}

			// NOTE(review): id/context stay null for non-matching shapes but we
			// still claim the reference (return true) because of the root prefix.
			ref.set(APPLICATION_ID, "site", id, null, context);

			return true;
		}

		return false;
	}

	/**
	 * Writes a complete XHTML page to the response with aggressive no-cache
	 * headers, wrapping the supplied body markup.
	 *
	 * @param res the servlet response to write to
	 * @param body pre-rendered HTML body content
	 */
	private void sendHTMLPage(HttpServletResponse res, String body)
	{
		try
		{
			res.setContentType("text/html; charset=UTF-8");
			res.setCharacterEncoding("utf-8");
			// Expires one year in the past plus no-cache headers: never cache launch pages
			res.addDateHeader("Expires", System.currentTimeMillis() - (1000L * 60L * 60L * 24L * 365L));
			res.addDateHeader("Last-Modified", System.currentTimeMillis());
			res.addHeader("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0, post-check=0, pre-check=0");
			res.addHeader("Pragma", "no-cache");
			java.io.PrintWriter out = res.getWriter();

			out.println("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">");
			out.println("<html xmlns=\"http://www.w3.org/1999/xhtml\" lang=\"en\" xml:lang=\"en\">");
			// NOTE(review): a second <html> open tag follows the first one above
			out.println("<html>\n<head>");
			out.println("<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />");
			out.println("</head>\n<body>\n");
			out.println(body);
			out.println("\n</body>\n</html>");
		}
		catch (Exception e)
		{
			e.printStackTrace();
		}
	}

	/**
	 * Renders a confirmation ("splash") page with a button that re-requests the
	 * same URI with splash=bypass appended, so the next request proceeds to launch.
	 *
	 * @param req current request (used only for its URI)
	 * @param res response to render into
	 * @param splash HTML or text message to display (caller decides escaping)
	 * @param rb resource loader for the button label
	 */
	private void doSplash(HttpServletRequest req, HttpServletResponse res, String splash, ResourceLoader rb)
	{
		// req.getRequestURL()=http://localhost:8080/access/basiclti/site/85fd092b-1755-4aa9-8abc-e6549527dce0/content:0
		// req.getRequestURI()=/access/basiclti/site/85fd092b-1755-4aa9-8abc-e6549527dce0/content:0
		String acceptPath = req.getRequestURI().toString() + "?splash=bypass";
		String body = "<div align=\"center\" style=\"text-align:left;width:80%;margin-top:5px;margin-left:auto;margin-right:auto;border-width:1px 1px 1px 1px;border-style:solid;border-color: gray;padding:.5em;font-family:Verdana,Arial,Helvetica,sans-serif;font-size:.8em\">";
		body += splash+"</div><p>";
		// NOTE(review): 'txt' is computed but never used; the button label below
		// re-fetches the resource string with a slightly different default text.
		String txt = rb.getString("launch.button", "Press to continue to external tool.");
		body += "<form><input type=\"submit\" onclick=\"window.location='"+acceptPath+"';return false;\" value=\"";
		body += rb.getString("launch.button", "Press to continue to proceed to external tool.");
		body += "\"></form></p>\n";
		sendHTMLPage(res, body);
	}

	/**
	 * {@inheritDoc}
	 *
	 * Returns the access handler that turns /access/basiclti references into
	 * launch (or registration / content-item) pages. Reference id prefixes
	 * dispatch the behavior: "deploy:", "tool:", "content:", else a placement id.
	 */
	public HttpAccess getHttpAccess()
	{
		return new HttpAccess()
		{

			@SuppressWarnings("unchecked")
			public void handleAccess(HttpServletRequest req, HttpServletResponse res, Reference ref,
					Collection copyrightAcceptedRefs) throws EntityPermissionException, EntityNotDefinedException,
			       EntityAccessOverloadException, EntityCopyrightException
			{
				// decide on security
				if (!checkSecurity(ref))
				{
					throw new EntityPermissionException(SessionManager.getCurrentSessionUserId(), "basiclti", ref.getReference());
				}

				String refId = ref.getId();
				String [] retval = null;

				// Branch 1: tool-deployment (re)registration pages
				if ( refId.startsWith("deploy:") && refId.length() > 7 )
				{
					// deployments are global; never serve them from the !admin site context
					if ("!admin".equals(ref.getContext()) )
					{
						throw new EntityPermissionException(SessionManager.getCurrentSessionUserId(), "basiclti", ref.getReference());
					}
					Map<String,Object> deploy = null;
					String deployStr = refId.substring(7);
					Long deployKey = foorm.getLongKey(deployStr);
					if ( deployKey >= 0 ) deploy = ltiService.getDeployDao(deployKey);

					String placementId = req.getParameter("placement");
					// System.out.println("deployStr="+deployStr+" deployKey="+deployKey+" placementId="+placementId);
					// System.out.println(deploy);

					// NOTE(review): 'deploy' can still be null here (bad key or
					// missing row) and the next line would then throw NPE — confirm.
					Long reg_state = foorm.getLongKey(deploy.get(LTIService.LTI_REG_STATE));
					if ( reg_state == 0 )
					{
						retval = SakaiBLTIUtil.postRegisterHTML(deployKey, deploy, rb, placementId);
					}
					else
					{
						retval = SakaiBLTIUtil.postReregisterHTML(deployKey, deploy, rb, placementId);
					}
				}
				// Branch 2: ContentItem selection request for a tool
				else if ( refId.startsWith("tool:") && refId.length() > 5 )
				{
					Map<String,Object> tool = null;

					String toolStr = refId.substring(5);
					String contentReturn = req.getParameter("contentReturn");

					// Forward all request parameters except contentReturn to the tool
					Enumeration attrs = req.getParameterNames();
					Properties propData = new Properties();
					while(attrs.hasMoreElements()) {
						String key = (String) attrs.nextElement();
						if ( "contentReturn".equals(key) ) continue;
						if ( key == null ) continue;
						String value = req.getParameter(key);
						if ( value == null ) continue;
						propData.setProperty(key,value);
					}
					Long toolKey = foorm.getLongKey(toolStr);
					if ( toolKey >= 0 )
					{
						tool = ltiService.getToolDao(toolKey, ref.getContext());
						if ( tool != null ) {
							tool.put(LTIService.LTI_SITE_ID, ref.getContext());
						}
						retval = SakaiBLTIUtil.postContentItemSelectionRequest(toolKey, tool, rb, contentReturn, propData);
					}
				}
				// Branch 3: launch of a content item (validates content/tool site ownership)
				else if ( refId.startsWith("content:") && refId.length() > 8 )
				{
					Map<String,Object> content = null;
					Map<String,Object> tool = null;

					String contentStr = refId.substring(8);
					Long contentKey = foorm.getLongKey(contentStr);
					if ( contentKey >= 0 )
					{
						content = ltiService.getContentDao(contentKey,ref.getContext());
						if ( content != null )
						{
							// content must belong to the site in the reference
							String siteId = (String) content.get(LTIService.LTI_SITE_ID);
							if ( siteId == null || ! siteId.equals(ref.getContext()) )
							{
								content = null;
							}
						}
						if ( content != null )
						{
							Long toolKey = foorm.getLongKey(content.get(LTIService.LTI_TOOL_ID));
							if ( toolKey >= 0 ) tool = ltiService.getToolDao(toolKey, ref.getContext());
							if ( tool != null )
							{
								// SITE_ID can be null for the tool
								String siteId = (String) tool.get(LTIService.LTI_SITE_ID);
								if ( siteId != null && ! siteId.equals(ref.getContext()) )
								{
									tool = null;
								}
							}
						}

						// strip fields the launching user should not see
						ltiService.filterContent(content, tool);
					}
					String splash = null;
					if ( tool != null ) splash = (String) tool.get("splash");
					String splashParm = req.getParameter("splash");
					String siteId = null;
					if ( tool != null ) siteId = (String) tool.get(LTIService.LTI_SITE_ID);
					if ( splashParm == null && splash != null && splash.trim().length() > 1 )
					{
						// XSS Note: Administrator-created tools can put HTML in the splash.
						if ( siteId != null ) splash = FormattedText.escapeHtml(splash,false);
						doSplash(req, res, splash, rb);
						return;
					}
					retval = SakaiBLTIUtil.postLaunchHTML(content, tool, ltiService, rb);
				}
				// Branch 4: legacy launch by placement id, with splash precedence
				// overridesplash > session warning > user splash (escaped) > defaultsplash
				else
				{
					String splashParm = req.getParameter("splash");
					if ( splashParm == null )
					{
						ToolConfiguration placement = SiteService.findTool(refId);
						Properties config = placement == null ? null : placement.getConfig();
						if ( placement != null )
						{
							// XSS Note: Only the Administrator can set overridesplash - so we allow HTML
							String splash = SakaiBLTIUtil.toNull(SakaiBLTIUtil.getCorrectProperty(config,"overridesplash", placement));
							String send_session = SakaiBLTIUtil.toNull(SakaiBLTIUtil.getCorrectProperty(config,"ext_sakai_encrypted_session", placement));
							if ( splash == null && send_session != null && send_session.equals("true") && ! SecurityService.isSuperUser() )
							{
								splash = rb.getString("session.warning", "<p><span style=\"color:red\">Warning:</span> This tool makes use of your logged in session.  This means that the tool can access your data in this system.  Only continue to this tool if you are willing to share your data with this tool.</p>");
							}
							if ( splash == null )
							{
								// This may be user-set so no HTML
								splash = SakaiBLTIUtil.toNull(SakaiBLTIUtil.getCorrectProperty(config,"splash", placement));
								if ( splash != null ) splash = FormattedText.escapeHtml(splash,false);
							}

							// XSS Note: Only the Administrator can set defaultsplash - so we allow HTML
							if ( splash == null )
							{
								splash = SakaiBLTIUtil.toNull(SakaiBLTIUtil.getCorrectProperty(config,"defaultsplash", placement));
							}

							if ( splash != null && splash.trim().length() > 1 )
							{
								doSplash(req, res, splash, rb);
								return;
							}
						}
					}

					// Get the post data for the placement
					retval = SakaiBLTIUtil.postLaunchHTML(refId, rb);
				}

				try
				{
					// retval[0] is the page body; retval[1] (optional) overrides the event reference
					sendHTMLPage(res, retval[0]);
					String refstring = ref.getReference();
					if ( retval.length > 1 ) refstring = retval[1];
					Event event = LocalEventTrackingService.newEvent(EVENT_BASICLTI_LAUNCH, refstring, ref.getContext(),
						false, NotificationService.NOTI_OPTIONAL);
					// SAK-24069 - Extend Sakai session lifetime on LTI tool launch
					Session session = SessionManager.getCurrentSession();
					if (session !=null) {
						int seconds = ServerConfigurationService.getInt(SakaiBLTIUtil.BASICLTI_LAUNCH_SESSION_TIMEOUT, 10800);
						if ( seconds != 0 ) session.setMaxInactiveInterval(seconds);
					}

					LocalEventTrackingService.post(event);
				}
				catch (Exception e)
				{
					e.printStackTrace();
				}

			}
		};
	}

	/**
	 * {@inheritDoc}
	 */
	public Entity getEntity(Reference ref)
	{
		return null;
	}

	/**
	 * {@inheritDoc}
	 */
	public Collection<String> getEntityAuthzGroups(Reference ref, String userId)
	{
		// Since we handle security ourself, we won't support anyone else asking
		return null;
	}

	/**
	 * {@inheritDoc}
	 */
	public String getEntityDescription(Reference ref)
	{
		return null;
	}

	/**
	 * {@inheritDoc}
	 */
	public ResourceProperties getEntityResourceProperties(Reference ref)
	{
		return null;
	}

	/**
	 * {@inheritDoc}
	 */
	public String getEntityUrl(Reference ref)
	{
		return ServerConfigurationService.getAccessUrl() + ref.getReference();
	}

	/**
	 * {@inheritDoc}
	 */
	public String getLabel()
	{
		return "basiclti";
	}

	/** This producer participates in site archive/merge. */
	public boolean willArchiveMerge()
	{
		return true;
	}

	/**
	 * Merges archived Basic LTI placements into the target site: for each
	 * &lt;basicLTI&gt; node a new page and tool placement are created and the
	 * archived placement properties are restored.
	 *
	 * @return a human-readable summary of what was merged
	 */
	@SuppressWarnings("unchecked")
	public String merge(String siteId, Element root, String archivePath, String fromSiteId, Map attachmentNames, Map userIdTrans,
			Set userListAllowImport)
	{
		StringBuilder results = new StringBuilder("Merging BasicLTI ");
		org.w3c.dom.NodeList nodeList = root.getElementsByTagName("basicLTI");
		try
		{
			Site site = SiteService.getSite(siteId);
			for(int i=0; i < nodeList.getLength(); i++)
			{
				BasicLTIArchiveBean basicLTI = new BasicLTIArchiveBean(nodeList.item(i));
				logger.info("BASIC LTI: " + basicLTI);
				results.append(", merging basicLTI tool " + basicLTI.getPageTitle());

				SitePage sitePage = site.addPage();
				sitePage.setTitle(basicLTI.getPageTitle());
				// This property affects both the Tool and SitePage.
				sitePage.setTitleCustom(true);

				ToolConfiguration toolConfiguration = sitePage.addTool();
				toolConfiguration.setTool(TOOL_REGISTRATION, ToolManager.getTool(TOOL_REGISTRATION));
				toolConfiguration.setTitle(basicLTI.getToolTitle());

				// restore every archived placement property verbatim
				for(Object key: basicLTI.getSiteToolProperties().keySet())
				{
					toolConfiguration.getPlacementConfig().setProperty((String)key, (String)basicLTI.getSiteToolProperties().get(key));
				}

				// NOTE(review): the site is saved once per merged tool, inside the loop
				SiteService.save(site);
			}
		}
		catch (IdUnusedException ie)
		{
			// This would be thrown by SiteService.getSite(siteId)
			ie.printStackTrace();
		}
		catch (PermissionException pe)
		{
			// This would be thrown by SiteService.save(site)
			pe.printStackTrace();
		}
		catch (Exception e)
		{
			// This is a generic exception that would be thrown by the BasicLTIArchiveBean constructor.
			e.printStackTrace();
		}
		results.append(".");
		return results.toString();
	}

	/**
	 * Archives all Basic LTI placements of a site into the DOM under a single
	 * producer element appended to the top of the archive stack.
	 *
	 * @return a human-readable summary including the number of tools archived
	 */
	@SuppressWarnings("unchecked")
	public String archive(String siteId, Document doc, Stack stack, String archivePath, List attachments)
	{
		logger.info("-------basic-lti-------- archive('"
				+ StringUtils.join(new Object[] { siteId, doc, stack,
						archivePath, attachments }, "','") + "')");

		StringBuilder results = new StringBuilder("archiving basiclti "+siteId+"\n");

		int count = 0;
		try
		{
			Site site = SiteService.getSite(siteId);
			logger.info("SITE: " + site.getId() + " : " + site.getTitle());
			Element basicLtiList = doc.createElement("org.sakaiproject.basiclti.service.BasicLTISecurityService");

			// scan every tool placement on every page for Basic LTI placements
			for (SitePage sitePage : site.getPages()) {
				for (ToolConfiguration toolConfiguration : sitePage.getTools()) {
					if ( toolConfiguration.getTool() == null ) continue;
					if (toolConfiguration.getTool().getId().equals(
							TOOL_REGISTRATION)) {
						// results.append(" tool=" + toolConfiguration.getId() + "\n");
						count++;

						BasicLTIArchiveBean basicLTIArchiveBean = new BasicLTIArchiveBean();
						basicLTIArchiveBean.setPageTitle(sitePage.getTitle());
						basicLTIArchiveBean.setToolTitle(toolConfiguration.getTitle());
						basicLTIArchiveBean.setSiteToolProperties(toolConfiguration.getConfig());

						Node newNode = basicLTIArchiveBean.toNode(doc);
						basicLtiList.appendChild(newNode);
					}
				}
			}

			((Element) stack.peek()).appendChild(basicLtiList);
			// NOTE(review): this push/pop pair is a no-op on the stack state
			stack.push(basicLtiList);
			stack.pop();
		}
		catch (IdUnusedException iue)
		{
			logger.info("SITE ID " + siteId + " DOES NOT EXIST.");
			results.append("Basic LTI Site does not exist\n");
		}
		// Something we did not expect
		catch (Exception e)
		{
			e.printStackTrace();
			results.append("basiclti exception:"+e.getClass().getName()+"\n");
		}
		results.append("archiving basiclti ("+count+") tools archived\n");
		return results.toString();
	}
}
| apache-2.0 |
ebyhr/presto | plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/metadata/ShardRecorder.java | 735 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.raptor.legacy.metadata;
import java.util.UUID;
/**
 * Callback for recording that a shard was created within a transaction.
 */
public interface ShardRecorder
{
    /**
     * Record the creation of a shard as part of the given transaction.
     *
     * @param transactionId id of the transaction that created the shard
     * @param shardUuid unique identifier of the newly created shard
     */
    void recordCreatedShard(long transactionId, UUID shardUuid);
}
| apache-2.0 |
NSAmelchev/ignite | modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryUtils.cs | 65455 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace Apache.Ignite.Core.Impl.Binary
{
using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.IO;
using System.Reflection;
using System.Runtime.InteropServices;
using System.Text;
using Apache.Ignite.Core.Binary;
using Apache.Ignite.Core.Impl.Binary.IO;
using Apache.Ignite.Core.Impl.Common;
/// <summary>
/// Utilities for binary serialization.
/// </summary>
internal static class BinaryUtils
{
        /** Header of NULL object. */
        public const byte HdrNull = 101;

        /** Header of object handle. */
        public const byte HdrHnd = 102;

        /** Header of object in fully serialized form. */
        public const byte HdrFull = 103;

        /** Protocol version. */
        public const byte ProtoVer = 1;

        /** Collection: custom. */
        public const byte CollectionCustom = 0;

        /** Collection: array list. */
        public const byte CollectionArrayList = 1;

        /** Collection: linked list. */
        public const byte CollectionLinkedList = 2;

        /** Map: custom. */
        public const byte MapCustom = 0;

        /** Map: hash map. */
        public const byte MapHashMap = 1;

        /** Byte "0". */
        public const byte ByteZero = 0;

        /** Indicates object array. */
        public const int ObjTypeId = -1;

        /** Ticks for Java epoch (1970-01-01T00:00:00Z); used to convert to/from Java timestamps. */
        public static readonly long JavaDateTicks = new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc).Ticks;

        /** Binding flags for static search. */
        private const BindingFlags BindFlagsStatic = BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic;

        /** System marshaller (compact footers disabled, type registration disabled). */
        private static readonly Marshaller Marsh = new Marshaller(
            new BinaryConfiguration { CompactFooter = false })
        {
            RegistrationDisabled = true
        };

        /** Method: ReadArray (resolved by reflection; implementation is outside this region). */
        public static readonly MethodInfo MtdhReadArray =
            typeof(BinaryUtils).GetMethod("ReadArray", BindFlagsStatic);

        /** Cached UTF8 encoding. */
        private static readonly Encoding Utf8 = Encoding.UTF8;

        /** Cached generic array read funcs. */
        private static readonly CopyOnWriteConcurrentDictionary<Type, Func<BinaryReader, bool, object>>
            ArrayReaders = new CopyOnWriteConcurrentDictionary<Type, Func<BinaryReader, bool, object>>();

        /** Flag indicating whether Guid struct is sequential in current runtime;
         * selects the fast or slow Guid (de)serializers below. */
        private static readonly bool IsGuidSequential = GetIsGuidSequential();

        /** Guid writer. */
        public static readonly Action<Guid, IBinaryStream> WriteGuid = IsGuidSequential
            ? (Action<Guid, IBinaryStream>)WriteGuidFast : WriteGuidSlow;

        /** Guid reader. */
        public static readonly Func<IBinaryStream, Guid> ReadGuid = IsGuidSequential
            ? (Func<IBinaryStream, Guid>)ReadGuidFast : ReadGuidSlow;

        /** String mode environment variable. */
        public const string IgniteBinaryMarshallerUseStringSerializationVer2 =
            "IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2";

        /** String mode: when true, strings use the Java-modified-UTF8 style codec (ver 2). */
        public static readonly bool UseStringSerializationVer2 =
            (Environment.GetEnvironmentVariable(IgniteBinaryMarshallerUseStringSerializationVer2) ?? "false") == "true";

        /** Cached maps of enum members per type. */
        private static readonly CopyOnWriteConcurrentDictionary<Type, Dictionary<string, int>> EnumValues =
            new CopyOnWriteConcurrentDictionary<Type, Dictionary<string, int>>();

        /// <summary>
        /// Default marshaller.
        /// </summary>
        public static Marshaller Marshaller
        {
            get { return Marsh; }
        }
        /**
         * <summary>Write boolean array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteBooleanArray(bool[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteBoolArray(vals);
        }

        /**
         * <summary>Read boolean array written by <see cref="WriteBooleanArray"/>.</summary>
         * <param name="stream">Output stream.</param>
         * <returns>Value.</returns>
         */
        public static bool[] ReadBooleanArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            return stream.ReadBoolArray(len);
        }

        /**
         * <summary>Write byte array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteByteArray(byte[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteByteArray(vals);
        }

        /**
         * <summary>Read byte array written by <see cref="WriteByteArray"/>.</summary>
         * <param name="stream">Output stream.</param>
         * <returns>Value.</returns>
         */
        public static byte[] ReadByteArray(IBinaryStream stream)
        {
            return stream.ReadByteArray(stream.ReadInt());
        }

        /**
         * <summary>Read sbyte array: reads the raw bytes and reinterprets them
         * as sbytes through an unsafe pointer (no per-element conversion).</summary>
         * <param name="stream">Output stream.</param>
         * <returns>Value.</returns>
         */
        public static unsafe sbyte[] ReadSbyteArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            sbyte[] res = new sbyte[len];

            fixed (sbyte* res0 = res)
            {
                stream.Read((byte*)res0, len);
            }

            return res;
        }
        /**
         * <summary>Write short array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteShortArray(short[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteShortArray(vals);
        }

        /**
         * <summary>Read ushort array: raw bytes reinterpreted as ushorts
         * (2 bytes per element) through an unsafe pointer.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static unsafe ushort[] ReadUshortArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            ushort[] res = new ushort[len];

            fixed (ushort* res0 = res)
            {
                stream.Read((byte*)res0, len * 2);
            }

            return res;
        }

        /**
         * <summary>Read short array.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static short[] ReadShortArray(IBinaryStream stream)
        {
            return stream.ReadShortArray(stream.ReadInt());
        }

        /**
         * <summary>Write int array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteIntArray(int[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteIntArray(vals);
        }

        /**
         * <summary>Read int array.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static int[] ReadIntArray(IBinaryStream stream)
        {
            return stream.ReadIntArray(stream.ReadInt());
        }

        /**
         * <summary>Read uint array: raw bytes reinterpreted as uints
         * (4 bytes per element) through an unsafe pointer.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static unsafe uint[] ReadUintArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            uint[] res = new uint[len];

            fixed (uint* res0 = res)
            {
                stream.Read((byte*)res0, len * 4);
            }

            return res;
        }

        /**
         * <summary>Write long array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteLongArray(long[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteLongArray(vals);
        }

        /**
         * <summary>Read long array.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static long[] ReadLongArray(IBinaryStream stream)
        {
            return stream.ReadLongArray(stream.ReadInt());
        }

        /**
         * <summary>Read ulong array: raw bytes reinterpreted as ulongs
         * (8 bytes per element) through an unsafe pointer.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static unsafe ulong[] ReadUlongArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            ulong[] res = new ulong[len];

            fixed (ulong* res0 = res)
            {
                stream.Read((byte*)res0, len * 8);
            }

            return res;
        }
        /**
         * <summary>Write char array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteCharArray(char[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteCharArray(vals);
        }

        /**
         * <summary>Read char array.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static char[] ReadCharArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            return stream.ReadCharArray(len);
        }

        /**
         * <summary>Write float array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteFloatArray(float[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteFloatArray(vals);
        }

        /**
         * <summary>Read float array.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static float[] ReadFloatArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            return stream.ReadFloatArray(len);
        }

        /**
         * <summary>Write double array, length-prefixed with an int.</summary>
         * <param name="vals">Value.</param>
         * <param name="stream">Output stream.</param>
         */
        public static void WriteDoubleArray(double[] vals, IBinaryStream stream)
        {
            stream.WriteInt(vals.Length);

            stream.WriteDoubleArray(vals);
        }

        /**
         * <summary>Read double array.</summary>
         * <param name="stream">Stream.</param>
         * <returns>Value.</returns>
         */
        public static double[] ReadDoubleArray(IBinaryStream stream)
        {
            int len = stream.ReadInt();

            return stream.ReadDoubleArray(len);
        }
        /**
         * <summary>Write timestamp as a (long, int) pair: high part followed by low part.
         * When no converter is supplied, the default Java-epoch conversion is used.</summary>
         * <param name="val">Date.</param>
         * <param name="stream">Stream.</param>
         * <param name="converter">Timestamp Converter.</param>
         */
        public static void WriteTimestamp(DateTime val, IBinaryStream stream, ITimestampConverter converter)
        {
            long high;
            int low;

            if (converter != null)
                converter.ToJavaTicks(val, out high, out low);
            else
                ToJavaDate(val, out high, out low);

            stream.WriteLong(high);
            stream.WriteInt(low);
        }

        /**
         * <summary>Read timestamp written by <see cref="WriteTimestamp"/>.
         * Default path: high = Java millis, low = extra nanoseconds (divided by 100
         * to become .NET ticks), result is UTC.</summary>
         * <param name="stream">Stream.</param>
         * <param name="converter">Timestamp Converter.</param>
         * <returns>Date</returns>
         */
        public static DateTime? ReadTimestamp(IBinaryStream stream, ITimestampConverter converter)
        {
            long high = stream.ReadLong();
            int low = stream.ReadInt();

            if (converter != null)
                return converter.FromJavaTicks(high, low);
            else
                return new DateTime(JavaDateTicks + high * TimeSpan.TicksPerMillisecond + low / 100, DateTimeKind.Utc);
        }

        /// <summary>
        /// Convert Java ticks to DateTime.
        /// Java ticks are microseconds since the Java epoch (1 tick here = 1000 .NET ticks).
        /// </summary>
        /// <param name="javaTicks">Ticks.</param>
        /// <returns>Resulting DateTime (UTC).</returns>
        public static DateTime JavaTicksToDateTime(long javaTicks)
        {
            return new DateTime(JavaDateTicks + javaTicks * 1000, DateTimeKind.Utc);
        }

        /// <summary>
        /// Convert DateTime struct to Java ticks (inverse of <see cref="JavaTicksToDateTime"/>).
        /// <param name="dateTime">DateTime to convert</param>
        /// </summary>
        /// <returns>Ticks count</returns>
        public static long DateTimeToJavaTicks(DateTime dateTime)
        {
            return (dateTime.Ticks - JavaDateTicks) / 1000;
        }

        /// <summary>
        /// Write nullable date array: int length, then for each element either a
        /// Timestamp type byte followed by the value, or a null header byte.
        /// </summary>
        /// <param name="vals">Values.</param>
        /// <param name="stream">Stream.</param>
        /// <param name="converter">Timestamp Converter.</param>
        public static void WriteTimestampArray(DateTime?[] vals, IBinaryStream stream, ITimestampConverter converter)
        {
            stream.WriteInt(vals.Length);

            foreach (DateTime? val in vals)
            {
                if (val.HasValue)
                {
                    stream.WriteByte(BinaryTypeId.Timestamp);

                    WriteTimestamp(val.Value, stream, converter);
                }
                else
                    stream.WriteByte(HdrNull);
            }
        }
        /**
         * <summary>Write string in UTF8 encoding, prefixed by its byte length.</summary>
         * <param name="val">String.</param>
         * <param name="stream">Stream.</param>
         */
        public static unsafe void WriteString(string val, IBinaryStream stream)
        {
            int charCnt = val.Length;

            fixed (char* chars = val)
            {
                // byte count is computed up front so the length prefix can precede the data
                int byteCnt = GetUtf8ByteCount(chars, charCnt);

                stream.WriteInt(byteCnt);

                stream.WriteString(chars, charCnt, byteCnt, Utf8);
            }
        }

        /// <summary>
        /// Converts string to UTF8 bytes.
        /// With <see cref="UseStringSerializationVer2"/> disabled this is standard UTF8;
        /// otherwise a Java-style modified-UTF8 encoding is produced (1-3 bytes per char,
        /// surrogates encoded independently).
        /// </summary>
        /// <param name="chars">Chars.</param>
        /// <param name="charCnt">Chars count.</param>
        /// <param name="byteCnt">Bytes count.</param>
        /// <param name="enc">Encoding.</param>
        /// <param name="data">Data.</param>
        /// <returns>Amount of bytes written.</returns>
        public static unsafe int StringToUtf8Bytes(char* chars, int charCnt, int byteCnt, Encoding enc, byte* data)
        {
            if (!UseStringSerializationVer2)
                return enc.GetBytes(chars, charCnt, data, byteCnt);

            int strLen = charCnt;

            // ReSharper disable TooWideLocalVariableScope (keep code similar to Java part)
            int c, cnt;
            // ReSharper restore TooWideLocalVariableScope

            int position = 0;

            for (cnt = 0; cnt < strLen; cnt++)
            {
                c = *(chars + cnt);

                // single byte for U+0001..U+007F (note: NUL is NOT special-cased to 2 bytes here)
                if (c >= 0x0001 && c <= 0x007F)
                    *(data + position++) = (byte)c;
                else if (c > 0x07FF)
                {
                    // three bytes for U+0800 and above (incl. each surrogate half)
                    *(data + position++) = (byte)(0xE0 | ((c >> 12) & 0x0F));
                    *(data + position++) = (byte)(0x80 | ((c >> 6) & 0x3F));
                    *(data + position++) = (byte)(0x80 | (c & 0x3F));
                }
                else
                {
                    // two bytes for U+0000 and U+0080..U+07FF
                    *(data + position++) = (byte)(0xC0 | ((c >> 6) & 0x1F));
                    *(data + position++) = (byte)(0x80 | (c & 0x3F));
                }
            }

            return position;
        }
        /// <summary>
        /// Gets the UTF8 byte count for the ver-2 (Java-style) encoding produced by
        /// <see cref="StringToUtf8Bytes"/>: 1 byte for ASCII, 3 for chars above U+07FF,
        /// 2 otherwise.
        /// </summary>
        /// <param name="chars">The chars.</param>
        /// <param name="strLen">Length of the string.</param>
        /// <returns>UTF byte count.</returns>
        private static unsafe int GetUtf8ByteCount(char* chars, int strLen)
        {
            int utfLen = 0;
            int cnt;

            for (cnt = 0; cnt < strLen; cnt++)
            {
                var c = *(chars + cnt);

                // ASCII
                if (c >= 0x0001 && c <= 0x007F)
                    utfLen++;
                // Special symbols (surrogates)
                else if (c > 0x07FF)
                    utfLen += 3;
                // The rest of the symbols.
                else
                    utfLen += 2;
            }

            return utfLen;
        }
/// <summary>
/// Converts UTF8 bytes to string. In ver.2 mode this decodes Java-style
/// modified UTF-8 (1..3 bytes per UTF-16 code unit), validating sequences
/// the same way java.io.DataInputStream does.
/// </summary>
/// <param name="arr">The bytes.</param>
/// <returns>Resulting string.</returns>
private static string Utf8BytesToString(byte[] arr)
{
    // Legacy mode: plain .NET UTF-8 decoding.
    if (!UseStringSerializationVer2)
        return Utf8.GetString(arr);

    int len = arr.Length, off = 0;
    int c, charArrCnt = 0, total = len;

    // ReSharper disable TooWideLocalVariableScope (keep code similar to Java part)
    int c2, c3;
    // ReSharper restore TooWideLocalVariableScope

    // One decoded char per input byte is an upper bound on the result length.
    char[] res = new char[len];

    // try reading ascii: fast path until the first non-ASCII byte.
    while (off < total)
    {
        c = arr[off] & 0xff;

        if (c > 127)
            break;

        off++;
        res[charArrCnt++] = (char)c;
    }

    // read other: general 1/2/3-byte sequences, dispatched on the top nibble.
    while (off < total)
    {
        c = arr[off] & 0xff;

        switch (c >> 4)
        {
            case 0:
            case 1:
            case 2:
            case 3:
            case 4:
            case 5:
            case 6:
            case 7:
                /* 0xxxxxxx*/
                off++;
                res[charArrCnt++] = (char)c;

                break;
            case 12:
            case 13:
                /* 110x xxxx 10xx xxxx*/
                off += 2;

                if (off > total)
                    throw new BinaryObjectException("Malformed input: partial character at end");

                c2 = arr[off - 1];

                // Continuation byte must be 10xxxxxx.
                if ((c2 & 0xC0) != 0x80)
                    throw new BinaryObjectException("Malformed input around byte: " + off);

                res[charArrCnt++] = (char)(((c & 0x1F) << 6) | (c2 & 0x3F));

                break;
            case 14:
                /* 1110 xxxx 10xx xxxx 10xx xxxx */
                off += 3;

                if (off > total)
                    throw new BinaryObjectException("Malformed input: partial character at end");

                c2 = arr[off - 2];

                c3 = arr[off - 1];

                if (((c2 & 0xC0) != 0x80) || ((c3 & 0xC0) != 0x80))
                    throw new BinaryObjectException("Malformed input around byte: " + (off - 1));

                // ReSharper disable once ShiftExpressionRealShiftCountIsZero (reviewed - readability)
                res[charArrCnt++] = (char)(
                    ((c & 0x0F) << 12) |
                    ((c2 & 0x3F) << 6) |
                    ((c3 & 0x3F) << 0));

                break;
            default:
                /* 10xx xxxx, 1111 xxxx */
                throw new BinaryObjectException("Malformed input around byte: " + off);
        }
    }

    // Avoid the trailing-copy allocation when every byte decoded to one char.
    return len == charArrCnt ? new string(res) : new string(res, 0, charArrCnt);
}
/**
 * <summary>Read string in UTF8 encoding; a null byte array maps to a null string.</summary>
 * <param name="stream">Stream.</param>
 * <returns>String, or null.</returns>
 */
public static string ReadString(IBinaryStream stream)
{
    var bytes = ReadByteArray(stream);

    if (bytes == null)
        return null;

    return Utf8BytesToString(bytes);
}
/**
 * <summary>Write string array in UTF8 encoding: length prefix, then per element
 * either a null header byte or a String type byte plus payload.</summary>
 * <param name="vals">String array (elements may be null).</param>
 * <param name="stream">Stream.</param>
 */
public static void WriteStringArray(string[] vals, IBinaryStream stream)
{
    stream.WriteInt(vals.Length);

    for (var i = 0; i < vals.Length; i++)
    {
        var cur = vals[i];

        if (cur == null)
        {
            stream.WriteByte(HdrNull);
            continue;
        }

        stream.WriteByte(BinaryTypeId.String);
        WriteString(cur, stream);
    }
}
/**
 * <summary>Write decimal value: scale (int) followed by a big-endian
 * sign-magnitude byte array whose first byte's top bit carries the
 * negative flag (Java BigDecimal-compatible layout).</summary>
 * <param name="val">Decimal value.</param>
 * <param name="stream">Stream.</param>
 */
public static void WriteDecimal(decimal val, IBinaryStream stream)
{
    // Vals are:
    // [0] = lo
    // [1] = mid
    // [2] = high
    // [3] = flags
    int[] vals = decimal.GetBits(val);

    // Get start index skipping leading zeros.
    int idx = vals[2] != 0 ? 2 : vals[1] != 0 ? 1 : vals[0] != 0 ? 0 : -1;

    // Write scale and negative flag.
    int scale = (vals[3] & 0x00FF0000) >> 16;

    stream.WriteInt(scale);

    Boolean neg = vals[3] < 0;

    if (idx == -1)
    {
        // Writing zero: single zero magnitude byte.
        stream.WriteInt(1);
        stream.WriteByte(0);
    }
    else
    {
        // Upper bound on magnitude length: 4 bytes per significant 32-bit part.
        int len = (idx + 1) << 2;

        // Write data, most significant part first.
        for (int i = idx; i >= 0; i--)
        {
            int curPart = vals[i];

            int part24 = (curPart >> 24) & 0xFF;
            int part16 = (curPart >> 16) & 0xFF;
            int part8 = (curPart >> 8) & 0xFF;
            int part0 = curPart & 0xFF;

            if (i == idx)
            {
                // Most significant part: strip leading zero bytes. If the top data
                // bit of the leading byte is already set, prepend an extra byte so
                // the sign flag does not collide with magnitude bits; otherwise the
                // sign flag is folded into the leading byte itself.
                // Possibly skipping some values here.
                if (part24 != 0)
                {
                    if ((part24 & 0x80) == 0x80)
                    {
                        stream.WriteInt(len + 1);

                        stream.WriteByte((byte)(neg ? -0x80 : ByteZero));

                        neg = false;
                    }
                    else
                        stream.WriteInt(len);

                    stream.WriteByte((byte)(neg ? ((sbyte)part24 | -0x80) : part24));
                    stream.WriteByte((byte)part16);
                    stream.WriteByte((byte)part8);
                    stream.WriteByte((byte)part0);
                }
                else if (part16 != 0)
                {
                    if ((part16 & 0x80) == 0x80)
                    {
                        stream.WriteInt(len);

                        stream.WriteByte((byte)(neg ? -0x80 : ByteZero));

                        neg = false;
                    }
                    else
                        stream.WriteInt(len - 1);

                    stream.WriteByte((byte)(neg ? ((sbyte)part16 | -0x80) : part16));
                    stream.WriteByte((byte)part8);
                    stream.WriteByte((byte)part0);
                }
                else if (part8 != 0)
                {
                    if ((part8 & 0x80) == 0x80)
                    {
                        stream.WriteInt(len - 1);

                        stream.WriteByte((byte)(neg ? -0x80 : ByteZero));

                        neg = false;
                    }
                    else
                        stream.WriteInt(len - 2);

                    stream.WriteByte((byte)(neg ? ((sbyte)part8 | -0x80) : part8));
                    stream.WriteByte((byte)part0);
                }
                else
                {
                    if ((part0 & 0x80) == 0x80)
                    {
                        stream.WriteInt(len - 2);

                        stream.WriteByte((byte)(neg ? -0x80 : ByteZero));

                        neg = false;
                    }
                    else
                        stream.WriteInt(len - 3);

                    stream.WriteByte((byte)(neg ? ((sbyte)part0 | -0x80) : part0));
                }
            }
            else
            {
                // Non-leading parts are written in full, big-endian.
                stream.WriteByte((byte)part24);
                stream.WriteByte((byte)part16);
                stream.WriteByte((byte)part8);
                stream.WriteByte((byte)part0);
            }
        }
    }
}
/**
 * <summary>Read decimal value written by WriteDecimal: scale (int) plus a
 * big-endian sign-magnitude byte array with the sign in the top bit of the
 * first byte.</summary>
 * <param name="stream">Stream.</param>
 * <returns>Decimal value.</returns>
 */
public static decimal? ReadDecimal(IBinaryStream stream)
{
    int scale = stream.ReadInt();

    bool neg = false;

    byte[] mag = ReadByteArray(stream);

    // Top bit of the first magnitude byte carries the sign; strip it.
    if ((sbyte)mag[0] < 0)
    {
        mag[0] &= 0x7F;

        neg = true;
    }

    if (scale < 0 || scale > 28)
        throw new BinaryObjectException("Decimal value scale overflow (must be between 0 and 28): " + scale);

    // .NET decimal holds a 96-bit magnitude: at most 12 data bytes,
    // or 13 when the first byte was only a sign carrier (now zero).
    if (mag.Length > 13)
        throw new BinaryObjectException("Decimal magnitude overflow (must be less than 96 bits): " +
            mag.Length * 8);

    if (mag.Length == 13 && mag[0] != 0)
        throw new BinaryObjectException("Decimal magnitude overflow (must be less than 96 bits): " +
            mag.Length * 8);

    int hi = 0;
    int mid = 0;
    int lo = 0;

    int ctr = -1;

    // Fold the (at most) last 12 bytes into hi/mid/lo. Negative indices only
    // advance ctr, which keeps shorter magnitudes aligned to the low words.
    for (int i = mag.Length - 12; i < mag.Length; i++)
    {
        if (++ctr == 4)
        {
            mid = lo;

            lo = 0;
        }
        else if (ctr == 8)
        {
            hi = mid;

            mid = lo;

            lo = 0;
        }

        if (i >= 0)
            lo = (lo << 8) + mag[i];
    }

    return new decimal(lo, mid, hi, neg, (byte)scale);
}
/**
 * <summary>Write decimal array: length prefix, then per element either a
 * null header byte or a Decimal type byte plus payload.</summary>
 * <param name="vals">Decimal array (elements may be null).</param>
 * <param name="stream">Stream.</param>
 */
public static void WriteDecimalArray(decimal?[] vals, IBinaryStream stream)
{
    stream.WriteInt(vals.Length);

    for (var i = 0; i < vals.Length; i++)
    {
        var cur = vals[i];

        if (cur == null)
        {
            stream.WriteByte(HdrNull);
            continue;
        }

        stream.WriteByte(BinaryTypeId.Decimal);
        WriteDecimal(cur.Value, stream);
    }
}
/**
 * <summary>Read decimal array written by WriteDecimalArray.</summary>
 * <param name="stream">Stream.</param>
 * <returns>Decimal array with nulls preserved.</returns>
 */
public static decimal?[] ReadDecimalArray(IBinaryStream stream)
{
    var len = stream.ReadInt();

    var res = new decimal?[len];

    for (var i = 0; i < len; i++)
    {
        var hdr = stream.ReadByte();

        res[i] = hdr == HdrNull ? (decimal?) null : ReadDecimal(stream);
    }

    return res;
}
/// <summary>
/// Gets a value indicating whether <see cref="Guid"/> fields are stored sequentially in memory.
/// </summary>
/// <returns>True when a raw pointer view of a Guid matches ToByteArray byte-for-byte,
/// meaning the fast bitwise read/write path is safe on this runtime.</returns>
private static unsafe bool GetIsGuidSequential()
{
    // Check that bitwise conversion returns correct result
    var guid = Guid.NewGuid();

    var bytes = guid.ToByteArray();

    var bytes0 = (byte*)&guid;

    for (var i = 0; i < bytes.Length; i++)
        if (bytes[i] != bytes0[i])
            return false;

    return true;
}
/// <summary>
/// Writes a guid with bitwise conversion, assuming that <see cref="Guid"/>
/// is laid out in memory sequentially and without gaps between fields.
/// Only valid when GetIsGuidSequential returned true.
/// </summary>
/// <param name="val">The value.</param>
/// <param name="stream">The stream.</param>
public static unsafe void WriteGuidFast(Guid val, IBinaryStream stream)
{
    // JavaGuid's constructor performs the .NET -> Java byte-order shuffle;
    // the 16-byte struct is then written directly from its address.
    var jguid = new JavaGuid(val);

    var ptr = &jguid;

    stream.Write((byte*)ptr, 16);
}
/// <summary>
/// Writes a guid byte by byte (fallback for runtimes where Guid is not
/// laid out sequentially). Produces the same Java-order bytes as WriteGuidFast.
/// </summary>
/// <param name="val">The value.</param>
/// <param name="stream">The stream.</param>
private static unsafe void WriteGuidSlow(Guid val, IBinaryStream stream)
{
    var bytes = val.ToByteArray();
    byte* jBytes = stackalloc byte[16];

    // Reorder .NET layout (a,b,c little-endian + d..k) into Java layout:
    // c, b, a reversed per-field, then k..d reversed.
    jBytes[0] = bytes[6]; // c1
    jBytes[1] = bytes[7]; // c2

    jBytes[2] = bytes[4]; // b1
    jBytes[3] = bytes[5]; // b2

    jBytes[4] = bytes[0]; // a1
    jBytes[5] = bytes[1]; // a2
    jBytes[6] = bytes[2]; // a3
    jBytes[7] = bytes[3]; // a4

    jBytes[8] = bytes[15]; // k
    jBytes[9] = bytes[14]; // j
    jBytes[10] = bytes[13]; // i
    jBytes[11] = bytes[12]; // h
    jBytes[12] = bytes[11]; // g
    jBytes[13] = bytes[10]; // f
    jBytes[14] = bytes[9]; // e
    jBytes[15] = bytes[8]; // d

    stream.Write(jBytes, 16);
}
/// <summary>
/// Reads a guid with bitwise conversion, assuming that <see cref="Guid"/>
/// is laid out in memory sequentially and without gaps between fields.
/// Only valid when GetIsGuidSequential returned true.
/// </summary>
/// <param name="stream">The stream.</param>
/// <returns>Guid.</returns>
public static unsafe Guid ReadGuidFast(IBinaryStream stream)
{
    // Read the 16 Java-order bytes straight into the overlay struct...
    JavaGuid jguid;

    var ptr = (byte*)&jguid;

    stream.Read(ptr, 16);

    // ...then let GuidAccessor's constructor shuffle them into .NET order
    // and reinterpret the result as a Guid.
    var dotnetGuid = new GuidAccessor(jguid);

    return *(Guid*)(&dotnetGuid);
}
/// <summary>
/// Reads a guid byte by byte (fallback path); inverse of WriteGuidSlow.
/// </summary>
/// <param name="stream">The stream.</param>
/// <returns>Guid.</returns>
public static unsafe Guid ReadGuidSlow(IBinaryStream stream)
{
    byte* jBytes = stackalloc byte[16];

    stream.Read(jBytes, 16);

    // Undo the Java-order shuffle performed on write.
    var bytes = new byte[16];

    bytes[0] = jBytes[4]; // a1
    bytes[1] = jBytes[5]; // a2
    bytes[2] = jBytes[6]; // a3
    bytes[3] = jBytes[7]; // a4

    bytes[4] = jBytes[2]; // b1
    bytes[5] = jBytes[3]; // b2

    bytes[6] = jBytes[0]; // c1
    bytes[7] = jBytes[1]; // c2

    bytes[8] = jBytes[15]; // d
    bytes[9] = jBytes[14]; // e
    bytes[10] = jBytes[13]; // f
    bytes[11] = jBytes[12]; // g
    bytes[12] = jBytes[11]; // h
    bytes[13] = jBytes[10]; // i
    bytes[14] = jBytes[9]; // j
    bytes[15] = jBytes[8]; // k

    return new Guid(bytes);
}
/// <summary>
/// Write GUID array: length prefix, then per element either a null header
/// byte or a Guid type byte plus payload.
/// </summary>
/// <param name="vals">Values (elements may be null).</param>
/// <param name="stream">Stream.</param>
public static void WriteGuidArray(Guid?[] vals, IBinaryStream stream)
{
    stream.WriteInt(vals.Length);

    for (var i = 0; i < vals.Length; i++)
    {
        var cur = vals[i];

        if (cur == null)
        {
            stream.WriteByte(HdrNull);
            continue;
        }

        stream.WriteByte(BinaryTypeId.Guid);
        WriteGuid(cur.Value, stream);
    }
}
/// <summary>
/// Write array: element type id (with optional full type name for
/// unregistered types), length, then each element.
/// </summary>
/// <param name="val">Array (single-dimension only).</param>
/// <param name="ctx">Write context.</param>
/// <param name="elemTypeId">The element type id; resolved from the array's
/// element type when null or unregistered.</param>
public static void WriteArray(Array val, BinaryWriter ctx, int? elemTypeId = null)
{
    Debug.Assert(val != null && ctx != null);
    Debug.Assert(val.Rank == 1);

    IBinaryStream stream = ctx.Stream;

    if (elemTypeId != null && elemTypeId != BinaryTypeId.Unregistered)
    {
        // Caller supplied a registered element type id - trust it.
        stream.WriteInt(elemTypeId.Value);
    }
    else
    {
        var elemType = val.GetType().GetElementType();
        Debug.Assert(elemType != null);

        var typeId = GetArrayElementTypeId(val, ctx.Marshaller);

        stream.WriteInt(typeId);

        // Unregistered element types are identified by full name on the wire.
        if (typeId == BinaryTypeId.Unregistered)
        {
            ctx.WriteString(elemType.FullName);
        }
    }

    stream.WriteInt(val.Length);

    // Detached write keeps elements independent of the enclosing object's handles.
    for (int i = 0; i < val.Length; i++)
        ctx.WriteObjectDetached(val.GetValue(i), parentCollection: val);
}
/// <summary>
/// Gets the array element type identifier for a given array instance.
/// </summary>
public static int GetArrayElementTypeId(Array val, Marshaller marsh)
{
    var element = val.GetType().GetElementType();

    Debug.Assert(element != null);

    return GetArrayElementTypeId(element, marsh);
}
/// <summary>
/// Gets the array element type identifier for an element type;
/// object maps to the special object type id.
/// </summary>
public static int GetArrayElementTypeId(Type elemType, Marshaller marsh)
{
    if (elemType == typeof(object))
        return ObjTypeId;

    return marsh.GetDescriptor(elemType).TypeId;
}
/// <summary>
/// Gets the type of the array element from its type id;
/// the special object type id maps back to object.
/// </summary>
public static Type GetArrayElementType(int typeId, Marshaller marsh)
{
    if (typeId == ObjTypeId)
        return typeof(object);

    return marsh.GetDescriptor(true, typeId, true).Type;
}
/// <summary>
/// Read array of a runtime-known element type by dispatching to a cached,
/// compiled ReadArray&lt;T&gt; delegate.
/// </summary>
/// <param name="ctx">Read context.</param>
/// <param name="typed">Typed flag.</param>
/// <param name="elementType">Type of the element.</param>
/// <returns>Array.</returns>
public static object ReadTypedArray(BinaryReader ctx, bool typed, Type elementType)
{
    Func<BinaryReader, bool, object> result;

    // Fast path: delegate already compiled for this element type.
    if (!ArrayReaders.TryGetValue(elementType, out result))
        // Slow path: compile ReadArray<elementType> once and cache it.
        result = ArrayReaders.GetOrAdd(elementType, t =>
            DelegateConverter.CompileFunc<Func<BinaryReader, bool, object>>(null,
                MtdhReadArray.MakeGenericMethod(t),
                new[] { typeof(BinaryReader), typeof(bool) }, new[] { false, false, true }));

    return result(ctx, typed);
}
/// <summary>
/// Read array.
/// </summary>
/// <param name="ctx">Read context.</param>
/// <param name="typed">Typed flag: when set, an element type id (and, for
/// unregistered types, a type name) precedes the data.</param>
/// <returns>Array.</returns>
public static T[] ReadArray<T>(BinaryReader ctx, bool typed)
{
    var stream = ctx.Stream;

    var pos = stream.Position;

    if (typed)
    {
        int typeId = stream.ReadInt();

        // Unregistered element types carry the full type name; consume and ignore it.
        if (typeId == BinaryTypeId.Unregistered)
            ctx.ReadString();
    }

    int len = stream.ReadInt();

    var vals = new T[len];

    // Register the handle before deserializing elements so cyclic references
    // back to this array resolve. NOTE(review): the handle is anchored one byte
    // before the current position - presumably at the array header byte; confirm.
    ctx.AddHandle(pos - 1, vals);

    for (int i = 0; i < len; i++)
        vals[i] = ctx.Deserialize<T>();

    return vals;
}
/// <summary>
/// Read string array written by WriteStringArray.
/// </summary>
/// <param name="stream">Stream</param>
/// <returns>String array with nulls preserved.</returns>
public static string[] ReadStringArray(IBinaryStream stream)
{
    var len = stream.ReadInt();

    var res = new string[len];

    for (var i = 0; i < len; i++)
    {
        var hdr = stream.ReadByte();

        res[i] = hdr == HdrNull ? null : ReadString(stream);
    }

    return res;
}
/// <summary>
/// Read GUID array written by WriteGuidArray.
/// </summary>
/// <param name="stream">Stream</param>
/// <returns>GUID array with nulls preserved.</returns>
public static Guid?[] ReadGuidArray(IBinaryStream stream)
{
    var len = stream.ReadInt();

    var res = new Guid?[len];

    for (var i = 0; i < len; i++)
        res[i] = stream.ReadByte() == HdrNull ? (Guid?) null : ReadGuid(stream);

    return res;
}
/// <summary>
/// Read timestamp array written by WriteTimestampArray.
/// </summary>
/// <param name="stream">Stream.</param>
/// <param name="converter">Timestamp Converter.</param>
/// <returns>Timestamp array with nulls preserved.</returns>
public static DateTime?[] ReadTimestampArray(IBinaryStream stream, ITimestampConverter converter)
{
    var len = stream.ReadInt();

    var res = new DateTime?[len];

    for (var i = 0; i < len; i++)
    {
        var hdr = stream.ReadByte();

        res[i] = hdr == HdrNull ? (DateTime?) null : ReadTimestamp(stream, converter);
    }

    return res;
}
/**
 * <summary>Write collection, inferring the wire collection type from the
 * runtime type (List -> array list, LinkedList -> linked list, else custom).</summary>
 * <param name="val">Value.</param>
 * <param name="ctx">Write context.</param>
 */
public static void WriteCollection(ICollection val, BinaryWriter ctx)
{
    byte colType = CollectionCustom;

    var type = val.GetType();

    if (type.IsGenericType)
    {
        var def = type.GetGenericTypeDefinition();

        if (def == typeof(List<>))
            colType = CollectionArrayList;
        else if (def == typeof(LinkedList<>))
            colType = CollectionLinkedList;
    }
    else if (type == typeof(ArrayList))
        colType = CollectionArrayList;

    WriteCollection(val, ctx, colType);
}
/**
 * <summary>Write non-null collection with known type: count, wire type byte,
 * then each element written detached.</summary>
 * <param name="val">Value.</param>
 * <param name="ctx">Write context.</param>
 * <param name="colType">Collection type.</param>
 */
public static void WriteCollection(ICollection val, BinaryWriter ctx, byte colType)
{
    var stream = ctx.Stream;

    stream.WriteInt(val.Count);
    stream.WriteByte(colType);

    foreach (var item in val)
        ctx.WriteObjectDetached(item, parentCollection: val);
}
/**
 * <summary>Read collection.</summary>
 * <param name="ctx">Context.</param>
 * <param name="factory">Factory delegate; when null, an ArrayList (or a
 * LinkedList for the linked-list wire type) is created.</param>
 * <param name="adder">Adder delegate; when null, a default adder matching
 * the created collection type is used.</param>
 * <returns>Collection.</returns>
 */
public static ICollection ReadCollection(BinaryReader ctx,
    Func<int, ICollection> factory, Action<ICollection, object> adder)
{
    IBinaryStream stream = ctx.Stream;

    int pos = stream.Position;

    int len = stream.ReadInt();

    byte colType = stream.ReadByte();

    ICollection res;

    if (factory == null)
    {
        if (colType == CollectionLinkedList)
            res = new LinkedList<object>();
        else
            res = new ArrayList(len);
    }
    else
        res = factory.Invoke(len);

    // Register the handle before elements so cyclic references resolve.
    ctx.AddHandle(pos - 1, res);

    if (adder == null)
    {
        // BUG FIX: the previous default adder cast unconditionally to ArrayList,
        // which threw InvalidCastException whenever the default factory above
        // created a LinkedList<object> for the linked-list wire type.
        adder = (col, elem) =>
        {
            var linked = col as LinkedList<object>;

            if (linked != null)
                linked.AddLast(elem);
            else
                ((ArrayList) col).Add(elem);
        };
    }

    for (int i = 0; i < len; i++)
        adder.Invoke(res, ctx.Deserialize<object>());

    return res;
}
/**
 * <summary>Write dictionary, inferring the wire map type from the runtime
 * type (Dictionary/Hashtable -> hash map, else custom).</summary>
 * <param name="val">Value.</param>
 * <param name="ctx">Write context.</param>
 */
public static void WriteDictionary(IDictionary val, BinaryWriter ctx)
{
    byte dictType = MapCustom;

    var type = val.GetType();

    if (type.IsGenericType)
    {
        if (type.GetGenericTypeDefinition() == typeof(Dictionary<,>))
            dictType = MapHashMap;
    }
    else if (type == typeof(Hashtable))
        dictType = MapHashMap;

    WriteDictionary(val, ctx, dictType);
}
/**
 * <summary>Write non-null dictionary with known type: count, wire type byte,
 * then alternating detached key/value pairs.</summary>
 * <param name="val">Value.</param>
 * <param name="ctx">Write context.</param>
 * <param name="dictType">Dictionary type.</param>
 */
public static void WriteDictionary(IDictionary val, BinaryWriter ctx, byte dictType)
{
    var stream = ctx.Stream;

    stream.WriteInt(val.Count);
    stream.WriteByte(dictType);

    foreach (DictionaryEntry pair in val)
    {
        ctx.WriteObjectDetached(pair.Key, parentCollection: val);
        ctx.WriteObjectDetached(pair.Value, parentCollection: val);
    }
}
/**
 * <summary>Read dictionary: count, ignored wire type byte, then alternating
 * key/value pairs. Later duplicate keys overwrite earlier ones.</summary>
 * <param name="ctx">Context.</param>
 * <param name="factory">Factory delegate; a Hashtable is created when null.</param>
 * <returns>Dictionary.</returns>
 */
public static IDictionary ReadDictionary(BinaryReader ctx, Func<int, IDictionary> factory)
{
    var stream = ctx.Stream;

    var handlePos = stream.Position;

    var len = stream.ReadInt();

    // Skip dictionary type as we can do nothing with it here.
    stream.ReadByte();

    var res = factory != null ? factory.Invoke(len) : new Hashtable(len);

    // Register the handle before entries so cyclic references resolve.
    ctx.AddHandle(handlePos - 1, res);

    for (var i = 0; i < len; i++)
    {
        var key = ctx.Deserialize<object>();

        res[key] = ctx.Deserialize<object>();
    }

    return res;
}
/**
 * <summary>Write binary object: its raw byte array followed by the offset
 * of the object within that array.</summary>
 * <param name="stream">Stream.</param>
 * <param name="val">Value.</param>
 */
public static void WriteBinary(IBinaryStream stream, BinaryObject val)
{
    WriteByteArray(val.Data, stream);

    stream.WriteInt(val.Offset);
}
/// <summary>
/// Gets the enum value by type id and int representation.
/// </summary>
/// <typeparam name="T">Result type.</typeparam>
/// <param name="value">The value.</param>
/// <param name="typeId">The type identifier.</param>
/// <param name="marsh">The marshaller.</param>
/// <returns>value in form of enum, if typeId is known; value in for of int, if typeId is -1.</returns>
public static T GetEnumValue<T>(int value, int typeId, Marshaller marsh)
{
    // Plain object type id: no enum metadata, hand back the raw int.
    if (typeId == ObjTypeId)
        return TypeCaster<T>.Cast(value);

    // All enums are user types
    var desc = marsh.GetDescriptor(true, typeId, true);

    // Unknown or typeless descriptor: fall back to the raw int as well.
    if (desc == null || desc.Type == null)
        return TypeCaster<T>.Cast(value);

    return (T)Enum.ToObject(desc.Type, value);
}
/**
 * <summary>Gets type key: the type id widened to 64 bits, with bit 32 set
 * for user types so user and system ids never collide.</summary>
 * <param name="userType">User type flag.</param>
 * <param name="typeId">Type ID.</param>
 * <returns>Type key.</returns>
 */
public static long TypeKey(bool userType, int typeId)
{
    const long UserFlag = 1L << 32;

    return userType ? typeId | UserFlag : typeId;
}
/// <summary>
/// Gets the string hash code using Java algorithm, folding upper-case
/// ASCII letters to lower case before mixing. Null hashes to 0.
/// </summary>
public static int GetStringHashCodeLowerCase(string val)
{
    if (val == null)
        return 0;

    var hash = 0;

    unchecked
    {
        for (var i = 0; i < val.Length; i++)
        {
            int c = val[i];

            // Setting bit 5 maps 'A'..'Z' onto 'a'..'z'; other chars pass through.
            if (c >= 'A' && c <= 'Z')
                c |= 0x20;

            hash = 31 * hash + c;
        }
    }

    return hash;
}
/// <summary>
/// Gets the string hash code using the Java String.hashCode algorithm
/// (31-based polynomial over UTF-16 code units). Null hashes to 0.
/// </summary>
public static int GetStringHashCode(string val)
{
    if (val == null)
        return 0;

    var hash = 0;

    unchecked
    {
        for (var i = 0; i < val.Length; i++)
            hash = hash * 31 + val[i];
    }

    return hash;
}
/// <summary>
/// Gets the cache identifier: the Java-style name hash, or 1 for the
/// default (null/empty-named) cache.
/// </summary>
public static int GetCacheId(string cacheName)
{
    if (string.IsNullOrEmpty(cacheName))
        return 1;

    return GetStringHashCode(cacheName);
}
/// <summary>
/// Cleans the name of the field: strips compiler-generated backing-field
/// decorations so the logical member name is used on the wire.
/// </summary>
public static string CleanFieldName(string fieldName)
{
    // "<Prop>k__BackingField" -> "Prop" (C# auto-property backing field).
    var isAutoProperty = fieldName.StartsWith("<", StringComparison.Ordinal)
        && fieldName.EndsWith(">k__BackingField", StringComparison.Ordinal);

    if (isAutoProperty)
    {
        var closeIdx = fieldName.IndexOf(">", StringComparison.Ordinal);

        return fieldName.Substring(1, closeIdx - 1);
    }

    // "name@" -> "name" (F# backing field).
    return fieldName.EndsWith("@", StringComparison.Ordinal)
        ? fieldName.Substring(0, fieldName.Length - 1)
        : fieldName;
}
/// <summary>
/// Convert field name through the optional user-supplied mapper, wrapping
/// mapper failures and rejecting null results.
/// </summary>
/// <param name="fieldName">Field name.</param>
/// <param name="converter">Converter (may be null, in which case the name passes through).</param>
private static string ConvertFieldName(string fieldName, IBinaryNameMapper converter)
{
    var original = fieldName;

    if (converter != null)
    {
        try
        {
            fieldName = converter.GetFieldName(fieldName);
        }
        catch (Exception e)
        {
            throw new BinaryObjectException("Failed to convert field name due to converter exception " +
                "[fieldName=" + fieldName + ", converter=" + converter + ']', e);
        }
    }

    if (fieldName == null)
        throw new BinaryObjectException("Name converter returned null name for field [fieldName=" +
            original + ", converter=" + converter + "]");

    return fieldName;
}
/// <summary>
/// Gets the SQL name of the type: the full (namespace-qualified, assembly-free)
/// name as produced by the full-name mapper.
/// </summary>
public static string GetSqlTypeName(Type type)
{
    // Ignite SQL engine always uses simple type name without namespace, parent class, etc -
    // see QueryUtils.typeName.
    // GridQueryProcessor.store uses this type name to ensure that we put correct data to the cache:
    // cacheObjects().typeId(QueryEntity.ValueTypeName) is matched against BinaryObject.typeId.
    // Additionally, this type name is passed back to UnmanagedCallbacks.BinaryTypeGet to register the
    // query types on cache start.
    return BinaryBasicNameMapper.FullNameInstance.GetTypeName(type.AssemblyQualifiedName);
}
/**
 * <summary>Resolve field ID: user id mapper first, then the lower-case Java
 * string hash of the (possibly name-mapped) field name. A zero id is invalid.</summary>
 * <param name="typeId">Type ID.</param>
 * <param name="fieldName">Field name.</param>
 * <param name="nameMapper">Name mapper.</param>
 * <param name="idMapper">ID mapper.</param>
 */
public static int FieldId(int typeId, string fieldName, IBinaryNameMapper nameMapper,
    IBinaryIdMapper idMapper)
{
    Debug.Assert(fieldName != null);

    fieldName = ConvertFieldName(fieldName, nameMapper);

    int id = 0;

    if (idMapper != null)
    {
        try
        {
            id = idMapper.GetFieldId(typeId, fieldName);
        }
        catch (Exception e)
        {
            throw new BinaryObjectException("Failed to resolve field ID due to ID mapper exception " +
                "[typeId=" + typeId + ", fieldName=" + fieldName + ", idMapper=" + idMapper + ']', e);
        }
    }

    // Mapper returned no id (or there is no mapper): fall back to the hash.
    if (id == 0)
        id = GetStringHashCodeLowerCase(fieldName);

    // Zero is reserved; a field hashing to zero needs an explicit mapper or rename.
    if (id == 0)
        throw new BinaryObjectException("Field ID is zero (please provide ID mapper or change field name) " +
            "[typeId=" + typeId + ", fieldName=" + fieldName + ", idMapper=" + idMapper + ']');

    return id;
}
/// <summary>
/// Writes invocation result: a success flag, an "is serialized" flag on
/// failure, then the result object - falling back to a plain string message
/// when the result itself cannot be serialized.
/// </summary>
/// <param name="writer">Writer.</param>
/// <param name="success">Success flag.</param>
/// <param name="res">Result object on success, or the error object on failure (may be null).</param>
[SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")]
public static void WriteInvocationResult(BinaryWriter writer, bool success, object res)
{
    var pos = writer.Stream.Position;

    try
    {
        if (success)
            writer.WriteBoolean(true);
        else
        {
            writer.WriteBoolean(false);  // Call failed.
            writer.WriteBoolean(true);   // Exception serialized successfully.
        }

        writer.Write(res);
    }
    catch (Exception marshErr)
    {
        // Failed to serialize result: rewind over the partial write and
        // fall back to a plain string message.
        writer.Stream.Seek(pos, SeekOrigin.Begin);

        writer.WriteBoolean(false);  // Call failed.
        writer.WriteBoolean(false);  // Cannot serialize result or exception.

        // BUG FIX: guard against a null result - the previous code dereferenced
        // res.GetType() unconditionally and would itself throw while reporting
        // the serialization failure.
        var resTypeName = res == null ? "null" : res.GetType().Name;

        if (success)
        {
            writer.WriteString("Call completed successfully, but result serialization failed [resultType=" +
                resTypeName + ", serializationErrMsg=" + marshErr.Message + ']');
        }
        else
        {
            writer.WriteString("Call completed with error, but error serialization failed [errType=" +
                resTypeName + ", serializationErrMsg=" + marshErr.Message + ']');
        }
    }
}
/// <summary>
/// Reads invocation result written by WriteInvocationResult.
/// </summary>
/// <param name="reader">Reader.</param>
/// <param name="err">Error: the deserialized error object, or an exception
/// rebuilt from the three fallback strings; null on success.</param>
/// <returns>Result on success, otherwise null.</returns>
public static object ReadInvocationResult(BinaryReader reader, out object err)
{
    err = null;

    if (reader.ReadBoolean()) // success indication
        return reader.ReadObject<object>();

    // Failure: either a fully serialized error object, or the string-triple
    // fallback (class name, message, stack trace) written on marshal failure.
    err = reader.ReadBoolean() // native error indication
        ? reader.ReadObject<object>()
        : ExceptionUtils.GetException(reader.Marshaller.Ignite, reader.ReadString(), reader.ReadString(),
            reader.ReadString());

    return null;
}
/// <summary>
/// Validate protocol version, throwing when it differs from the supported one.
/// </summary>
/// <param name="version">The version.</param>
public static void ValidateProtocolVersion(byte version)
{
    if (version == ProtoVer)
        return;

    throw new BinaryObjectException("Unsupported protocol version: " + version);
}
/**
 * <summary>Convert a UTC date to Java-style parts: milliseconds since the
 * Java epoch plus the sub-millisecond remainder in nanoseconds.</summary>
 * <param name="date">Date (must be DateTimeKind.Utc).</param>
 * <param name="high">High part (milliseconds).</param>
 * <param name="low">Low part (nanoseconds)</param>
 */
public static void ToJavaDate(DateTime date, out long high, out int low)
{
    if (date.Kind != DateTimeKind.Utc)
        throw new BinaryObjectException(
            "DateTime is not UTC. Only UTC DateTime can be used for interop with other platforms.");

    var ticks = date.Ticks - JavaDateTicks;

    high = ticks / TimeSpan.TicksPerMillisecond;

    // One tick is 100 ns, so the remainder scales by 100.
    low = 100 * (int) (ticks % TimeSpan.TicksPerMillisecond);
}
/// <summary>
/// Read additional configuration from the stream. The read order must match
/// the writer exactly: optional assembly list, then optional binary
/// configuration (full type configs, compact type names, global mappers).
/// </summary>
/// <param name="reader">Reader.</param>
/// <param name="assemblies">Assemblies (null when the flag byte says absent).</param>
/// <param name="cfg">Configuration (null when the flag byte says absent).</param>
public static void ReadConfiguration(BinaryReader reader, out ICollection<string> assemblies, out BinaryConfiguration cfg)
{
    // Optional assembly name list.
    if (reader.ReadBoolean())
    {
        int assemblyCnt = reader.ReadInt();

        assemblies = new List<string>(assemblyCnt);

        for (int i = 0; i < assemblyCnt; i++)
            assemblies.Add(reader.ReadObject<string>());
    }
    else
        assemblies = null;

    // Optional binary configuration.
    if (reader.ReadBoolean())
    {
        cfg = new BinaryConfiguration();

        // Read binary types in full form.
        if (reader.ReadBoolean())
        {
            int typesCnt = reader.ReadInt();

            cfg.TypeConfigurations = new List<BinaryTypeConfiguration>();

            for (int i = 0; i < typesCnt; i++)
            {
                // Field order here mirrors the writer's field order exactly.
                cfg.TypeConfigurations.Add(new BinaryTypeConfiguration
                {
                    TypeName = reader.ReadString(),
                    NameMapper = CreateInstance<IBinaryNameMapper>(reader),
                    IdMapper = CreateInstance<IBinaryIdMapper>(reader),
                    Serializer = CreateInstance<IBinarySerializer>(reader),
                    AffinityKeyFieldName = reader.ReadString(),
                    KeepDeserialized = reader.ReadObject<bool?>(),
                    IsEnum = reader.ReadBoolean()
                });
            }
        }

        // Read binary types in compact form.
        if (reader.ReadBoolean())
        {
            int typesCnt = reader.ReadInt();

            cfg.Types = new List<string>(typesCnt);

            for (int i = 0; i < typesCnt; i++)
                cfg.Types.Add(reader.ReadString());
        }

        // Read the rest: global mappers/serializer and the keep-deserialized flag.
        cfg.NameMapper = CreateInstance<IBinaryNameMapper>(reader);
        cfg.IdMapper = CreateInstance<IBinaryIdMapper>(reader);
        cfg.Serializer = CreateInstance<IBinarySerializer>(reader);
        cfg.KeepDeserialized = reader.ReadBoolean();
    }
    else
        cfg = null;
}
/// <summary>
/// Gets the unsupported type exception with a message explaining how to make
/// the type serializable.
/// </summary>
public static BinaryObjectException GetUnsupportedTypeException(Type type, object obj)
{
    var msg = string.Format(
        "Unsupported object type [type={0}, object={1}].\nSpecified type " +
        "can not be serialized by Ignite: it is neither [Serializable], " +
        "nor registered in IgniteConfiguration.BinaryConfiguration." +
        "\nSee https://apacheignite-net.readme.io/docs/serialization for more details.", type, obj);

    return new BinaryObjectException(msg);
}
/// <summary>
/// Reinterprets int bits as a float (no numeric conversion).
/// Kept as a pointer cast because BitConverter.Int32BitsToSingle is not
/// available on older target frameworks.
/// </summary>
public static unsafe float IntToFloatBits(int val)
{
    return *(float*)&val;
}
/// <summary>
/// Reinterprets long bits as a double (no numeric conversion).
/// </summary>
public static double LongToDoubleBits(long val)
{
    // BitConverter has provided this reinterpretation since .NET 1.x,
    // so the unsafe pointer cast is unnecessary here.
    return BitConverter.Int64BitsToDouble(val);
}
/// <summary>
/// Determines whether specified type is Ignite-compatible enum (value fits into 4 bytes).
/// Nullable enums are unwrapped first; long/ulong-backed enums are rejected.
/// </summary>
public static bool IsIgniteEnum(Type type)
{
    Debug.Assert(type != null);

    var actual = Nullable.GetUnderlyingType(type) ?? type;

    if (!actual.IsEnum)
        return false;

    var underlying = Enum.GetUnderlyingType(actual);

    return underlying == typeof(int)
        || underlying == typeof(uint)
        || underlying == typeof(short)
        || underlying == typeof(ushort)
        || underlying == typeof(byte)
        || underlying == typeof(sbyte);
}
/// <summary>
/// Converts milliseconds to a TimeSpan, clamping out-of-range values to
/// TimeSpan.MaxValue / TimeSpan.MinValue instead of overflowing.
/// </summary>
public static TimeSpan LongToTimeSpan(long ms)
{
    if (ms >= TimeSpan.MaxValue.TotalMilliseconds)
        return TimeSpan.MaxValue;

    return ms <= TimeSpan.MinValue.TotalMilliseconds
        ? TimeSpan.MinValue
        : TimeSpan.FromMilliseconds(ms);
}
/// <summary>
/// Gets the enum values as a name-to-int map, cached per enum type.
/// </summary>
public static IDictionary<string, int> GetEnumValues(Type enumType)
{
    Debug.Assert(enumType != null);
    Debug.Assert(enumType.IsEnum);

    Dictionary<string,int> res;

    // Fast path: map already built for this enum type.
    if (EnumValues.TryGetValue(enumType, out res))
    {
        return res;
    }

    var values = Enum.GetValues(enumType);

    res = new Dictionary<string, int>(values.Length);

    var underlyingType = Enum.GetUnderlyingType(enumType);

    foreach (var value in values)
    {
        var name = Enum.GetName(enumType, value);
        Debug.Assert(name != null);

        // Normalize every underlying integral type to a 32-bit representation.
        res[name] = GetEnumValueAsInt(underlyingType, value);
    }

    // Publish to the cache (last writer wins; the maps are equivalent).
    EnumValues.Set(enumType, res);

    return res;
}
/// <summary>
/// Gets the enum value as int, normalizing every supported underlying
/// integral type; uint is reinterpreted bitwise into the int range.
/// </summary>
private static int GetEnumValueAsInt(Type underlyingType, object value)
{
    if (underlyingType == typeof(int))
        return (int) value;

    if (underlyingType == typeof(uint))
        return unchecked((int) (uint) value);

    if (underlyingType == typeof(short))
        return (short) value;

    if (underlyingType == typeof(ushort))
        return (ushort) value;

    if (underlyingType == typeof(byte))
        return (byte) value;

    if (underlyingType == typeof(sbyte))
        return (sbyte) value;

    throw new BinaryObjectException("Unexpected enum underlying type: " + underlyingType);
}
/// <summary>
/// Creates and instance from the type name in reader; a null type name
/// yields the default value (no instance configured).
/// </summary>
private static T CreateInstance<T>(BinaryReader reader)
{
    var typeName = reader.ReadString();

    if (typeName == null)
        return default(T);

    return IgniteUtils.CreateInstance<T>(typeName);
}
/// <summary>
/// Reverses the byte order of an unsigned long (endianness swap).
/// </summary>
private static ulong ReverseByteOrder(ulong l)
{
    // Move each of the 8 bytes to its mirrored position.
    // (A bswap instruction would be fastest, but is not reachable from here.)
    ulong res = 0;

    for (var i = 0; i < 8; i++)
        res |= ((l >> (i * 8)) & 0xFF) << ((7 - i) * 8);

    return res;
}
/// <summary>
/// Struct with .Net-style Guid memory layout: the first 8 bytes cover the
/// a/b/c fields, the second 8 the d..k bytes. Used to reinterpret between
/// Guid and JavaGuid without per-byte copies.
/// </summary>
[StructLayout(LayoutKind.Sequential, Pack = 0)]
private struct GuidAccessor
{
    public readonly ulong ABC;
    public readonly ulong DEFGHIJK;

    /// <summary>
    /// Initializes a new instance of the <see cref="GuidAccessor"/> struct
    /// from a Java-layout guid, reversing the field shuffle done by
    /// <see cref="JavaGuid(Guid)"/>.
    /// </summary>
    /// <param name="val">The value.</param>
    public GuidAccessor(JavaGuid val)
    {
        var l = val.CBA;

        // Swap the c/b pair with a within the 64-bit word; the shift pattern
        // depends on how the bytes landed in the word, hence the endian split.
        if (BitConverter.IsLittleEndian)
            ABC = ((l >> 32) & 0x00000000FFFFFFFF) | ((l << 48) & 0xFFFF000000000000) |
                ((l << 16) & 0x0000FFFF00000000);
        else
            ABC = ((l << 32) & 0xFFFFFFFF00000000) | ((l >> 48) & 0x000000000000FFFF) |
                ((l >> 16) & 0x00000000FFFF0000);

        // This is valid in any endianness (symmetrical)
        DEFGHIJK = ReverseByteOrder(val.KJIHGFED);
    }
}
/// <summary>
/// Struct with Java-style Guid memory layout (UUID most/least significant
/// bits). The Bytes overlay aliases the two 64-bit words for raw stream I/O.
/// </summary>
[StructLayout(LayoutKind.Explicit)]
private struct JavaGuid
{
    [FieldOffset(0)] public readonly ulong CBA;
    [FieldOffset(8)] public readonly ulong KJIHGFED;

    // Unused directly, but documents/enables the 16-byte raw view of the struct.
    [SuppressMessage("Microsoft.Performance", "CA1823:AvoidUnusedPrivateFields")]
    [FieldOffset(0)]
    public unsafe fixed byte Bytes[16];

    /// <summary>
    /// Initializes a new instance of the <see cref="JavaGuid"/> struct.
    /// </summary>
    /// <param name="val">The value.</param>
    public unsafe JavaGuid(Guid val)
    {
        // .NET returns bytes in the following order: _a(4), _b(2), _c(2), _d, _e, _f, _g, _h, _i, _j, _k.
        // And _a, _b and _c are always in little endian format irrespective of system configuration.
        // To be compliant with Java we rearrange them as follows: _c, _b_, a_, _k, _j, _i, _h, _g, _f, _e, _d.
        var accessor = *((GuidAccessor*)&val);

        var l = accessor.ABC;

        // Inverse of the shuffle in GuidAccessor's constructor.
        if (BitConverter.IsLittleEndian)
            CBA = ((l << 32) & 0xFFFFFFFF00000000) | ((l >> 48) & 0x000000000000FFFF) |
                ((l >> 16) & 0x00000000FFFF0000);
        else
            CBA = ((l >> 32) & 0x00000000FFFFFFFF) | ((l << 48) & 0xFFFF000000000000) |
                ((l << 16) & 0x0000FFFF00000000);

        // This is valid in any endianness (symmetrical)
        KJIHGFED = ReverseByteOrder(accessor.DEFGHIJK);
    }
}
}
}
| apache-2.0 |
ParasDPain/ejml | main/denseC64/test/org/ejml/ops/TestCRandomMatrices.java | 4719 | /*
* Copyright (c) 2009-2015, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ejml.ops;
import org.ejml.data.CDenseMatrix64F;
import org.junit.Test;
import java.util.Random;
import static org.junit.Assert.*;
/**
* @author Peter Abeles
*/
public class TestCRandomMatrices {

    Random rand = new Random(234);

    @Test
    public void createRandom_min_max() {
        // Allocate-and-fill factory variant; reuses the same range checks
        // as the setRandom(min,max) test.
        CDenseMatrix64F m = CRandomMatrices.createRandom(30, 20, -1, 1, rand);

        checkRandomRange(m);
    }

    @Test
    public void setRandom() {
        CDenseMatrix64F m = new CDenseMatrix64F(5, 4);

        CRandomMatrices.setRandom(m, rand);

        checkRandom1(m);
    }

    /**
     * Asserts that the 5x4 matrix was filled with real/imaginary values drawn
     * from [0,1] and that at least something non-zero was actually written.
     */
    private void checkRandom1(CDenseMatrix64F m) {
        assertEquals(5, m.numRows);
        assertEquals(4, m.numCols);

        double sumReal = 0;
        double sumImg = 0;

        for (int row = 0; row < m.numRows; row++) {
            for (int col = 0; col < m.numCols; col++) {
                double re = m.getReal(row, col);
                double im = m.getImaginary(row, col);

                assertTrue(re >= 0 && re <= 1);
                assertTrue(im >= 0 && im <= 1);

                sumReal += re;
                sumImg += im;
            }
        }

        // Guards against the generator silently leaving the matrix at zero.
        assertTrue(sumReal > 0);
        assertTrue(sumImg > 0);
    }

    @Test
    public void setRandom_min_max() {
        CDenseMatrix64F m = new CDenseMatrix64F(30, 20);

        CRandomMatrices.setRandom(m, -1, 1, rand);

        checkRandomRange(m);
    }

    /**
     * Asserts that the 30x20 matrix was filled with values from [-1,1] and
     * that both signs occur in both the real and imaginary parts.
     */
    private void checkRandomRange(CDenseMatrix64F m) {
        assertEquals(30, m.numRows);
        assertEquals(20, m.numCols);

        int negReal = 0, posReal = 0;
        int negImg = 0, posImg = 0;

        for (int row = 0; row < m.numRows; row++) {
            for (int col = 0; col < m.numCols; col++) {
                double re = m.getReal(row, col);
                double im = m.getImaginary(row, col);

                if (Math.abs(re) > 1 || Math.abs(im) > 1)
                    fail("Out of range");

                if (re < 0) negReal++; else posReal++;
                if (im < 0) negImg++; else posImg++;
            }
        }

        assertTrue(negReal > 0);
        assertTrue(posReal > 0);
        assertTrue(negImg > 0);
        assertTrue(posImg > 0);
    }

    @Test
    public void createHermPosDef() {
        // Try a range of sizes; every generated matrix must be positive definite.
        for (int size = 1; size < 20; size++) {
            CDenseMatrix64F m = CRandomMatrices.createHermPosDef(size, rand);

            assertTrue(CMatrixFeatures.isPositiveDefinite(m));
        }
    }

    @Test
    public void createHermitian() {
        CDenseMatrix64F m = CRandomMatrices.createHermitian(10, -1, 1, rand);

        assertTrue(CMatrixFeatures.isHermitian(m, 1e-8));

        // Elements should land inside, and on both sides of, the requested range.
        double lo = CCommonOps.elementMinReal(m);
        double hi = CCommonOps.elementMaxReal(m);
        assertTrue(lo < 0 && lo >= -1);
        assertTrue(hi > 0 && hi <= 1);

        lo = CCommonOps.elementMinImaginary(m);
        hi = CCommonOps.elementMaxImaginary(m);
        assertTrue(lo < 0 && lo >= -1);
        assertTrue(hi > 0 && hi <= 1);
    }

//    @Test
//    public void createUpperTriangle() {
//        for( int hess = 0; hess < 3; hess++ ) {
//            CDenseMatrix64F A = CRandomMatrices.createUpperTriangle(10,hess,-1,1,rand);
//
//            assertTrue(MatrixFeatures.isUpperTriangle(A,hess,1e-8));
//
//            // quick sanity check to make sure it could be proper
//            assertTrue(A.get(hess,0) != 0 );
//
//            // see if it has the expected range of elements
//            double min = CommonOps.elementMin(A);
//            double max = CommonOps.elementMax(A);
//
//            assertTrue(min < 0 && min >= -1);
//            assertTrue(max > 0 && max <= 1);
//        }
//    }
}
| apache-2.0 |
radarsat1/siconos | externals/numeric_bindings/libs/numeric/bindings/tools/templates/auxiliary/larfx.hpp | 371 | $TEMPLATE[larfx.all.min_size_work.args]
SIDE,M,N
$TEMPLATE[larfx.includes]
#include <boost/numeric/bindings/detail/if_left.hpp>
$TEMPLATE[larfx.all.min_size_work]
$INTEGER_TYPE order = bindings::detail::if_left( side, n, m );
if ( order < 11)
return 1;
else
return std::max< $INTEGER_TYPE >( 1, order );
$TEMPLATE[larfx.all.LDC.assert_ge]
max(1,M)
$TEMPLATE[end]
| apache-2.0 |
caskdata/cdap | cdap-explore/src/main/java/co/cask/cdap/hive/stream/StreamStorageHandler.java | 3120 | /*
* Copyright © 2014 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.cdap.hive.stream;
import co.cask.cdap.common.conf.Constants;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.mapred.InputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
/**
* HiveStorageHandler to access Streams.
*
* <p>
* Referred to by string rather than {@code Class.getName()} in {@code ExploreServiceUtils.traceExploreDependencies()}
* because this class extends a Hive class, which isn't present in the {@code ExploreServiceUtils} class loader.
* </p>
*/
public class StreamStorageHandler extends DefaultStorageHandler {
  private static final Logger LOG = LoggerFactory.getLogger(StreamStorageHandler.class);

  @Override
  public Class<? extends InputFormat> getInputFormatClass() {
    return HiveStreamInputFormat.class;
  }

  @Override
  public Class<? extends SerDe> getSerDeClass() {
    return StreamSerDe.class;
  }

  @Override
  public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
    // Input configuration is identical to the table-level configuration.
    configureTableJobProperties(tableDesc, jobProperties);
  }

  @Override
  public void configureTableJobProperties(TableDesc tableDesc,
                                          Map<String, String> jobProperties) {
    // NOTE: the jobProperties map will be put in the jobConf passed to the StreamInputFormat.
    // Hive ensures that the properties of the right table will be passed at the right time to those classes.
    String streamName = copyTableProperty(tableDesc, jobProperties, Constants.Explore.STREAM_NAME);
    copyTableProperty(tableDesc, jobProperties, Constants.Explore.STREAM_NAMESPACE);
    LOG.debug("Got stream {} for external table {}", streamName, tableDesc.getTableName());
  }

  /**
   * Copies a single property from the table descriptor into the job properties.
   *
   * @return the copied property value
   */
  private static String copyTableProperty(TableDesc tableDesc, Map<String, String> jobProperties, String key) {
    String value = tableDesc.getProperties().getProperty(key);
    jobProperties.put(key, value);
    return value;
  }

  @Override
  public void configureOutputJobProperties(TableDesc tableDesc,
                                           Map<String, String> jobProperties) {
    // throw the exception here instead of in getOutputFormatClass because that method is called on table creation.
    throw new UnsupportedOperationException("Writing to streams through Hive is not supported");
  }

  @Override
  public HiveMetaHook getMetaHook() {
    return null;
  }
}
| apache-2.0 |
dinfuehr/rust | src/test/run-pass/tag-disr-val-shape.rs | 868 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// `Show` was the pre-1.0 name of today's `Debug` trait; deriving it makes
// the variants printable with `{:?}`.
#[derive(Show)]
enum color {
    // Each variant carries an explicit discriminant (an RGB value); the test
    // verifies that custom discriminants do not leak into the derived
    // formatting output.
    red = 0xff0000,
    green = 0x00ff00,
    blue = 0x0000ff,
    black = 0x000000,
    white = 0xFFFFFF,
}
pub fn main() {
    // Formatting a variant with `{:?}` must yield just the variant's name,
    // regardless of its explicit discriminant value.
    let actual = format!("{:?}", color::red);
    println!("{}", actual);
    assert_eq!(actual, "red".to_string());

    assert_eq!(format!("{:?}", color::green), "green".to_string());
    assert_eq!(format!("{:?}", color::white), "white".to_string());
}
| apache-2.0 |
msebire/intellij-community | plugins/properties/properties-psi-impl/src/com/intellij/psi/impl/cache/impl/idCache/PropertiesFilterLexer.java | 1901 | /*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi.impl.cache.impl.idCache;
import com.intellij.lang.properties.parsing.PropertiesTokenTypes;
import com.intellij.lexer.Lexer;
import com.intellij.psi.impl.cache.impl.BaseFilterLexer;
import com.intellij.psi.impl.cache.impl.OccurrenceConsumer;
import com.intellij.psi.search.UsageSearchContext;
import com.intellij.psi.tree.IElementType;
/**
* @author ven
*/
public class PropertiesFilterLexer extends BaseFilterLexer {
  public PropertiesFilterLexer(final Lexer originalLexer, final OccurrenceConsumer table) {
    super(originalLexer, table);
  }

  @Override
  public void advance() {
    // Mask applied to everything that is not a comment: words are indexed as
    // code, foreign-language identifiers and plain text.
    final int codeMask =
      UsageSearchContext.IN_CODE | UsageSearchContext.IN_FOREIGN_LANGUAGES | UsageSearchContext.IN_PLAIN_TEXT;

    final IElementType tokenType = getDelegate().getTokenType();
    if (tokenType == PropertiesTokenTypes.KEY_CHARACTERS) {
      scanWordsInToken(codeMask, false, false);
    }
    else if (PropertiesTokenTypes.COMMENTS.contains(tokenType)) {
      // Comment tokens are additionally scanned for TODO items.
      scanWordsInToken(UsageSearchContext.IN_COMMENTS | UsageSearchContext.IN_PLAIN_TEXT, false, false);
      advanceTodoItemCountsInToken();
    }
    else {
      scanWordsInToken(codeMask, false, false);
    }

    getDelegate().advance();
  }
}
| apache-2.0 |
zdary/intellij-community | jps/jps-builders/src/org/jetbrains/jps/builders/java/dependencyView/ClassFileRepr.java | 2683 | // Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package org.jetbrains.jps.builders.java.dependencyView;
import com.intellij.util.io.DataInputOutputUtil;
import gnu.trove.THashSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.jps.builders.storage.BuildDataCorruptedException;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Set;
/**
* @author Eugene Zhuravlev
*/
public abstract class ClassFileRepr extends Proto {
  protected final DependencyContext myContext;
  /** Interned id (via {@link DependencyContext}) of the file this representation was built from. */
  private final int myFileName;
  /** Usages of other program elements made by this class/file. */
  private final Set<UsageRepr.Usage> myUsages;

  public ClassFileRepr(
    int access,
    int signature,
    int name,
    @NotNull Set<TypeRepr.ClassType> annotations,
    final int fileName, final DependencyContext context, final Set<UsageRepr.Usage> usages) {
    super(access, signature, name, annotations);

    myFileName = fileName;
    this.myContext = context;
    // NOTE: the usage set is stored as-is, not copied; callers share ownership.
    this.myUsages = usages;
  }

  // Deserializing constructor. Fields are read in exactly the order that
  // save(DataOutput) writes them: the file name id first, then the usage set.
  public ClassFileRepr(DependencyContext context, DataInput in) {
    super(context, in);
    myContext = context;
    try {
      myFileName = DataInputOutputUtil.readINT(in);
      myUsages = RW.read(UsageRepr.externalizer(context), new THashSet<>(), in);
    }
    catch (IOException e) {
      // IO failures while reading the build caches are surfaced as data
      // corruption so the infrastructure can discard and rebuild them.
      throw new BuildDataCorruptedException(e);
    }
  }

  public int getFileName() {
    return myFileName;
  }

  public Set<UsageRepr.Usage> getUsages() {
    return myUsages;
  }

  /** @return true if the usage was not already registered */
  public boolean addUsage(final UsageRepr.Usage usage) {
    return myUsages.add(usage);
  }

  /** Subclasses collect into {@code s} the usages implied by this element. */
  protected abstract void updateClassUsages(DependencyContext context, Set<? super UsageRepr.Usage> s);

  // Debug dump; resolves the interned file name id back to its string value.
  @Override
  public void toStream(DependencyContext context, PrintStream stream) {
    super.toStream(context, stream);
    stream.print(" Filename : ");
    stream.println(context.getValue(myFileName));
  }

  // Serialization counterpart of the (DependencyContext, DataInput)
  // constructor; the write order here must stay in sync with the read order.
  @Override
  public void save(final DataOutput out) {
    try {
      super.save(out);
      DataInputOutputUtil.writeINT(out, myFileName);
      RW.save(myUsages, UsageRepr.externalizer(myContext), out);
    }
    catch (IOException e) {
      throw new BuildDataCorruptedException(e);
    }
  }

  // Identity is the (file, name) pair; usages and other state are excluded,
  // consistent with equals() below.
  @Override
  public int hashCode() {
    return 31 * myFileName + name;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;

    ClassFileRepr classRepr = (ClassFileRepr)o;

    // Only the file id and the interned element name participate in equality.
    if (myFileName != classRepr.myFileName) return false;
    if (name != classRepr.name) return false;

    return true;
  }
}
| apache-2.0 |
consulo/consulo-android | tools-base/build-system/integration-test/test-projects/multiDex/src/main/java/com/android/tests/basic/manymethods/Big025.java | 53466 | /*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.android.tests.basic.manymethods;
public class Big025 {
public int get0() {
return 0;
}
public int get1() {
return 1;
}
public int get2() {
return 2;
}
public int get3() {
return 3;
}
public int get4() {
return 4;
}
public int get5() {
return 5;
}
public int get6() {
return 6;
}
public int get7() {
return 7;
}
public int get8() {
return 8;
}
public int get9() {
return 9;
}
public int get10() {
return 10;
}
public int get11() {
return 11;
}
public int get12() {
return 12;
}
public int get13() {
return 13;
}
public int get14() {
return 14;
}
public int get15() {
return 15;
}
public int get16() {
return 16;
}
public int get17() {
return 17;
}
public int get18() {
return 18;
}
public int get19() {
return 19;
}
public int get20() {
return 20;
}
public int get21() {
return 21;
}
public int get22() {
return 22;
}
public int get23() {
return 23;
}
public int get24() {
return 24;
}
public int get25() {
return 25;
}
public int get26() {
return 26;
}
public int get27() {
return 27;
}
public int get28() {
return 28;
}
public int get29() {
return 29;
}
public int get30() {
return 30;
}
public int get31() {
return 31;
}
public int get32() {
return 32;
}
public int get33() {
return 33;
}
public int get34() {
return 34;
}
public int get35() {
return 35;
}
public int get36() {
return 36;
}
public int get37() {
return 37;
}
public int get38() {
return 38;
}
public int get39() {
return 39;
}
public int get40() {
return 40;
}
public int get41() {
return 41;
}
public int get42() {
return 42;
}
public int get43() {
return 43;
}
public int get44() {
return 44;
}
public int get45() {
return 45;
}
public int get46() {
return 46;
}
public int get47() {
return 47;
}
public int get48() {
return 48;
}
public int get49() {
return 49;
}
public int get50() {
return 50;
}
public int get51() {
return 51;
}
public int get52() {
return 52;
}
public int get53() {
return 53;
}
public int get54() {
return 54;
}
public int get55() {
return 55;
}
public int get56() {
return 56;
}
public int get57() {
return 57;
}
public int get58() {
return 58;
}
public int get59() {
return 59;
}
public int get60() {
return 60;
}
public int get61() {
return 61;
}
public int get62() {
return 62;
}
public int get63() {
return 63;
}
public int get64() {
return 64;
}
public int get65() {
return 65;
}
public int get66() {
return 66;
}
public int get67() {
return 67;
}
public int get68() {
return 68;
}
public int get69() {
return 69;
}
public int get70() {
return 70;
}
public int get71() {
return 71;
}
public int get72() {
return 72;
}
public int get73() {
return 73;
}
public int get74() {
return 74;
}
public int get75() {
return 75;
}
public int get76() {
return 76;
}
public int get77() {
return 77;
}
public int get78() {
return 78;
}
public int get79() {
return 79;
}
public int get80() {
return 80;
}
public int get81() {
return 81;
}
public int get82() {
return 82;
}
public int get83() {
return 83;
}
public int get84() {
return 84;
}
public int get85() {
return 85;
}
public int get86() {
return 86;
}
public int get87() {
return 87;
}
public int get88() {
return 88;
}
public int get89() {
return 89;
}
public int get90() {
return 90;
}
public int get91() {
return 91;
}
public int get92() {
return 92;
}
public int get93() {
return 93;
}
public int get94() {
return 94;
}
public int get95() {
return 95;
}
public int get96() {
return 96;
}
public int get97() {
return 97;
}
public int get98() {
return 98;
}
public int get99() {
return 99;
}
public int get100() {
return 100;
}
public int get101() {
return 101;
}
public int get102() {
return 102;
}
public int get103() {
return 103;
}
public int get104() {
return 104;
}
public int get105() {
return 105;
}
public int get106() {
return 106;
}
public int get107() {
return 107;
}
public int get108() {
return 108;
}
public int get109() {
return 109;
}
public int get110() {
return 110;
}
public int get111() {
return 111;
}
public int get112() {
return 112;
}
public int get113() {
return 113;
}
public int get114() {
return 114;
}
public int get115() {
return 115;
}
public int get116() {
return 116;
}
public int get117() {
return 117;
}
public int get118() {
return 118;
}
public int get119() {
return 119;
}
public int get120() {
return 120;
}
public int get121() {
return 121;
}
public int get122() {
return 122;
}
public int get123() {
return 123;
}
public int get124() {
return 124;
}
public int get125() {
return 125;
}
public int get126() {
return 126;
}
public int get127() {
return 127;
}
public int get128() {
return 128;
}
public int get129() {
return 129;
}
public int get130() {
return 130;
}
public int get131() {
return 131;
}
public int get132() {
return 132;
}
public int get133() {
return 133;
}
public int get134() {
return 134;
}
public int get135() {
return 135;
}
public int get136() {
return 136;
}
public int get137() {
return 137;
}
public int get138() {
return 138;
}
public int get139() {
return 139;
}
public int get140() {
return 140;
}
public int get141() {
return 141;
}
public int get142() {
return 142;
}
public int get143() {
return 143;
}
public int get144() {
return 144;
}
public int get145() {
return 145;
}
public int get146() {
return 146;
}
public int get147() {
return 147;
}
public int get148() {
return 148;
}
public int get149() {
return 149;
}
public int get150() {
return 150;
}
public int get151() {
return 151;
}
public int get152() {
return 152;
}
public int get153() {
return 153;
}
public int get154() {
return 154;
}
public int get155() {
return 155;
}
public int get156() {
return 156;
}
public int get157() {
return 157;
}
public int get158() {
return 158;
}
public int get159() {
return 159;
}
public int get160() {
return 160;
}
public int get161() {
return 161;
}
public int get162() {
return 162;
}
public int get163() {
return 163;
}
public int get164() {
return 164;
}
public int get165() {
return 165;
}
public int get166() {
return 166;
}
public int get167() {
return 167;
}
public int get168() {
return 168;
}
public int get169() {
return 169;
}
public int get170() {
return 170;
}
public int get171() {
return 171;
}
public int get172() {
return 172;
}
public int get173() {
return 173;
}
public int get174() {
return 174;
}
public int get175() {
return 175;
}
public int get176() {
return 176;
}
public int get177() {
return 177;
}
public int get178() {
return 178;
}
public int get179() {
return 179;
}
public int get180() {
return 180;
}
public int get181() {
return 181;
}
public int get182() {
return 182;
}
public int get183() {
return 183;
}
public int get184() {
return 184;
}
public int get185() {
return 185;
}
public int get186() {
return 186;
}
public int get187() {
return 187;
}
public int get188() {
return 188;
}
public int get189() {
return 189;
}
public int get190() {
return 190;
}
public int get191() {
return 191;
}
public int get192() {
return 192;
}
public int get193() {
return 193;
}
public int get194() {
return 194;
}
public int get195() {
return 195;
}
public int get196() {
return 196;
}
public int get197() {
return 197;
}
public int get198() {
return 198;
}
public int get199() {
return 199;
}
public int get200() {
return 200;
}
public int get201() {
return 201;
}
public int get202() {
return 202;
}
public int get203() {
return 203;
}
public int get204() {
return 204;
}
public int get205() {
return 205;
}
public int get206() {
return 206;
}
public int get207() {
return 207;
}
public int get208() {
return 208;
}
public int get209() {
return 209;
}
public int get210() {
return 210;
}
public int get211() {
return 211;
}
public int get212() {
return 212;
}
public int get213() {
return 213;
}
public int get214() {
return 214;
}
public int get215() {
return 215;
}
public int get216() {
return 216;
}
public int get217() {
return 217;
}
public int get218() {
return 218;
}
public int get219() {
return 219;
}
public int get220() {
return 220;
}
public int get221() {
return 221;
}
public int get222() {
return 222;
}
public int get223() {
return 223;
}
public int get224() {
return 224;
}
public int get225() {
return 225;
}
public int get226() {
return 226;
}
public int get227() {
return 227;
}
public int get228() {
return 228;
}
public int get229() {
return 229;
}
public int get230() {
return 230;
}
public int get231() {
return 231;
}
public int get232() {
return 232;
}
public int get233() {
return 233;
}
public int get234() {
return 234;
}
public int get235() {
return 235;
}
public int get236() {
return 236;
}
public int get237() {
return 237;
}
public int get238() {
return 238;
}
public int get239() {
return 239;
}
public int get240() {
return 240;
}
public int get241() {
return 241;
}
public int get242() {
return 242;
}
public int get243() {
return 243;
}
public int get244() {
return 244;
}
public int get245() {
return 245;
}
public int get246() {
return 246;
}
public int get247() {
return 247;
}
public int get248() {
return 248;
}
public int get249() {
return 249;
}
public int get250() {
return 250;
}
public int get251() {
return 251;
}
public int get252() {
return 252;
}
public int get253() {
return 253;
}
public int get254() {
return 254;
}
public int get255() {
return 255;
}
public int get256() {
return 256;
}
public int get257() {
return 257;
}
public int get258() {
return 258;
}
public int get259() {
return 259;
}
public int get260() {
return 260;
}
public int get261() {
return 261;
}
public int get262() {
return 262;
}
public int get263() {
return 263;
}
public int get264() {
return 264;
}
public int get265() {
return 265;
}
public int get266() {
return 266;
}
public int get267() {
return 267;
}
public int get268() {
return 268;
}
public int get269() {
return 269;
}
public int get270() {
return 270;
}
public int get271() {
return 271;
}
public int get272() {
return 272;
}
public int get273() {
return 273;
}
public int get274() {
return 274;
}
public int get275() {
return 275;
}
public int get276() {
return 276;
}
public int get277() {
return 277;
}
public int get278() {
return 278;
}
public int get279() {
return 279;
}
public int get280() {
return 280;
}
public int get281() {
return 281;
}
public int get282() {
return 282;
}
public int get283() {
return 283;
}
public int get284() {
return 284;
}
public int get285() {
return 285;
}
public int get286() {
return 286;
}
public int get287() {
return 287;
}
public int get288() {
return 288;
}
public int get289() {
return 289;
}
public int get290() {
return 290;
}
public int get291() {
return 291;
}
public int get292() {
return 292;
}
public int get293() {
return 293;
}
public int get294() {
return 294;
}
public int get295() {
return 295;
}
public int get296() {
return 296;
}
public int get297() {
return 297;
}
public int get298() {
return 298;
}
public int get299() {
return 299;
}
public int get300() {
return 300;
}
public int get301() {
return 301;
}
public int get302() {
return 302;
}
public int get303() {
return 303;
}
public int get304() {
return 304;
}
public int get305() {
return 305;
}
public int get306() {
return 306;
}
public int get307() {
return 307;
}
public int get308() {
return 308;
}
public int get309() {
return 309;
}
public int get310() {
return 310;
}
public int get311() {
return 311;
}
public int get312() {
return 312;
}
public int get313() {
return 313;
}
public int get314() {
return 314;
}
public int get315() {
return 315;
}
public int get316() {
return 316;
}
public int get317() {
return 317;
}
public int get318() {
return 318;
}
public int get319() {
return 319;
}
public int get320() {
return 320;
}
public int get321() {
return 321;
}
public int get322() {
return 322;
}
public int get323() {
return 323;
}
public int get324() {
return 324;
}
public int get325() {
return 325;
}
public int get326() {
return 326;
}
public int get327() {
return 327;
}
public int get328() {
return 328;
}
public int get329() {
return 329;
}
public int get330() {
return 330;
}
public int get331() {
return 331;
}
public int get332() {
return 332;
}
public int get333() {
return 333;
}
public int get334() {
return 334;
}
public int get335() {
return 335;
}
public int get336() {
return 336;
}
public int get337() {
return 337;
}
public int get338() {
return 338;
}
public int get339() {
return 339;
}
public int get340() {
return 340;
}
public int get341() {
return 341;
}
public int get342() {
return 342;
}
public int get343() {
return 343;
}
public int get344() {
return 344;
}
public int get345() {
return 345;
}
public int get346() {
return 346;
}
public int get347() {
return 347;
}
public int get348() {
return 348;
}
public int get349() {
return 349;
}
public int get350() {
return 350;
}
public int get351() {
return 351;
}
public int get352() {
return 352;
}
public int get353() {
return 353;
}
public int get354() {
return 354;
}
public int get355() {
return 355;
}
public int get356() {
return 356;
}
public int get357() {
return 357;
}
public int get358() {
return 358;
}
public int get359() {
return 359;
}
public int get360() {
return 360;
}
public int get361() {
return 361;
}
public int get362() {
return 362;
}
public int get363() {
return 363;
}
public int get364() {
return 364;
}
public int get365() {
return 365;
}
public int get366() {
return 366;
}
public int get367() {
return 367;
}
public int get368() {
return 368;
}
public int get369() {
return 369;
}
public int get370() {
return 370;
}
public int get371() {
return 371;
}
public int get372() {
return 372;
}
public int get373() {
return 373;
}
public int get374() {
return 374;
}
public int get375() {
return 375;
}
public int get376() {
return 376;
}
public int get377() {
return 377;
}
public int get378() {
return 378;
}
public int get379() {
return 379;
}
public int get380() {
return 380;
}
public int get381() {
return 381;
}
public int get382() {
return 382;
}
public int get383() {
return 383;
}
public int get384() {
return 384;
}
public int get385() {
return 385;
}
public int get386() {
return 386;
}
public int get387() {
return 387;
}
public int get388() {
return 388;
}
public int get389() {
return 389;
}
public int get390() {
return 390;
}
public int get391() {
return 391;
}
public int get392() {
return 392;
}
public int get393() {
return 393;
}
public int get394() {
return 394;
}
public int get395() {
return 395;
}
public int get396() {
return 396;
}
public int get397() {
return 397;
}
public int get398() {
return 398;
}
public int get399() {
return 399;
}
public int get400() {
return 400;
}
public int get401() {
return 401;
}
public int get402() {
return 402;
}
public int get403() {
return 403;
}
public int get404() {
return 404;
}
public int get405() {
return 405;
}
public int get406() {
return 406;
}
public int get407() {
return 407;
}
public int get408() {
return 408;
}
public int get409() {
return 409;
}
public int get410() {
return 410;
}
public int get411() {
return 411;
}
public int get412() {
return 412;
}
public int get413() {
return 413;
}
public int get414() {
return 414;
}
public int get415() {
return 415;
}
public int get416() {
return 416;
}
public int get417() {
return 417;
}
public int get418() {
return 418;
}
public int get419() {
return 419;
}
public int get420() {
return 420;
}
public int get421() {
return 421;
}
public int get422() {
return 422;
}
public int get423() {
return 423;
}
public int get424() {
return 424;
}
public int get425() {
return 425;
}
public int get426() {
return 426;
}
public int get427() {
return 427;
}
public int get428() {
return 428;
}
public int get429() {
return 429;
}
public int get430() {
return 430;
}
public int get431() {
return 431;
}
public int get432() {
return 432;
}
public int get433() {
return 433;
}
public int get434() {
return 434;
}
public int get435() {
return 435;
}
public int get436() {
return 436;
}
public int get437() {
return 437;
}
public int get438() {
return 438;
}
public int get439() {
return 439;
}
public int get440() {
return 440;
}
public int get441() {
return 441;
}
public int get442() {
return 442;
}
public int get443() {
return 443;
}
public int get444() {
return 444;
}
public int get445() {
return 445;
}
public int get446() {
return 446;
}
public int get447() {
return 447;
}
public int get448() {
return 448;
}
public int get449() {
return 449;
}
public int get450() {
return 450;
}
public int get451() {
return 451;
}
public int get452() {
return 452;
}
public int get453() {
return 453;
}
public int get454() {
return 454;
}
public int get455() {
return 455;
}
public int get456() {
return 456;
}
public int get457() {
return 457;
}
public int get458() {
return 458;
}
public int get459() {
return 459;
}
public int get460() {
return 460;
}
public int get461() {
return 461;
}
public int get462() {
return 462;
}
public int get463() {
return 463;
}
public int get464() {
return 464;
}
public int get465() {
return 465;
}
public int get466() {
return 466;
}
public int get467() {
return 467;
}
public int get468() {
return 468;
}
public int get469() {
return 469;
}
public int get470() {
return 470;
}
public int get471() {
return 471;
}
public int get472() {
return 472;
}
public int get473() {
return 473;
}
public int get474() {
return 474;
}
public int get475() {
return 475;
}
public int get476() {
return 476;
}
public int get477() {
return 477;
}
public int get478() {
return 478;
}
public int get479() {
return 479;
}
public int get480() {
return 480;
}
public int get481() {
return 481;
}
public int get482() {
return 482;
}
public int get483() {
return 483;
}
public int get484() {
return 484;
}
public int get485() {
return 485;
}
public int get486() {
return 486;
}
public int get487() {
return 487;
}
public int get488() {
return 488;
}
public int get489() {
return 489;
}
public int get490() {
return 490;
}
public int get491() {
return 491;
}
public int get492() {
return 492;
}
public int get493() {
return 493;
}
public int get494() {
return 494;
}
public int get495() {
return 495;
}
public int get496() {
return 496;
}
public int get497() {
return 497;
}
public int get498() {
return 498;
}
public int get499() {
return 499;
}
public int get500() {
return 500;
}
public int get501() {
return 501;
}
public int get502() {
return 502;
}
public int get503() {
return 503;
}
public int get504() {
return 504;
}
public int get505() {
return 505;
}
public int get506() {
return 506;
}
public int get507() {
return 507;
}
public int get508() {
return 508;
}
public int get509() {
return 509;
}
public int get510() {
return 510;
}
public int get511() {
return 511;
}
public int get512() {
return 512;
}
public int get513() {
return 513;
}
public int get514() {
return 514;
}
public int get515() {
return 515;
}
public int get516() {
return 516;
}
public int get517() {
return 517;
}
public int get518() {
return 518;
}
public int get519() {
return 519;
}
public int get520() {
return 520;
}
public int get521() {
return 521;
}
public int get522() {
return 522;
}
public int get523() {
return 523;
}
public int get524() {
return 524;
}
public int get525() {
return 525;
}
public int get526() {
return 526;
}
public int get527() {
return 527;
}
public int get528() {
return 528;
}
public int get529() {
return 529;
}
public int get530() {
return 530;
}
public int get531() {
return 531;
}
public int get532() {
return 532;
}
public int get533() {
return 533;
}
public int get534() {
return 534;
}
public int get535() {
return 535;
}
public int get536() {
return 536;
}
public int get537() {
return 537;
}
public int get538() {
return 538;
}
public int get539() {
return 539;
}
public int get540() {
return 540;
}
public int get541() {
return 541;
}
public int get542() {
return 542;
}
public int get543() {
return 543;
}
public int get544() {
return 544;
}
public int get545() {
return 545;
}
public int get546() {
return 546;
}
public int get547() {
return 547;
}
public int get548() {
return 548;
}
public int get549() {
return 549;
}
public int get550() {
return 550;
}
public int get551() {
return 551;
}
public int get552() {
return 552;
}
public int get553() {
return 553;
}
public int get554() {
return 554;
}
public int get555() {
return 555;
}
public int get556() {
return 556;
}
public int get557() {
return 557;
}
public int get558() {
return 558;
}
public int get559() {
return 559;
}
public int get560() {
return 560;
}
public int get561() {
return 561;
}
public int get562() {
return 562;
}
public int get563() {
return 563;
}
public int get564() {
return 564;
}
public int get565() {
return 565;
}
public int get566() {
return 566;
}
public int get567() {
return 567;
}
public int get568() {
return 568;
}
public int get569() {
return 569;
}
public int get570() {
return 570;
}
public int get571() {
return 571;
}
public int get572() {
return 572;
}
public int get573() {
return 573;
}
public int get574() {
return 574;
}
public int get575() {
return 575;
}
public int get576() {
return 576;
}
public int get577() {
return 577;
}
public int get578() {
return 578;
}
public int get579() {
return 579;
}
public int get580() {
return 580;
}
public int get581() {
return 581;
}
public int get582() {
return 582;
}
public int get583() {
return 583;
}
public int get584() {
return 584;
}
public int get585() {
return 585;
}
public int get586() {
return 586;
}
public int get587() {
return 587;
}
public int get588() {
return 588;
}
public int get589() {
return 589;
}
public int get590() {
return 590;
}
public int get591() {
return 591;
}
public int get592() {
return 592;
}
public int get593() {
return 593;
}
public int get594() {
return 594;
}
public int get595() {
return 595;
}
public int get596() {
return 596;
}
public int get597() {
return 597;
}
public int get598() {
return 598;
}
public int get599() {
return 599;
}
public int get600() {
return 600;
}
public int get601() {
return 601;
}
public int get602() {
return 602;
}
public int get603() {
return 603;
}
public int get604() {
return 604;
}
public int get605() {
return 605;
}
public int get606() {
return 606;
}
public int get607() {
return 607;
}
public int get608() {
return 608;
}
public int get609() {
return 609;
}
public int get610() {
return 610;
}
public int get611() {
return 611;
}
public int get612() {
return 612;
}
public int get613() {
return 613;
}
public int get614() {
return 614;
}
public int get615() {
return 615;
}
public int get616() {
return 616;
}
public int get617() {
return 617;
}
public int get618() {
return 618;
}
public int get619() {
return 619;
}
public int get620() {
return 620;
}
public int get621() {
return 621;
}
public int get622() {
return 622;
}
public int get623() {
return 623;
}
public int get624() {
return 624;
}
public int get625() {
return 625;
}
public int get626() {
return 626;
}
public int get627() {
return 627;
}
public int get628() {
return 628;
}
public int get629() {
return 629;
}
public int get630() {
return 630;
}
public int get631() {
return 631;
}
public int get632() {
return 632;
}
public int get633() {
return 633;
}
public int get634() {
return 634;
}
public int get635() {
return 635;
}
public int get636() {
return 636;
}
public int get637() {
return 637;
}
public int get638() {
return 638;
}
public int get639() {
return 639;
}
public int get640() {
return 640;
}
public int get641() {
return 641;
}
public int get642() {
return 642;
}
public int get643() {
return 643;
}
public int get644() {
return 644;
}
public int get645() {
return 645;
}
public int get646() {
return 646;
}
public int get647() {
return 647;
}
public int get648() {
return 648;
}
public int get649() {
return 649;
}
public int get650() {
return 650;
}
public int get651() {
return 651;
}
public int get652() {
return 652;
}
public int get653() {
return 653;
}
public int get654() {
return 654;
}
public int get655() {
return 655;
}
public int get656() {
return 656;
}
public int get657() {
return 657;
}
public int get658() {
return 658;
}
public int get659() {
return 659;
}
public int get660() {
return 660;
}
public int get661() {
return 661;
}
public int get662() {
return 662;
}
public int get663() {
return 663;
}
public int get664() {
return 664;
}
public int get665() {
return 665;
}
public int get666() {
return 666;
}
public int get667() {
return 667;
}
public int get668() {
return 668;
}
public int get669() {
return 669;
}
public int get670() {
return 670;
}
public int get671() {
return 671;
}
public int get672() {
return 672;
}
public int get673() {
return 673;
}
public int get674() {
return 674;
}
public int get675() {
return 675;
}
public int get676() {
return 676;
}
public int get677() {
return 677;
}
public int get678() {
return 678;
}
public int get679() {
return 679;
}
public int get680() {
return 680;
}
public int get681() {
return 681;
}
public int get682() {
return 682;
}
public int get683() {
return 683;
}
public int get684() {
return 684;
}
public int get685() {
return 685;
}
public int get686() {
return 686;
}
public int get687() {
return 687;
}
public int get688() {
return 688;
}
public int get689() {
return 689;
}
public int get690() {
return 690;
}
public int get691() {
return 691;
}
public int get692() {
return 692;
}
public int get693() {
return 693;
}
public int get694() {
return 694;
}
public int get695() {
return 695;
}
public int get696() {
return 696;
}
public int get697() {
return 697;
}
public int get698() {
return 698;
}
public int get699() {
return 699;
}
public int get700() {
return 700;
}
public int get701() {
return 701;
}
public int get702() {
return 702;
}
public int get703() {
return 703;
}
public int get704() {
return 704;
}
public int get705() {
return 705;
}
public int get706() {
return 706;
}
public int get707() {
return 707;
}
public int get708() {
return 708;
}
public int get709() {
return 709;
}
public int get710() {
return 710;
}
public int get711() {
return 711;
}
public int get712() {
return 712;
}
public int get713() {
return 713;
}
public int get714() {
return 714;
}
public int get715() {
return 715;
}
public int get716() {
return 716;
}
public int get717() {
return 717;
}
public int get718() {
return 718;
}
public int get719() {
return 719;
}
public int get720() {
return 720;
}
public int get721() {
return 721;
}
public int get722() {
return 722;
}
public int get723() {
return 723;
}
public int get724() {
return 724;
}
public int get725() {
return 725;
}
public int get726() {
return 726;
}
public int get727() {
return 727;
}
public int get728() {
return 728;
}
public int get729() {
return 729;
}
public int get730() {
return 730;
}
public int get731() {
return 731;
}
public int get732() {
return 732;
}
public int get733() {
return 733;
}
public int get734() {
return 734;
}
public int get735() {
return 735;
}
public int get736() {
return 736;
}
public int get737() {
return 737;
}
public int get738() {
return 738;
}
public int get739() {
return 739;
}
public int get740() {
return 740;
}
public int get741() {
return 741;
}
public int get742() {
return 742;
}
public int get743() {
return 743;
}
public int get744() {
return 744;
}
public int get745() {
return 745;
}
public int get746() {
return 746;
}
public int get747() {
return 747;
}
public int get748() {
return 748;
}
public int get749() {
return 749;
}
public int get750() {
return 750;
}
public int get751() {
return 751;
}
public int get752() {
return 752;
}
public int get753() {
return 753;
}
public int get754() {
return 754;
}
public int get755() {
return 755;
}
public int get756() {
return 756;
}
public int get757() {
return 757;
}
public int get758() {
return 758;
}
public int get759() {
return 759;
}
public int get760() {
return 760;
}
public int get761() {
return 761;
}
public int get762() {
return 762;
}
public int get763() {
return 763;
}
public int get764() {
return 764;
}
public int get765() {
return 765;
}
public int get766() {
return 766;
}
public int get767() {
return 767;
}
public int get768() {
return 768;
}
public int get769() {
return 769;
}
public int get770() {
return 770;
}
public int get771() {
return 771;
}
public int get772() {
return 772;
}
public int get773() {
return 773;
}
public int get774() {
return 774;
}
public int get775() {
return 775;
}
public int get776() {
return 776;
}
public int get777() {
return 777;
}
public int get778() {
return 778;
}
public int get779() {
return 779;
}
public int get780() {
return 780;
}
public int get781() {
return 781;
}
public int get782() {
return 782;
}
public int get783() {
return 783;
}
public int get784() {
return 784;
}
public int get785() {
return 785;
}
public int get786() {
return 786;
}
public int get787() {
return 787;
}
public int get788() {
return 788;
}
public int get789() {
return 789;
}
public int get790() {
return 790;
}
public int get791() {
return 791;
}
public int get792() {
return 792;
}
public int get793() {
return 793;
}
public int get794() {
return 794;
}
public int get795() {
return 795;
}
public int get796() {
return 796;
}
public int get797() {
return 797;
}
public int get798() {
return 798;
}
public int get799() {
return 799;
}
public int get800() {
return 800;
}
public int get801() {
return 801;
}
public int get802() {
return 802;
}
public int get803() {
return 803;
}
public int get804() {
return 804;
}
public int get805() {
return 805;
}
public int get806() {
return 806;
}
public int get807() {
return 807;
}
public int get808() {
return 808;
}
public int get809() {
return 809;
}
public int get810() {
return 810;
}
public int get811() {
return 811;
}
public int get812() {
return 812;
}
public int get813() {
return 813;
}
public int get814() {
return 814;
}
public int get815() {
return 815;
}
public int get816() {
return 816;
}
public int get817() {
return 817;
}
public int get818() {
return 818;
}
public int get819() {
return 819;
}
public int get820() {
return 820;
}
public int get821() {
return 821;
}
public int get822() {
return 822;
}
public int get823() {
return 823;
}
public int get824() {
return 824;
}
public int get825() {
return 825;
}
public int get826() {
return 826;
}
public int get827() {
return 827;
}
public int get828() {
return 828;
}
public int get829() {
return 829;
}
public int get830() {
return 830;
}
public int get831() {
return 831;
}
public int get832() {
return 832;
}
public int get833() {
return 833;
}
public int get834() {
return 834;
}
public int get835() {
return 835;
}
public int get836() {
return 836;
}
public int get837() {
return 837;
}
public int get838() {
return 838;
}
public int get839() {
return 839;
}
public int get840() {
return 840;
}
public int get841() {
return 841;
}
public int get842() {
return 842;
}
public int get843() {
return 843;
}
public int get844() {
return 844;
}
public int get845() {
return 845;
}
public int get846() {
return 846;
}
public int get847() {
return 847;
}
public int get848() {
return 848;
}
public int get849() {
return 849;
}
public int get850() {
return 850;
}
public int get851() {
return 851;
}
public int get852() {
return 852;
}
public int get853() {
return 853;
}
public int get854() {
return 854;
}
public int get855() {
return 855;
}
public int get856() {
return 856;
}
public int get857() {
return 857;
}
public int get858() {
return 858;
}
public int get859() {
return 859;
}
public int get860() {
return 860;
}
public int get861() {
return 861;
}
public int get862() {
return 862;
}
public int get863() {
return 863;
}
public int get864() {
return 864;
}
public int get865() {
return 865;
}
public int get866() {
return 866;
}
public int get867() {
return 867;
}
public int get868() {
return 868;
}
public int get869() {
return 869;
}
public int get870() {
return 870;
}
public int get871() {
return 871;
}
public int get872() {
return 872;
}
public int get873() {
return 873;
}
public int get874() {
return 874;
}
public int get875() {
return 875;
}
public int get876() {
return 876;
}
public int get877() {
return 877;
}
public int get878() {
return 878;
}
public int get879() {
return 879;
}
public int get880() {
return 880;
}
public int get881() {
return 881;
}
public int get882() {
return 882;
}
public int get883() {
return 883;
}
public int get884() {
return 884;
}
public int get885() {
return 885;
}
public int get886() {
return 886;
}
public int get887() {
return 887;
}
public int get888() {
return 888;
}
public int get889() {
return 889;
}
public int get890() {
return 890;
}
public int get891() {
return 891;
}
public int get892() {
return 892;
}
public int get893() {
return 893;
}
public int get894() {
return 894;
}
public int get895() {
return 895;
}
public int get896() {
return 896;
}
public int get897() {
return 897;
}
public int get898() {
return 898;
}
public int get899() {
return 899;
}
public int get900() {
return 900;
}
public int get901() {
return 901;
}
public int get902() {
return 902;
}
public int get903() {
return 903;
}
public int get904() {
return 904;
}
public int get905() {
return 905;
}
public int get906() {
return 906;
}
public int get907() {
return 907;
}
public int get908() {
return 908;
}
public int get909() {
return 909;
}
public int get910() {
return 910;
}
public int get911() {
return 911;
}
public int get912() {
return 912;
}
public int get913() {
return 913;
}
public int get914() {
return 914;
}
public int get915() {
return 915;
}
public int get916() {
return 916;
}
public int get917() {
return 917;
}
public int get918() {
return 918;
}
public int get919() {
return 919;
}
public int get920() {
return 920;
}
public int get921() {
return 921;
}
public int get922() {
return 922;
}
public int get923() {
return 923;
}
public int get924() {
return 924;
}
public int get925() {
return 925;
}
public int get926() {
return 926;
}
public int get927() {
return 927;
}
public int get928() {
return 928;
}
public int get929() {
return 929;
}
public int get930() {
return 930;
}
public int get931() {
return 931;
}
public int get932() {
return 932;
}
public int get933() {
return 933;
}
public int get934() {
return 934;
}
public int get935() {
return 935;
}
public int get936() {
return 936;
}
public int get937() {
return 937;
}
public int get938() {
return 938;
}
public int get939() {
return 939;
}
public int get940() {
return 940;
}
public int get941() {
return 941;
}
public int get942() {
return 942;
}
public int get943() {
return 943;
}
public int get944() {
return 944;
}
public int get945() {
return 945;
}
public int get946() {
return 946;
}
public int get947() {
return 947;
}
public int get948() {
return 948;
}
public int get949() {
return 949;
}
public int get950() {
return 950;
}
public int get951() {
return 951;
}
public int get952() {
return 952;
}
public int get953() {
return 953;
}
public int get954() {
return 954;
}
public int get955() {
return 955;
}
public int get956() {
return 956;
}
public int get957() {
return 957;
}
public int get958() {
return 958;
}
public int get959() {
return 959;
}
public int get960() {
return 960;
}
public int get961() {
return 961;
}
public int get962() {
return 962;
}
public int get963() {
return 963;
}
public int get964() {
return 964;
}
public int get965() {
return 965;
}
public int get966() {
return 966;
}
public int get967() {
return 967;
}
public int get968() {
return 968;
}
public int get969() {
return 969;
}
public int get970() {
return 970;
}
public int get971() {
return 971;
}
public int get972() {
return 972;
}
public int get973() {
return 973;
}
public int get974() {
return 974;
}
public int get975() {
return 975;
}
public int get976() {
return 976;
}
public int get977() {
return 977;
}
public int get978() {
return 978;
}
public int get979() {
return 979;
}
public int get980() {
return 980;
}
public int get981() {
return 981;
}
public int get982() {
return 982;
}
public int get983() {
return 983;
}
public int get984() {
return 984;
}
public int get985() {
return 985;
}
public int get986() {
return 986;
}
public int get987() {
return 987;
}
public int get988() {
return 988;
}
public int get989() {
return 989;
}
public int get990() {
return 990;
}
public int get991() {
return 991;
}
public int get992() {
return 992;
}
public int get993() {
return 993;
}
public int get994() {
return 994;
}
public int get995() {
return 995;
}
public int get996() {
return 996;
}
public int get997() {
return 997;
}
public int get998() {
return 998;
}
public int get999() {
return 999;
}
}
| apache-2.0 |
JoaoLMPereira/pentaho-data-profiling | model/core/src/test/java/org/pentaho/profiling/model/StreamingProfileServiceImplTest.java | 2264 | /*******************************************************************************
*
* Pentaho Data Profiling
*
* Copyright (C) 2002-2015 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.profiling.model;
import org.pentaho.profiling.api.StreamingProfile;
import org.pentaho.profiling.api.action.ProfileActionException;
import org.pentaho.profiling.api.metrics.MetricContributorsFactory;
import org.pentaho.profiling.api.metrics.field.DataSourceFieldValue;
import org.junit.Test;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* Created by bryan on 4/2/15.
*/
public class StreamingProfileServiceImplTest {
  @Test
  public void testStreamingProfileServiceImpl() throws ProfileActionException {
    final String profileId = "test-id";

    // Stub a streaming profile that reports the id we register it under.
    StreamingProfile profile = mock( StreamingProfile.class );
    when( profile.getId() ).thenReturn( profileId );

    StreamingProfileServiceImpl service = new StreamingProfileServiceImpl();
    service.registerStreamingProfile( profile );

    // Registration must make the profile retrievable by its id.
    assertEquals( profile, service.getStreamingProfile( profileId ) );

    // processRecord on the service must delegate to the registered profile.
    List<DataSourceFieldValue> record = mock( List.class );
    service.processRecord( profileId, record );
    verify( profile ).processRecord( record );
  }
}
| apache-2.0 |
dinfuehr/rust | src/libstd/collections/hash/bench.rs | 2492 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg(test)]
extern crate test;
use prelude::v1::*;
use self::test::Bencher;
use iter::{range_inclusive};
#[bench]
// Baseline: cost of constructing an empty HashMap and dropping it immediately.
fn new_drop(b : &mut Bencher) {
use super::map::HashMap;
b.iter(|| {
let m : HashMap<int, int> = HashMap::new();
assert_eq!(m.len(), 0);
})
}
#[bench]
// Cost of constructing a map, inserting a single entry, and dropping the map.
fn new_insert_drop(b : &mut Bencher) {
use super::map::HashMap;
b.iter(|| {
let mut m = HashMap::new();
m.insert(0i, 0i);
assert_eq!(m.len(), 1);
})
}
#[bench]
// Cost of inserting one brand-new key per iteration into an already-populated
// map, so the table keeps growing over the benchmark run.
fn grow_by_insertion(b: &mut Bencher) {
use super::map::HashMap;
let mut m = HashMap::new();
// Pre-populate with 1000 entries so each timed insert hits a sizable table.
for i in range_inclusive(1i, 1000) {
m.insert(i, i);
}
let mut k = 1001;
b.iter(|| {
m.insert(k, k);
k += 1;
});
}
#[bench]
// Cost of 1000 lookups per iteration where every key is present (all hits).
fn find_existing(b: &mut Bencher) {
use super::map::HashMap;
let mut m = HashMap::new();
// Keys 1..1000 inclusive are inserted, so every probe below succeeds.
for i in range_inclusive(1i, 1000) {
m.insert(i, i);
}
b.iter(|| {
for i in range_inclusive(1i, 1000) {
m.contains_key(&i);
}
});
}
#[bench]
// Cost of 1000 lookups per iteration where no key is present (all misses).
fn find_nonexisting(b: &mut Bencher) {
use super::map::HashMap;
let mut m = HashMap::new();
// The map holds keys 1..1000; the probes below use 1001..2000, so all miss.
for i in range_inclusive(1i, 1000) {
m.insert(i, i);
}
b.iter(|| {
for i in range_inclusive(1001i, 2000) {
m.contains_key(&i);
}
});
}
#[bench]
// Uses the map as a sliding window: each iteration evicts the oldest key and
// inserts a new one 1000 ahead, keeping the map at a steady 1000 entries.
fn hashmap_as_queue(b: &mut Bencher) {
use super::map::HashMap;
let mut m = HashMap::new();
for i in range_inclusive(1i, 1000) {
m.insert(i, i);
}
let mut k = 1i;
b.iter(|| {
m.remove(&k);
m.insert(k + 1000, k + 1000);
k += 1;
});
}
#[bench]
// Mixed workload per iteration: one hit lookup, one miss lookup, one removal,
// and one insertion, with the map size held steady at 1000 entries.
fn get_remove_insert(b: &mut Bencher) {
use super::map::HashMap;
let mut m = HashMap::new();
for i in range_inclusive(1i, 1000) {
m.insert(i, i);
}
let mut k = 1i;
b.iter(|| {
m.get(&(k + 400));
m.get(&(k + 2000));
m.remove(&k);
m.insert(k + 1000, k + 1000);
k += 1;
})
}
| apache-2.0 |
vjuranek/radargun | plugins/infinispan82/src/main/java/org/radargun/service/Infinispan82EmbeddedContinuousQuery.java | 2219 | package org.radargun.service;
import org.infinispan.query.Search;
import org.radargun.traits.ContinuousQuery;
import org.radargun.traits.Query;
/**
* @author Vojtech Juranek <vjuranek@redhat.com>
*/
public class Infinispan82EmbeddedContinuousQuery implements ContinuousQuery {
protected final Infinispan82EmbeddedService service;
public Infinispan82EmbeddedContinuousQuery(Infinispan82EmbeddedService service) {
this.service = service;
}
@Override
public ListenerReference createContinuousQuery(String cacheName, Query query, ContinuousQuery.Listener cqListener) {
AbstractInfinispanQueryable.QueryImpl ispnQuery = (AbstractInfinispanQueryable.QueryImpl) query;
org.infinispan.query.api.continuous.ContinuousQuery cq = Search.getContinuousQuery(service.getCache(cacheName));
Listener ispnCqListener = new Listener(cqListener);
cq.addContinuousQueryListener(ispnQuery.getDelegatingQuery(), ispnCqListener);
return new ListenerReference(cq, ispnCqListener);
}
@Override
public void removeContinuousQuery(String cacheName, ContinuousQuery.ListenerReference listenerReference) {
ListenerReference ref = (ListenerReference) listenerReference;
ref.cq.removeContinuousQueryListener(ref.listener);
}
private static class Listener implements org.infinispan.query.api.continuous.ContinuousQueryListener {
private final ContinuousQuery.Listener cqListener;
public Listener(ContinuousQuery.Listener cqListener) {
this.cqListener = cqListener;
}
@Override
public void resultJoining(Object key, Object value) {
cqListener.onEntryJoined(key, value);
}
@Override
public void resultLeaving(Object key) {
cqListener.onEntryLeft(key);
}
}
public static class ListenerReference implements ContinuousQuery.ListenerReference {
private final org.infinispan.query.api.continuous.ContinuousQuery<Object, Object> cq;
private final Listener listener;
public ListenerReference(org.infinispan.query.api.continuous.ContinuousQuery<Object, Object> cq, Listener listener) {
this.cq = cq;
this.listener = listener;
}
}
}
| apache-2.0 |
satrion/omnidroid | omnidroid/src/edu/nyu/cs/omnidroid/app/controller/util/ExceptionMessageMap.java | 5822 | /*******************************************************************************
* Copyright 2009 Omnidroid - http://code.google.com/p/omnidroid
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package edu.nyu.cs.omnidroid.app.controller.util;
import java.util.HashMap;
import java.util.Map;
/**
* Maps exception error codes to exception messages. <br>
* <br>
*
* The code structure is as follows:
*
* <ul>
* <li>The error code is a six digit number</li>
* <li>The first two digits represent the error type</li>
* <li>The last four digits represent the specific error in that error type</li>
* </ul>
*
* Note: the last four digits may be divided into subtypes. In such case, the explanation of the
* format will be included in the error type class descriptions below.<br>
* <br>
*
* The error types are:
*
* <ul>
* <li>00 -- Standard Java exceptions caused by malformed or invalid method parameters</li>
* <li>02 -- All other standard Java exceptions</li>
* <li>10 -- General {@code OmniException}s from the UI</li>
* <li>11 -- General {@code OmniException}s from the Event Catcher</li>
* <li>12 -- General {@code OmniException}s from the Action Thrower</li>
* <li>13 -- General {@code OmniException}s from the Application API</li>
* <li>20 -- IO related {@code OmniException}s</li>
* </ul>
*
* How to use the codes and messages:
*
* <ul>
* <li>All exceptions thrown from within Omnidroid should contain a code and a message, even
* exceptions thrown using standard Java exceptions.</li>
* <li>When creating a new exception code and message, start the message with the code like so:
* "000001: "</li>
* <li>When throwing an <code>OmniException</code>, the code and the message must be supplied to the
* constructor.</li>
* <li>When throwing a standard Java exception (ex. <code>NullPointerException</code>), just supply
* the message since the code is already included in the message.</li>
* <li>The message retrieved from the <code>ExceptionMessageMap</code> can be treated like a base
* message. If additional information is required in the message, append that information to the
* base message.</li>
* <li>If the value of a variable must be included in the message, list the variable names and their
* values at the end of the message in the following format: "{ varName1=[varVal1],
* varName2=[varVal2], ... }"</li>
* </ul>
*
* TODO (acase): Load the exception message mapping from a properties bundle.
*/
public class ExceptionMessageMap {

  // Error code -> base message lookup; populated once in the static initializer.
  private static final Map<String, String> MESSAGE_MAP;

  static {
    MESSAGE_MAP = new HashMap<String, String>();
    MESSAGE_MAP.put("000001", "000001: A String argument was not specified but is required.");
    MESSAGE_MAP.put("000002", "000002: A List<String> argument was not specified but is required.");
    MESSAGE_MAP.put("000003", "000003: An Object argument was not specified but is required.");
    MESSAGE_MAP.put("000004", "000004: A MessageType argument was not specified but is required.");
    MESSAGE_MAP.put("000005",
        "000005: The int argument is outside the allowable range (start <= index < end).");
    MESSAGE_MAP.put("020000", "020000: Error serializing message.");
    MESSAGE_MAP.put("020001", "020001: Error deserializing message.");
    MESSAGE_MAP.put("020002", "020002: Configuration file does not exist.");
    MESSAGE_MAP.put("100000", "100000: Error starting UI.");
    MESSAGE_MAP.put("110000", "110000: Error starting EventCatcher.");
    MESSAGE_MAP.put("120000", "120000: Error starting ActionThrower.");
    MESSAGE_MAP.put("120001", "120001: Illegal execution method for action.");
    MESSAGE_MAP.put("120002", "120002: Action parameters not found.");
    MESSAGE_MAP.put("120003", "120003: Action not supported.");
    MESSAGE_MAP.put("120004", "120004: Action parameters mal format.");
    MESSAGE_MAP.put("130000", "130000: Error starting Application API.");
    MESSAGE_MAP.put("140000", "140000: Application/Event name cannot be null.");
    MESSAGE_MAP.put("140001", "140001: Error retrieving event from database.");
    MESSAGE_MAP.put("200000", "200000: When reading bytes from an InputStream, the EndOfFile "
        + "was reached before expected.");
    MESSAGE_MAP.put("200001", "200001: When reading chars from a Reader, the EndOfFile was "
        + "reached before expected.");
    MESSAGE_MAP.put("200002", "200002: Error reading the bytes of a message from an InputStream.");
    MESSAGE_MAP.put("200003", "200003: Error reading the chars of a message from an InputStream.");
    MESSAGE_MAP.put("200500", "200500: Error writing message bytes to an output stream.");
  }

  /** Utility class: all members are static, so instantiation is disallowed. */
  private ExceptionMessageMap() {
  }

  /**
   * Gets the error message for the specified code.
   *
   * @param code
   *          The error code for which to get the associated error message.
   * @return The error message for the specified code, or a fallback message that embeds the
   *         unknown code when no mapping exists.
   */
  public static String getMessage(String code) {
    String str = MESSAGE_MAP.get(code);
    if (null == str) {
      return "NO MESSAGE FOR ERROR CODE. { code=[" + code + "] }";
    }
    return str;
  }
}
| apache-2.0 |
ty1er/incubator-asterixdb | asterixdb/asterix-external-data/src/test/java/org/apache/asterix/external/classad/object/pool/AMutableCharArrayStringPool.java | 1235 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.asterix.external.classad.object.pool;
import org.apache.asterix.external.classad.AMutableCharArrayString;
/**
 * Object pool of {@link AMutableCharArrayString} instances: allocates fresh
 * strings on demand and clears recycled ones before they are handed out again.
 */
public class AMutableCharArrayStringPool extends Pool<AMutableCharArrayString> {

    /** Allocates a brand-new, empty char-array string for the pool. */
    @Override
    public AMutableCharArrayString newInstance() {
        return new AMutableCharArrayString();
    }

    /** Empties a recycled instance so it can be safely reused. */
    @Override
    protected void reset(AMutableCharArrayString recycled) {
        recycled.reset();
    }
}
| apache-2.0 |
onepf/OpenIAB | library/src/main/java/org/onepf/oms/appstore/googleUtils/Inventory.java | 3748 | /*
* Copyright 2012-2014 One Platform Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onepf.oms.appstore.googleUtils;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* Represents a block of information about in-app items.
* An Inventory is returned by such methods as {@link IabHelper#queryInventory}.
*/
/**
 * Represents a block of information about in-app items.
 * An Inventory is returned by such methods as {@link IabHelper#queryInventory}.
 *
 * <p>Backed by concurrent maps, so individual reads and writes from different
 * threads are safe; compound operations are not atomic.
 */
public class Inventory {

    /** Listing details, keyed by product ID (SKU). */
    private final Map<String, SkuDetails> mSkuMap = new ConcurrentHashMap<String, SkuDetails>();

    /** Purchase records, keyed by product ID (SKU). */
    private final Map<String, Purchase> mPurchaseMap = new ConcurrentHashMap<String, Purchase>();

    public Inventory() {
    }

    /**
     * Returns the listing details for an in-app product, or null if unknown.
     */
    public SkuDetails getSkuDetails(String sku) {
        return mSkuMap.get(sku);
    }

    /**
     * Returns purchase information for a given product, or null if there is no purchase.
     */
    public Purchase getPurchase(String sku) {
        return mPurchaseMap.get(sku);
    }

    /**
     * Returns whether or not there exists a purchase of the given product.
     */
    public boolean hasPurchase(String sku) {
        return mPurchaseMap.containsKey(sku);
    }

    /**
     * Return whether or not details about the given product are available.
     */
    public boolean hasDetails(String sku) {
        return mSkuMap.containsKey(sku);
    }

    /**
     * Erase a purchase (locally) from the inventory, given its product ID. This just
     * modifies the Inventory object locally and has no effect on the server! This is
     * useful when you have an existing Inventory object which you know to be up to date,
     * and you have just consumed an item successfully, which means that erasing its
     * purchase data from the Inventory you already have is quicker than querying for
     * a new Inventory.
     */
    public void erasePurchase(String sku) {
        // Map.remove() is a no-op for absent keys, so the former containsKey()
        // pre-check was redundant (and a non-atomic check-then-act on a concurrent map).
        mPurchaseMap.remove(sku);
    }

    /**
     * Returns a list of all owned product IDs.
     */
    @NotNull
    public List<String> getAllOwnedSkus() {
        return new ArrayList<String>(mPurchaseMap.keySet());
    }

    /**
     * Returns a list of all owned product IDs of a given type.
     */
    @NotNull
    public List<String> getAllOwnedSkus(String itemType) {
        List<String> result = new ArrayList<String>();
        for (Purchase p : mPurchaseMap.values()) {
            // equals(null) is false, so a null itemType simply yields an empty list.
            if (p.getItemType().equals(itemType)) result.add(p.getSku());
        }
        return result;
    }

    /**
     * Returns a list of all purchases.
     */
    @NotNull
    public List<Purchase> getAllPurchases() {
        return new ArrayList<Purchase>(mPurchaseMap.values());
    }

    /** Records (or replaces) the listing details for the product. */
    public void addSkuDetails(@NotNull SkuDetails d) {
        mSkuMap.put(d.getSku(), d);
    }

    /** Records (or replaces) the purchase for the product. */
    public void addPurchase(@NotNull Purchase p) {
        mPurchaseMap.put(p.getSku(), p);
    }

    /** Read-only view of all known listing details, keyed by SKU. */
    public Map<String, SkuDetails> getSkuMap() {
        return Collections.unmodifiableMap(mSkuMap);
    }

    /** Read-only view of all known purchases, keyed by SKU. */
    public Map<String, Purchase> getPurchaseMap() {
        return Collections.unmodifiableMap(mPurchaseMap);
    }
}
| apache-2.0 |
brettwooldridge/buck | tools/ideabuck/src/com/facebook/buck/intellij/ideabuck/actions/BuckStopAction.java | 1936 | /*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.intellij.ideabuck.actions;
import com.facebook.buck.intellij.ideabuck.build.BuckBuildManager;
import com.facebook.buck.intellij.ideabuck.icons.BuckIcons;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.project.Project;
/** Toolbar action that stops the buck command currently running for the project. */
public class BuckStopAction extends BuckBaseAction {

  public static final String ACTION_TITLE = "Stop the current buck command";
  public static final String ACTION_DESCRIPTION = "Stop the current buck command";

  public BuckStopAction() {
    super(ACTION_TITLE, ACTION_DESCRIPTION, BuckIcons.ACTION_STOP);
  }

  @Override
  public void update(AnActionEvent e) {
    Project project = e.getProject();
    if (project == null) {
      return;
    }
    // Enabled only while a build is running and no kill is already in flight.
    BuckBuildManager manager = BuckBuildManager.getInstance(project);
    e.getPresentation().setEnabled(manager.isBuilding() && !manager.isKilling());
  }

  @Override
  public void executeOnPooledThread(AnActionEvent e) {
    Project project = e.getProject();
    if (project == null) {
      return;
    }
    // Stop the running command handler first (if any), then clear the building flag.
    BuckBuildManager manager = BuckBuildManager.getInstance(project);
    if (manager.getCurrentRunningBuckCommandHandler() != null) {
      manager.getCurrentRunningBuckCommandHandler().stop();
    }
    manager.setBuilding(project, false);
  }
}
| apache-2.0 |
streamsets/datacollector | container/src/test/java/com/streamsets/datacollector/restapi/StageLibraryResourceConfig.java | 1232 | /*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.streamsets.datacollector.restapi;
import com.streamsets.datacollector.main.BuildInfo;
import com.streamsets.datacollector.main.RuntimeInfo;
import com.streamsets.datacollector.stagelibrary.StageLibraryTask;
import org.glassfish.hk2.utilities.binding.AbstractBinder;
/**
 * HK2 binder used by REST-API tests: wires test-double factories for the
 * stage library, runtime info, and build info services.
 */
public class StageLibraryResourceConfig extends AbstractBinder {
  @Override
  protected void configure() {
    // Each bindFactory call registers a TestUtil factory as the injectable
    // implementation of the corresponding service interface.
    bindFactory(TestUtil.StageLibraryTestInjector.class).to(StageLibraryTask.class);
    bindFactory(TestUtil.RuntimeInfoTestInjector.class).to(RuntimeInfo.class);
    bindFactory(TestUtil.BuildInfoTestInjector.class).to(BuildInfo.class);
  }
}
| apache-2.0 |
DLotts/incubator-rya | web/web.rya/src/main/java/org/apache/cloud/rdf/web/cloudbase/sail/AbstractRDFWebServlet.java | 4092 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
//package org.apache.cloud.rdf.web.cloudbase.sail;
//
//import cloudbase.core.client.Connector;
//import cloudbase.core.client.ZooKeeperInstance;
//import org.apache.rya.cloudbase.CloudbaseRdfDAO;
//import org.apache.rya.cloudbase.CloudbaseRdfEvalStatsDAO;
//import RdfCloudTripleStore;
//import org.openrdf.repository.Repository;
//import org.openrdf.repository.RepositoryException;
//import org.openrdf.repository.sail.SailRepository;
//
//import javax.servlet.ServletConfig;
//import javax.servlet.ServletException;
//import javax.servlet.http.HttpServlet;
//
///**
// * Class AbstractRDFWebServlet
// * Date: Dec 13, 2010
// * Time: 9:44:08 AM
// */
//public class AbstractRDFWebServlet extends HttpServlet implements RDFWebConstants {
//
// protected Repository repository;
// protected String origTablePrefix;
//
// @Override
// public void init(ServletConfig config) throws ServletException {
// super.init(config);
// try {
// String instance = config.getInitParameter(INSTANCE_PARAM);
// String server = config.getInitParameter(SERVER_PARAM);
// String port = config.getInitParameter(PORT_PARAM);
// String user = config.getInitParameter(USER_PARAM);
// String password = config.getInitParameter(PASSWORD_PARAM);
// String tablePrefix = config.getInitParameter(TABLEPREFIX_PARAM);
//
// RdfCloudTripleStore rts = new RdfCloudTripleStore();
//// rts.setInstance("dne");
//// if (instance != null)
//// rts.setInstance(instance);
//// if (server != null)
//// rts.setServer(server);
//// if (port != null)
//// rts.setPort(Integer.parseInt(port));
//// if (user != null)
//// rts.setUser(user);
//// if (password != null)
//// rts.setPassword(password);
//// if (tablePrefix != null) {
//// rts.setTablePrefix(tablePrefix);
//// origTablePrefix = tablePrefix;
//// }
// CloudbaseRdfDAO crdfdao = new CloudbaseRdfDAO();
// Connector connector = new ZooKeeperInstance("stratus", "stratus13:2181").getConnector("root", "password");
// crdfdao.setConnector(connector);
// crdfdao.setSpoTable("lubm_spo");
// crdfdao.setPoTable("lubm_po");
// crdfdao.setOspTable("lubm_osp");
// crdfdao.setNamespaceTable("lubm_ns");
// rts.setRdfDao(crdfdao);
// CloudbaseRdfEvalStatsDAO ceval = new CloudbaseRdfEvalStatsDAO();
// ceval.setConnector(connector);
// ceval.setEvalTable("lubm_eval");
// rts.setRdfEvalStatsDAO(ceval);
//
// repository = new SailRepository(rts);
//
// repository.initialize();
// } catch (Exception e) {
// throw new ServletException(e);
// }
// }
//
// @Override
// public void destroy() {
// try {
// repository.shutDown();
// } catch (RepositoryException e) {
// e.printStackTrace();
// }
// }
//
//
// public Repository getRepository() {
// return repository;
// }
//
// public void setRepository(Repository repository) {
// this.repository = repository;
// }
//}
| apache-2.0 |
Xpray/flink | flink-runtime/src/main/java/org/apache/flink/runtime/client/JobSubmissionClientActor.java | 6720 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.client;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.Status;
import akka.dispatch.Futures;
import org.apache.flink.configuration.AkkaOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.akka.ListeningBehaviour;
import org.apache.flink.runtime.instance.ActorGateway;
import org.apache.flink.runtime.instance.AkkaActorGateway;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService;
import org.apache.flink.runtime.messages.JobClientMessages;
import org.apache.flink.runtime.messages.JobClientMessages.SubmitJobAndWait;
import org.apache.flink.runtime.messages.JobManagerMessages;
import org.apache.flink.runtime.util.SerializedThrowable;
import scala.concurrent.duration.FiniteDuration;
import java.io.IOException;
import java.util.concurrent.Callable;
/**
* Actor which handles Job submission process and provides Job updates until completion.
*/
public class JobSubmissionClientActor extends JobClientActor {
/** JobGraph which shall be submitted to the JobManager */
private JobGraph jobGraph;
/** true if a SubmitJobSuccess message has been received */
private boolean jobSuccessfullySubmitted = false;
/** The cluster configuration */
private final Configuration clientConfig;
public JobSubmissionClientActor(
LeaderRetrievalService leaderRetrievalService,
FiniteDuration timeout,
boolean sysoutUpdates,
Configuration clientConfig) {
super(leaderRetrievalService, timeout, sysoutUpdates);
this.clientConfig = clientConfig;
}
@Override
public void connectedToJobManager() {
if (jobGraph != null && !jobSuccessfullySubmitted) {
// if we haven't yet submitted the job successfully
tryToSubmitJob();
}
}
@Override
protected Class getClientMessageClass() {
return SubmitJobAndWait.class;
}
@Override
public void handleCustomMessage(Object message) {
// submit a job to the JobManager
if (message instanceof SubmitJobAndWait) {
// sanity check that this no job was submitted through this actor before -
// it is a one-shot actor after all
if (this.client == null) {
jobGraph = ((SubmitJobAndWait) message).jobGraph();
if (jobGraph == null) {
LOG.error("Received null JobGraph");
sender().tell(
decorateMessage(new Status.Failure(new Exception("JobGraph is null"))),
getSelf());
} else {
LOG.info("Received job {} ({}).", jobGraph.getName(), jobGraph.getJobID());
this.client = getSender();
// is only successful if we already know the job manager leader
if (jobManager != null) {
tryToSubmitJob();
}
}
} else {
// repeated submission - tell failure to sender and kill self
String msg = "Received repeated 'SubmitJobAndWait'";
LOG.error(msg);
getSender().tell(
decorateMessage(new Status.Failure(new Exception(msg))), ActorRef.noSender());
terminate();
}
} else if (message instanceof JobManagerMessages.JobSubmitSuccess) {
// job was successfully submitted :-)
LOG.info("Job {} was successfully submitted to the JobManager {}.",
((JobManagerMessages.JobSubmitSuccess) message).jobId(),
getSender().path());
jobSuccessfullySubmitted = true;
} else if (JobClientMessages.getSubmissionTimeout().equals(message)) {
// check if our job submission was successful in the meantime
if (!jobSuccessfullySubmitted) {
if (isClientConnected()) {
client.tell(
decorateMessage(new Status.Failure(
new JobClientActorSubmissionTimeoutException("Job submission to the JobManager timed out. " +
"You may increase '" + AkkaOptions.CLIENT_TIMEOUT.key() + "' in case the JobManager " +
"needs more time to configure and confirm the job submission."))),
getSelf());
}
// We haven't heard back from the job manager after sending the job graph to him,
// therefore terminate
terminate();
}
} else {
LOG.error("{} received unknown message: ", getClass());
}
}
private void tryToSubmitJob() {
LOG.info("Sending message to JobManager {} to submit job {} ({}) and wait for progress",
jobManager.path().toString(), jobGraph.getName(), jobGraph.getJobID());
Futures.future(new Callable<Object>() {
@Override
public Object call() throws Exception {
ActorGateway jobManagerGateway = new AkkaActorGateway(jobManager, leaderSessionID);
LOG.info("Upload jar files to job manager {}.", jobManager.path());
try {
jobGraph.uploadUserJars(jobManagerGateway, timeout, clientConfig);
} catch (IOException exception) {
getSelf().tell(
decorateMessage(new JobManagerMessages.JobResultFailure(
new SerializedThrowable(
new JobSubmissionException(
jobGraph.getJobID(),
"Could not upload the jar files to the job manager.",
exception)
)
)),
ActorRef.noSender()
);
}
LOG.info("Submit job to the job manager {}.", jobManager.path());
jobManager.tell(
decorateMessage(
new JobManagerMessages.SubmitJob(
jobGraph,
ListeningBehaviour.EXECUTION_RESULT_AND_STATE_CHANGES)),
getSelf());
// issue a SubmissionTimeout message to check that we submit the job within
// the given timeout
getContext().system().scheduler().scheduleOnce(
timeout,
getSelf(),
decorateMessage(JobClientMessages.getSubmissionTimeout()),
getContext().dispatcher(),
ActorRef.noSender());
return null;
}
}, getContext().dispatcher());
}
public static Props createActorProps(
LeaderRetrievalService leaderRetrievalService,
FiniteDuration timeout,
boolean sysoutUpdates,
Configuration clientConfig) {
return Props.create(
JobSubmissionClientActor.class,
leaderRetrievalService,
timeout,
sysoutUpdates,
clientConfig);
}
}
| apache-2.0 |
DariusX/camel | components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/CronScheduledRoutePolicyTest.java | 12569 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.routepolicy.quartz;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.Consumer;
import org.apache.camel.Route;
import org.apache.camel.ServiceStatus;
import org.apache.camel.SuspendableService;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.direct.DirectComponent;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.component.quartz.QuartzComponent;
import org.apache.camel.support.service.ServiceHelper;
import org.apache.camel.test.junit4.CamelTestSupport;
import org.junit.Test;
/**
 * Tests for {@link CronScheduledRoutePolicy}: verifies cron-driven start, stop,
 * suspend, and resume of routes. Policies fire every 3 (or 6) seconds, and each
 * test sleeps/awaits up to 5 seconds for the transition to happen.
 */
public class CronScheduledRoutePolicyTest extends CamelTestSupport {

    @Override
    public boolean isUseRouteBuilder() {
        // Routes are added per-test so each can attach its own policy.
        return false;
    }

    @Test
    public void testScheduledStartRoutePolicyWithTwoRoutes() throws Exception {
        MockEndpoint success1 = context.getEndpoint("mock:success1", MockEndpoint.class);
        MockEndpoint success2 = context.getEndpoint("mock:success2", MockEndpoint.class);
        success1.expectedMessageCount(1);
        success2.expectedMessageCount(1);

        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                // One shared policy starts both routes on the cron schedule.
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy();
                policy.setRouteStartTime("*/3 * * * * ?");

                from("direct:start1")
                    .routeId("test1")
                    .routePolicy(policy)
                    .to("mock:success1");

                from("direct:start2")
                    .routeId("test2")
                    .routePolicy(policy)
                    .to("mock:success2");
            }
        });
        context.start();

        context.getRouteController().stopRoute("test1", 1000, TimeUnit.MILLISECONDS);
        context.getRouteController().stopRoute("test2", 1000, TimeUnit.MILLISECONDS);

        Thread.sleep(5000);
        assertTrue(context.getRouteController().getRouteStatus("test1") == ServiceStatus.Started);
        assertTrue(context.getRouteController().getRouteStatus("test2") == ServiceStatus.Started);
        template.sendBody("direct:start1", "Ready or not, Here, I come");
        template.sendBody("direct:start2", "Ready or not, Here, I come");

        success1.assertIsSatisfied();
        success2.assertIsSatisfied();
    }

    @Test
    public void testScheduledStopRoutePolicyWithTwoRoutes() throws Exception {
        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy();
                policy.setRouteStopTime("*/3 * * * * ?");
                policy.setRouteStopGracePeriod(0);
                policy.setTimeUnit(TimeUnit.MILLISECONDS);

                from("direct:start1")
                    .routeId("test1")
                    .routePolicy(policy)
                    .to("mock:unreachable");

                from("direct:start2")
                    .routeId("test2")
                    .routePolicy(policy)
                    .to("mock:unreachable");
            }
        });
        context.start();

        Thread.sleep(5000);

        assertTrue(context.getRouteController().getRouteStatus("test1") == ServiceStatus.Stopped);
        assertTrue(context.getRouteController().getRouteStatus("test2") == ServiceStatus.Stopped);
    }

    @Test
    public void testScheduledStartRoutePolicy() throws Exception {
        MockEndpoint success = context.getEndpoint("mock:success", MockEndpoint.class);
        success.expectedMessageCount(1);

        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy();
                policy.setRouteStartTime("*/3 * * * * ?");

                from("direct:start")
                    .routeId("test")
                    .routePolicy(policy)
                    .to("mock:success");
            }
        });
        context.start();

        context.getRouteController().stopRoute("test", 1000, TimeUnit.MILLISECONDS);

        Thread.sleep(5000);
        assertTrue(context.getRouteController().getRouteStatus("test") == ServiceStatus.Started);
        template.sendBody("direct:start", "Ready or not, Here, I come");

        context.getComponent("quartz", QuartzComponent.class).stop();
        success.assertIsSatisfied();
    }

    @Test
    public void testScheduledStopRoutePolicy() throws Exception {
        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy();
                policy.setRouteStopTime("*/3 * * * * ?");
                policy.setRouteStopGracePeriod(0);
                policy.setTimeUnit(TimeUnit.MILLISECONDS);

                from("direct:start")
                    .routeId("test")
                    .routePolicy(policy)
                    .to("mock:unreachable");
            }
        });
        context.start();

        Thread.sleep(5000);

        assertTrue(context.getRouteController().getRouteStatus("test") == ServiceStatus.Stopped);
    }

    @Test
    public void testScheduledStartAndStopRoutePolicy() throws Exception {
        MockEndpoint success = context.getEndpoint("mock:success", MockEndpoint.class);
        success.expectedMessageCount(1);

        final CountDownLatch startedLatch = new CountDownLatch(1);
        final CountDownLatch stoppedLatch = new CountDownLatch(1);

        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                // Latches signal the exact moment the policy starts/stops the route,
                // avoiding a fixed sleep.
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy() {
                    @Override
                    public void onStart(final Route route) {
                        super.onStart(route);
                        startedLatch.countDown();
                    }

                    @Override
                    public void onStop(final Route route) {
                        super.onStop(route);
                        stoppedLatch.countDown();
                    }
                };
                policy.setRouteStartTime("*/3 * * * * ?");
                policy.setRouteStopTime("*/6 * * * * ?");
                policy.setRouteStopGracePeriod(0);

                from("direct:start")
                    .routeId("test")
                    .routePolicy(policy)
                    .noAutoStartup()
                    .to("mock:success");
            }
        });
        context.start();

        // Fixed: was await(5000, TimeUnit.SECONDS) - an ~83 minute worst-case hang;
        // every other wait in this class is 5000 ms.
        startedLatch.await(5000, TimeUnit.MILLISECONDS);

        ServiceStatus startedStatus = context.getRouteController().getRouteStatus("test");
        assertTrue(startedStatus == ServiceStatus.Started || startedStatus == ServiceStatus.Starting);
        template.sendBody("direct:start", "Ready or not, Here, I come");

        // Fixed: was await(5000, TimeUnit.SECONDS) - same units typo as above.
        stoppedLatch.await(5000, TimeUnit.MILLISECONDS);

        ServiceStatus stoppedStatus = context.getRouteController().getRouteStatus("test");
        assertTrue(stoppedStatus == ServiceStatus.Stopped || stoppedStatus == ServiceStatus.Stopping);

        success.assertIsSatisfied();
    }

    @Test
    public void testScheduledStopRoutePolicyWithExtraPolicy() throws Exception {
        final MyRoutePolicy myPolicy = new MyRoutePolicy();

        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy();
                policy.setRouteStopTime("*/3 * * * * ?");
                policy.setRouteStopGracePeriod(0);
                policy.setTimeUnit(TimeUnit.MILLISECONDS);

                from("direct:start")
                    .routeId("test")
                    .routePolicy(policy, myPolicy)
                    .to("mock:unreachable");
            }
        });
        context.start();

        Thread.sleep(5000);

        assertTrue(context.getRouteController().getRouteStatus("test") == ServiceStatus.Stopped);
        assertTrue("Should have called onStart", myPolicy.isStart());
        assertTrue("Should have called onStop", myPolicy.isStop());
    }

    @Test
    public void testScheduledSuspendRoutePolicy() throws Exception {
        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy();
                policy.setRouteSuspendTime("*/3 * * * * ?");

                from("direct:start")
                    .routeId("test")
                    .routePolicy(policy)
                    .to("mock:unreachable");
            }
        });
        context.start();

        Thread.sleep(5000);

        // when suspending its only the consumer that suspends
        // there is a ticket to improve this
        Consumer consumer = context.getRoute("test").getConsumer();
        SuspendableService ss = (SuspendableService) consumer;
        assertTrue("Consumer should be suspended", ss.isSuspended());
    }

    @Test
    public void testScheduledResumeRoutePolicy() throws Exception {
        MockEndpoint success = context.getEndpoint("mock:success", MockEndpoint.class);
        success.expectedMessageCount(1);

        context.getComponent("direct", DirectComponent.class).setBlock(false);
        context.getComponent("quartz", QuartzComponent.class).setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
        context.addRoutes(new RouteBuilder() {
            public void configure() {
                CronScheduledRoutePolicy policy = new CronScheduledRoutePolicy();
                policy.setRouteResumeTime("*/3 * * * * ?");

                from("direct:start")
                    .routeId("test")
                    .routePolicy(policy)
                    .to("mock:success");
            }
        });
        context.start();

        ServiceHelper.suspendService(context.getRoute("test").getConsumer());

        Thread.sleep(5000);
        assertTrue(context.getRouteController().getRouteStatus("test") == ServiceStatus.Started);

        template.sendBody("direct:start", "Ready or not, Here, I come");
        success.assertIsSatisfied();
    }
}
| apache-2.0 |
lewixliu/git-repo | subcmds/smartsync.py | 1051 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subcmds.sync import Sync
class Smartsync(Sync):
  """The `repo smartsync` subcommand: a shortcut for `repo sync -s`.

  Thin wrapper over Sync that forces smart-sync mode, updating the working
  tree to the latest known good revision instead of the branch tip.
  """
  common = True
  helpSummary = "Update working tree to the latest known good revision"
  helpUsage = """
%prog [<project>...]
"""
  helpDescription = """
The '%prog' command is a shortcut for sync -s.
"""

  def _Options(self, p):
    """Register Sync's options, hiding the smart-sync flag (it is implied)."""
    Sync._Options(self, p, show_smart=False)

  def Execute(self, opt, args):
    """Force smart-sync mode, then delegate to the regular sync implementation."""
    opt.smart_sync = True
    Sync.Execute(self, opt, args)
| apache-2.0 |
csantanapr/incubator-openwhisk | tests/dat/actions/printParams.js | 673 | // Licensed to the Apache Software Foundation (ASF) under one or more contributor
// license agreements; and to You under the Apache License, Version 2.0.
/**
* Print the parameters to the console, sorted alphabetically by key
*/
/**
 * Print the parameters to the console, sorted alphabetically by key.
 *
 * @param {Object} params - action parameters to echo
 * @returns {{params: Object}} the same key/value pairs, inserted in sorted key order
 */
function main(params) {
    var retn = {};
    var sep = '';
    // Object.keys() yields own enumerable keys only, replacing the manual
    // for-in + hasOwnProperty loop (and the array for-in iteration, which is
    // fragile against prototype additions and yields string indices).
    Object.keys(params).sort().forEach(function (key) {
        var value = params[key];
        console.log(sep + 'params.' + key + ':', value);
        sep = ' ';
        retn[key] = value;
    });
    return {params: retn};
}
| apache-2.0 |
kisoku/chef | chef/lib/chef/cookbook_uploader.rb | 5668 |
require 'set'
require 'rest_client'
require 'chef/exceptions'
require 'chef/knife/cookbook_metadata'
require 'chef/checksum_cache'
require 'chef/sandbox'
require 'chef/cookbook_version'
require 'chef/cookbook/syntax_check'
require 'chef/cookbook/file_system_file_vendor'
class Chef
class CookbookUploader
def self.work_queue
@work_queue ||= Queue.new
end
def self.setup_worker_threads
@worker_threads ||= begin
work_queue
(1...10).map do
Thread.new do
loop do
work_queue.pop.call
end
end
end
end
end
attr_reader :cookbook
attr_reader :path
attr_reader :opts
attr_reader :rest
# Creates a new CookbookUploader.
# ===Arguments:
# * cookbook::: A Chef::CookbookVersion describing the cookbook to be uploaded
# * path::: A String or Array of Strings representing the base paths to the
# cookbook repositories.
# * opts::: (optional) An options Hash
# ===Options:
# * :force indicates that the uploader should set the force option when
# uploading the cookbook. This allows frozen CookbookVersion
# documents on the server to be overwritten (otherwise a 409 is
# returned by the server)
# * :rest A Chef::REST object that you have configured the way you like it.
# If you don't provide this, one will be created using the values
# in Chef::Config.
def initialize(cookbook, path, opts={})
@cookbook, @path, @opts = cookbook, path, opts
@rest = opts[:rest] || Chef::REST.new(Chef::Config[:chef_server_url])
end
def upload_cookbook
Thread.abort_on_exception = true
Chef::Log.info("Saving #{cookbook.name}")
# Syntax Check
validate_cookbook
# generate checksums of cookbook files and create a sandbox
checksum_files = cookbook.checksums
checksums = checksum_files.inject({}){|memo,elt| memo[elt.first]=nil ; memo}
new_sandbox = rest.post_rest("sandboxes", { :checksums => checksums })
Chef::Log.info("Uploading files")
self.class.setup_worker_threads
checksums_to_upload = Set.new
# upload the new checksums and commit the sandbox
new_sandbox['checksums'].each do |checksum, info|
if info['needs_upload'] == true
checksums_to_upload << checksum
Chef::Log.info("Uploading #{checksum_files[checksum]} (checksum hex = #{checksum}) to #{info['url']}")
self.class.work_queue << uploader_function_for(checksum_files[checksum], checksum, info['url'], checksums_to_upload)
else
Chef::Log.debug("#{checksum_files[checksum]} has not changed")
end
end
until checksums_to_upload.empty?
sleep 0.1
end
sandbox_url = new_sandbox['uri']
Chef::Log.debug("Committing sandbox")
# Retry if S3 is claims a checksum doesn't exist (the eventual
# in eventual consistency)
retries = 0
begin
rest.put_rest(sandbox_url, {:is_completed => true})
rescue Net::HTTPServerException => e
if e.message =~ /^400/ && (retries += 1) <= 5
sleep 2
retry
else
raise
end
end
# files are uploaded, so save the manifest
save_url = opts[:force] ? cookbook.force_save_url : cookbook.save_url
rest.put_rest(save_url, cookbook)
Chef::Log.info("Upload complete!")
end
def worker_thread(work_queue)
end
def uploader_function_for(file, checksum, url, checksums_to_upload)
lambda do
# Checksum is the hexadecimal representation of the md5,
# but we need the base64 encoding for the content-md5
# header
checksum64 = Base64.encode64([checksum].pack("H*")).strip
timestamp = Time.now.utc.iso8601
file_contents = File.open(file, "rb") {|f| f.read}
# TODO - 5/28/2010, cw: make signing and sending the request streaming
sign_obj = Mixlib::Authentication::SignedHeaderAuth.signing_object(
:http_method => :put,
:path => URI.parse(url).path,
:body => file_contents,
:timestamp => timestamp,
:user_id => rest.client_name
)
headers = { 'content-type' => 'application/x-binary', 'content-md5' => checksum64, :accept => 'application/json' }
headers.merge!(sign_obj.sign(OpenSSL::PKey::RSA.new(rest.signing_key)))
begin
RestClient::Resource.new(url, :headers=>headers, :timeout=>1800, :open_timeout=>1800).put(file_contents)
checksums_to_upload.delete(checksum)
rescue RestClient::Exception => e
Chef::Knife.ui.error("Failed to upload #@cookbook : #{e.message}\n#{e.response.body}")
raise
end
end
end
# Syntax-checks the cookbook's ruby files and templates before upload.
# Exits the whole process with status 1 on the first failure; returns true
# when everything parses.
def validate_cookbook
  syntax_checker = Chef::Cookbook::SyntaxCheck.for_cookbook(cookbook.name, @user_cookbook_path)
  Chef::Log.info("Validating ruby files")
  exit(1) unless syntax_checker.validate_ruby_files
  Chef::Log.info("Validating templates")
  exit(1) unless syntax_checker.validate_templates
  Chef::Log.info("Syntax OK")
  true
end
end
end
| apache-2.0 |
elonazoulay/presto | presto-main/src/main/java/com/facebook/presto/operator/aggregation/ApproximateCountDistinctAggregations.java | 6141 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator.aggregation;
import com.facebook.presto.operator.aggregation.state.HyperLogLogState;
import com.facebook.presto.spi.block.BlockBuilder;
import com.facebook.presto.spi.function.AggregationFunction;
import com.facebook.presto.spi.function.AggregationState;
import com.facebook.presto.spi.function.CombineFunction;
import com.facebook.presto.spi.function.InputFunction;
import com.facebook.presto.spi.function.LiteralParameters;
import com.facebook.presto.spi.function.OutputFunction;
import com.facebook.presto.spi.function.SqlType;
import com.facebook.presto.spi.type.StandardTypes;
import com.google.common.annotations.VisibleForTesting;
import io.airlift.slice.Slice;
import io.airlift.stats.cardinality.HyperLogLog;
import static com.facebook.presto.spi.StandardErrorCode.INVALID_FUNCTION_ARGUMENT;
import static com.facebook.presto.spi.type.BigintType.BIGINT;
import static com.facebook.presto.util.Failures.checkCondition;
/**
 * Implementations of the {@code approx_distinct} SQL aggregation, which
 * estimates the number of distinct input values with a HyperLogLog sketch.
 * Overloads cover bigint, double, varchar and varbinary inputs, each
 * optionally taking a caller-supplied maximum standard error (defaulting to
 * {@code DEFAULT_STANDARD_ERROR}).
 */
@AggregationFunction("approx_distinct")
public final class ApproximateCountDistinctAggregations
{
    // Default and permitted bounds for the max-standard-error argument;
    // standardErrorToBuckets() maps these onto HyperLogLog bucket counts.
    private static final double DEFAULT_STANDARD_ERROR = 0.023;
    private static final double LOWEST_MAX_STANDARD_ERROR = 0.0040625;
    private static final double HIGHEST_MAX_STANDARD_ERROR = 0.26000;
    // Static-only utility holder; never instantiated.
    private ApproximateCountDistinctAggregations() {}
    /** Bigint input with the default standard error. */
    @InputFunction
    public static void input(@AggregationState HyperLogLogState state, @SqlType(StandardTypes.BIGINT) long value)
    {
        input(state, value, DEFAULT_STANDARD_ERROR);
    }
    /** Bigint input; adds the value to the sketch and updates tracked memory. */
    @InputFunction
    public static void input(@AggregationState HyperLogLogState state, @SqlType(StandardTypes.BIGINT) long value, @SqlType(StandardTypes.DOUBLE) double maxStandardError)
    {
        HyperLogLog hll = getOrCreateHyperLogLog(state, maxStandardError);
        // Memory accounting: subtract the old size, add the value, then add
        // the (possibly grown) new size.
        state.addMemoryUsage(-hll.estimatedInMemorySize());
        hll.add(value);
        state.addMemoryUsage(hll.estimatedInMemorySize());
    }
    /** Double input with the default standard error. */
    @InputFunction
    public static void input(@AggregationState HyperLogLogState state, @SqlType(StandardTypes.DOUBLE) double value)
    {
        input(state, value, DEFAULT_STANDARD_ERROR);
    }
    /** Double input; hashed via its raw long bit pattern. */
    @InputFunction
    public static void input(@AggregationState HyperLogLogState state, @SqlType(StandardTypes.DOUBLE) double value, @SqlType(StandardTypes.DOUBLE) double maxStandardError)
    {
        input(state, Double.doubleToLongBits(value), maxStandardError);
    }
    /** Varchar input with the default standard error. */
    @InputFunction
    @LiteralParameters("x")
    public static void input(@AggregationState HyperLogLogState state, @SqlType("varchar(x)") Slice value)
    {
        input(state, value, DEFAULT_STANDARD_ERROR);
    }
    /** Varchar input; delegates to the binary path (same Slice handling). */
    @InputFunction
    @LiteralParameters("x")
    public static void input(@AggregationState HyperLogLogState state, @SqlType("varchar(x)") Slice value, @SqlType(StandardTypes.DOUBLE) double maxStandardError)
    {
        inputBinary(state, value, maxStandardError);
    }
    /** Varbinary input with the default standard error. */
    @InputFunction
    public static void inputBinary(@AggregationState HyperLogLogState state, @SqlType(StandardTypes.VARBINARY) Slice value)
    {
        inputBinary(state, value, DEFAULT_STANDARD_ERROR);
    }
    /** Varbinary input; adds the slice to the sketch and updates tracked memory. */
    @InputFunction
    public static void inputBinary(@AggregationState HyperLogLogState state, @SqlType(StandardTypes.VARBINARY) Slice value, @SqlType(StandardTypes.DOUBLE) double maxStandardError)
    {
        HyperLogLog hll = getOrCreateHyperLogLog(state, maxStandardError);
        state.addMemoryUsage(-hll.estimatedInMemorySize());
        hll.add(value);
        state.addMemoryUsage(hll.estimatedInMemorySize());
    }
    /**
     * Returns the state's HyperLogLog, lazily creating one sized from
     * {@code maxStandardError} on first use (and charging its memory).
     */
    private static HyperLogLog getOrCreateHyperLogLog(HyperLogLogState state, double maxStandardError)
    {
        HyperLogLog hll = state.getHyperLogLog();
        if (hll == null) {
            hll = HyperLogLog.newInstance(standardErrorToBuckets(maxStandardError));
            state.setHyperLogLog(hll);
            state.addMemoryUsage(hll.estimatedInMemorySize());
        }
        return hll;
    }
    /**
     * Converts a max standard error into a HyperLogLog bucket count.
     * The 1.0816 factor is 1.04 squared — presumably from the classic HLL
     * error estimate of about 1.04/sqrt(buckets); rounded up to a power of
     * two because HyperLogLog requires power-of-two bucket counts.
     *
     * @throws com.facebook.presto.spi.PrestoException (INVALID_FUNCTION_ARGUMENT)
     *         when maxStandardError is outside the supported range
     */
    @VisibleForTesting
    static int standardErrorToBuckets(double maxStandardError)
    {
        checkCondition(maxStandardError >= LOWEST_MAX_STANDARD_ERROR && maxStandardError <= HIGHEST_MAX_STANDARD_ERROR,
                INVALID_FUNCTION_ARGUMENT,
                "Max standard error must be in [%s, %s]: %s", LOWEST_MAX_STANDARD_ERROR, HIGHEST_MAX_STANDARD_ERROR, maxStandardError);
        return log2Ceiling((int) Math.ceil(1.0816 / (maxStandardError * maxStandardError)));
    }
    // Rounds value up to the next power of two. Only valid for value >= 2
    // (value == 1 would yield 0); the error-range check above keeps inputs
    // well above that.
    private static int log2Ceiling(int value)
    {
        return Integer.highestOneBit(value - 1) << 1;
    }
    /**
     * Merges a partial HLL into the accumulated state, keeping the memory
     * accounting in sync. NOTE(review): assumes otherState always carries a
     * non-null sketch — a null {@code input} would NPE in both branches;
     * confirm the aggregation framework guarantees this.
     */
    @CombineFunction
    public static void combineState(@AggregationState HyperLogLogState state, @AggregationState HyperLogLogState otherState)
    {
        HyperLogLog input = otherState.getHyperLogLog();
        HyperLogLog previous = state.getHyperLogLog();
        if (previous == null) {
            // First contribution: adopt the other sketch wholesale.
            state.setHyperLogLog(input);
            state.addMemoryUsage(input.estimatedInMemorySize());
        }
        else {
            state.addMemoryUsage(-previous.estimatedInMemorySize());
            previous.mergeWith(input);
            state.addMemoryUsage(previous.estimatedInMemorySize());
        }
    }
    /** Writes the final cardinality estimate; no input rows yields 0. */
    @OutputFunction(StandardTypes.BIGINT)
    public static void evaluateFinal(@AggregationState HyperLogLogState state, BlockBuilder out)
    {
        HyperLogLog hyperLogLog = state.getHyperLogLog();
        if (hyperLogLog == null) {
            BIGINT.writeLong(out, 0);
        }
        else {
            BIGINT.writeLong(out, hyperLogLog.cardinality());
        }
    }
}
| apache-2.0 |
erinspace/scrapi | tests/test_helpers.py | 7171 | import vcr
import mock
import pytest
from scrapi import requests
from scrapi.base import helpers
@pytest.fixture(autouse=True)
def mock_maybe_load_response(monkeypatch):
    """Stub response caching so tests never hit the real harvester store."""
    # _maybe_load_response always reports a cache miss.
    loader_stub = mock.Mock(return_value=None)
    monkeypatch.setattr(requests, '_maybe_load_response', loader_stub)
    # save() becomes a no-op that simply echoes the instance back.
    monkeypatch.setattr(requests.HarvesterResponse, 'save', lambda self: self)
class TestHelpers(object):
    """Unit tests for scrapi.base.helpers: tag formatting, DOI extraction,
    URI processing, and OAI record retrieval (recorded via vcr)."""

    def test_format_one_tag(self):
        # A bare string is stripped, lowercased, and wrapped in a list.
        single_tag = ' A single tag '
        single_output = helpers.format_tags(single_tag)
        assert single_output == ['a single tag']
        assert isinstance(single_output, list)

    def test_format_many_tags(self):
        many_tags = [' A', 'Bunch', ' oftags ']
        many_output = helpers.format_tags(many_tags)
        assert set(many_output) == set(['a', 'bunch', 'oftags'])

    def test_format_sep_tags(self):
        # sep splits each entry before normalization.
        sep_tags = ['These, we know', 'should be many']
        sep_output = helpers.format_tags(sep_tags, sep=',')
        assert set(sep_output) == set(['these', 'we know', 'should be many'])

    def test_extract_dois(self):
        # doi: prefixes (even doubled) are normalized to dx.doi.org URLs.
        identifiers = ['doi: THIS_IS_A_DOI!', 'http://dx.doi.org/andalsothis', 'doi:doi:thistoook']
        valid_dois = helpers.oai_extract_dois(identifiers)
        assert valid_dois == [
            'http://dx.doi.org/THIS_IS_A_DOI!',
            'http://dx.doi.org/andalsothis',
            'http://dx.doi.org/thistoook'
        ]

    # NOTE(review): this method lacks the 'test_' prefix, so pytest never
    # collects or runs it. Rename to test_oai_process_uris_* to enable it
    # (and confirm helpers.oai_extract_url still raises ValueError here).
    def oai_process_uris(self):
        identifiers = ['I might be a url but rly I am naaaahhttt']
        with pytest.raises(ValueError):
            helpers.oai_extract_url(identifiers)

    def test_extract_uris(self):
        identifiers = ['doi:10.whateverwhatever', 'http://alloutofbubblegum.com',
                       'http://viewcontent.cgi/iamacoolpdf', 'http://GETTHETABLES.com',
                       'Vahedifard, F. et al. (2013). G??otechnique 63, No. 6, 451???462 [http://dx.doi.org/10.1680/geot.11.P.130] ',
                       'I am a bunch of text but I also have a doi:10.10.thisisarealdoi']
        uri_dict = helpers.oai_process_uris(identifiers)
        assert uri_dict == {
            'canonicalUri': 'http://alloutofbubblegum.com',
            'objectUris': ['http://dx.doi.org/10.whateverwhatever',
                           'http://dx.doi.org/10.1680/geot.11.P.130',
                           'http://dx.doi.org/10.10.thisisarealdoi',
                           'http://viewcontent.cgi/iamacoolpdf'],
            'providerUris': ['http://alloutofbubblegum.com', 'http://GETTHETABLES.com']
        }

    def test_extract_uris_use_doi(self):
        # With use_doi=True the canonical URI prefers the extracted DOI.
        identifiers = ['doi:10.whateverwhatever', 'http://alloutofbubblegum.com',
                       'http://viewcontent.cgi/iamacoolpdf', 'http://GETTHETABLES.com',
                       'Vahedifard, F. et al. (2013). G??otechnique 63, No. 6, 451???462 [http://dx.doi.org/10.1680/geot.11.P.130] ',
                       'I am a bunch of text but I also have a doi:10.10.thisisarealdoi']
        uri_dict = helpers.oai_process_uris(identifiers, use_doi=True)
        assert uri_dict == {
            'canonicalUri': 'http://dx.doi.org/10.10.thisisarealdoi',
            'objectUris': ['http://dx.doi.org/10.whateverwhatever',
                           'http://dx.doi.org/10.1680/geot.11.P.130',
                           'http://dx.doi.org/10.10.thisisarealdoi',
                           'http://viewcontent.cgi/iamacoolpdf'],
            'providerUris': ['http://alloutofbubblegum.com', 'http://GETTHETABLES.com']
        }

    def test_process_contributors(self):
        args = ['Stardust Rhodes', 'Golddust Rhodes', 'Dusty Rhodes']
        response = helpers.oai_process_contributors(args)
        assert isinstance(response, list)

    # Replays a recorded OAI-PMH ListRecords response instead of hitting ASU.
    @vcr.use_cassette('tests/vcr/asu.yaml')
    def test_oai_get_records_and_token(self):
        url = 'http://repository.asu.edu/oai-pmh?verb=ListRecords&metadataPrefix=oai_dc&from=2015-03-10&until=2015-03-11'
        force = False
        verify = True
        throttle = 0.5
        namespaces = {
            'dc': 'http://purl.org/dc/elements/1.1/',
            'ns0': 'http://www.openarchives.org/OAI/2.0/',
            'oai_dc': 'http://www.openarchives.org/OAI/2.0/',
        }
        records, token = helpers.oai_get_records_and_token(url, throttle, force, namespaces, verify)
        assert records
        assert token
        assert len(records) == 50

    def test_extract_doi_from_text(self):
        text = ["""
            Ryder, Z., & Dudley, B. R. (2014). Methods of WOO WOO WOO and D3 comming atcha by a
            Continuous Flow Microreactor. Crystal Growth & Design, 14(9),
            4759-4767. doi:10.1021/woowoowoo yep yep yep what he do"""]
        extracted_doi = helpers.extract_doi_from_text(text)
        assert extracted_doi == 'http://dx.doi.org/10.1021/woowoowoo'

    def test_gather_identifiers(self):
        # Nested lists of identifiers are flattened in order.
        identifiers = [['doi:10.whateverwhatever',
                        'http://viewcontent.cgi/iamacoolpdf'],
                       '451???462 [http://dx.doi.org/10.1680/geot.11.P.130]',
                       'I am a bunch of text but I also have a doi:10.10.thisisarealdoi',
                       ['http://bubbaray.com', 'http://devon.net']]
        gathered = helpers.gather_identifiers(identifiers)
        assert gathered == ['doi:10.whateverwhatever',
                            'http://viewcontent.cgi/iamacoolpdf',
                            '451???462 [http://dx.doi.org/10.1680/geot.11.P.130]',
                            'I am a bunch of text but I also have a doi:10.10.thisisarealdoi',
                            'http://bubbaray.com',
                            'http://devon.net']

    def test_gather_object_uris(self):
        # Only DOI-bearing identifiers become object URIs.
        identifiers = ['doi:10.whateverwhatever',
                       'http://viewcontent.cgi/iamacoolpdf',
                       '451???462 [http://dx.doi.org/10.1680/geot.11.P.130]',
                       'I am a bunch of text but I also have a doi:10.10.thisisarealdoi',
                       'http://bubbaray.com',
                       'http://devon.net']
        object_uris = helpers.gather_object_uris(identifiers)
        assert object_uris == [
            'http://dx.doi.org/10.whateverwhatever',
            'http://dx.doi.org/10.1680/geot.11.P.130',
            'http://dx.doi.org/10.10.thisisarealdoi'
        ]

    def test_seperate_provider_object_uris(self):
        # DOI/content URIs are classified as object URIs; the rest as provider URIs.
        identifiers = [
            'http://dx.doi.org/10.whateverwhatever',
            'http://cgi.viewcontent.apdf.pdf',
            'http://get_the_tables.net'
        ]
        provider_uris, object_uris = helpers.seperate_provider_object_uris(identifiers)
        assert provider_uris == ['http://get_the_tables.net']
        assert object_uris == ['http://dx.doi.org/10.whateverwhatever', 'http://cgi.viewcontent.apdf.pdf']

    def test_format_doi_as_url(self):
        # Whitespace and case of the 'doi:' prefix are both normalized.
        doi1 = ' doi:10.dudleyzrule '
        doi2 = 'DOI:10.getthetables '
        assert helpers.format_doi_as_url(doi1) == 'http://dx.doi.org/10.dudleyzrule'
        assert helpers.format_doi_as_url(doi2) == 'http://dx.doi.org/10.getthetables'
| apache-2.0 |
nishantmonu51/druid | indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/LegacySinglePhaseSubTask.java | 1956 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.indexing.common.task.batch.parallel;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.druid.indexing.common.task.TaskResource;
import javax.annotation.Nullable;
import java.util.Map;
/**
 * Backwards-compatibility subtype of {@link SinglePhaseSubTask}: behaves
 * identically but reports the old task type name, so payloads serialized
 * under the legacy name continue to deserialize and run.
 */
public class LegacySinglePhaseSubTask extends SinglePhaseSubTask
{
    @JsonCreator
    public LegacySinglePhaseSubTask(
        @JsonProperty("id") @Nullable final String id,
        @JsonProperty("groupId") final String groupId,
        @JsonProperty("resource") final TaskResource taskResource,
        @JsonProperty("supervisorTaskId") final String supervisorTaskId,
        @JsonProperty("numAttempts") final int numAttempts, // zero-based counting
        @JsonProperty("spec") final ParallelIndexIngestionSpec ingestionSchema,
        @JsonProperty("context") final Map<String, Object> context
    )
    {
        // Delegates to the parent; the null argument fills a parameter the
        // legacy JSON shape never carried.
        super(
            id,
            groupId,
            taskResource,
            supervisorTaskId,
            null,
            numAttempts,
            ingestionSchema,
            context
        );
    }

    /** Reports the legacy type name instead of the current one. */
    @Override
    public String getType()
    {
        return SinglePhaseSubTask.OLD_TYPE_NAME;
    }
}
| apache-2.0 |
bbrangeo/OpenSourceBIMaaS | config/ues/repository/deployment/server/jaggeryapps/mitrai/res/lib/jquery-ui-1.10.3.custom/development-bundle/ui/minified/i18n/jquery.ui.datepicker-lb.min.js | 824 | /*! jQuery UI - v1.10.3 - 2013-10-08
* http://jqueryui.com
* Copyright 2013 jQuery Foundation and other contributors; Licensed MIT */
// Generated/minified jQuery UI vendor file: registers the Luxembourgish (lb)
// locale for Datepicker and sets it as the default. Do not edit by hand.
jQuery(function(t){t.datepicker.regional.lb={closeText:"Fäerdeg",prevText:"Zréck",nextText:"Weider",currentText:"Haut",monthNames:["Januar","Februar","Mäerz","Abrëll","Mee","Juni","Juli","August","September","Oktober","November","Dezember"],monthNamesShort:["Jan","Feb","Mäe","Abr","Mee","Jun","Jul","Aug","Sep","Okt","Nov","Dez"],dayNames:["Sonndeg","Méindeg","Dënschdeg","Mëttwoch","Donneschdeg","Freideg","Samschdeg"],dayNamesShort:["Son","Méi","Dën","Mët","Don","Fre","Sam"],dayNamesMin:["So","Mé","Dë","Më","Do","Fr","Sa"],weekHeader:"W",dateFormat:"dd.mm.yy",firstDay:1,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},t.datepicker.setDefaults(t.datepicker.regional.lb)});
stewartpark/presto | presto-orc/src/test/java/com/facebook/presto/orc/stream/TestFloatStream.java | 2663 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.orc.stream;
import com.facebook.presto.orc.OrcCorruptionException;
import com.facebook.presto.orc.OrcDecompressor;
import com.facebook.presto.orc.checkpoint.FloatStreamCheckpoint;
import io.airlift.slice.Slice;
import org.testng.annotations.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import static com.facebook.presto.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext;
import static com.facebook.presto.orc.OrcDecompressor.createOrcDecompressor;
import static com.facebook.presto.orc.metadata.CompressionKind.SNAPPY;
/**
 * Round-trip tests for the ORC float value streams: values written through
 * {@link FloatOutputStream} (Snappy compressed) must read back identically
 * via {@link FloatInputStream}. The abstract base drives the write/read
 * cycle through the hook methods below.
 */
public class TestFloatStream
        extends AbstractTestValueStream<Float, FloatStreamCheckpoint, FloatOutputStream, FloatInputStream>
{
    @Test
    public void test()
            throws IOException
    {
        // Three groups of 1000 consecutive floats, each group offset by
        // 10,000, exercising checkpoints across group boundaries.
        List<List<Float>> groups = new ArrayList<>();
        for (int groupIndex = 0; groupIndex < 3; groupIndex++) {
            List<Float> group = new ArrayList<>();
            for (int i = 0; i < 1000; i++) {
                group.add((float) (groupIndex * 10_000 + i));
            }
            groups.add(group);
        }
        testWriteValue(groups);
    }

    /** Creates the Snappy-compressed writer under test. */
    @Override
    protected FloatOutputStream createValueOutputStream()
    {
        return new FloatOutputStream(SNAPPY, COMPRESSION_BLOCK_SIZE);
    }

    /** Writes one float through the output stream. */
    @Override
    protected void writeValue(FloatOutputStream outputStream, Float value)
    {
        outputStream.writeFloat(value);
    }

    /** Wraps the written bytes in a decompressing float reader. */
    @Override
    protected FloatInputStream createValueStream(Slice slice)
            throws OrcCorruptionException
    {
        Optional<OrcDecompressor> orcDecompressor = createOrcDecompressor(ORC_DATA_SOURCE_ID, SNAPPY, COMPRESSION_BLOCK_SIZE);
        return new FloatInputStream(new OrcInputStream(ORC_DATA_SOURCE_ID, slice.getInput(), orcDecompressor, newSimpleAggregatedMemoryContext(), slice.getRetainedSize()));
    }

    /** Reads the next float back for comparison with what was written. */
    @Override
    protected Float readValue(FloatInputStream valueStream)
            throws IOException
    {
        return valueStream.next();
    }
}
| apache-2.0 |
westlywright/ui | lib/logging/addon/components/logging/code-block/component.js | 144 | import layout from './template';
import CodeBlock from 'shared/components/code-block/component';
// Logging-addon wrapper around the shared code-block component: inherits all
// behavior and only supplies this addon's own template.
export default CodeBlock.extend({ layout, });
| apache-2.0 |
glaucio-melo-movile/activemq-artemis | artemis-core-client/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/SessionReceiveContinuationMessage.java | 3834 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.core.protocol.core.impl.wireformat;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.utils.DataConstants;
/**
 * Wire-format packet carrying one chunk of a large message delivered to a
 * consumer. Extends the generic continuation packet with the target
 * consumer's ID.
 */
public class SessionReceiveContinuationMessage extends SessionContinuationMessage {
   // Constants -----------------------------------------------------
   // Base encoded size: the continuation base plus the long consumerID.
   public static final int SESSION_RECEIVE_CONTINUATION_BASE_SIZE = SESSION_CONTINUATION_BASE_SIZE + DataConstants.SIZE_LONG;
   // Attributes ----------------------------------------------------
   // ID of the consumer this chunk is destined for.
   private long consumerID;
   // Static --------------------------------------------------------
   // Constructors --------------------------------------------------
   /** No-arg constructor used by the decoder before decodeRest() runs. */
   public SessionReceiveContinuationMessage() {
      super(SESS_RECEIVE_CONTINUATION);
   }
   /**
    * @param consumerID
    * @param body
    * @param continues
    * @param requiresResponse NOTE(review): accepted but never stored or
    *        encoded — presumably kept for signature compatibility; confirm
    *        before relying on it.
    */
   public SessionReceiveContinuationMessage(final long consumerID,
                                            final byte[] body,
                                            final boolean continues,
                                            final boolean requiresResponse) {
      super(SESS_RECEIVE_CONTINUATION, body, continues);
      this.consumerID = consumerID;
   }
   /** Variant that also pins the packet size (used when replaying chunks). */
   public SessionReceiveContinuationMessage(final long consumerID,
                                            final byte[] body,
                                            final boolean continues,
                                            final boolean requiresResponse,
                                            final int packetSize) {
      this(consumerID, body, continues, requiresResponse);
      this.size = packetSize;
   }
   /**
    * @return the consumerID
    */
   public long getConsumerID() {
      return consumerID;
   }
   // Public --------------------------------------------------------
   /** Encodes the continuation fields, then the consumer ID. */
   @Override
   public void encodeRest(final ActiveMQBuffer buffer) {
      super.encodeRest(buffer);
      buffer.writeLong(consumerID);
   }
   /** Returns the encoded size; -1 is a sentinel meaning "not from the wire". */
   @Override
   public int getPacketSize() {
      if (size == -1) {
         // This packet was created by the LargeMessageController
         return 0;
      }
      else {
         return size;
      }
   }
   /** Decodes the continuation fields, then the consumer ID. */
   @Override
   public void decodeRest(final ActiveMQBuffer buffer) {
      super.decodeRest(buffer);
      consumerID = buffer.readLong();
   }
   @Override
   public int hashCode() {
      final int prime = 31;
      int result = super.hashCode();
      result = prime * result + (int) (consumerID ^ (consumerID >>> 32));
      return result;
   }
   @Override
   public boolean equals(Object obj) {
      if (this == obj)
         return true;
      if (!super.equals(obj))
         return false;
      if (!(obj instanceof SessionReceiveContinuationMessage))
         return false;
      SessionReceiveContinuationMessage other = (SessionReceiveContinuationMessage) obj;
      if (consumerID != other.consumerID)
         return false;
      return true;
   }
}
| apache-2.0 |
gouyang/kubernetes | pkg/kubectl/rolling_updater_test.go | 48024 | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
apitesting "k8s.io/kubernetes/pkg/api/testing"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/fake"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/sets"
)
// oldRc builds a fake "foo-v1" ReplicationController representing the
// pre-update deployment: replicas is used for both spec and status, and
// original is recorded in the originalReplicasAnnotation.
func oldRc(replicas int, original int) *api.ReplicationController {
	count := int32(replicas)
	meta := api.ObjectMeta{
		Name: "foo-v1",
		UID:  "7764ae47-9092-11e4-8393-42010af018ff",
		Annotations: map[string]string{
			originalReplicasAnnotation: fmt.Sprintf("%d", original),
		},
	}
	spec := api.ReplicationControllerSpec{
		Replicas: count,
		Selector: map[string]string{"version": "v1"},
		Template: &api.PodTemplateSpec{
			ObjectMeta: api.ObjectMeta{
				Name:   "foo-v1",
				Labels: map[string]string{"version": "v1"},
			},
		},
	}
	return &api.ReplicationController{
		ObjectMeta: meta,
		Spec:       spec,
		Status:     api.ReplicationControllerStatus{Replicas: count},
	}
}
// newRc builds a fake "foo-v2" ReplicationController (the "to" side of a
// rolling update) by cloning oldRc and rewriting the selector, template,
// and metadata. The desired count and the source RC are recorded in the
// desiredReplicas and sourceId annotations.
func newRc(replicas int, desired int) *api.ReplicationController {
	controller := oldRc(replicas, replicas)
	controller.Spec.Selector = map[string]string{"version": "v2"}
	controller.Spec.Template = &api.PodTemplateSpec{
		ObjectMeta: api.ObjectMeta{
			Name:   "foo-v2",
			Labels: map[string]string{"version": "v2"},
		},
	}
	// Replace the metadata wholesale (this intentionally drops the v1 UID).
	controller.ObjectMeta = api.ObjectMeta{
		Name: "foo-v2",
		Annotations: map[string]string{
			desiredReplicasAnnotation: fmt.Sprintf("%d", desired),
			sourceIdAnnotation:        "foo-v1:7764ae47-9092-11e4-8393-42010af018ff",
		},
	}
	return controller
}
// TestUpdate performs complex scenario testing for rolling updates. It
// provides fine grained control over the states for each update interval to
// allow the expression of as many edge cases as possible.
func TestUpdate(t *testing.T) {
// up represents a simulated scale up event and expectation
type up struct {
// to is the expected replica count for a scale-up
to int
}
// down represents a simulated scale down event and expectation
type down struct {
// oldReady is the number of oldRc replicas which will be seen
// as ready during the scale down attempt
oldReady int
// newReady is the number of newRc replicas which will be seen
// as ready during the scale up attempt
newReady int
// to is the expected replica count for the scale down
to int
// noop and to are mutually exclusive; if noop is true, that means for
// this down event, no scaling attempt should be made (for example, if
// by scaling down, the readiness minimum would be crossed.)
noop bool
}
tests := []struct {
name string
// oldRc is the "from" deployment
oldRc *api.ReplicationController
// newRc is the "to" deployment
newRc *api.ReplicationController
// whether newRc existed (false means it was created)
newRcExists bool
maxUnavail intstr.IntOrString
maxSurge intstr.IntOrString
// expected is the sequence of up/down events that will be simulated and
// verified
expected []interface{}
// output is the expected textual output written
output string
}{
{
name: "10->10 30/0 fast readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("30%"),
maxSurge: intstr.FromString("0%"),
expected: []interface{}{
down{oldReady: 10, newReady: 0, to: 7},
up{3},
down{oldReady: 7, newReady: 3, to: 4},
up{6},
down{oldReady: 4, newReady: 6, to: 1},
up{9},
down{oldReady: 1, newReady: 9, to: 0},
up{10},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 7
Scaling foo-v2 up to 3
Scaling foo-v1 down to 4
Scaling foo-v2 up to 6
Scaling foo-v1 down to 1
Scaling foo-v2 up to 9
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
},
{
name: "10->10 30/0 delayed readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("30%"),
maxSurge: intstr.FromString("0%"),
expected: []interface{}{
down{oldReady: 10, newReady: 0, to: 7},
up{3},
down{oldReady: 7, newReady: 0, noop: true},
down{oldReady: 7, newReady: 1, to: 6},
up{4},
down{oldReady: 6, newReady: 4, to: 3},
up{7},
down{oldReady: 3, newReady: 7, to: 0},
up{10},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 7
Scaling foo-v2 up to 3
Scaling foo-v1 down to 6
Scaling foo-v2 up to 4
Scaling foo-v1 down to 3
Scaling foo-v2 up to 7
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
}, {
name: "10->10 30/0 fast readiness, continuation",
oldRc: oldRc(7, 10),
newRc: newRc(3, 10),
newRcExists: false,
maxUnavail: intstr.FromString("30%"),
maxSurge: intstr.FromString("0%"),
expected: []interface{}{
down{oldReady: 7, newReady: 3, to: 4},
up{6},
down{oldReady: 4, newReady: 6, to: 1},
up{9},
down{oldReady: 1, newReady: 9, to: 0},
up{10},
},
output: `Created foo-v2
Scaling up foo-v2 from 3 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 4
Scaling foo-v2 up to 6
Scaling foo-v1 down to 1
Scaling foo-v2 up to 9
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
}, {
name: "10->10 30/0 fast readiness, continued after restart which prevented first scale-up",
oldRc: oldRc(7, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("30%"),
maxSurge: intstr.FromString("0%"),
expected: []interface{}{
down{oldReady: 7, newReady: 0, noop: true},
up{3},
down{oldReady: 7, newReady: 3, to: 4},
up{6},
down{oldReady: 4, newReady: 6, to: 1},
up{9},
down{oldReady: 1, newReady: 9, to: 0},
up{10},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v2 up to 3
Scaling foo-v1 down to 4
Scaling foo-v2 up to 6
Scaling foo-v1 down to 1
Scaling foo-v2 up to 9
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
}, {
name: "10->10 0/30 fast readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("0%"),
maxSurge: intstr.FromString("30%"),
expected: []interface{}{
up{3},
down{oldReady: 10, newReady: 3, to: 7},
up{6},
down{oldReady: 7, newReady: 6, to: 4},
up{9},
down{oldReady: 4, newReady: 9, to: 1},
up{10},
down{oldReady: 1, newReady: 10, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods)
Scaling foo-v2 up to 3
Scaling foo-v1 down to 7
Scaling foo-v2 up to 6
Scaling foo-v1 down to 4
Scaling foo-v2 up to 9
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 0/30 delayed readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("0%"),
maxSurge: intstr.FromString("30%"),
expected: []interface{}{
up{3},
down{oldReady: 10, newReady: 0, noop: true},
down{oldReady: 10, newReady: 1, to: 9},
up{4},
down{oldReady: 9, newReady: 3, to: 7},
up{6},
down{oldReady: 7, newReady: 6, to: 4},
up{9},
down{oldReady: 4, newReady: 9, to: 1},
up{10},
down{oldReady: 1, newReady: 9, noop: true},
down{oldReady: 1, newReady: 10, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods)
Scaling foo-v2 up to 3
Scaling foo-v1 down to 9
Scaling foo-v2 up to 4
Scaling foo-v1 down to 7
Scaling foo-v2 up to 6
Scaling foo-v1 down to 4
Scaling foo-v2 up to 9
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 10/20 fast readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("10%"),
maxSurge: intstr.FromString("20%"),
expected: []interface{}{
up{2},
down{oldReady: 10, newReady: 2, to: 7},
up{5},
down{oldReady: 7, newReady: 5, to: 4},
up{8},
down{oldReady: 4, newReady: 8, to: 1},
up{10},
down{oldReady: 1, newReady: 10, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods)
Scaling foo-v2 up to 2
Scaling foo-v1 down to 7
Scaling foo-v2 up to 5
Scaling foo-v1 down to 4
Scaling foo-v2 up to 8
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 10/20 delayed readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("10%"),
maxSurge: intstr.FromString("20%"),
expected: []interface{}{
up{2},
down{oldReady: 10, newReady: 2, to: 7},
up{5},
down{oldReady: 7, newReady: 4, to: 5},
up{7},
down{oldReady: 5, newReady: 4, noop: true},
down{oldReady: 5, newReady: 7, to: 2},
up{10},
down{oldReady: 2, newReady: 9, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods)
Scaling foo-v2 up to 2
Scaling foo-v1 down to 7
Scaling foo-v2 up to 5
Scaling foo-v1 down to 5
Scaling foo-v2 up to 7
Scaling foo-v1 down to 2
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 10/20 fast readiness continued after restart which prevented first scale-down",
oldRc: oldRc(10, 10),
newRc: newRc(2, 10),
newRcExists: false,
maxUnavail: intstr.FromString("10%"),
maxSurge: intstr.FromString("20%"),
expected: []interface{}{
down{oldReady: 10, newReady: 2, to: 7},
up{5},
down{oldReady: 7, newReady: 5, to: 4},
up{8},
down{oldReady: 4, newReady: 8, to: 1},
up{10},
down{oldReady: 1, newReady: 10, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 2 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods)
Scaling foo-v1 down to 7
Scaling foo-v2 up to 5
Scaling foo-v1 down to 4
Scaling foo-v2 up to 8
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 0/100 fast readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("0%"),
maxSurge: intstr.FromString("100%"),
expected: []interface{}{
up{10},
down{oldReady: 10, newReady: 10, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods)
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 0/100 delayed readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("0%"),
maxSurge: intstr.FromString("100%"),
expected: []interface{}{
up{10},
down{oldReady: 10, newReady: 0, noop: true},
down{oldReady: 10, newReady: 2, to: 8},
down{oldReady: 8, newReady: 7, to: 3},
down{oldReady: 3, newReady: 10, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods)
Scaling foo-v2 up to 10
Scaling foo-v1 down to 8
Scaling foo-v1 down to 3
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 100/0 fast readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 10),
newRcExists: false,
maxUnavail: intstr.FromString("100%"),
maxSurge: intstr.FromString("0%"),
expected: []interface{}{
down{oldReady: 10, newReady: 0, to: 0},
up{10},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 0 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
}, {
name: "1->1 25/25 maintain minimum availability",
oldRc: oldRc(1, 1),
newRc: newRc(0, 1),
newRcExists: false,
maxUnavail: intstr.FromString("25%"),
maxSurge: intstr.FromString("25%"),
expected: []interface{}{
up{1},
down{oldReady: 1, newReady: 0, noop: true},
down{oldReady: 1, newReady: 1, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
`,
}, {
name: "1->1 0/10 delayed readiness",
oldRc: oldRc(1, 1),
newRc: newRc(0, 1),
newRcExists: false,
maxUnavail: intstr.FromString("0%"),
maxSurge: intstr.FromString("10%"),
expected: []interface{}{
up{1},
down{oldReady: 1, newReady: 0, noop: true},
down{oldReady: 1, newReady: 1, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
`,
}, {
name: "1->1 10/10 delayed readiness",
oldRc: oldRc(1, 1),
newRc: newRc(0, 1),
newRcExists: false,
maxUnavail: intstr.FromString("10%"),
maxSurge: intstr.FromString("10%"),
expected: []interface{}{
up{1},
down{oldReady: 1, newReady: 0, noop: true},
down{oldReady: 1, newReady: 1, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
`,
}, {
name: "3->3 1/1 fast readiness (absolute values)",
oldRc: oldRc(3, 3),
newRc: newRc(0, 3),
newRcExists: false,
maxUnavail: intstr.FromInt(0),
maxSurge: intstr.FromInt(1),
expected: []interface{}{
up{1},
down{oldReady: 3, newReady: 1, to: 2},
up{2},
down{oldReady: 2, newReady: 2, to: 1},
up{3},
down{oldReady: 1, newReady: 3, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 3, scaling down foo-v1 from 3 to 0 (keep 3 pods available, don't exceed 4 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 2
Scaling foo-v2 up to 2
Scaling foo-v1 down to 1
Scaling foo-v2 up to 3
Scaling foo-v1 down to 0
`,
}, {
name: "10->10 0/20 fast readiness, continued after restart which resulted in partial first scale-up",
oldRc: oldRc(6, 10),
newRc: newRc(5, 10),
newRcExists: false,
maxUnavail: intstr.FromString("0%"),
maxSurge: intstr.FromString("20%"),
expected: []interface{}{
up{6},
down{oldReady: 6, newReady: 6, to: 4},
up{8},
down{oldReady: 4, newReady: 8, to: 2},
up{10},
down{oldReady: 1, newReady: 10, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 5 to 10, scaling down foo-v1 from 6 to 0 (keep 10 pods available, don't exceed 12 pods)
Scaling foo-v2 up to 6
Scaling foo-v1 down to 4
Scaling foo-v2 up to 8
Scaling foo-v1 down to 2
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
}, {
name: "10->20 0/300 fast readiness",
oldRc: oldRc(10, 10),
newRc: newRc(0, 20),
newRcExists: false,
maxUnavail: intstr.FromString("0%"),
maxSurge: intstr.FromString("300%"),
expected: []interface{}{
up{20},
down{oldReady: 10, newReady: 20, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 20, scaling down foo-v1 from 10 to 0 (keep 20 pods available, don't exceed 80 pods)
Scaling foo-v2 up to 20
Scaling foo-v1 down to 0
`,
}, {
name: "1->1 0/1 scale down unavailable rc to a ready rc (rollback)",
oldRc: oldRc(1, 1),
newRc: newRc(1, 1),
newRcExists: true,
maxUnavail: intstr.FromInt(0),
maxSurge: intstr.FromInt(1),
expected: []interface{}{
up{1},
down{oldReady: 0, newReady: 1, to: 0},
},
output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 1 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 0
`,
},
{
name: "3->0 1/1 desired 0 (absolute values)",
oldRc: oldRc(3, 3),
newRc: newRc(0, 0),
newRcExists: true,
maxUnavail: intstr.FromInt(1),
maxSurge: intstr.FromInt(1),
expected: []interface{}{
down{oldReady: 3, newReady: 0, to: 0},
},
output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 1 pods)
Scaling foo-v1 down to 0
`,
},
{
name: "3->0 10/10 desired 0 (percentages)",
oldRc: oldRc(3, 3),
newRc: newRc(0, 0),
newRcExists: true,
maxUnavail: intstr.FromString("10%"),
maxSurge: intstr.FromString("10%"),
expected: []interface{}{
down{oldReady: 3, newReady: 0, to: 0},
},
output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods)
Scaling foo-v1 down to 0
`,
},
{
name: "3->0 10/10 desired 0 (create new RC)",
oldRc: oldRc(3, 3),
newRc: newRc(0, 0),
newRcExists: false,
maxUnavail: intstr.FromString("10%"),
maxSurge: intstr.FromString("10%"),
expected: []interface{}{
down{oldReady: 3, newReady: 0, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods)
Scaling foo-v1 down to 0
`,
},
{
name: "0->0 1/1 desired 0 (absolute values)",
oldRc: oldRc(0, 0),
newRc: newRc(0, 0),
newRcExists: true,
maxUnavail: intstr.FromInt(1),
maxSurge: intstr.FromInt(1),
expected: []interface{}{
down{oldReady: 0, newReady: 0, to: 0},
},
output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 0 to 0 (keep 0 pods available, don't exceed 1 pods)
`,
}, {
name: "30->2 50%/0",
oldRc: oldRc(30, 30),
newRc: newRc(0, 2),
newRcExists: false,
maxUnavail: intstr.FromString("50%"),
maxSurge: intstr.FromInt(0),
expected: []interface{}{
down{oldReady: 30, newReady: 0, to: 1},
up{1},
down{oldReady: 1, newReady: 2, to: 0},
up{2},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 30 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 1
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
Scaling foo-v2 up to 2
`,
},
{
name: "2->2 1/0 blocked oldRc",
oldRc: oldRc(2, 2),
newRc: newRc(0, 2),
newRcExists: false,
maxUnavail: intstr.FromInt(1),
maxSurge: intstr.FromInt(0),
expected: []interface{}{
down{oldReady: 1, newReady: 0, to: 1},
up{1},
down{oldReady: 1, newReady: 1, to: 0},
up{2},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 1
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
Scaling foo-v2 up to 2
`,
},
{
name: "1->1 1/0 allow maxUnavailability",
oldRc: oldRc(1, 1),
newRc: newRc(0, 1),
newRcExists: false,
maxUnavail: intstr.FromString("1%"),
maxSurge: intstr.FromInt(0),
expected: []interface{}{
down{oldReady: 1, newReady: 0, to: 0},
up{1},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 0 pods available, don't exceed 1 pods)
Scaling foo-v1 down to 0
Scaling foo-v2 up to 1
`,
},
{
name: "1->2 25/25 complex asymetric deployment",
oldRc: oldRc(1, 1),
newRc: newRc(0, 2),
newRcExists: false,
maxUnavail: intstr.FromString("25%"),
maxSurge: intstr.FromString("25%"),
expected: []interface{}{
up{2},
down{oldReady: 1, newReady: 2, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods)
Scaling foo-v2 up to 2
Scaling foo-v1 down to 0
`,
},
{
name: "2->2 25/1 maxSurge trumps maxUnavailable",
oldRc: oldRc(2, 2),
newRc: newRc(0, 2),
newRcExists: false,
maxUnavail: intstr.FromString("25%"),
maxSurge: intstr.FromString("1%"),
expected: []interface{}{
up{1},
down{oldReady: 2, newReady: 1, to: 1},
up{2},
down{oldReady: 1, newReady: 2, to: 0},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 1
Scaling foo-v2 up to 2
Scaling foo-v1 down to 0
`,
},
{
name: "2->2 25/0 maxUnavailable resolves to zero, then one",
oldRc: oldRc(2, 2),
newRc: newRc(0, 2),
newRcExists: false,
maxUnavail: intstr.FromString("25%"),
maxSurge: intstr.FromString("0%"),
expected: []interface{}{
down{oldReady: 2, newReady: 0, to: 1},
up{1},
down{oldReady: 1, newReady: 1, to: 0},
up{2},
},
output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 1
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
Scaling foo-v2 up to 2
`,
},
}
for i, test := range tests {
// Extract expectations into some makeshift FIFOs so they can be returned
// in the correct order from the right places. This lets scale downs be
// expressed a single event even though the data is used from multiple
// interface calls.
oldReady := []int{}
newReady := []int{}
upTo := []int{}
downTo := []int{}
for _, event := range test.expected {
switch e := event.(type) {
case down:
oldReady = append(oldReady, e.oldReady)
newReady = append(newReady, e.newReady)
if !e.noop {
downTo = append(downTo, e.to)
}
case up:
upTo = append(upTo, e.to)
}
}
// Make a way to get the next item from our FIFOs. Returns -1 if the array
// is empty.
next := func(s *[]int) int {
slice := *s
v := -1
if len(slice) > 0 {
v = slice[0]
if len(slice) > 1 {
*s = slice[1:]
} else {
*s = []int{}
}
}
return v
}
t.Logf("running test %d (%s) (up: %v, down: %v, oldReady: %v, newReady: %v)", i, test.name, upTo, downTo, oldReady, newReady)
updater := &RollingUpdater{
ns: "default",
scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
// Return a scale up or scale down expectation depending on the rc,
// and throw errors if there is no expectation expressed for this
// call.
expected := -1
switch {
case rc == test.newRc:
t.Logf("scaling up %s to %d", rc.Name, rc.Spec.Replicas)
expected = next(&upTo)
case rc == test.oldRc:
t.Logf("scaling down %s to %d", rc.Name, rc.Spec.Replicas)
expected = next(&downTo)
}
if expected == -1 {
t.Fatalf("unexpected scale of %s to %d", rc.Name, rc.Spec.Replicas)
} else if e, a := expected, int(rc.Spec.Replicas); e != a {
t.Fatalf("expected scale of %s to %d, got %d", rc.Name, e, a)
}
// Simulate the scale.
rc.Status.Replicas = rc.Spec.Replicas
return rc, nil
},
getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
// Simulate a create vs. update of an existing controller.
return test.newRc, test.newRcExists, nil
},
cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
return nil
},
}
// Set up a mock readiness check which handles the test assertions.
updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
// Return simulated readiness, and throw an error if this call has no
// expectations defined.
oldReady := next(&oldReady)
newReady := next(&newReady)
if oldReady == -1 || newReady == -1 {
t.Fatalf("unexpected getReadyPods call for:\noldRc: %+v\nnewRc: %+v", oldRc, newRc)
}
return int32(oldReady), int32(newReady), nil
}
var buffer bytes.Buffer
config := &RollingUpdaterConfig{
Out: &buffer,
OldRc: test.oldRc,
NewRc: test.newRc,
UpdatePeriod: 0,
Interval: time.Millisecond,
Timeout: time.Millisecond,
CleanupPolicy: DeleteRollingUpdateCleanupPolicy,
MaxUnavailable: test.maxUnavail,
MaxSurge: test.maxSurge,
}
err := updater.Update(config)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if buffer.String() != test.output {
t.Errorf("Bad output. expected:\n%s\ngot:\n%s", test.output, buffer.String())
}
}
}
// TestUpdate_progressTimeout ensures that an update which isn't making any
// progress will eventually time out with a specified error.
func TestUpdate_progressTimeout(t *testing.T) {
	old := oldRc(2, 2)
	next := newRc(0, 2)
	updater := &RollingUpdater{
		ns: "default",
		scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
			// Pretend the scale succeeded without doing any work.
			return rc, nil
		},
		getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
			return next, false, nil
		},
		cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
			return nil
		},
	}
	// No pod ever becomes ready, so the update can never make progress and
	// must hit the configured timeout.
	updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
		return 0, 0, nil
	}
	var out bytes.Buffer
	config := &RollingUpdaterConfig{
		Out:            &out,
		OldRc:          old,
		NewRc:          next,
		UpdatePeriod:   0,
		Interval:       time.Millisecond,
		Timeout:        time.Millisecond,
		CleanupPolicy:  DeleteRollingUpdateCleanupPolicy,
		MaxUnavailable: intstr.FromInt(0),
		MaxSurge:       intstr.FromInt(1),
	}
	err := updater.Update(config)
	if err == nil {
		t.Fatalf("expected an error")
	}
	if e, a := "timed out waiting for any update progress to be made", err.Error(); e != a {
		t.Fatalf("expected error message: %s, got: %s", e, a)
	}
}
// TestUpdate_assignOriginalAnnotation verifies that when the old controller
// lacks the original-replicas annotation, Update writes one back recording
// the controller's replica count ("1" here, from oldRc(1, 1)).
func TestUpdate_assignOriginalAnnotation(t *testing.T) {
	oldRc := oldRc(1, 1)
	// Strip the annotation so Update is forced to assign it.
	delete(oldRc.Annotations, originalReplicasAnnotation)
	newRc := newRc(1, 1)
	// Captures the controller object written back through the fake client.
	var updatedOldRc *api.ReplicationController
	fake := &testclient.Fake{}
	fake.AddReactor("*", "*", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
		switch a := action.(type) {
		case testclient.GetAction:
			// Always serve the old controller for reads.
			return true, oldRc, nil
		case testclient.UpdateAction:
			// Remember what was written so the annotation can be asserted on.
			updatedOldRc = a.GetObject().(*api.ReplicationController)
			return true, updatedOldRc, nil
		}
		return false, nil, nil
	})
	// Stub every collaborator so only the annotation logic is exercised.
	updater := &RollingUpdater{
		c:  fake,
		ns: "default",
		scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
			return rc, nil
		},
		getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
			return newRc, false, nil
		},
		cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
			return nil
		},
		getReadyPods: func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
			return 1, 1, nil
		},
	}
	var buffer bytes.Buffer
	config := &RollingUpdaterConfig{
		Out:            &buffer,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   0,
		Interval:       time.Millisecond,
		Timeout:        time.Millisecond,
		CleanupPolicy:  DeleteRollingUpdateCleanupPolicy,
		MaxUnavailable: intstr.FromString("100%"),
	}
	err := updater.Update(config)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if updatedOldRc == nil {
		t.Fatalf("expected rc to be updated")
	}
	// The old controller had 1 replica, so the annotation must record "1".
	if e, a := "1", updatedOldRc.Annotations[originalReplicasAnnotation]; e != a {
		t.Fatalf("expected annotation value %s, got %s", e, a)
	}
}
// TestRollingUpdater_multipleContainersInPod verifies that
// CreateNewControllerFromCurrentController builds the expected next
// controller whether the pod template holds one or several containers: only
// the named container's image changes, and the deployment hash is applied to
// the controller's name, selector, and template labels.
func TestRollingUpdater_multipleContainersInPod(t *testing.T) {
	tests := []struct {
		oldRc         *api.ReplicationController // controller currently deployed
		newRc         *api.ReplicationController // expected next controller, before hashing
		container     string                     // container whose image is updated
		image         string                     // replacement image for that container
		deploymentKey string                     // label key used to distinguish deployments
	}{
		{
			// Two containers: only container1's image changes; container2
			// must be carried over untouched.
			oldRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{
					Name: "foo",
				},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{
						"dk": "old",
					},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"dk": "old",
							},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{
									Name:  "container1",
									Image: "image1",
								},
								{
									Name:  "container2",
									Image: "image2",
								},
							},
						},
					},
				},
			},
			newRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{
					Name: "foo",
				},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{
						"dk": "old",
					},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"dk": "old",
							},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{
									Name:  "container1",
									Image: "newimage",
								},
								{
									Name:  "container2",
									Image: "image2",
								},
							},
						},
					},
				},
			},
			container:     "container1",
			image:         "newimage",
			deploymentKey: "dk",
		},
		{
			// Single container: simplest update path.
			oldRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{
					Name: "bar",
				},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{
						"dk": "old",
					},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"dk": "old",
							},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{
									Name:  "container1",
									Image: "image1",
								},
							},
						},
					},
				},
			},
			newRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{
					Name: "bar",
				},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{
						"dk": "old",
					},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"dk": "old",
							},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{
									Name:  "container1",
									Image: "newimage",
								},
							},
						},
					},
				},
			},
			container:     "container1",
			image:         "newimage",
			deploymentKey: "dk",
		},
	}
	for _, test := range tests {
		// Serve the old controller for any get against the fake client.
		fake := &testclient.Fake{}
		fake.AddReactor("*", "*", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
			switch action.(type) {
			case testclient.GetAction:
				return true, test.oldRc, nil
			}
			return false, nil, nil
		})
		codec := testapi.Default.Codec()
		// Compute the hash the implementation is expected to append, then
		// fold it into the expected controller's selector, labels, and name.
		deploymentHash, err := api.HashObject(test.newRc, codec)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		test.newRc.Spec.Selector[test.deploymentKey] = deploymentHash
		test.newRc.Spec.Template.Labels[test.deploymentKey] = deploymentHash
		test.newRc.Name = fmt.Sprintf("%s-%s", test.newRc.Name, deploymentHash)
		config := &NewControllerConfig{
			OldName:       test.oldRc.ObjectMeta.Name,
			NewName:       test.newRc.ObjectMeta.Name,
			Image:         test.image,
			Container:     test.container,
			DeploymentKey: test.deploymentKey,
		}
		updatedRc, err := CreateNewControllerFromCurrentController(fake, codec, config)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(updatedRc, test.newRc) {
			t.Errorf("expected:\n%#v\ngot:\n%#v\n", test.newRc, updatedRc)
		}
	}
}
// TestRollingUpdater_cleanupWithClients ensures that the cleanup policy is
// correctly implemented: each policy must drive exactly the expected
// sequence of client verbs.
func TestRollingUpdater_cleanupWithClients(t *testing.T) {
	rc := oldRc(2, 2)
	rcExisting := newRc(1, 3)
	tests := []struct {
		name      string
		policy    RollingUpdaterCleanupPolicy
		responses []runtime.Object // canned objects served by the fake client
		expected  []string         // client verbs expected, in order
	}{
		{
			name:      "preserve",
			policy:    PreserveRollingUpdateCleanupPolicy,
			responses: []runtime.Object{rcExisting},
			expected: []string{
				"get",
				"update",
				"get",
				"get",
			},
		},
		{
			name:      "delete",
			policy:    DeleteRollingUpdateCleanupPolicy,
			responses: []runtime.Object{rcExisting},
			expected: []string{
				"get",
				"update",
				"get",
				"get",
				"delete",
			},
		},
		{
			name:      "rename",
			policy:    RenameRollingUpdateCleanupPolicy,
			responses: []runtime.Object{rcExisting},
			expected: []string{
				"get",
				"update",
				"get",
				"get",
				"delete",
				"create",
				"delete",
			},
		},
	}
	for _, test := range tests {
		fake := testclient.NewSimpleFake(test.responses...)
		updater := &RollingUpdater{
			ns: "default",
			c:  fake,
		}
		config := &RollingUpdaterConfig{
			Out:           ioutil.Discard,
			OldRc:         rc,
			NewRc:         rcExisting,
			UpdatePeriod:  0,
			Interval:      time.Millisecond,
			Timeout:       time.Millisecond,
			CleanupPolicy: test.policy,
		}
		err := updater.cleanupWithClients(rc, rcExisting, config)
		if err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
		}
		if len(fake.Actions()) != len(test.expected) {
			// Errorf+continue (rather than Fatalf) so one misbehaving
			// policy doesn't abort the remaining table cases.
			t.Errorf("%s: unexpected actions: %v, expected %v", test.name, fake.Actions(), test.expected)
			continue
		}
		for j, action := range fake.Actions() {
			if e, a := test.expected[j], action.GetVerb(); e != a {
				t.Errorf("%s: unexpected action: expected %s, got %s", test.name, e, a)
			}
		}
	}
}
// TestFindSourceController verifies that FindSourceController locates, in a
// controller list, the controller whose sourceId annotation names the given
// controller, and that it errors when no such controller exists.
func TestFindSourceController(t *testing.T) {
	ctrl1 := api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: "foo",
			Annotations: map[string]string{
				sourceIdAnnotation: "bar:1234",
			},
		},
	}
	ctrl2 := api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: "bar",
			Annotations: map[string]string{
				sourceIdAnnotation: "foo:12345",
			},
		},
	}
	ctrl3 := api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Annotations: map[string]string{
				sourceIdAnnotation: "baz:45667",
			},
		},
	}
	tests := []struct {
		list               *api.ReplicationControllerList // controllers visible to the fake client
		expectedController *api.ReplicationController     // controller expected back; nil if none
		name               string                         // name to search for
		expectError        bool
	}{
		{
			// Empty list: nothing can be found.
			list:        &api.ReplicationControllerList{},
			expectError: true,
		},
		{
			// ctrl1's source is "bar", so searching for "foo" finds nothing.
			list: &api.ReplicationControllerList{
				Items: []api.ReplicationController{ctrl1},
			},
			name:        "foo",
			expectError: true,
		},
		{
			list: &api.ReplicationControllerList{
				Items: []api.ReplicationController{ctrl1},
			},
			name:               "bar",
			expectedController: &ctrl1,
		},
		{
			list: &api.ReplicationControllerList{
				Items: []api.ReplicationController{ctrl1, ctrl2},
			},
			name:               "bar",
			expectedController: &ctrl1,
		},
		{
			list: &api.ReplicationControllerList{
				Items: []api.ReplicationController{ctrl1, ctrl2},
			},
			name:               "foo",
			expectedController: &ctrl2,
		},
		{
			list: &api.ReplicationControllerList{
				Items: []api.ReplicationController{ctrl1, ctrl2, ctrl3},
			},
			name:               "baz",
			expectedController: &ctrl3,
		},
	}
	for i, test := range tests {
		fakeClient := testclient.NewSimpleFake(test.list)
		ctrl, err := FindSourceController(fakeClient, "default", test.name)
		// Tag every failure with the case index and include the error value
		// so a failing entry can be identified and diagnosed directly.
		if test.expectError && err == nil {
			t.Errorf("%d: unexpected non-error", i)
		}
		if !test.expectError && err != nil {
			t.Errorf("%d: unexpected error: %v", i, err)
		}
		if !reflect.DeepEqual(ctrl, test.expectedController) {
			t.Errorf("%d: expected:\n%v\ngot:\n%v\n", i, test.expectedController, ctrl)
		}
	}
}
// TestUpdateExistingReplicationController verifies that
// UpdateExistingReplicationController stamps the deployment key onto the
// controller's selector and pod-template labels (preserving a value that is
// already present) and records the next-controller-id annotation.
func TestUpdateExistingReplicationController(t *testing.T) {
	tests := []struct {
		rc              *api.ReplicationController // controller passed in
		name            string
		deploymentKey   string
		deploymentValue string
		expectedRc      *api.ReplicationController // controller expected back
		expectErr       bool
	}{
		{
			// No selector/labels yet: the deployment key/value is applied.
			rc: &api.ReplicationController{
				Spec: api.ReplicationControllerSpec{
					Template: &api.PodTemplateSpec{},
				},
			},
			name:            "foo",
			deploymentKey:   "dk",
			deploymentValue: "some-hash",
			expectedRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{
					Annotations: map[string]string{
						"kubectl.kubernetes.io/next-controller-id": "foo",
					},
				},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{
						"dk": "some-hash",
					},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"dk": "some-hash",
							},
						},
					},
				},
			},
		},
		{
			// Key already present with another value: the existing value wins
			// over the supplied deploymentValue.
			rc: &api.ReplicationController{
				Spec: api.ReplicationControllerSpec{
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"dk": "some-other-hash",
							},
						},
					},
					Selector: map[string]string{
						"dk": "some-other-hash",
					},
				},
			},
			name:            "foo",
			deploymentKey:   "dk",
			deploymentValue: "some-hash",
			expectedRc: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{
					Annotations: map[string]string{
						"kubectl.kubernetes.io/next-controller-id": "foo",
					},
				},
				Spec: api.ReplicationControllerSpec{
					Selector: map[string]string{
						"dk": "some-other-hash",
					},
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"dk": "some-other-hash",
							},
						},
					},
				},
			},
		},
	}
	for i, test := range tests {
		buffer := &bytes.Buffer{}
		fakeClient := testclient.NewSimpleFake(test.expectedRc)
		rc, err := UpdateExistingReplicationController(fakeClient, test.rc, "default", test.name, test.deploymentKey, test.deploymentValue, buffer)
		// Check the error outcome before comparing the result so a failing
		// call is reported as an error rather than as a confusing nil-vs-
		// expected diff; tag every message with the case index.
		if test.expectErr && err == nil {
			t.Errorf("%d: unexpected non-error", i)
		}
		if !test.expectErr && err != nil {
			t.Errorf("%d: unexpected error: %v", i, err)
		}
		if !reflect.DeepEqual(rc, test.expectedRc) {
			t.Errorf("%d: expected:\n%#v\ngot:\n%#v\n", i, test.expectedRc, rc)
		}
	}
}
// TestUpdateWithRetries exercises updateWithRetries end to end against a
// fake HTTP client: the update function must see the mutated rc, transient
// 500s on update/get must be retried, and the rc carrying the new resource
// version must be returned to the caller.
func TestUpdateWithRetries(t *testing.T) {
	codec := testapi.Default.Codec()
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{Name: "rc",
			Labels: map[string]string{
				"foo": "bar",
			},
		},
		Spec: api.ReplicationControllerSpec{
			Selector: map[string]string{
				"foo": "bar",
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: apitesting.DeepEqualSafePodSpec(),
			},
		},
	}
	// Test end to end updating of the rc with retries. Essentially make sure the update handler
	// sees the right updates, failures in update/get are handled properly, and that the updated
	// rc with new resource version is returned to the caller. Without any of these rollingupdate
	// will fail cryptically.
	newRc := *rc
	newRc.ResourceVersion = "2"
	newRc.Spec.Selector["baz"] = "foobar"
	// Canned responses, consumed FIFO by the handler below: two failed PUTs
	// followed by a success, and one failed GET followed by a success.
	updates := []*http.Response{
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 200, Body: objBody(codec, &newRc)},
	}
	gets := []*http.Response{
		{StatusCode: 500, Body: objBody(codec, &api.ReplicationController{})},
		{StatusCode: 200, Body: objBody(codec, rc)},
	}
	fakeClient := &fake.RESTClient{
		Codec: codec,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT":
				update := updates[0]
				updates = updates[1:]
				// We should always get an update with a valid rc even when the get fails. The rc should always
				// contain the update.
				if c, ok := readOrDie(t, req, codec).(*api.ReplicationController); !ok || !reflect.DeepEqual(rc, c) {
					t.Errorf("Unexpected update body, got %+v expected %+v", c, rc)
				} else if sel, ok := c.Spec.Selector["baz"]; !ok || sel != "foobar" {
					t.Errorf("Expected selector label update, got %+v", c.Spec.Selector)
				} else {
					// Undo the mutation so the next retry's body compares
					// equal to the pristine rc again.
					delete(c.Spec.Selector, "baz")
				}
				return update, nil
			case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "GET":
				get := gets[0]
				gets = gets[1:]
				return get, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	clientConfig := &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}
	client := client.NewOrDie(clientConfig)
	client.Client = fakeClient.Client
	if rc, err := updateWithRetries(
		client.ReplicationControllers("default"), rc, func(c *api.ReplicationController) {
			c.Spec.Selector["baz"] = "foobar"
		}); err != nil {
		t.Errorf("unexpected error: %v", err)
	} else if sel, ok := rc.Spec.Selector["baz"]; !ok || sel != "foobar" || rc.ResourceVersion != "2" {
		t.Errorf("Expected updated rc, got %+v", rc)
	}
	// Every canned response must have been consumed, i.e. the expected
	// number of retries actually happened.
	if len(updates) != 0 || len(gets) != 0 {
		t.Errorf("Remaining updates %+v gets %+v", updates, gets)
	}
}
// readOrDie decodes the body of req with codec, aborting the test on any
// read or decode failure.
func readOrDie(t *testing.T, req *http.Request, codec runtime.Codec) runtime.Object {
	data, err := ioutil.ReadAll(req.Body)
	if err != nil {
		// Fatalf is the idiomatic equivalent of Errorf followed by FailNow.
		t.Fatalf("Error reading: %v", err)
	}
	obj, err := runtime.Decode(codec, data)
	if err != nil {
		t.Fatalf("error decoding: %v", err)
	}
	return obj
}
// objBody serializes obj with codec and wraps the result in a ReadCloser,
// suitable for use as a fake HTTP response body.
func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser {
	payload := []byte(runtime.EncodeOrDie(codec, obj))
	return ioutil.NopCloser(bytes.NewReader(payload))
}
// TestAddDeploymentHash verifies that AddDeploymentKeyToReplicationController
// PUTs an updated copy of every pod selected by the controller and finally
// updates the controller itself.
func TestAddDeploymentHash(t *testing.T) {
	buf := &bytes.Buffer{}
	codec := testapi.Default.Codec()
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{Name: "rc"},
		Spec: api.ReplicationControllerSpec{
			Selector: map[string]string{
				"foo": "bar",
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
			},
		},
	}
	// Three pods nominally selected by the rc above.
	podList := &api.PodList{
		Items: []api.Pod{
			{ObjectMeta: api.ObjectMeta{Name: "foo"}},
			{ObjectMeta: api.ObjectMeta{Name: "bar"}},
			{ObjectMeta: api.ObjectMeta{Name: "baz"}},
		},
	}
	// Names of pods that received a PUT, and whether the rc itself did.
	seen := sets.String{}
	updatedRc := false
	fakeClient := &fake.RESTClient{
		Codec: codec,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == testapi.Default.ResourcePath("pods", "default", "") && m == "GET":
				// The pod list must be fetched with the rc's selector.
				if req.URL.RawQuery != "labelSelector=foo%3Dbar" {
					t.Errorf("Unexpected query string: %s", req.URL.RawQuery)
				}
				return &http.Response{StatusCode: 200, Body: objBody(codec, podList)}, nil
			case p == testapi.Default.ResourcePath("pods", "default", "foo") && m == "PUT":
				// Record the PUT and fold the written pod back into the list.
				seen.Insert("foo")
				obj := readOrDie(t, req, codec)
				podList.Items[0] = *(obj.(*api.Pod))
				return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[0])}, nil
			case p == testapi.Default.ResourcePath("pods", "default", "bar") && m == "PUT":
				seen.Insert("bar")
				obj := readOrDie(t, req, codec)
				podList.Items[1] = *(obj.(*api.Pod))
				return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[1])}, nil
			case p == testapi.Default.ResourcePath("pods", "default", "baz") && m == "PUT":
				seen.Insert("baz")
				obj := readOrDie(t, req, codec)
				podList.Items[2] = *(obj.(*api.Pod))
				return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[2])}, nil
			case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT":
				updatedRc = true
				return &http.Response{StatusCode: 200, Body: objBody(codec, rc)}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	clientConfig := &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}
	client := client.NewOrDie(clientConfig)
	client.Client = fakeClient.Client
	if _, err := AddDeploymentKeyToReplicationController(rc, client, "dk", "hash", api.NamespaceDefault, buf); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Every pod must have been updated...
	for _, pod := range podList.Items {
		if !seen.Has(pod.Name) {
			t.Errorf("Missing update for pod: %s", pod.Name)
		}
	}
	// ...and so must the controller itself.
	if !updatedRc {
		t.Errorf("Failed to update replication controller with new labels")
	}
}
// TestRollingUpdater_readyPods verifies that readyPods counts, per
// controller, the owned pods whose PodReady condition is true.
func TestRollingUpdater_readyPods(t *testing.T) {
	// mkpod returns a pod labeled to match owner's selector, with its
	// PodReady condition set according to ready.
	mkpod := func(owner *api.ReplicationController, ready bool) *api.Pod {
		labels := map[string]string{}
		for k, v := range owner.Spec.Selector {
			labels[k] = v
		}
		status := api.ConditionTrue
		if !ready {
			status = api.ConditionFalse
		}
		return &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   "pod",
				Labels: labels,
			},
			Status: api.PodStatus{
				Conditions: []api.PodCondition{
					{
						Type:   api.PodReady,
						Status: status,
					},
				},
			},
		}
	}
	tests := []struct {
		oldRc *api.ReplicationController
		newRc *api.ReplicationController
		// expected old/new ready counts
		oldReady int32
		newReady int32
		// pods owned by the rcs; indicate whether they're ready
		oldPods []bool
		newPods []bool
	}{
		{
			// All old pods ready; half the new pods ready.
			oldRc:    oldRc(4, 4),
			newRc:    newRc(4, 4),
			oldReady: 4,
			newReady: 2,
			oldPods: []bool{
				true,
				true,
				true,
				true,
			},
			newPods: []bool{
				true,
				false,
				true,
				false,
			},
		},
		{
			oldRc:    oldRc(4, 4),
			newRc:    newRc(4, 4),
			oldReady: 0,
			newReady: 1,
			oldPods: []bool{
				false,
			},
			newPods: []bool{
				true,
			},
		},
		{
			oldRc:    oldRc(4, 4),
			newRc:    newRc(4, 4),
			oldReady: 1,
			newReady: 0,
			oldPods: []bool{
				true,
			},
			newPods: []bool{
				false,
			},
		},
	}
	for i, test := range tests {
		t.Logf("evaluating test %d", i)
		// Populate the fake client with pods associated with their owners.
		pods := []runtime.Object{}
		for _, ready := range test.oldPods {
			pods = append(pods, mkpod(test.oldRc, ready))
		}
		for _, ready := range test.newPods {
			pods = append(pods, mkpod(test.newRc, ready))
		}
		client := testclient.NewSimpleFake(pods...)
		updater := &RollingUpdater{
			ns: "default",
			c:  client,
		}
		oldReady, newReady, err := updater.readyPods(test.oldRc, test.newRc)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if e, a := test.oldReady, oldReady; e != a {
			t.Errorf("expected old ready %d, got %d", e, a)
		}
		if e, a := test.newReady, newReady; e != a {
			t.Errorf("expected new ready %d, got %d", e, a)
		}
	}
}
| apache-2.0 |
shankarh/geode | geode-core/src/main/java/org/apache/geode/internal/admin/StatAlertsManager.java | 13124 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.admin;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TimerTask;
import org.apache.logging.log4j.Logger;
import org.apache.geode.CancelException;
import org.apache.geode.StatisticDescriptor;
import org.apache.geode.Statistics;
import org.apache.geode.StatisticsType;
import org.apache.geode.admin.jmx.internal.StatAlertsAggregator;
import org.apache.geode.distributed.internal.DistributionManager;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import org.apache.geode.internal.SystemTimer;
import org.apache.geode.internal.SystemTimer.SystemTimerTask;
import org.apache.geode.internal.admin.remote.AlertsNotificationMessage;
import org.apache.geode.internal.admin.remote.UpdateAlertDefinitionMessage;
import org.apache.geode.internal.admin.statalerts.DummyStatisticInfoImpl;
import org.apache.geode.internal.admin.statalerts.StatisticInfo;
import org.apache.geode.internal.admin.statalerts.StatisticInfoImpl;
import org.apache.geode.internal.i18n.LocalizedStrings;
import org.apache.geode.internal.logging.LogService;
import org.apache.geode.internal.logging.log4j.LocalizedMessage;
/**
* The alert manager maintains the list of alert definitions (added by client e.g GFMon 2.0).
*
* It retrieved the value of statistic( defined in alert definition) and notify alert aggregator
* sitting on admin VM
*
* @see StatAlertDefinition
* @see StatAlert
*
*
* @since GemFire 5.7
*/
public class StatAlertsManager {
  private static final Logger logger = LogService.getLogger();

  /**
   * Singleton instance bound to the current distribution manager.
   *
   * Guarded by StatAlertsManager.class
   */
  private static StatAlertsManager alertManager;

  /**
   * Interval, in milliseconds, between two evaluations of the alert definitions.
   *
   * Guarded by this
   */
  private long refreshInterval;

  /**
   * Maps an alert definition id (Integer) to its StatAlertDefinition. Entries are added and
   * removed by {@link #updateAlertDefinition} and read by the evaluation timer task.
   *
   * Guarded by this.alertDefinitionsMap
   */
  protected final HashMap alertDefinitionsMap = new HashMap();

  /**
   * Timer driving the periodic evaluation; cancelled and recreated whenever the schedule
   * parameters change.
   *
   * Guarded by this
   */
  private SystemTimer timer;

  /**
   * If true the evaluation task is scheduled at a fixed rate, otherwise with a fixed delay
   * between executions.
   *
   * Guarded by this
   */
  private boolean refreshAtFixedRate;

  /**
   * Provides life cycle support
   */
  protected final DistributionManager dm;

  private StatAlertsManager(DistributionManager dm) {
    this.dm = dm;
    logger.info(
        LocalizedMessage.create(LocalizedStrings.StatAlertsManager_STATALERTSMANAGER_CREATED));
  }

  /**
   * @return singleton instance of StatAlertsManager
   */
  public synchronized static StatAlertsManager getInstance(DistributionManager dm) {
    // As per current implementation set up request will be send only once ,
    // when member joined to Admin distributed system
    // we don't need to care about race condition
    if (alertManager != null && alertManager.dm == dm) {
      return alertManager;
    }
    // A manager bound to a previous DM is stale; release its timer before replacing it.
    if (alertManager != null) {
      alertManager.close();
    }

    /*
     * Throw DistributedSystemDisconnectedException if cancel operation is in progress
     */
    dm.getCancelCriterion().checkCancelInProgress(null);

    alertManager = new StatAlertsManager(dm);
    return alertManager;
  }

  /**
   * Nullifies the StatAlertsManager instance.
   */
  private synchronized static void closeInstance() {
    StatAlertsManager.alertManager = null;
  }

  /**
   *
   * Update the alert's definition map
   *
   * @param defns Alert definitions
   * @param actionCode Action to be performed like add , remove or update alert's definition
   *
   * @see UpdateAlertDefinitionMessage
   */
  public void updateAlertDefinition(StatAlertDefinition[] defns, int actionCode) {
    if (logger.isDebugEnabled()) {
      logger.debug("Entered StatAlertsManager.updateAlertDefinition *****");
    }
    synchronized (alertDefinitionsMap) {
      if (actionCode == UpdateAlertDefinitionMessage.REMOVE_ALERT_DEFINITION) {
        for (int i = 0; i < defns.length; i++) {
          alertDefinitionsMap.remove(Integer.valueOf(defns[i].getId()));
          if (logger.isDebugEnabled()) {
            logger.debug("Removed StatAlertDefinition: {}", defns[i].getName());
          }
        }
      } else {
        // Translate the incoming (client-side) definitions into member-local ones, then
        // store them keyed by the incoming definition's id.
        StatAlertDefinition[] alertDefns = this.createMemberStatAlertDefinition(dm, defns);
        StatAlertDefinition defn;
        for (int i = 0; i < alertDefns.length; i++) {
          defn = alertDefns[i];
          alertDefinitionsMap.put(Integer.valueOf(defns[i].getId()), defn);
        }
      }
    } // synchronized
    if (logger.isDebugEnabled()) {
      logger.debug("Exiting StatAlertsManager.updateAlertDefinition *****");
    }
  }

  private synchronized void rescheduleTimer() {
    // cancel the old timer. Although cancelled, old task might execute one last
    // time
    if (timer != null) {
      timer.cancel();
    }

    // Get the swarm. Currently rather UGLY.
    InternalDistributedSystem system = dm.getSystem();
    if (system == null || system.getDistributionManager() != dm) {
      throw new org.apache.geode.distributed.DistributedSystemDisconnectedException(
          "This manager has been cancelled");
    }

    // start and schedule new timer
    timer = new SystemTimer(system /* swarm */, true);

    EvaluateAlertDefnsTask task = new EvaluateAlertDefnsTask();
    if (refreshAtFixedRate) {
      timer.scheduleAtFixedRate(task, 0, refreshInterval);
    } else {
      timer.schedule(task, 0, refreshInterval);
    }
  }

  /**
   * Set refresh time interval also cancel the previous {@link TimerTask} and create new timer task
   * based on new refresh time interval
   *
   * @param interval Refresh time interval
   */
  public synchronized void setRefreshTimeInterval(long interval) {
    refreshInterval = interval;
    rescheduleTimer();
  }

  /**
   *
   * @return time interval alert generation
   */
  public synchronized long getRefreshTimeInterval() {
    return refreshInterval;
  }

  /**
   * @return true if refresh for timer has to be fixed rate see scheduleAtFixedRate method of
   *         {@link TimerTask}
   */
  public synchronized boolean isRefreshAtFixedRate() {
    return refreshAtFixedRate;
  }

  /**
   * set true if refresh for timer has to be fixed rate see scheduleAtFixedRate method of
   * {@link TimerTask}
   *
   * TODO never called
   *
   * @param refreshAtFixedRate
   */
  public synchronized void setRefreshAtFixedRate(boolean refreshAtFixedRate) {
    this.refreshAtFixedRate = refreshAtFixedRate;
    rescheduleTimer();
  }

  /**
   * Evaluate every registered alert definition and collect the alerts that were raised, i.e.
   * those whose statistic value crossed the threshold configured in the definition.
   *
   * @return the alerts raised during this evaluation pass, all stamped with the same time
   */
  protected StatAlert[] getAlerts() {
    Set alerts = new HashSet();
    synchronized (alertDefinitionsMap) {
      Set keyset = alertDefinitionsMap.keySet();
      Iterator iter = keyset.iterator();
      StatAlert alert;
      // One timestamp for the whole pass so related alerts compare equal in time.
      Date now = new Date();
      while (iter.hasNext()) {
        Integer key = (Integer) iter.next();
        StatAlertDefinition defn = (StatAlertDefinition) alertDefinitionsMap.get(key);
        alert = defn.evaluateAndAlert();
        if (alert != null) {
          alert.setTime(now);
          alerts.add(alert);
          if (logger.isDebugEnabled()) {
            logger.debug("getAlerts: found alert {}", alert);
          }
        }
      } // while
    } // synchronized
    return (StatAlert[]) alerts.toArray(new StatAlert[alerts.size()]);
  }

  /**
   * Convert {@link StatAlertDefinition }(Created by client like GFMon2.0) with
   * {@link DummyStatisticInfoImpl} to StatAlertDefinition with {@link StatisticInfoImpl}.
   * Definitions whose statistics cannot be resolved on this member are logged and excluded
   * from the returned array.
   */
  private StatAlertDefinition[] createMemberStatAlertDefinition(DistributionManager dm,
      StatAlertDefinition[] defns) {
    dm.getCancelCriterion().checkCancelInProgress(null);

    Statistics[] statistics;
    StatisticsType type;
    StatisticDescriptor desc;
    String textId;
    boolean skipDefinition = false;
    List result = new ArrayList();

    for (int i = 0; i < defns.length; i++) {
      skipDefinition = false;
      StatAlertDefinition defn = defns[i];
      StatisticInfo[] statInfos = defn.getStatisticInfo();
      for (int ii = 0; ii < statInfos.length && !skipDefinition; ii++) {
        textId = statInfos[ii].getStatisticsTextId();
        // TODO If none by TextID, use StatType and getAll.
        statistics = dm.getSystem().findStatisticsByTextId(textId);
        if (statistics.length == 0) {
          logger.error(LocalizedMessage.create(
              LocalizedStrings.StatAlertsManager_STATALERTSMANAGER_CREATEMEMBERSTATALERTDEFINITION_STATISTICS_WITH_GIVEN_TEXTID_0_NOT_FOUND,
              textId));
          skipDefinition = true;
          // break;
          continue; // To print all errors
        }
        type = statistics[0].getType();
        desc = type.nameToDescriptor(statInfos[ii].getStatisticName());
        // Replace the actual StatInfo object
        statInfos[ii] = new StatisticInfoImpl(statistics[0], desc);
        if (logger.isDebugEnabled()) {
          logger.debug("StatAlertsManager.createMemberStatAlertDefinition: created statInfo {}",
              statInfos[ii]);
        }
      } // for

      if (!skipDefinition) {
        defn.setStatisticInfo(statInfos);
        result.add(defn);
        if (logger.isDebugEnabled()) {
          logger.debug("StatAlertsManager.createMemberStatAlertDefinition :: {}",
              defns[i].getStringRepresentation());
        }
      } else {
        if (logger.isDebugEnabled()) {
          logger.debug(
              "StatAlertsManager.createMemberStatAlertDefinition :: StatAlertDefinition {} is excluded",
              defn.getName());
        }
      }
    } // for

    return (StatAlertDefinition[]) result.toArray(new StatAlertDefinition[result.size()]);
  }

  /**
   * Shut down this instance
   */
  protected synchronized void close() {
    // nullify the manager instance first
    closeInstance();

    // cancel the old timer. Although canceled, old task might execute one last
    // time
    if (timer != null) {
      timer.cancel();
    }
    timer = null;
  }

  /**
   * Timer task to send all the alerts raised to {@link StatAlertsAggregator}
   *
   */
  class EvaluateAlertDefnsTask extends SystemTimerTask {
    /**
     * Collect all the alerts raised and send it to {@link StatAlertsAggregator}
     */
    @Override
    public void run2() {
      final boolean isDebugEnabled = logger.isDebugEnabled();

      synchronized (StatAlertsManager.this) {
        if (dm.getCancelCriterion().isCancelInProgress()) {
          return;
        }

        // start alert notification are supposed to send to all the
        // admin agents exists in the system.
        // For the DS without agent, alert manager should not create
        // any alert notifications
        Set adminMemberSet = dm.getAdminMemberSet();
        if (adminMemberSet == null || adminMemberSet.isEmpty()) {
          return;
        }

        if (isDebugEnabled) {
          logger.debug("EvaluateAlertDefnsTask: starting");
        }
        try {
          StatAlert[] alerts = getAlerts();
          if (alerts.length == 0) {
            if (isDebugEnabled) {
              logger.debug("EvaluateAlertsDefnsTask: no alerts");
            }
            return;
          }

          AlertsNotificationMessage request = new AlertsNotificationMessage();
          request.setAlerts(alerts);
          if (isDebugEnabled) {
            Iterator iterator = adminMemberSet.iterator();
            while (iterator.hasNext()) {
              logger.debug("EvaluateAlertDefnsTask: sending {} alerts to {}", alerts.length,
                  iterator.next());
            }
          }
          request.setRecipients(adminMemberSet);
          dm.putOutgoing(request);
        } catch (CancelException e) {
          logger.debug("EvaluateAlertDefnsTask: system closed: {}", e.getMessage(), e);
          close();
        } catch (Exception e) {
          logger.error(
              LocalizedMessage.create(
                  LocalizedStrings.StatAlertsManager_EVALUATEALERTDEFNSTASK_FAILED_WITH_AN_EXCEPTION),
              e);
          close();
        }
        if (isDebugEnabled) {
          logger.debug("EvaluateAlertDefnsTask: done ");
        }
      }
    } // run
  } // EvaluateAlertDefnsTask
}
| apache-2.0 |
QuantConnect/Lean | Algorithm.Python/RollingWindowAlgorithm.py | 3197 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Using rolling windows for efficient storage of historical data; which automatically clears after a period of time.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="history and warm up" />
### <meta name="tag" content="history" />
### <meta name="tag" content="warm up" />
### <meta name="tag" content="indicators" />
### <meta name="tag" content="rolling windows" />
class RollingWindowAlgorithm(QCAlgorithm):
    '''Demonstrates rolling windows: fixed-length, most-recent-first histories of
    trade bars and indicator points that discard the oldest entry automatically.'''

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # One month of daily SPY data starting October 2013, $100k simulated cash.
        self.SetStartDate(2013,10,1)
        self.SetEndDate(2013,11,1)
        self.SetCash(100000)

        # Find more symbols here: http://quantconnect.com/data
        self.AddEquity("SPY", Resolution.Daily)

        # Hold the two most recent SPY trade bars (use QuoteBar for other security types).
        self.bars = RollingWindow[TradeBar](2)

        # A 5-period SMA whose Updated event pushes each new point into a
        # five-element rolling window of indicator values.
        self.sma = self.SMA("SPY", 5)
        self.sma.Updated += self.SmaUpdated
        self.sma_points = RollingWindow[IndicatorDataPoint](5)

    def SmaUpdated(self, sender, updated):
        '''Adds updated values to rolling window'''
        self.sma_points.Add(updated)

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''
        # Record the latest SPY bar in its rolling window.
        self.bars.Add(data["SPY"])

        # Nothing to compare until both windows have filled up.
        if not self.bars.IsReady: return
        if not self.sma_points.IsReady: return

        newest_bar = self.bars[0]      # index 0 is always the most recent element
        previous_bar = self.bars[1]    # index 1 is one bar older
        self.Log("Price: {0} -> {1} ... {2} -> {3}".format(previous_bar.Time, previous_bar.Close, newest_bar.Time, newest_bar.Close))

        newest_sma = self.sma_points[0]
        oldest_sma = self.sma_points[self.sma_points.Count-1]  # Count-1 is the oldest point
        self.Log("SMA: {0} -> {1} ... {2} -> {3}".format(oldest_sma.Time, oldest_sma.Value, newest_sma.Time, newest_sma.Value))

        # Go fully long while flat whenever the SMA has risen over the window.
        if not self.Portfolio.Invested and newest_sma.Value > oldest_sma.Value:
            self.SetHoldings("SPY", 1)
| apache-2.0 |
rlugojr/incubator-trafodion | core/sqf/src/stfs/stfsd/stfsd_close.cpp | 3157 | ///////////////////////////////////////////////////////////////////////////////
//
/// \file stfsd_close.cpp
/// \brief
///
/// This file contains the implementation of the STFSd_createFragment()
/// function.
//
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
///////////////////////////////////////////////////////////////////////////////
#include <unistd.h>
#include <stdlib.h>
#include <stdarg.h>
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <iostream>
#include "stfs/stfslib.h"
#include "stfs_metadata.h"
#include "stfs_defs.h"
#include "stfs_util.h"
#include "stfs_message.h"
#include "stfsd.h"
#include "stfsd_close.h"
namespace STFS {
///////////////////////////////////////////////////////////////////////////////
///
// STFSd_close
///
/// \brief Processes a file close request given the open identifier
///
/// \param STFS_OpenIdentifier *pp_OpenId
///
///////////////////////////////////////////////////////////////////////////////
int STFSd_close(STFS_OpenIdentifier *pp_OpenId)
{
  const char *WHERE = "STFSd_close";
  STFS_ScopeTrace lv_st(WHERE);

  // A null open identifier cannot designate any open file.
  if (!pp_OpenId) {
    return -1;
  }

  // Locate the singleton container that tracks every external file opener.
  STFS_ExternalFileOpenerContainer *lp_OpenerContainer =
    STFS_ExternalFileOpenerContainer::GetInstance();
  if (!lp_OpenerContainer) {
    TRACE_PRINTF2(1,"%s\n", "Null EFO Container");
    return -1;
  }

  // Resolve the opener info recorded for this particular open identifier.
  STFS_ExternalFileOpenerInfo *lp_OpenerInfo = lp_OpenerContainer->Get(pp_OpenId);
  if (!lp_OpenerInfo) {
    TRACE_PRINTF3(1,
                  "Open Id: %d,%ld not found in the EFO Container\n",
                  pp_OpenId->sqOwningDaemonNodeId,
                  pp_OpenId->openIdentifier
                  );
    return -1;
  }

  // The opener entry must carry the external file metadata for the file.
  STFS_ExternalFileMetadata *lp_Metadata = lp_OpenerInfo->efm_;
  if (!lp_Metadata) {
    TRACE_PRINTF1(1,"Null EFM Found in the Efoi Entry\n");
    return -1;
  }

  TraceOpeners(lp_OpenerContainer,
               lp_Metadata);

  // Close the file first; only then is it safe to forget the opener.
  int lv_Status = lp_Metadata->Close(true);
  if (lv_Status < 0) {
    return lv_Status;
  }

  // Remove this open from the opener container.
  lv_Status = lp_OpenerContainer->Delete(pp_OpenId);
  if (lv_Status < 0) {
    return lv_Status;
  }

  TraceOpeners(lp_OpenerContainer,
               lp_Metadata);

  // Finally retire the metadata entry for the now-closed file.
  lv_Status = STFS_ExternalFileMetadata::DeleteFromContainer(lp_Metadata);
  if (lv_Status < 0) {
    return lv_Status;
  }

  return lv_Status;
}
} //namespace STFS
| apache-2.0 |
osrf/opensplice | src/api/dcps/isocpp/include/spec/dds/core/policy/TQosPolicyCount.hpp | 1905 | #ifndef OMG_TDDS_CORE_POLICY_QOS_POLICY_COUNT_HPP_
#define OMG_TDDS_CORE_POLICY_QOS_POLICY_COUNT_HPP_
/* Copyright 2010, Object Management Group, Inc.
* Copyright 2010, PrismTech, Corp.
* Copyright 2010, Real-Time Innovations, Inc.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <dds/core/Value.hpp>
namespace dds
{
namespace core
{
namespace policy
{
/**
 * Value type pairing a QosPolicyId with an occurrence count. It is used by
 * incompatible-QoS status structures to report, per policy, how many times an
 * incompatibility involving that policy was detected — for example, how many
 * times a DataWriter discovered a DataReader for the same Topic whose
 * requested QoS was incompatible with the QoS the DataWriter offers.
 */
template <typename D>
class TQosPolicyCount : public dds::core::Value<D>
{
public:
    /**
     * Creates a QosPolicyCount instance
     *
     * @param policy_id identifies the QosPolicy this counter refers to
     * @param count the number of occurrences recorded for that policy
     */
    TQosPolicyCount(QosPolicyId policy_id, int32_t count);

    /**
     * Copies a QosPolicyCount instance
     *
     * @param other the QosPolicyCount instance to copy
     */
    TQosPolicyCount(const TQosPolicyCount& other);

public:
    /**
     * Gets the id of the QosPolicy this counter refers to
     *
     * @return the policy_id
     */
    QosPolicyId policy_id() const;

    /**
     * Gets the number of occurrences recorded for the policy
     *
     * @return the count
     */
    int32_t count() const;
};
}
}
}
#endif // !defined(OMG_TDDS_CORE_POLICY_QOS_POLICY_COUNT_HPP_)
| apache-2.0 |
awesome-niu/android-floatinglabel-widgets | library/src/main/java/com/marvinlabs/widget/floatinglabel/instantpicker/Instant.java | 255 | package com.marvinlabs.widget.floatinglabel.instantpicker;
import android.os.Parcelable;
/**
 * Marker interface for an instant (a date or a time) that can be produced by an instant picker.
 * Extends {@link android.os.Parcelable} so implementations can be written to and restored from a
 * {@code Parcel} (e.g. when widget state is saved).
 * <p>
 * Created by Vincent Mimoun-Prat @ MarvinLabs, 01/09/2014.
 */
public interface Instant extends Parcelable {
}
| apache-2.0 |
arcadoss/js-invulnerable | src/com/google/javascript/jscomp/RenameLabels.java | 8086 | /*
* Copyright 2008 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.javascript.jscomp.NodeTraversal.ScopedCallback;
import com.google.javascript.rhino.Node;
import com.google.javascript.rhino.Token;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
/**
* RenameLabels renames all the labels so that they have short names, to reduce
* code size and also to obfuscate the code.
*
* Label names have a unique namespace, so variable or function names clashes
* are not a concern, but keywords clashes are.
*
* Additionally, labels names are only within the statements include in the
* label and do not cross function boundaries. This means that it is possible to
* create one label name that is used for labels at any given depth of label
* nesting. Typically, the name "a" will be used for all top level labels, "b"
* for the next nested label, and so on. For example:
*
* <code>
* function bar() {
* a: {
* b: {
* foo();
* }
* }
*
* a: {
* b: break a;
* }
* }
* </code>
*
* The general processes is as follows: process() is the entry point for the
* CompilerPass, and from there a standard "ScopedCallback" traversal is done,
* where "shouldTraverse" is called when descending the tree, and the "visit" is
* called in a depth first manner. The name for the label is selected during the
* decent in "shouldTraverse", and the references to the label name are renamed
* as they are encountered during the "visit". This means that if the label is
* unreferenced, it is known when the label node is visited, and, if so, can be
* safely removed.
*
* @author johnlenz@google.com (John Lenz)
*/
final class RenameLabels implements CompilerPass {
  private final AbstractCompiler compiler;

  RenameLabels(AbstractCompiler compiler) {
    this.compiler = compiler;
  }

  /**
   * Iterate through the nodes, renaming all the labels.
   */
  class ProcessLabels implements ScopedCallback {

    ProcessLabels() {
      // Create a entry for global scope.
      namespaceStack.push(new LabelNamespace());
    }

    // A stack of labels namespaces. Labels in an outer scope aren't part of an
    // inner scope, so a new namespace is created each time a scope is entered.
    final Deque<LabelNamespace> namespaceStack = Lists.newLinkedList();

    // NameGenerator is used to create safe label names.
    final NameGenerator nameGenerator =
        new NameGenerator(new HashSet<String>(), "", null);

    // The list of generated names. Typically, the first name will be "a",
    // the second "b", etc.
    final ArrayList<String> names = new ArrayList<String>();

    @Override
    public void enterScope(NodeTraversal nodeTraversal) {
      // Start a new namespace for label names.
      namespaceStack.push(new LabelNamespace());
    }

    @Override
    public void exitScope(NodeTraversal nodeTraversal) {
      namespaceStack.pop();
    }

    /**
     * shouldTraverse is called when descending into the Node tree, so it is
     * used here to build the context for label renames.
     *
     * {@inheritDoc}
     */
    @Override
    public boolean shouldTraverse(NodeTraversal nodeTraversal, Node node,
        Node parent) {
      if (node.getType() == Token.LABEL) {
        // Determine the new name for this label.
        LabelNamespace current = namespaceStack.peek();
        int currentDepth = current.renameMap.size() + 1;
        String name = node.getFirstChild().getString();

        // Store the context for this label name.
        LabelInfo li = new LabelInfo(currentDepth);
        Preconditions.checkState(!current.renameMap.containsKey(name));
        current.renameMap.put(name, li);

        // Create a new name, if needed, for this depth.
        if (names.size() < currentDepth) {
          names.add(nameGenerator.generateNextName());
        }

        String newName = getNameForId(currentDepth);
        compiler.addToDebugLog("label renamed: " + name + " => " + newName);
      }

      return true;
    }

    /**
     * Delegate the actual processing of the node to visitLabel and
     * visitBreakOrContinue.
     *
     * {@inheritDoc}
     */
    @Override
    public void visit(NodeTraversal nodeTraversal, Node node, Node parent) {
      switch (node.getType()) {
        case Token.LABEL:
          visitLabel(node, parent);
          break;

        case Token.BREAK:
        case Token.CONTINUE:
          visitBreakOrContinue(node);
          break;
      }
    }

    /**
     * Rename label references in breaks and continues.
     * @param node The break or continue node.
     */
    private void visitBreakOrContinue(Node node) {
      Node nameNode = node.getFirstChild();
      if (nameNode != null) {
        // This is a named break or continue;
        String name = nameNode.getString();
        Preconditions.checkState(name.length() != 0);
        LabelInfo li = getLabelInfo(name);
        if (li != null) {
          String newName = getNameForId(li.id);
          // Mark the label as referenced so it isn't removed.
          li.referenced = true;
          if (!name.equals(newName)) {
            // Give it the short name.
            nameNode.setString(newName);
            compiler.reportCodeChange();
          }
        }
      }
    }

    /**
     * Rename or remove labels.
     * @param node The label node.
     * @param parent The parent of the label node.
     */
    private void visitLabel(Node node, Node parent) {
      Node nameNode = node.getFirstChild();
      Preconditions.checkState(nameNode != null);
      String name = nameNode.getString();
      LabelInfo li = getLabelInfo(name);
      // This is a label...
      if (li.referenced) {
        String newName = getNameForId(li.id);
        if (!name.equals(newName)) {
          // ... and it is used, give it the short name.
          nameNode.setString(newName);
          compiler.reportCodeChange();
        }
      } else {
        // ... and it is not referenced, just remove it.
        Node newChild = node.getLastChild();
        node.removeChild(newChild);
        parent.replaceChild(node, newChild);
        if (newChild.getType() == Token.BLOCK) {
          NodeUtil.tryMergeBlock(newChild);
        }
        compiler.reportCodeChange();
      }

      // Remove the label from the current stack of labels.
      namespaceStack.peek().renameMap.remove(name);
    }

    /**
     * @param id The id, which is the depth of the label in the current context,
     *        for which to get a short name.
     * @return The short name of the identified label.
     */
    String getNameForId(int id) {
      return names.get(id - 1);
    }

    /**
     * @param name The name to retrieve information about.
     * @return The structure representing the name in the current context.
     */
    LabelInfo getLabelInfo(String name) {
      return namespaceStack.peek().renameMap.get(name);
    }
  }

  @Override
  public void process(Node externs, Node root) {
    // Do variable reference counting.
    NodeTraversal.traverse(compiler, root, new ProcessLabels());
  }

  /** Per-label bookkeeping: its depth-based id and whether any break/continue uses it. */
  private static class LabelInfo {
    boolean referenced = false;
    final int id;

    LabelInfo(int id) {
      this.id = id;
    }
  }

  /** One level of label scoping: maps original label names to their rename info. */
  private static class LabelNamespace {
    final Map<String, LabelInfo> renameMap = new HashMap<String, LabelInfo>();
  }
}
| apache-2.0 |
oalles/camel | components/camel-tagsoup/src/main/java/org/apache/camel/dataformat/tagsoup/springboot/TidyMarkupDataFormatAutoConfiguration.java | 2537 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.dataformat.tagsoup.springboot;
import java.util.HashMap;
import java.util.Map;
import org.apache.camel.CamelContext;
import org.apache.camel.CamelContextAware;
import org.apache.camel.dataformat.tagsoup.TidyMarkupDataFormat;
import org.apache.camel.util.IntrospectionSupport;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Configuration
@EnableConfigurationProperties(TidyMarkupDataFormatConfiguration.class)
public class TidyMarkupDataFormatAutoConfiguration {
@Bean(name = "tidyMarkup-dataformat")
@ConditionalOnClass(CamelContext.class)
@ConditionalOnMissingBean(TidyMarkupDataFormat.class)
public TidyMarkupDataFormat configureTidyMarkupDataFormat(
CamelContext camelContext,
TidyMarkupDataFormatConfiguration configuration) throws Exception {
TidyMarkupDataFormat dataformat = new TidyMarkupDataFormat();
if (dataformat instanceof CamelContextAware) {
((CamelContextAware) dataformat).setCamelContext(camelContext);
}
Map<String, Object> parameters = new HashMap<>();
IntrospectionSupport.getProperties(configuration, parameters, null,
false);
IntrospectionSupport.setProperties(camelContext,
camelContext.getTypeConverter(), dataformat, parameters);
return dataformat;
}
} | apache-2.0 |
yugangw-msft/azure-sdk-for-net | sdk/synapse/Azure.Analytics.Synapse.Artifacts/src/Generated/Models/MongoDbCollectionDataset.cs | 3279 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// <auto-generated/>
#nullable disable
using System;
using System.Collections.Generic;
namespace Azure.Analytics.Synapse.Artifacts.Models
{
/// <summary> The MongoDB database dataset. </summary>
public partial class MongoDbCollectionDataset : Dataset
{
/// <summary> Initializes a new instance of MongoDbCollectionDataset. </summary>
/// <param name="linkedServiceName"> Linked service reference. </param>
/// <param name="collectionName"> The table name of the MongoDB database. Type: string (or Expression with resultType string). </param>
/// <exception cref="ArgumentNullException"> <paramref name="linkedServiceName"/> or <paramref name="collectionName"/> is null. </exception>
public MongoDbCollectionDataset(LinkedServiceReference linkedServiceName, object collectionName) : base(linkedServiceName)
{
if (linkedServiceName == null)
{
throw new ArgumentNullException(nameof(linkedServiceName));
}
if (collectionName == null)
{
throw new ArgumentNullException(nameof(collectionName));
}
CollectionName = collectionName;
Type = "MongoDbCollection";
}
/// <summary> Initializes a new instance of MongoDbCollectionDataset. </summary>
/// <param name="type"> Type of dataset. </param>
/// <param name="description"> Dataset description. </param>
/// <param name="structure"> Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement. </param>
/// <param name="schema"> Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement. </param>
/// <param name="linkedServiceName"> Linked service reference. </param>
/// <param name="parameters"> Parameters for dataset. </param>
/// <param name="annotations"> List of tags that can be used for describing the Dataset. </param>
/// <param name="folder"> The folder that this Dataset is in. If not specified, Dataset will appear at the root level. </param>
/// <param name="additionalProperties"> . </param>
/// <param name="collectionName"> The table name of the MongoDB database. Type: string (or Expression with resultType string). </param>
internal MongoDbCollectionDataset(string type, string description, object structure, object schema, LinkedServiceReference linkedServiceName, IDictionary<string, ParameterSpecification> parameters, IList<object> annotations, DatasetFolder folder, IDictionary<string, object> additionalProperties, object collectionName) : base(type, description, structure, schema, linkedServiceName, parameters, annotations, folder, additionalProperties)
{
CollectionName = collectionName;
Type = type ?? "MongoDbCollection";
}
/// <summary> The table name of the MongoDB database. Type: string (or Expression with resultType string). </summary>
public object CollectionName { get; set; }
}
}
| apache-2.0 |
jwren/intellij-community | python/testData/inspections/PyCompatibilityInspection/binaryExpression.py | 263 | print(<warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10, 3.11 do not support <>, use != instead">a <> b</warning>)
if <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10, 3.11 do not support <>, use != instead">a <> 2</warning>:
var = a | apache-2.0 |
bgoodin/silver-stripes | stripes/src/main/java/net/sourceforge/stripes/config/Configuration.java | 9400 | /* Copyright 2005-2006 Tim Fennell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sourceforge.stripes.config;
import net.sourceforge.stripes.controller.ActionBeanPropertyBinder;
import net.sourceforge.stripes.controller.ActionResolver;
import net.sourceforge.stripes.controller.ActionBeanContextFactory;
import net.sourceforge.stripes.controller.ObjectFactory;
import net.sourceforge.stripes.localization.LocalizationBundleFactory;
import net.sourceforge.stripes.localization.LocalePicker;
import net.sourceforge.stripes.validation.TypeConverterFactory;
import net.sourceforge.stripes.validation.ValidationMetadataProvider;
import net.sourceforge.stripes.tag.TagErrorRendererFactory;
import net.sourceforge.stripes.tag.PopulationStrategy;
import net.sourceforge.stripes.format.FormatterFactory;
import net.sourceforge.stripes.controller.Interceptor;
import net.sourceforge.stripes.controller.LifecycleStage;
import net.sourceforge.stripes.controller.multipart.MultipartWrapperFactory;
import net.sourceforge.stripes.exception.ExceptionHandler;
import javax.servlet.ServletContext;
import java.util.Collection;
/**
* <p>Type safe interface for accessing configuration information used to configure Stripes. All
* Configuration implementations are handed a reference to the BootstrapPropertyResolver to
* enable them to find initial values and fully initialize themselves. Through the
* BootstrapPropertyResolver implementations also get access to the ServletConfig of the
* DispatcherServlet which can be used for locating configuration values if desired.</p>
*
* <p>Implementations of Configuration should fail fast. At initialization time they should
* detect as many failures as possible and raise an exception. Since exceptions in Configuration
* are considered fatal there are no exception specifications and implementations are expected to
* throw runtime exceptions with plenty of details about the failure and its suspected cause(s).</p>
*
* @author Tim Fennell
*/
public interface Configuration {

    /**
     * Supplies the Configuration with a BootstrapPropertyResolver. This method is guaranteed to
     * be invoked prior to the init method.
     *
     * @param resolver a BootStrapPropertyResolver which can be used to find any values required
     *        by the Configuration in order to initialize
     */
    void setBootstrapPropertyResolver(BootstrapPropertyResolver resolver);

    /**
     * Called by the DispatcherServlet to initialize the Configuration. Any operations which may
     * fail and cause the Configuration to be inaccessible should be performed here (e.g.
     * opening a configuration file and reading the contents).
     */
    void init();

    /**
     * Implementations should implement this method to simply return a reference to the
     * BootstrapPropertyResolver passed to the Configuration at initialization time.
     *
     * @return BootstrapPropertyResolver the instance passed to the init() method
     */
    BootstrapPropertyResolver getBootstrapPropertyResolver();

    /**
     * Retrieves the ServletContext for the context within which the Stripes application
     * is executing.
     *
     * @return the ServletContext in which the application is running
     */
    ServletContext getServletContext();

    /**
     * Enable or disable debug mode.
     *
     * @param debugMode true to enable debug mode, false to disable it
     */
    void setDebugMode(boolean debugMode);

    /** Returns true if the Stripes application is running in debug mode. */
    boolean isDebugMode();

    /**
     * Returns an instance of {@link ObjectFactory} that is used throughout Stripes to instantiate
     * classes.
     *
     * @return an instance of {@link ObjectFactory}.
     */
    ObjectFactory getObjectFactory();

    /**
     * Returns an instance of ActionResolver that will be used by Stripes to lookup and resolve
     * ActionBeans. The instance should be cached by the Configuration since multiple entities
     * in the system may access the ActionResolver throughout the lifetime of the application.
     *
     * @return ActionResolver the configured ActionResolver instance
     */
    ActionResolver getActionResolver();

    /**
     * Returns an instance of ActionBeanPropertyBinder that is responsible for binding all
     * properties to all ActionBeans at runtime. The instance should be cached by the Configuration
     * since multiple entities in the system may access the ActionBeanPropertyBinder throughout the
     * lifetime of the application.
     *
     * @return ActionBeanPropertyBinder the property binder to be used by Stripes
     */
    ActionBeanPropertyBinder getActionBeanPropertyBinder();

    /**
     * Returns an instance of TypeConverterFactory that is responsible for providing lookups and
     * instances of TypeConverters for the validation system. The instance should be cached by the
     * Configuration since multiple entities in the system may access the TypeConverterFactory
     * throughout the lifetime of the application.
     *
     * @return TypeConverterFactory an instance of a TypeConverterFactory implementation
     */
    TypeConverterFactory getTypeConverterFactory();

    /**
     * Returns an instance of LocalizationBundleFactory that is responsible for looking up
     * resource bundles for the varying localization needs of a web application. The instance should
     * be cached by the Configuration since multiple entities in the system may access the
     * LocalizationBundleFactory throughout the lifetime of the application.
     *
     * @return LocalizationBundleFactory an instance of a LocalizationBundleFactory implementation
     */
    LocalizationBundleFactory getLocalizationBundleFactory();

    /**
     * Returns an instance of LocalePicker that is responsible for choosing the Locale for
     * each request that enters the system.
     *
     * @return LocalePicker an instance of a LocalePicker implementation
     */
    LocalePicker getLocalePicker();

    /**
     * Returns an instance of FormatterFactory that is responsible for creating Formatter objects
     * for converting rich types into Strings for display on pages.
     *
     * @return FormatterFactory an instance of a FormatterFactory implementation
     */
    FormatterFactory getFormatterFactory();

    /**
     * Returns an instance of a tag error renderer factory for building custom error renderers
     * for form input tags that have field errors.
     *
     * @return TagErrorRendererFactory an instance of TagErrorRendererFactory
     */
    TagErrorRendererFactory getTagErrorRendererFactory();

    /**
     * Returns an instance of a PopulationStrategy that determines from where a tag's value
     * should be repopulated.
     *
     * @return PopulationStrategy an instance of PopulationStrategy
     */
    PopulationStrategy getPopulationStrategy();

    /**
     * Returns an instance of an action bean context factory which will used throughout Stripes
     * to manufacture ActionBeanContext objects. This allows projects to extend ActionBeanContext
     * and provide additional type safe methods for accessing contextual information cleanly.
     *
     * @return ActionBeanContextFactory an instance of ActionBeanContextFactory
     */
    ActionBeanContextFactory getActionBeanContextFactory();

    /**
     * Fetches the interceptors that should be executed around the lifecycle stage applied.
     * Must return a non-null collection, but the collection may be empty. The Interceptors
     * are invoked around the code which executes the given lifecycle function (e.g.
     * ActionBeanResolution), and as a result can execute code both before and after it.
     *
     * @param stage the lifecycle stage whose surrounding interceptors are requested
     * @return {@code Collection<Interceptor>} an ordered collection of interceptors to be executed
     *         around the given lifecycle stage.
     */
    Collection<Interceptor> getInterceptors(LifecycleStage stage);

    /**
     * Returns an instance of ExceptionHandler that can be used by Stripes to handle any
     * exceptions that arise as the result of processing a request.
     *
     * @return ExceptionHandler an instance of ExceptionHandler
     */
    ExceptionHandler getExceptionHandler();

    /**
     * Returns an instance of MultipartWrapperFactory that can be used by Stripes to construct
     * MultipartWrapper instances for dealing with multipart requests (those containing file
     * uploads).
     *
     * @return MultipartWrapperFactory an instance of the wrapper factory
     */
    MultipartWrapperFactory getMultipartWrapperFactory();

    /**
     * Returns an instance of {@link ValidationMetadataProvider} that can be used by Stripes to
     * determine what validations need to be applied during
     * {@link LifecycleStage#BindingAndValidation}.
     *
     * @return an instance of {@link ValidationMetadataProvider}
     */
    ValidationMetadataProvider getValidationMetadataProvider();
}
| apache-2.0 |
Unicon/openregistry | openregistry-repository-jpa-impl/src/main/java/org/openregistry/core/factory/jpa/JpaPersonFactory.java | 1530 | /**
* Licensed to Jasig under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Jasig licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.openregistry.core.factory.jpa;
import org.openregistry.core.domain.jpa.JpaPersonImpl;
import org.openregistry.core.domain.Person;
import org.springframework.beans.factory.ObjectFactory;
import org.springframework.beans.BeansException;
import javax.inject.Named;
/**
 * Autowired component that will construct a new JpaPersonImpl to be fed to our other layers. There should only be one
 * of these configured at a given time.
 *
 * @author Scott Battaglia
 * @version $Revision$ $Date$
 * @since 1.0.0
 */
@Named("personFactory")
public final class JpaPersonFactory implements ObjectFactory<Person> {

    /**
     * Returns a brand-new, empty {@link JpaPersonImpl} on every invocation.
     *
     * @return a new JPA-backed {@link Person}; never null
     * @throws BeansException declared by the {@link ObjectFactory} contract; not thrown here
     */
    public Person getObject() throws BeansException {
        return new JpaPersonImpl();
    }
}
| apache-2.0 |
dump247/aws-sdk-java | aws-java-sdk-core/src/main/java/com/amazonaws/Response.java | 1198 | /*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws;
import com.amazonaws.http.HttpResponse;
/**
 * Pairs the unmarshalled AWS result with the raw {@link HttpResponse} it was
 * parsed from, so callers can inspect transport-level details alongside the
 * typed response object. Instances are immutable.
 *
 * @param <T> the underlying AWS response type.
 */
public final class Response<T> {

    /** The unmarshalled, service-specific result object. */
    private final T awsResponse;

    /** The raw HTTP response the result was parsed from. */
    private final HttpResponse httpResponse;

    /**
     * @param response     the unmarshalled AWS result
     * @param httpResponse the raw HTTP response it was parsed from
     */
    public Response(T response, HttpResponse httpResponse) {
        this.awsResponse = response;
        this.httpResponse = httpResponse;
    }

    /** @return the unmarshalled AWS result object. */
    public T getAwsResponse() {
        return awsResponse;
    }

    /** @return the raw HTTP response associated with this result. */
    public HttpResponse getHttpResponse() {
        return httpResponse;
    }
}
| apache-2.0 |
jxauchengchao/async-http-client | api/src/main/java/org/asynchttpclient/RequestBuilder.java | 1703 | /*
* Copyright 2010 Ning, Inc.
*
* Ning licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.asynchttpclient;
import org.asynchttpclient.util.UriEncoder;
/**
 * Builder for a {@link Request}.
 * Warning: mutable and not thread-safe! Beware that it holds a reference on the Request instance it builds,
 * so modifying the builder will modify the request even after it has been built.
 */
public class RequestBuilder extends RequestBuilderBase<RequestBuilder> {

    /** Creates a builder for a GET request with URL encoding enabled. */
    public RequestBuilder() {
        super(RequestBuilder.class, "GET", false);
    }

    /**
     * Creates a builder for the given HTTP method with URL encoding enabled.
     *
     * @param method the HTTP method (e.g. "GET", "POST")
     */
    public RequestBuilder(String method) {
        super(RequestBuilder.class, method, false);
    }

    /**
     * Creates a builder for the given HTTP method.
     *
     * @param method             the HTTP method
     * @param disableUrlEncoding true to pass the URL through without encoding it
     */
    public RequestBuilder(String method, boolean disableUrlEncoding) {
        super(RequestBuilder.class, method, disableUrlEncoding);
    }

    /**
     * Creates a builder for the given HTTP method using a specific URI encoder.
     *
     * @param method     the HTTP method
     * @param uriEncoder the encoder applied to the request URI
     */
    public RequestBuilder(String method, UriEncoder uriEncoder) {
        super(RequestBuilder.class, method, uriEncoder);
    }

    /**
     * Creates a builder initialized from an existing request.
     * NOTE: the prototype's state is shared, per the class-level warning.
     *
     * @param prototype the request to copy settings from
     */
    public RequestBuilder(Request prototype) {
        super(RequestBuilder.class, prototype);
    }

    /**
     * Creates a builder initialized from an existing request, with a specific URI encoder.
     *
     * @param prototype  the request to copy settings from
     * @param uriEncoder the encoder applied to the request URI
     */
    public RequestBuilder(Request prototype, UriEncoder uriEncoder) {
        super(RequestBuilder.class, prototype, uriEncoder);
    }
}
| apache-2.0 |
bacaldwell/ironic | ironic/tests/unit/drivers/modules/msftocs/test_common.py | 5369 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for MSFT OCS common functions
"""
import mock
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.msftocs import common as msftocs_common
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_msftocs_info()
class MSFTOCSCommonTestCase(db_base.DbTestCase):
    """Unit tests for the MSFT OCS driver common helper functions."""

    def setUp(self):
        # Register the fake_msftocs driver and create a node that uses it.
        super(MSFTOCSCommonTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake_msftocs')
        # NOTE(review): INFO_DICT is module-level shared state; tests must not
        # mutate it (they only mutate task.node.driver_info copies below).
        self.info = INFO_DICT
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_msftocs',
                                               driver_info=self.info)

    def test_get_client_info(self):
        """get_client_info() builds a client from the node's driver_info."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            driver_info = task.node.driver_info
            (client, blade_id) = msftocs_common.get_client_info(driver_info)
            # The client must carry the credentials/URL straight from driver_info.
            self.assertEqual(driver_info['msftocs_base_url'], client._base_url)
            self.assertEqual(driver_info['msftocs_username'], client._username)
            self.assertEqual(driver_info['msftocs_password'], client._password)
            self.assertEqual(driver_info['msftocs_blade_id'], blade_id)

    @mock.patch.object(msftocs_common, '_is_valid_url', autospec=True)
    def test_parse_driver_info(self, mock_is_valid_url):
        """parse_driver_info() validates the configured base URL."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            msftocs_common.parse_driver_info(task.node)
            mock_is_valid_url.assert_called_once_with(
                task.node.driver_info['msftocs_base_url'])

    def test_parse_driver_info_fail_missing_param(self):
        """A missing msftocs_base_url raises MissingParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            del task.node.driver_info['msftocs_base_url']
            self.assertRaises(exception.MissingParameterValue,
                              msftocs_common.parse_driver_info,
                              task.node)

    def test_parse_driver_info_fail_bad_url(self):
        """A malformed base URL raises InvalidParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.driver_info['msftocs_base_url'] = "bad-url"
            self.assertRaises(exception.InvalidParameterValue,
                              msftocs_common.parse_driver_info,
                              task.node)

    def test_parse_driver_info_fail_bad_blade_id_type(self):
        """A non-integer blade id raises InvalidParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.driver_info['msftocs_blade_id'] = "bad-blade-id"
            self.assertRaises(exception.InvalidParameterValue,
                              msftocs_common.parse_driver_info,
                              task.node)

    def test_parse_driver_info_fail_bad_blade_id_value(self):
        """A non-positive blade id (0) raises InvalidParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.driver_info['msftocs_blade_id'] = 0
            self.assertRaises(exception.InvalidParameterValue,
                              msftocs_common.parse_driver_info,
                              task.node)

    def test__is_valid_url(self):
        """_is_valid_url() accepts well-formed http(s) URLs and rejects the rest."""
        self.assertIs(True, msftocs_common._is_valid_url("http://fake.com"))
        self.assertIs(
            True, msftocs_common._is_valid_url("http://www.fake.com"))
        self.assertIs(True, msftocs_common._is_valid_url("http://FAKE.com"))
        self.assertIs(True, msftocs_common._is_valid_url("http://fake"))
        self.assertIs(
            True, msftocs_common._is_valid_url("http://fake.com/blah"))
        self.assertIs(True, msftocs_common._is_valid_url("http://localhost"))
        self.assertIs(True, msftocs_common._is_valid_url("https://fake.com"))
        self.assertIs(True, msftocs_common._is_valid_url("http://10.0.0.1"))
        self.assertIs(False, msftocs_common._is_valid_url("bad-url"))
        self.assertIs(False, msftocs_common._is_valid_url("http://.bad-url"))
        self.assertIs(False, msftocs_common._is_valid_url("http://bad-url$"))
        self.assertIs(False, msftocs_common._is_valid_url("http://$bad-url"))
        self.assertIs(False, msftocs_common._is_valid_url("http://bad$url"))
        # Non-string inputs must be rejected, not raise.
        self.assertIs(False, msftocs_common._is_valid_url(None))
        self.assertIs(False, msftocs_common._is_valid_url(0))
| apache-2.0 |
GuGuss/kafka | core/src/main/scala/kafka/tools/ProducerPerformance.scala | 11698 | /*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import kafka.utils.Utils
import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong
import kafka.producer._
import async.DefaultEventHandler
import kafka.serializer.StringEncoder
import org.apache.log4j.Logger
import joptsimple.{OptionSet, OptionParser}
import java.util.{Random, Properties}
import kafka.message.{CompressionCodec, Message, ByteBufferMessageSet}
/**
* Load test for the producer
*/
object ProducerPerformance {
  /**
   * Entry point: parses options, fans out `numThreads` producer tasks
   * (sync or async), waits for them all, then reports aggregate throughput.
   */
  def main(args: Array[String]) {
    val logger = Logger.getLogger(getClass)
    val config = new PerfConfig(args)
    if(!config.isFixSize)
      logger.info("WARN: Throughput will be slower due to changing message size per request")
    // Shared counters that every producer thread folds its totals into.
    val totalBytesSent = new AtomicLong(0)
    val totalMessagesSent = new AtomicLong(0)
    val executor = Executors.newFixedThreadPool(config.numThreads)
    val allDone = new CountDownLatch(config.numThreads)
    val startMs = System.currentTimeMillis
    val rand = new java.util.Random
    // One producer task per thread; sync and async share the same counters.
    for(i <- 0 until config.numThreads) {
      if(config.isAsync)
        executor.execute(new AsyncProducerThread(i, config, totalBytesSent, totalMessagesSent, allDone, rand))
      else
        executor.execute(new SyncProducerThread(i, config, totalBytesSent, totalMessagesSent, allDone, rand))
    }
    // Block until every thread has counted down, then print the summary.
    allDone.await()
    val elapsedSecs = (System.currentTimeMillis - startMs) / 1000.0
    logger.info("Total Num Messages: " + totalMessagesSent.get + " bytes: " + totalBytesSent.get + " in " + elapsedSecs + " secs")
    logger.info("Messages/sec: " + (1.0 * totalMessagesSent.get / elapsedSecs).formatted("%.4f"))
    logger.info("MB/sec: " + (totalBytesSent.get / elapsedSecs / (1024.0*1024.0)).formatted("%.4f"))
    System.exit(0)
  }
  /**
   * Command-line configuration for the perf test, parsed with joptsimple.
   * Exits the JVM (status 1) when a required option is missing.
   */
  class PerfConfig(args: Array[String]) {
    val parser = new OptionParser
    // Broker discovery: either a static broker list or a zookeeper connect string.
    val brokerInfoOpt = parser.accepts("brokerinfo", "REQUIRED: broker info (either from zookeeper or a list.")
      .withRequiredArg
      .describedAs("broker.list=brokerid:hostname:port or zk.connect=host:port")
      .ofType(classOf[String])
    val topicOpt = parser.accepts("topic", "REQUIRED: The topic to consume from.")
      .withRequiredArg
      .describedAs("topic")
      .ofType(classOf[String])
    val numMessagesOpt = parser.accepts("messages", "REQUIRED: The number of messages to send.")
      .withRequiredArg
      .describedAs("count")
      .ofType(classOf[java.lang.Integer])
    val messageSizeOpt = parser.accepts("message-size", "The size of each message.")
      .withRequiredArg
      .describedAs("size")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(100)
    val varyMessageSizeOpt = parser.accepts("vary-message-size", "If set, message size will vary up to the given maximum.")
    val asyncOpt = parser.accepts("async", "If set, messages are sent asynchronously.")
    val delayMSBtwBatchOpt = parser.accepts("delay-btw-batch-ms", "Delay in ms between 2 batch sends.")
      .withRequiredArg
      .describedAs("ms")
      .ofType(classOf[java.lang.Long])
      .defaultsTo(0)
    val batchSizeOpt = parser.accepts("batch-size", "Number of messages to send in a single batch.")
      .withRequiredArg
      .describedAs("size")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(200)
    val numThreadsOpt = parser.accepts("threads", "Number of sending threads.")
      .withRequiredArg
      .describedAs("count")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(10)
    val reportingIntervalOpt = parser.accepts("reporting-interval", "Interval at which to print progress info.")
      .withRequiredArg
      .describedAs("size")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(5000)
    // Numeric codec id; 0 means no compression (see CompressionCodec).
    val compressionCodecOption = parser.accepts("compression-codec", "If set, messages are sent compressed")
      .withRequiredArg
      .describedAs("compression codec ")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(0)

    val options = parser.parse(args : _*)
    // Fail fast when any required option is absent.
    for(arg <- List(brokerInfoOpt, topicOpt, numMessagesOpt)) {
      if(!options.has(arg)) {
        System.err.println("Missing required argument \"" + arg + "\"")
        parser.printHelpOn(System.err)
        System.exit(1)
      }
    }
    val brokerInfo = options.valueOf(brokerInfoOpt)
    val numMessages = options.valueOf(numMessagesOpt).intValue
    val messageSize = options.valueOf(messageSizeOpt).intValue
    // Fixed message size unless --vary-message-size was given.
    val isFixSize = !options.has(varyMessageSizeOpt)
    val isAsync = options.has(asyncOpt)
    val delayedMSBtwSend = options.valueOf(delayMSBtwBatchOpt).longValue
    var batchSize = options.valueOf(batchSizeOpt).intValue
    val numThreads = options.valueOf(numThreadsOpt).intValue
    val topic = options.valueOf(topicOpt)
    val reportingInterval = options.valueOf(reportingIntervalOpt).intValue
    val compressionCodec = CompressionCodec.getCompressionCodec(options.valueOf(compressionCodecOption).intValue)
  }
private def getStringOfLength(len: Int) : String = {
val strArray = new Array[Char](len)
for (i <- 0 until len)
strArray(i) = 'x'
return new String(strArray)
}
  /**
   * Producer task that sends one message at a time through an async producer
   * ("producer.type" = "async"); batching is delegated to the producer itself.
   * Progress is logged every `reportingInterval` sends; totals are folded into
   * the shared counters and the latch is counted down on completion.
   */
  class AsyncProducerThread(val threadId: Int,
                            val config: PerfConfig,
                            val totalBytesSent: AtomicLong,
                            val totalMessagesSent: AtomicLong,
                            val allDone: CountDownLatch,
                            val rand: Random) extends Runnable {
    val logger = Logger.getLogger(getClass)
    // "brokerinfo" is either "zk.connect=..." or "broker.list=...".
    val brokerInfoList = config.brokerInfo.split("=")
    val props = new Properties()
    if (brokerInfoList(0) == "zk.connect")
      props.put("zk.connect", brokerInfoList(1))
    else
      props.put("broker.list", brokerInfoList(1))
    // Numeric codec id string, e.g. "0" for no compression.
    props.put("compression.codec", config.compressionCodec.codec.toString)
    props.put("producer.type","async")
    props.put("batch.size", config.batchSize.toString)
    props.put("reconnect.interval", Integer.MAX_VALUE.toString)
    props.put("buffer.size", (64*1024).toString)
    logger.info("Producer properties = " + props.toString)

    val producerConfig = new ProducerConfig(props)
    val producer = new Producer[String, String](producerConfig, new StringEncoder,
      new DefaultEventHandler[String](producerConfig, null), null, new DefaultPartitioner[String])

    override def run {
      var bytesSent = 0L
      var lastBytesSent = 0L
      var nSends = 0
      var lastNSends = 0
      var message = getStringOfLength(config.messageSize)
      var reportTime = System.currentTimeMillis()
      var lastReportTime = reportTime
      val messagesPerThread = config.numMessages / config.numThreads
      logger.info("Messages per thread = " + messagesPerThread)
      for(j <- 0 until messagesPerThread) {
        var strLength = config.messageSize
        // Vary-message-size mode: draw a fresh random length per message.
        if (!config.isFixSize) {
          strLength = rand.nextInt(config.messageSize)
          message = getStringOfLength(strLength)
          bytesSent += strLength
        }else
          bytesSent += config.messageSize
        try {
          producer.send(new ProducerData[String,String](config.topic, message))
          // Optional throttling every batchSize sends.
          if (config.delayedMSBtwSend > 0 && (nSends + 1) % config.batchSize == 0)
            Thread.sleep(config.delayedMSBtwSend)
          nSends += 1
        }catch {
          case e: Exception => e.printStackTrace
        }
        // Periodic progress report with incremental rates since the last report.
        if(nSends % config.reportingInterval == 0) {
          reportTime = System.currentTimeMillis()
          logger.info("thread " + threadId + ": " + nSends + " messages sent "
            + (1000.0 * (nSends - lastNSends) / (reportTime - lastReportTime)).formatted("%.4f") + " nMsg/sec "
            + (1000.0 * (bytesSent - lastBytesSent) / (reportTime - lastReportTime) / (1024 * 1024)).formatted("%.4f") + " MBs/sec")
          lastReportTime = reportTime
          lastBytesSent = bytesSent
          lastNSends = nSends
        }
      }
      producer.close()
      totalBytesSent.addAndGet(bytesSent)
      totalMessagesSent.addAndGet(nSends)
      allDone.countDown()
    }
  }
class SyncProducerThread(val threadId: Int,
val config: PerfConfig,
val totalBytesSent: AtomicLong,
val totalMessagesSent: AtomicLong,
val allDone: CountDownLatch,
val rand: Random) extends Runnable {
val logger = Logger.getLogger(getClass)
val props = new Properties()
val brokerInfoList = config.brokerInfo.split("=")
if (brokerInfoList(0) == "zk.connect")
props.put("zk.connect", brokerInfoList(1))
else
props.put("broker.list", brokerInfoList(1))
props.put("compression.codec", config.compressionCodec.toString)
props.put("reconnect.interval", Integer.MAX_VALUE.toString)
props.put("buffer.size", (64*1024).toString)
val producerConfig = new ProducerConfig(props)
val producer = new Producer[String, String](producerConfig, new StringEncoder,
new DefaultEventHandler[String](producerConfig, null), null, new DefaultPartitioner[String])
override def run {
var bytesSent = 0L
var lastBytesSent = 0L
var nSends = 0
var lastNSends = 0
val message = getStringOfLength(config.messageSize)
var reportTime = System.currentTimeMillis()
var lastReportTime = reportTime
val messagesPerThread = config.numMessages / config.numThreads / config.batchSize
logger.info("Messages per thread = " + messagesPerThread)
var messageSet: List[String] = Nil
for(k <- 0 until config.batchSize) {
messageSet ::= message
}
for(j <- 0 until messagesPerThread) {
var strLength = config.messageSize
if (!config.isFixSize) {
for(k <- 0 until config.batchSize) {
strLength = rand.nextInt(config.messageSize)
messageSet ::= getStringOfLength(strLength)
bytesSent += strLength
}
}else
bytesSent += config.batchSize*config.messageSize
try {
producer.send(new ProducerData[String,String](config.topic, messageSet))
if (config.delayedMSBtwSend > 0 && (nSends + 1) % config.batchSize == 0)
Thread.sleep(config.delayedMSBtwSend)
nSends += 1
}catch {
case e: Exception => e.printStackTrace
}
if(nSends % config.reportingInterval == 0) {
reportTime = System.currentTimeMillis()
logger.info("thread " + threadId + ": " + nSends + " messages sent "
+ (1000.0 * (nSends - lastNSends) * config.batchSize / (reportTime - lastReportTime)).formatted("%.4f") + " nMsg/sec "
+ (1000.0 * (bytesSent - lastBytesSent) / (reportTime - lastReportTime) / (1024 * 1024)).formatted("%.4f") + " MBs/sec")
lastReportTime = reportTime
lastBytesSent = bytesSent
lastNSends = nSends
}
}
producer.close()
totalBytesSent.addAndGet(bytesSent)
totalMessagesSent.addAndGet(nSends*config.batchSize)
allDone.countDown()
}
}
}
| apache-2.0 |
RavenB/lumify | data-mapping/core/src/test/java/io/lumify/mapping/xform/DoubleValueTransformerTest.java | 843 | package io.lumify.mapping.xform;
import java.util.Arrays;
import org.junit.runners.Parameterized.Parameters;
/**
 * Parameterized tests for {@code DoubleValueTransformer}: each case pairs an
 * input string with the Double it is expected to produce, or null when the
 * input is blank or not parseable as a number.
 */
public class DoubleValueTransformerTest extends AbstractValueTransformerTest<Double> {
    /** Test matrix: { input string, expected Double (null = no value) }. */
    @Parameters(name="{index}: {0}->{1}")
    public static Iterable<Object[]> getTestValues() {
        return Arrays.asList(new Object[][] {
            { null, null },                  // null input -> null
            { "", null },                    // empty -> null
            { "\n \t\t \n", null },          // whitespace-only -> null
            { "1", 1.0d },
            { "27.73692", 27.73692d },
            { "-3.14", -3.14d },
            { " \t 23.0\n", 23.0d },         // surrounding whitespace is tolerated
            { "23/b", null },                // malformed -> null
            { "10/2", null },                // fraction syntax is not evaluated
            { "not a number", null }
        });
    }

    public DoubleValueTransformerTest(final String testVal, final Double expected) {
        super(new DoubleValueTransformer(), testVal, expected);
    }
}
| apache-2.0 |
lcarli/NodeRed-Azure-Storage-Table | node_modules/azure-storage/lib/common/util/validate.js | 12964 | //
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
var _ = require('underscore');
var util = require('util');
var constants = require('./../util/constants');
var blobConstants = constants.BlobConstants;
var FileUtilities = require('./../../services/file/fileutilities');
var azureutil = require('./util');
var SR = require('./sr');
var check = require('validator');
var errors = require('../errors/errors');
var ArgumentError = errors.ArgumentError;
var ArgumentNullError = errors.ArgumentNullError;
exports = module.exports;
/**
 * Normalizes how validators report errors. When a callback is supplied, the
 * produced `fail` delivers the error to it and returns false; when no callback
 * is supplied, `fail` throws instead and a no-op callback is substituted.
 * Results are handed back through `resultsCb(fail, callback)`.
 */
function initCallback(callbackParam, resultsCb) {
  var callback = callbackParam;
  var fail;
  if (callback) {
    // Callback style: report the error, signal failure with `false`.
    fail = function (err) {
      callback(err);
      return false;
    };
  } else {
    // Throw style: raise the error; substitute a no-op success callback.
    fail = function (err) {
      throw err;
    };
    callback = function () {};
  }
  resultsCb(fail, callback);
}
/**
* Checks if the given value is a valid enumeration or not.
*
* @param {object} value The value to validate.
* @param {object} list The enumeration values.
* @return {boolean}
*/
exports.isValidEnumValue = function (value, list, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
if (!list.some(function (current) {
return current.toLowerCase() === value.toLowerCase();
})) {
return fail(new RangeError(util.format('Invalid value: %s. Options are: %s.', value, list)));
}
callback();
return true;
};
/**
* Creates a anonymous function that check if the given uri is valid or not.
*
* @param {string} uri The uri to validate.
* @return {boolean}
*/
exports.isValidUri = function (uri) {
if (!check.isURL(uri)){
throw new URIError('The provided URI "' + uri + '" is invalid.');
}
return true;
};
/**
* Checks if the given host is valid or not.
*
* @param {string|object} host The host to validate.
* @return {boolean}
*/
/**
 * Checks if the given host is valid or not. A host may be supplied either as
 * a URI string (used as the primary location) or as an object carrying
 * `primaryHost` / `secondaryHost`. At least one location must be present and
 * every location supplied must be a valid URL.
 *
 * @param {string|object} host The host to validate.
 * @return {boolean} true when the host is valid.
 * @throws {ArgumentNullError} when host is missing or carries no location.
 * @throws {URIError} when a supplied location is not a valid URL.
 */
exports.isValidHost= function (host) {
  if (azureutil.objectIsNull(host)) {
    throw new ArgumentNullError('host', SR.STORAGE_HOST_LOCATION_REQUIRED);
  } else {
    var storageHost = {};
    // A plain string is treated as the primary host; secondary stays undefined.
    storageHost.primaryHost = _.isString(host) ? host : host.primaryHost;
    if (storageHost.primaryHost && !check.isURL(storageHost.primaryHost)){
      throw new URIError('The provided URI "' + storageHost.primaryHost + '" is invalid.');
    }
    storageHost.secondaryHost = _.isString(host) ? undefined : host.secondaryHost;
    if (storageHost.secondaryHost && !check.isURL(storageHost.secondaryHost)){
      throw new URIError('The provided URI "' + storageHost.secondaryHost + '" is invalid.');
    }
    // Reject host objects that name neither a primary nor a secondary location.
    if (!storageHost.primaryHost && !storageHost.secondaryHost) {
      throw new ArgumentNullError('host', SR.STORAGE_HOST_LOCATION_REQUIRED);
    }
  }
  return true;
};
/**
* Checks if the given value is a valid UUID or not.
*
* @param {string|object} uuid The uuid to validate.
* @return {boolean}
*/
exports.isValidUuid = function(uuid, callback) {
var validUuidRegex = /^[a-zA-Z0-9]{8}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{12}$/;
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
if (!validUuidRegex.test(uuid)) {
return fail(new SyntaxError('The value is not a valid UUID format.'));
}
callback();
return true;
};
/**
* Creates a anonymous function that check if a given key is base 64 encoded.
*
* @param {string} key The key to validate.
* @return {function}
*/
exports.isBase64Encoded = function (key) {
var isValidBase64String = key.match(/^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$/);
if (isValidBase64String) {
return true;
} else {
throw new SyntaxError('The provided account key ' + key + ' is not a valid base64 string.');
}
};
/**
* Validates a function.
*
* @param {object} function The function to validate.
* @return {function}
*/
exports.isValidFunction = function (functionObject, functionName) {
if (!functionObject) {
throw new ArgumentNullError('functionObject', functionName + ' must be specified.');
}
if(!_.isFunction(functionObject)){
throw new TypeError(functionName + ' specified should be a function.');
}
return true;
};
// Returns an Error describing why the given storage-resource name is invalid,
// or null when the name passes all checks (non-empty, 3-63 chars, dash-separated
// lowercase alphanumeric runs).
var getNameError = function(name, typeName) {
  // Reject null, undefined or empty names.
  if (azureutil.stringIsEmpty(name)) {
    return new ArgumentNullError('name', util.format('%s name must be a non empty string.', typeName));
  }
  // Enforce the 3-63 character length limit.
  var len = name.length;
  if (len < 3 || len > 63) {
    return new ArgumentError('name', util.format('%s name must be between 3 and 63 characters long.', typeName));
  }
  // Names are runs of lowercase alphanumerics separated by single dashes.
  return name.match(/^([a-z0-9]+(-[a-z0-9]+)*)$/) === null
    ? new SyntaxError(util.format('%s name format is incorrect.', typeName))
    : null;
};
/**
* Validates a container name.
*
* @param {string} containerName The container name.
*/
exports.containerNameIsValid = function (containerName, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
var nameError = getNameError(containerName, 'Container');
if (!nameError || containerName.match(/^(\$root|\$logs)$/)) {
callback();
return true;
} else {
return fail(nameError);
}
};
/**
* Validates a blob name.
*
* @param {string} containerName The container name.
* @param {string} blobname The blob name.
*/
exports.blobNameIsValid = function (containerName, blobName, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
if (!blobName) {
return fail(new ArgumentNullError('blobName', 'Blob name is not specified.'));
}
if (containerName === '$root' && blobName.indexOf('/') !== -1) {
return fail(new SyntaxError('Blob name format is incorrect.'));
}
callback();
return true;
};
/**
* Validates a share name.
*
* @param {string} shareName The share name.
*/
exports.shareNameIsValid = function (shareName, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
var nameError = getNameError(shareName, 'Share');
if (!nameError) {
callback();
return true;
} else {
return fail(nameError);
}
};
/**
* Validates a queue name.
*
* @param {string} queueName The queue name.
*/
exports.queueNameIsValid = function (queueName, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
var nameError = getNameError(queueName, 'Queue');
if (!nameError) {
callback();
return true;
} else {
return fail(nameError);
}
};
/**
* Validates a table name: non-empty, 3-63 characters, and either a normal
* table name (letter followed by alphanumerics) or one of the reserved
* metrics table names.
*
* @param {string} table The table name.
* @param {function} [callback] Optional callback; when omitted, failures throw.
*/
exports.tableNameIsValid = function (table, callback) {
  var fail;
  initCallback(callback, function (f, cb) {
    fail = f;
    callback = cb;
  });
  // Reject null/undefined/empty names.
  if (azureutil.stringIsEmpty(table)) {
    return fail(new ArgumentNullError('table', 'Table name must be a non empty string.'));
  }
  // Enforce the 3-63 character length limit.
  if (table.length < 3 || table.length > 63) {
    return fail(new ArgumentError('table', 'Table name must be between 3 and 63 characters long.'));
  }
  // 'Tables' (any casing) is reserved.
  if(table.toLowerCase() == 'tables') {
    return fail(new RangeError('Table name cannot be \'Tables\'.'));
  }
  // Accept: a letter followed by 2-62 alphanumerics, or one of the special
  // $Metrics... analytics table names.
  if (table.match(/^([A-Za-z][A-Za-z0-9]{2,62})$/) !== null || table === '$MetricsCapacityBlob' || table.match(/^(\$Metrics(HourPrimary|MinutePrimary|HourSecondary|MinuteSecondary)?(Transactions)(Blob|Queue|Table))$/) !== null)
  {
    callback();
    return true;
  } else {
    return fail(new SyntaxError('Table name format is incorrect.'));
  }
};
/**
* Validates page ranges.
*
* @param {int} rangeStart The range starting position.
* @param {int} rangeEnd The range ending position.
* @param {int} writeBlockSizeInBytes The block size.
*/
exports.pageRangesAreValid = function (rangeStart, rangeEnd, writeBlockSizeInBytes, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
if (rangeStart % 512 !== 0) {
return fail(new RangeError('Start byte offset must be a multiple of 512.'));
}
var size = null;
if (!azureutil.objectIsNull(rangeEnd)) {
if ((rangeEnd + 1) % 512 !== 0) {
return fail(new RangeError('End byte offset must be a multiple of 512 minus 1.'));
}
size = (rangeEnd - rangeStart) + 1;
if (size > writeBlockSizeInBytes) {
return fail(new RangeError('Page blob size cannot be larger than ' + writeBlockSizeInBytes + ' bytes.'));
}
}
callback();
return true;
};
/**
* Validates a blob type.
*
* @param {string} type The type name.
*/
exports.blobTypeIsValid = function (type, callback) {
var getEnumValues = function (obj) {
var values = [];
for (var prop in obj) {
if (obj.hasOwnProperty(prop)) {
values.push(obj[prop]);
}
}
return values;
};
return this.isValidEnumValue(type, getEnumValues(blobConstants.BlobTypes), callback);
};
/**
* Validates share ACL type.
*
* @param {string} type The type name.
*/
exports.shareACLIsValid = function (type, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
if (type != FileUtilities.SharePublicAccessType.OFF) {
fail(new ArgumentError('type', 'The access type is not supported.'));
}
callback();
return true;
};
/**
* Validates share quota value.
*
* @param {int} type The quota value.
*/
exports.shareQuotaIsValid = function (quota, callback) {
var fail;
initCallback(callback, function (f, cb) {
fail = f;
callback = cb;
});
if (quota && quota <= 0) {
fail(new RangeError('The share quota value, in GB, must be greater than 0.'));
}
callback();
return true;
};
// Common helper for reporting a required argument that was not supplied.
function throwMissingArgument(name, func) {
  throw new ArgumentNullError(name,
    'Required argument ' + name + ' for function ' + func + ' is not defined');
}
// Validator bound to a function name; the name (this.func) is embedded in the
// error messages produced by the prototype's validation methods.
function ArgumentValidator(functionName) {
  this.func = functionName;
}
// Validation methods shared by all ArgumentValidator instances. Each method
// throws on failure, embedding the owning function's name (this.func) in the
// error message. The *NameIsValid helpers are reused from this module's exports.
_.extend(ArgumentValidator.prototype, {
  // Requires a defined, non-empty string (exists() rejects '').
  string: function (val, name) {
    this.exists(val, name);
    if (typeof val !== 'string') {
      throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a non-empty string');
    }
  },
  // Requires a string; the empty string is allowed.
  stringAllowEmpty: function (val, name) {
    if (typeof val !== 'string') {
      throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a string');
    }
  },
  // Requires a defined object (functions also pass the typeof check).
  object: function (val, name) {
    this.exists(val, name);
    if (typeof val !== 'object') {
      throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be an object');
    }
  },
  // Requires a truthy value.
  exists: function (val, name) {
    if (!val) {
      throwMissingArgument(name, this.func);
    }
  },
  // Requires a defined function.
  function: function (val, name) {
    this.exists(val, name);
    if (typeof val !== 'function') {
      throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a function');
    }
  },
  // Requires a truthy value, but 0 is accepted as a valid value.
  value: function (val, name) {
    if (!val && val !== 0) {
      throwMissingArgument(name, this.func);
    }
  },
  // Requires an array with at least one element.
  nonEmptyArray: function (val, name) {
    if (!val || val.length === 0) {
      throw new TypeError('Required array argument ' + name + ' for function ' + this.func + ' is either not defined or empty');
    }
  },
  // Requires a defined callback function.
  callback: function (val) {
    this.exists(val, 'callback');
    this.function(val, 'callback');
  },
  // Evaluates an arbitrary predicate, throwing the given message on failure.
  test: function (predicate, message) {
    if (!predicate()) {
      throw new Error(message + ' in function ' + this.func);
    }
  },
  // Reuse the module-level name validators on the instance.
  tableNameIsValid: exports.tableNameIsValid,
  containerNameIsValid: exports.containerNameIsValid,
  shareNameIsValid: exports.shareNameIsValid,
  blobNameIsValid: exports.blobNameIsValid,
  pageRangesAreValid: exports.pageRangesAreValid,
  queueNameIsValid: exports.queueNameIsValid,
  blobTypeIsValid: exports.blobTypeIsValid,
  shareACLIsValid: exports.shareACLIsValid,
  shareQuotaIsValid: exports.shareQuotaIsValid,
  isValidEnumValue: exports.isValidEnumValue
});
// Runs the supplied validation rules against a fresh validator that reports
// errors on behalf of functionName.
function validateArgs(functionName, validationRules) {
  validationRules(new ArgumentValidator(functionName));
}
exports.ArgumentValidator = ArgumentValidator;
exports.validateArgs = validateArgs;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.OPEN_KEY;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@Category({RegionServerTests.class, MediumTests.class})
public class TestStripeStoreFileManager {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestStripeStoreFileManager.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  // Per-test store directory; created in setUp() and deleted in tearDown().
  private static final Path BASEDIR =
      TEST_UTIL.getDataTestDir(TestStripeStoreFileManager.class.getSimpleName());
  private static final Path CFDIR = HRegionFileSystem.getStoreHomedir(BASEDIR, "region",
      Bytes.toBytes("cf"));

  // Four consecutive row keys used as stripe boundaries throughout the tests.
  private static final byte[] KEY_A = Bytes.toBytes("aaa");
  private static final byte[] KEY_B = Bytes.toBytes("aab");
  private static final byte[] KEY_C = Bytes.toBytes("aac");
  private static final byte[] KEY_D = Bytes.toBytes("aad");
  // KeyValues at those boundary keys, used for row-key-before lookups.
  private static final KeyValue KV_A = new KeyValue(KEY_A, 0L);
  private static final KeyValue KV_B = new KeyValue(KEY_B, 0L);
  private static final KeyValue KV_C = new KeyValue(KEY_C, 0L);
  private static final KeyValue KV_D = new KeyValue(KEY_D, 0L);
  /** Creates the column-family directory the mock store files live under. */
  @Before
  public void setUp() throws Exception {
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    if (!fs.mkdirs(CFDIR)) {
      throw new IOException("Cannot create test directory " + CFDIR);
    }
  }
  /** Removes the column-family directory so tests do not leak state into each other. */
  @After
  public void tearDown() throws Exception {
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    if (fs.exists(CFDIR) && !fs.delete(CFDIR, true)) {
      throw new IOException("Cannot delete test directory " + CFDIR);
    }
  }
  /**
   * Newly inserted (flushed) files go to level 0 and must be returned for every
   * key range, both before and after stripes exist.
   */
  @Test
  public void testInsertFilesIntoL0() throws Exception {
    StripeStoreFileManager manager = createManager();
    MockHStoreFile sf = createFile();
    manager.insertNewFiles(al(sf));
    assertEquals(1, manager.getStorefileCount());
    Collection<HStoreFile> filesForGet = manager.getFilesForScan(KEY_A, true, KEY_A, true);
    assertEquals(1, filesForGet.size());
    assertTrue(filesForGet.contains(sf));

    // Add some stripes and make sure we get this file for every stripe.
    manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_B, OPEN_KEY)));
    assertTrue(manager.getFilesForScan(KEY_A, true, KEY_A, true).contains(sf));
    assertTrue(manager.getFilesForScan(KEY_C, true, KEY_C, true).contains(sf));
  }
  /** clearFiles() must return every file (L0 and stripes) and leave the manager empty. */
  @Test
  public void testClearFiles() throws Exception {
    StripeStoreFileManager manager = createManager();
    // Two L0 files plus two stripe files = four files total.
    manager.insertNewFiles(al(createFile()));
    manager.insertNewFiles(al(createFile()));
    manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_B, OPEN_KEY)));
    assertEquals(4, manager.getStorefileCount());
    Collection<HStoreFile> allFiles = manager.clearFiles();
    assertEquals(4, allFiles.size());
    assertEquals(0, manager.getStorefileCount());
    assertEquals(0, manager.getStorefiles().size());
  }
private static ArrayList<HStoreFile> dumpIterator(Iterator<HStoreFile> iter) {
ArrayList<HStoreFile> result = new ArrayList<>();
for (; iter.hasNext(); result.add(iter.next())) {
continue;
}
return result;
}
  /**
   * Exercises getCandidateFilesForRowKeyBefore / updateCandidateFilesForRowKeyBefore:
   * candidate ordering, iterator removal, and preference for newer files.
   */
  @Test
  public void testRowKeyBefore() throws Exception {
    StripeStoreFileManager manager = createManager();
    HStoreFile l0File = createFile(), l0File2 = createFile();
    manager.insertNewFiles(al(l0File));
    manager.insertNewFiles(al(l0File2));
    // Get candidate files.
    Iterator<HStoreFile> sfs = manager.getCandidateFilesForRowKeyBefore(KV_B);
    sfs.next();
    sfs.remove();
    // Suppose we found a candidate in this file... make sure L0 file remaining is not removed.
    sfs = manager.updateCandidateFilesForRowKeyBefore(sfs, KV_B, KV_A);
    assertTrue(sfs.hasNext());
    // Now add some stripes (remove L0 file too)
    MockHStoreFile stripe0a = createFile(0, 100, OPEN_KEY, KEY_B),
        stripe1 = createFile(KEY_B, OPEN_KEY);
    manager.addCompactionResults(al(l0File), al(stripe0a, stripe1));
    manager.removeCompactedFiles(al(l0File));
    // If we want a key <= KEY_A, we should get everything except stripe1.
    ArrayList<HStoreFile> sfsDump = dumpIterator(manager.getCandidateFilesForRowKeyBefore(KV_A));
    assertEquals(2, sfsDump.size());
    assertTrue(sfsDump.contains(stripe0a));
    assertFalse(sfsDump.contains(stripe1));
    // If we want a key <= KEY_B, we should get everything since lower bound is inclusive.
    sfsDump = dumpIterator(manager.getCandidateFilesForRowKeyBefore(KV_B));
    assertEquals(3, sfsDump.size());
    assertTrue(sfsDump.contains(stripe1));
    // For KEY_D, we should also get everything.
    sfsDump = dumpIterator(manager.getCandidateFilesForRowKeyBefore(KV_D));
    assertEquals(3, sfsDump.size());
    // Suppose in the first file we found candidate with KEY_C.
    // Then, stripe0 no longer matters and should be removed, but stripe1 should stay.
    sfs = manager.getCandidateFilesForRowKeyBefore(KV_D);
    sfs.next(); // Skip L0 file.
    sfs.remove();
    sfs = manager.updateCandidateFilesForRowKeyBefore(sfs, KV_D, KV_C);
    assertEquals(stripe1, sfs.next());
    assertFalse(sfs.hasNext());
    // Add one more, later, file to stripe0, remove the last annoying L0 file.
    // This file should be returned in preference to older L0 file; also, after we get
    // a candidate from the first file, the old one should not be removed.
    HStoreFile stripe0b = createFile(0, 101, OPEN_KEY, KEY_B);
    manager.addCompactionResults(al(l0File2), al(stripe0b));
    manager.removeCompactedFiles(al(l0File2));
    sfs = manager.getCandidateFilesForRowKeyBefore(KV_A);
    assertEquals(stripe0b, sfs.next());
    sfs.remove();
    sfs = manager.updateCandidateFilesForRowKeyBefore(sfs, KV_A, KV_A);
    assertEquals(stripe0a, sfs.next());
  }
  /**
   * The split point must come from the largest file overall: from L0 while it holds
   * the biggest file, then from the stripe that takes over.
   */
  @Test
  public void testGetSplitPointEdgeCases() throws Exception {
    StripeStoreFileManager manager = createManager();
    // No files => no split.
    assertFalse(manager.getSplitPoint().isPresent());

    // If there are no stripes, should pick midpoint from the biggest file in L0.
    MockHStoreFile sf5 = createFile(5, 0);
    sf5.splitPoint = new byte[] { 1 };
    manager.insertNewFiles(al(sf5));
    manager.insertNewFiles(al(createFile(1, 0)));
    assertArrayEquals(sf5.splitPoint, manager.getSplitPoint().get());

    // Same if there's one stripe but the biggest file is still in L0.
    manager.addCompactionResults(al(), al(createFile(2, 0, OPEN_KEY, OPEN_KEY)));
    assertArrayEquals(sf5.splitPoint, manager.getSplitPoint().get());

    // If the biggest file is in the stripe, should get from it.
    MockHStoreFile sf6 = createFile(6, 0, OPEN_KEY, OPEN_KEY);
    sf6.splitPoint = new byte[] { 2 };
    manager.addCompactionResults(al(), al(sf6));
    assertArrayEquals(sf6.splitPoint, manager.getSplitPoint().get());
  }
  /** Balanced layouts: the split point is expected to land on a stripe boundary. */
  @Test
  public void testGetStripeBoundarySplits() throws Exception {
    /* First number - split must be after this stripe; further numbers - stripes */
    verifySplitPointScenario(5, false, 0f, 2, 1, 1, 1, 1, 1, 10);
    verifySplitPointScenario(0, false, 0f, 6, 3, 1, 1, 2);
    verifySplitPointScenario(2, false, 0f, 1, 1, 1, 1, 2);
    verifySplitPointScenario(0, false, 0f, 5, 4);
    verifySplitPointScenario(2, false, 0f, 5, 2, 5, 5, 5);
  }
  /**
   * Unbalanced layouts: when splitting at a boundary would exceed the configured
   * imbalance ratio, the split point moves inside a stripe instead.
   */
  @Test
  public void testGetUnbalancedSplits() throws Exception {
    /* First number - split must be inside/after this stripe; further numbers - stripes */
    verifySplitPointScenario(0, false, 2.1f, 4, 4, 4); // 8/4 is less than 2.1f
    verifySplitPointScenario(1, true, 1.5f, 4, 4, 4); // 8/4 > 6/6
    verifySplitPointScenario(1, false, 1.1f, 3, 4, 1, 1, 2, 2); // 7/6 < 8/5
    verifySplitPointScenario(1, false, 1.1f, 3, 6, 1, 1, 2, 2); // 9/6 == 9/6
    verifySplitPointScenario(1, true, 1.1f, 3, 8, 1, 1, 2, 2); // 11/6 > 10/7
    verifySplitPointScenario(3, false, 1.1f, 2, 2, 1, 1, 4, 3); // reverse order
    verifySplitPointScenario(4, true, 1.1f, 2, 2, 1, 1, 8, 3); // reverse order
    verifySplitPointScenario(0, true, 1.5f, 10, 4); // 10/4 > 9/5
    verifySplitPointScenario(0, false, 1.4f, 6, 4); // 6/4 == 6/4
    verifySplitPointScenario(1, true, 1.5f, 4, 10); // reverse just in case
    verifySplitPointScenario(0, false, 1.4f, 4, 6); // reverse just in case
  }
  /**
   * Verifies scenario for finding a split point.
   * @param splitPointAfter Stripe to expect the split point at/after.
   * @param shouldSplitStripe If true, the split point is expected in the middle of the above
   *                          stripe; if false, should be at the end.
   * @param splitRatioToVerify Maximum split imbalance ratio; 0 means use the default.
   * @param sizes Stripe sizes.
   */
  private void verifySplitPointScenario(int splitPointAfter, boolean shouldSplitStripe,
      float splitRatioToVerify, int... sizes) throws Exception {
    assertTrue(sizes.length > 1);
    ArrayList<HStoreFile> sfs = new ArrayList<>();
    for (int sizeIx = 0; sizeIx < sizes.length; ++sizeIx) {
      // Stripe boundaries are the integer indices; first/last stripe ends are open.
      byte[] startKey = (sizeIx == 0) ? OPEN_KEY : Bytes.toBytes(sizeIx - 1);
      byte[] endKey = (sizeIx == sizes.length - 1) ? OPEN_KEY : Bytes.toBytes(sizeIx);
      MockHStoreFile sf = createFile(sizes[sizeIx], 0, startKey, endKey);
      sf.splitPoint = Bytes.toBytes(-sizeIx); // set split point to the negative index
      sfs.add(sf);
    }
    Configuration conf = HBaseConfiguration.create();
    if (splitRatioToVerify != 0) {
      conf.setFloat(StripeStoreConfig.MAX_REGION_SPLIT_IMBALANCE_KEY, splitRatioToVerify);
    }
    StripeStoreFileManager manager = createManager(al(), conf);
    manager.addCompactionResults(al(), sfs);
    int result = Bytes.toInt(manager.getSplitPoint().get());
    // Either end key and thus positive index, or "middle" of the file and thus negative index.
    assertEquals(splitPointAfter * (shouldSplitStripe ? -1 : 1), result);
  }
private static byte[] keyAfter(byte[] key) {
return Arrays.copyOf(key, key.length + 1);
}
  /**
   * Scans must return L0 plus exactly the stripes overlapping [start, end];
   * checked for empty, L0-only, and fully striped layouts.
   */
  @Test
  public void testGetFilesForGetAndScan() throws Exception {
    StripeStoreFileManager manager = createManager();
    verifyGetAndScanScenario(manager, null, null);
    verifyGetAndScanScenario(manager, KEY_B, KEY_C);

    // Populate one L0 file.
    MockHStoreFile sf0 = createFile();
    manager.insertNewFiles(al(sf0));
    verifyGetAndScanScenario(manager, null, null, sf0);
    verifyGetAndScanScenario(manager, null, KEY_C, sf0);
    verifyGetAndScanScenario(manager, KEY_B, null, sf0);
    verifyGetAndScanScenario(manager, KEY_B, KEY_C, sf0);

    // Populate a bunch of files for stripes, keep L0.
    MockHStoreFile sfA = createFile(OPEN_KEY, KEY_A);
    MockHStoreFile sfB = createFile(KEY_A, KEY_B);
    MockHStoreFile sfC = createFile(KEY_B, KEY_C);
    MockHStoreFile sfD = createFile(KEY_C, KEY_D);
    MockHStoreFile sfE = createFile(KEY_D, OPEN_KEY);
    manager.addCompactionResults(al(), al(sfA, sfB, sfC, sfD, sfE));
    verifyGetAndScanScenario(manager, null, null, sf0, sfA, sfB, sfC, sfD, sfE);
    verifyGetAndScanScenario(manager, keyAfter(KEY_A), null, sf0, sfB, sfC, sfD, sfE);
    verifyGetAndScanScenario(manager, null, keyAfter(KEY_C), sf0, sfA, sfB, sfC, sfD);
    verifyGetAndScanScenario(manager, KEY_B, null, sf0, sfC, sfD, sfE);
    verifyGetAndScanScenario(manager, null, KEY_C, sf0, sfA, sfB, sfC, sfD);
    verifyGetAndScanScenario(manager, KEY_B, keyAfter(KEY_B), sf0, sfC);
    verifyGetAndScanScenario(manager, keyAfter(KEY_A), KEY_B, sf0, sfB, sfC);
    verifyGetAndScanScenario(manager, KEY_D, KEY_D, sf0, sfE);
    verifyGetAndScanScenario(manager, keyAfter(KEY_B), keyAfter(KEY_C), sf0, sfC, sfD);
  }
  /** Convenience wrapper over {@link #verifyGetOrScanScenario} taking a varargs result list. */
  private void verifyGetAndScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
      HStoreFile... results) throws Exception {
    verifyGetOrScanScenario(manager, start, end, results);
  }
  /**
   * Files with missing metadata, invalid metadata, or ranges overlapping the valid
   * stripes must be loaded into L0, while well-formed stripe files keep their stripes.
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testLoadFilesWithRecoverableBadFiles() throws Exception {
    // In L0, there will be file w/o metadata (real L0, 3 files with invalid metadata, and 3
    // files that overlap valid stripes in various ways). Note that the 4th way to overlap the
    // stripes will cause the structure to be mostly scraped, and is tested separately.
    ArrayList<HStoreFile> validStripeFiles = al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY),
        createFile(KEY_C, OPEN_KEY));
    ArrayList<HStoreFile> filesToGoToL0 = al(createFile(), createFile(null, KEY_A),
        createFile(KEY_D, null), createFile(KEY_D, KEY_A), createFile(keyAfter(KEY_A), KEY_C),
        createFile(OPEN_KEY, KEY_D), createFile(KEY_D, keyAfter(KEY_D)));
    ArrayList<HStoreFile> allFilesToGo = flattenLists(validStripeFiles, filesToGoToL0);
    // Load order must not matter.
    Collections.shuffle(allFilesToGo);
    StripeStoreFileManager manager = createManager(allFilesToGo);
    List<HStoreFile> l0Files = manager.getLevel0Files();
    assertEquals(filesToGoToL0.size(), l0Files.size());
    for (HStoreFile sf : filesToGoToL0) {
      assertTrue(l0Files.contains(sf));
    }
    verifyAllFiles(manager, allFilesToGo);
  }
  /** A file that overlaps a stripe boundary invalidates the layout: all files go to L0. */
  @Test
  public void testLoadFilesWithBadStripe() throws Exception {
    // Current "algorithm" will see the after-B key before C key, add it as valid stripe,
    // and then fail all other stripes. So everything would end up in L0.
    ArrayList<HStoreFile> allFilesToGo = al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY),
        createFile(KEY_B, keyAfter(KEY_B)));
    Collections.shuffle(allFilesToGo);
    StripeStoreFileManager manager = createManager(allFilesToGo);
    assertEquals(allFilesToGo.size(), manager.getLevel0Files().size());
  }
  /** Stripe layouts with key-range gaps are rejected; a single open stripe is fine. */
  @Test
  public void testLoadFilesWithGaps() throws Exception {
    // Stripes must not have gaps. If they do, everything goes to L0.
    StripeStoreFileManager manager =
        createManager(al(createFile(OPEN_KEY, KEY_B), createFile(KEY_C, OPEN_KEY)));
    assertEquals(2, manager.getLevel0Files().size());
    // Just one open stripe should be ok.
    manager = createManager(al(createFile(OPEN_KEY, OPEN_KEY)));
    assertEquals(0, manager.getLevel0Files().size());
    assertEquals(1, manager.getStorefileCount());
  }
  /**
   * After a region split, the outermost stripes may have non-open end keys; they
   * must be treated as logically open, and compactions must target the open range.
   */
  @Test
  public void testLoadFilesAfterSplit() throws Exception {
    // If stripes are good but have non-open ends, they must be treated as open ends.
    MockHStoreFile sf = createFile(KEY_B, KEY_C);
    StripeStoreFileManager manager = createManager(al(createFile(OPEN_KEY, KEY_B), sf));
    assertEquals(0, manager.getLevel0Files().size());
    // Here, [B, C] is logically [B, inf), so we should be able to compact it to that only.
    verifyInvalidCompactionScenario(manager, al(sf), al(createFile(KEY_B, KEY_C)));
    manager.addCompactionResults(al(sf), al(createFile(KEY_B, OPEN_KEY)));
    manager.removeCompactedFiles(al(sf));

    // Do the same for other variants.
    manager = createManager(al(sf, createFile(KEY_C, OPEN_KEY)));
    verifyInvalidCompactionScenario(manager, al(sf), al(createFile(KEY_B, KEY_C)));
    manager.addCompactionResults(al(sf), al(createFile(OPEN_KEY, KEY_C)));
    manager.removeCompactedFiles(al(sf));

    manager = createManager(al(sf));
    verifyInvalidCompactionScenario(manager, al(sf), al(createFile(KEY_B, KEY_C)));
    manager.addCompactionResults(al(sf), al(createFile(OPEN_KEY, OPEN_KEY)));
  }
  /**
   * Walks a long sequence of compaction-result applications: stripe creation from L0,
   * invalid layouts (rejected), regular compaction, rebalance, split, and merge of
   * stripes, verifying the full file set after each step.
   */
  @Test
  public void testAddingCompactionResults() throws Exception {
    StripeStoreFileManager manager = createManager();
    // First, add some L0 files and "compact" one with new stripe creation.
    HStoreFile sf_L0_0a = createFile(), sf_L0_0b = createFile();
    manager.insertNewFiles(al(sf_L0_0a, sf_L0_0b));

    // Try compacting with invalid new branches (gaps, overlaps) - no effect.
    verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B)));
    verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_C, OPEN_KEY)));
    verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_B, OPEN_KEY), createFile(KEY_A, KEY_D)));
    verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_A, KEY_B), createFile(KEY_B, OPEN_KEY)));

    HStoreFile sf_i2B_0 = createFile(OPEN_KEY, KEY_B);
    HStoreFile sf_B2C_0 = createFile(KEY_B, KEY_C);
    HStoreFile sf_C2i_0 = createFile(KEY_C, OPEN_KEY);
    manager.addCompactionResults(al(sf_L0_0a), al(sf_i2B_0, sf_B2C_0, sf_C2i_0));
    manager.removeCompactedFiles(al(sf_L0_0a));
    verifyAllFiles(manager, al(sf_L0_0b, sf_i2B_0, sf_B2C_0, sf_C2i_0));

    // Add another l0 file, "compact" both L0 into two stripes
    HStoreFile sf_L0_1 = createFile();
    HStoreFile sf_i2B_1 = createFile(OPEN_KEY, KEY_B);
    HStoreFile sf_B2C_1 = createFile(KEY_B, KEY_C);
    manager.insertNewFiles(al(sf_L0_1));
    manager.addCompactionResults(al(sf_L0_0b, sf_L0_1), al(sf_i2B_1, sf_B2C_1));
    manager.removeCompactedFiles(al(sf_L0_0b, sf_L0_1));
    verifyAllFiles(manager, al(sf_i2B_0, sf_B2C_0, sf_C2i_0, sf_i2B_1, sf_B2C_1));

    // Try compacting with invalid file (no metadata) - should add files to L0.
    HStoreFile sf_L0_2 = createFile(null, null);
    manager.addCompactionResults(al(), al(sf_L0_2));
    manager.removeCompactedFiles(al());
    verifyAllFiles(manager, al(sf_i2B_0, sf_B2C_0, sf_C2i_0, sf_i2B_1, sf_B2C_1, sf_L0_2));
    // Remove it...
    manager.addCompactionResults(al(sf_L0_2), al());
    manager.removeCompactedFiles(al(sf_L0_2));

    // Do regular compaction in the first stripe.
    HStoreFile sf_i2B_3 = createFile(OPEN_KEY, KEY_B);
    manager.addCompactionResults(al(sf_i2B_0, sf_i2B_1), al(sf_i2B_3));
    manager.removeCompactedFiles(al(sf_i2B_0, sf_i2B_1));
    verifyAllFiles(manager, al(sf_B2C_0, sf_C2i_0, sf_B2C_1, sf_i2B_3));

    // Rebalance two stripes.
    HStoreFile sf_B2D_4 = createFile(KEY_B, KEY_D);
    HStoreFile sf_D2i_4 = createFile(KEY_D, OPEN_KEY);
    manager.addCompactionResults(al(sf_B2C_0, sf_C2i_0, sf_B2C_1), al(sf_B2D_4, sf_D2i_4));
    manager.removeCompactedFiles(al(sf_B2C_0, sf_C2i_0, sf_B2C_1));
    verifyAllFiles(manager, al(sf_i2B_3, sf_B2D_4, sf_D2i_4));

    // Split the first stripe.
    HStoreFile sf_i2A_5 = createFile(OPEN_KEY, KEY_A);
    HStoreFile sf_A2B_5 = createFile(KEY_A, KEY_B);
    manager.addCompactionResults(al(sf_i2B_3), al(sf_i2A_5, sf_A2B_5));
    manager.removeCompactedFiles(al(sf_i2B_3));
    verifyAllFiles(manager, al(sf_B2D_4, sf_D2i_4, sf_i2A_5, sf_A2B_5));

    // Split the middle stripe.
    HStoreFile sf_B2C_6 = createFile(KEY_B, KEY_C);
    HStoreFile sf_C2D_6 = createFile(KEY_C, KEY_D);
    manager.addCompactionResults(al(sf_B2D_4), al(sf_B2C_6, sf_C2D_6));
    manager.removeCompactedFiles(al(sf_B2D_4));
    verifyAllFiles(manager, al(sf_D2i_4, sf_i2A_5, sf_A2B_5, sf_B2C_6, sf_C2D_6));

    // Merge two different middle stripes.
    HStoreFile sf_A2C_7 = createFile(KEY_A, KEY_C);
    manager.addCompactionResults(al(sf_A2B_5, sf_B2C_6), al(sf_A2C_7));
    manager.removeCompactedFiles(al(sf_A2B_5, sf_B2C_6));
    verifyAllFiles(manager, al(sf_D2i_4, sf_i2A_5, sf_C2D_6, sf_A2C_7));

    // Merge lower half.
    HStoreFile sf_i2C_8 = createFile(OPEN_KEY, KEY_C);
    manager.addCompactionResults(al(sf_i2A_5, sf_A2C_7), al(sf_i2C_8));
    manager.removeCompactedFiles(al(sf_i2A_5, sf_A2C_7));
    verifyAllFiles(manager, al(sf_D2i_4, sf_C2D_6, sf_i2C_8));

    // Merge all.
    HStoreFile sf_i2i_9 = createFile(OPEN_KEY, OPEN_KEY);
    manager.addCompactionResults(al(sf_D2i_4, sf_C2D_6, sf_i2C_8), al(sf_i2i_9));
    manager.removeCompactedFiles(al(sf_D2i_4, sf_C2D_6, sf_i2C_8));
    verifyAllFiles(manager, al(sf_i2i_9));
  }
  /**
   * Flushes that conflict with the current stripe layout (or with a concurrent
   * stripe-changing compaction) must land in L0 rather than corrupt the stripes.
   */
  @Test
  public void testCompactionAndFlushConflict() throws Exception {
    // Add file flush into stripes
    StripeStoreFileManager sfm = createManager();
    assertEquals(0, sfm.getStripeCount());
    HStoreFile sf_i2c = createFile(OPEN_KEY, KEY_C), sf_c2i = createFile(KEY_C, OPEN_KEY);
    sfm.insertNewFiles(al(sf_i2c, sf_c2i));
    assertEquals(2, sfm.getStripeCount());
    // Now try to add conflicting flush - should throw.
    HStoreFile sf_i2d = createFile(OPEN_KEY, KEY_D), sf_d2i = createFile(KEY_D, OPEN_KEY);
    sfm.insertNewFiles(al(sf_i2d, sf_d2i));
    assertEquals(2, sfm.getStripeCount());
    assertEquals(2, sfm.getLevel0Files().size());
    verifyGetAndScanScenario(sfm, KEY_C, KEY_C, sf_i2d, sf_d2i, sf_c2i);
    // Remove these files.
    sfm.addCompactionResults(al(sf_i2d, sf_d2i), al());
    sfm.removeCompactedFiles(al(sf_i2d, sf_d2i));
    assertEquals(0, sfm.getLevel0Files().size());
    // Add another file to stripe; then "rebalance" stripes w/o it - the file, which was
    // presumably flushed during compaction, should go to L0.
    HStoreFile sf_i2c_2 = createFile(OPEN_KEY, KEY_C);
    sfm.insertNewFiles(al(sf_i2c_2));
    sfm.addCompactionResults(al(sf_i2c, sf_c2i), al(sf_i2d, sf_d2i));
    sfm.removeCompactedFiles(al(sf_i2c, sf_c2i));
    assertEquals(1, sfm.getLevel0Files().size());
    verifyGetAndScanScenario(sfm, KEY_C, KEY_C, sf_i2d, sf_i2c_2);
  }
  /** An L0 compaction may legitimately produce output for only a subset of the stripes. */
  @Test
  public void testEmptyResultsForStripes() throws Exception {
    // Test that we can compact L0 into a subset of stripes.
    StripeStoreFileManager manager = createManager();
    HStoreFile sf0a = createFile();
    HStoreFile sf0b = createFile();
    manager.insertNewFiles(al(sf0a));
    manager.insertNewFiles(al(sf0b));
    ArrayList<HStoreFile> compacted = al(createFile(OPEN_KEY, KEY_B),
        createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY));
    manager.addCompactionResults(al(sf0a), compacted);
    manager.removeCompactedFiles(al(sf0a));
    // Next L0 compaction only produces file for the first and last stripe.
    ArrayList<HStoreFile> compacted2 = al(createFile(OPEN_KEY, KEY_B), createFile(KEY_C, OPEN_KEY));
    manager.addCompactionResults(al(sf0b), compacted2);
    manager.removeCompactedFiles(al(sf0b));
    compacted.addAll(compacted2);
    verifyAllFiles(manager, compacted);
  }
  /** Compaction priority depends on the blocking-file limit, stripe count and L0 files. */
  @Test
  public void testPriority() throws Exception {
    // Expected priority, file limit, stripe count, files per stripe, l0 files.
    testPriorityScenario(5, 5, 0, 0, 0);
    testPriorityScenario(2, 5, 0, 0, 3);
    testPriorityScenario(4, 25, 5, 1, 0); // example case.
    testPriorityScenario(3, 25, 5, 1, 1); // L0 files counts for all stripes.
    testPriorityScenario(3, 25, 5, 2, 0); // file to each stripe - same as one L0 file.
    testPriorityScenario(2, 25, 5, 4, 0); // 1 is priority user, so 2 is returned.
    testPriorityScenario(2, 25, 5, 4, 4); // don't return higher than user unless over limit.
    testPriorityScenario(2, 25, 5, 1, 10); // same.
    testPriorityScenario(0, 25, 5, 4, 5); // at limit.
    testPriorityScenario(-5, 25, 5, 6, 0); // over limit!
    testPriorityScenario(-1, 25, 0, 0, 26); // over limit with just L0
  }
/**
 * Builds a manager with the requested file layout and asserts its compaction priority.
 *
 * @param expectedPriority priority the manager is expected to report
 * @param limit value for "hbase.hstore.blockingStoreFiles"
 * @param stripes number of stripes to create
 * @param filesInStripe number of files placed into each stripe
 * @param l0Files number of level-0 files to insert
 */
private void testPriorityScenario(int expectedPriority,
    int limit, int stripes, int filesInStripe, int l0Files) throws Exception {
  // N boundary keys can delimit at most N+1 stripes.
  final byte[][] keys = { KEY_A, KEY_B, KEY_C, KEY_D };
  assertTrue(stripes <= keys.length + 1);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.blockingStoreFiles", limit);
  StripeStoreFileManager sfm = createManager(al(), conf);
  // Flush the requested number of level-0 files.
  for (int count = 0; count < l0Files; ++count) {
    sfm.insertNewFiles(al(createFile()));
  }
  // Populate each stripe with the requested number of files via fake compactions,
  // adding one file to every stripe per iteration.
  for (int fileNo = 0; fileNo < filesInStripe; ++fileNo) {
    ArrayList<HStoreFile> oneFilePerStripe = new ArrayList<>();
    for (int stripeNo = 0; stripeNo < stripes; ++stripeNo) {
      byte[] startKey = (stripeNo == 0) ? OPEN_KEY : keys[stripeNo - 1];
      byte[] endKey = (stripeNo == stripes - 1) ? OPEN_KEY : keys[stripeNo];
      oneFilePerStripe.add(createFile(startKey, endKey));
    }
    sfm.addCompactionResults(al(), oneFilePerStripe);
  }
  assertEquals(expectedPriority, sfm.getStoreCompactionPriority());
}
/** Asserts that an invalid compaction is rejected and leaves the manager's files untouched. */
private void verifyInvalidCompactionScenario(StripeStoreFileManager manager,
    ArrayList<HStoreFile> filesToCompact, ArrayList<HStoreFile> filesToInsert) throws Exception {
  Collection<HStoreFile> filesBefore = manager.getStorefiles();
  assertThrows(IllegalStateException.class,
      () -> manager.addCompactionResults(filesToCompact, filesToInsert));
  // The failed call must not have modified the file set.
  verifyAllFiles(manager, filesBefore);
}
/**
 * Varargs convenience overload of
 * {@link #verifyGetOrScanScenario(StripeStoreFileManager, byte[], byte[], Collection)}.
 */
private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
    HStoreFile... results) throws Exception {
  verifyGetOrScanScenario(manager, start, end, Arrays.asList(results));
}
/**
 * Asserts that a scan over [start, end) selects exactly the expected files.
 * Null boundaries are treated as the open start/end of the key space.
 */
private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
    Collection<HStoreFile> results) throws Exception {
  byte[] scanStart = (start == null) ? HConstants.EMPTY_START_ROW : start;
  byte[] scanEnd = (end == null) ? HConstants.EMPTY_END_ROW : end;
  Collection<HStoreFile> selected = manager.getFilesForScan(scanStart, true, scanEnd, false);
  // Same size plus full containment implies set equality.
  assertEquals(results.size(), selected.size());
  for (HStoreFile expected : results) {
    assertTrue(selected.contains(expected));
  }
}
/** Verifies that a scan over the whole key space sees exactly the given files. */
private void verifyAllFiles(
    StripeStoreFileManager manager, Collection<HStoreFile> results) throws Exception {
  verifyGetOrScanScenario(manager, null, null, results);
}
// TODO: replace with Mockito?
/**
 * Creates a mock store file backed by a real (empty) file on the test filesystem.
 *
 * @param size file size the mock reports
 * @param seqNum sequence id the mock reports
 * @param startKey stripe start boundary to store as metadata, or null to omit it
 * @param endKey stripe end boundary to store as metadata, or null to omit it
 */
private static MockHStoreFile createFile(
    long size, long seqNum, byte[] startKey, byte[] endKey) throws Exception {
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path testFilePath = StoreFileWriter.getUniqueFile(fs, CFDIR);
  // The mock needs an existing path; the file's content is irrelevant for these tests.
  fs.create(testFilePath).close();
  MockHStoreFile sf = new MockHStoreFile(TEST_UTIL, testFilePath, size, 0, false, seqNum);
  // Stripe boundary metadata is only written when requested; files without it have no stripe keys.
  if (startKey != null) {
    sf.setMetadataValue(StripeStoreFileManager.STRIPE_START_KEY, startKey);
  }
  if (endKey != null) {
    sf.setMetadataValue(StripeStoreFileManager.STRIPE_END_KEY, endKey);
  }
  return sf;
}
/** Creates a file with the given size and sequence id, without stripe boundary metadata. */
private static MockHStoreFile createFile(long size, long seqNum) throws Exception {
  return createFile(size, seqNum, null, null);
}
/** Creates a zero-size, sequence-id-0 file with the given stripe boundaries. */
private static MockHStoreFile createFile(byte[] startKey, byte[] endKey) throws Exception {
  return createFile(0, 0, startKey, endKey);
}
/** Creates a zero-size file without stripe metadata. */
private static MockHStoreFile createFile() throws Exception {
  return createFile(null, null);
}
/** Creates a manager with no pre-loaded files, using the shared test configuration. */
private static StripeStoreFileManager createManager() throws Exception {
  return createManager(new ArrayList<>());
}
/** Creates a manager pre-loaded with the given files, using the shared test configuration. */
private static StripeStoreFileManager createManager(ArrayList<HStoreFile> sfs) throws Exception {
  return createManager(sfs, TEST_UTIL.getConfiguration());
}
/** Creates a manager pre-loaded with the given files, configured from the given configuration. */
private static StripeStoreFileManager createManager(
    ArrayList<HStoreFile> sfs, Configuration conf) throws Exception {
  // Store-level configuration details are irrelevant for these tests, so a bare mock suffices.
  StoreConfigInformation storeConfig = Mockito.mock(StoreConfigInformation.class);
  StripeStoreConfig stripeConfig = new StripeStoreConfig(conf, storeConfig);
  StripeStoreFileManager manager =
      new StripeStoreFileManager(CellComparatorImpl.COMPARATOR, conf, stripeConfig);
  manager.loadFiles(sfs);
  return manager;
}
/** Short alias: wraps the varargs into a fresh, mutable list. */
private static ArrayList<HStoreFile> al(HStoreFile... sfs) {
  ArrayList<HStoreFile> list = new ArrayList<>();
  list.addAll(Arrays.asList(sfs));
  return list;
}
/**
 * Concatenates the given lists into one new list, preserving order.
 * The method only reads from the varargs array, so suppressing the generic-varargs
 * heap-pollution warning with {@code @SafeVarargs} is safe (valid on static methods).
 */
@SafeVarargs
private static ArrayList<HStoreFile> flattenLists(ArrayList<HStoreFile>... sfls) {
  ArrayList<HStoreFile> result = new ArrayList<>();
  for (ArrayList<HStoreFile> sfl : sfls) {
    result.addAll(sfl);
  }
  return result;
}
}
| apache-2.0 |
jwagenleitner/incubator-groovy | src/main/java/org/apache/groovy/ast/tools/AnnotatedNodeUtils.java | 1904 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.groovy.ast.tools;
import groovy.transform.Generated;
import org.codehaus.groovy.ast.AnnotatedNode;
import org.codehaus.groovy.ast.AnnotationNode;
import org.codehaus.groovy.ast.ClassHelper;
import org.codehaus.groovy.ast.ClassNode;
import java.util.List;
/**
 * Utility class for working with AnnotatedNodes.
 */
public class AnnotatedNodeUtils {
    private static final ClassNode GENERATED_TYPE = ClassHelper.make(Generated.class);

    private AnnotatedNodeUtils() {
    }

    /**
     * Adds the {@link Generated} annotation to the given node unless it already carries it.
     * The node is only marked when the containing class belongs to a module that has a
     * compilation context.
     *
     * @param containingClass the class owning the node to mark
     * @param nodeToMark      the node to annotate as generated
     */
    public static void markAsGenerated(ClassNode containingClass, AnnotatedNode nodeToMark) {
        boolean shouldAnnotate = containingClass.getModule() != null && containingClass.getModule().getContext() != null;
        if (shouldAnnotate && !hasAnnotation(nodeToMark, GENERATED_TYPE)) {
            nodeToMark.addAnnotation(new AnnotationNode(GENERATED_TYPE));
        }
    }

    /**
     * Returns whether the node carries at least one annotation of the given type.
     *
     * @param node       the node to inspect
     * @param annotation the annotation type to look for
     * @return true if at least one matching annotation is present
     */
    public static boolean hasAnnotation(AnnotatedNode node, ClassNode annotation) {
        // Parameterized type replaces the raw List used previously; AnnotatedNode.getAnnotations
        // returns List<AnnotationNode>.
        List<AnnotationNode> annots = node.getAnnotations(annotation);
        return annots != null && !annots.isEmpty();
    }
}
| apache-2.0 |
robander/dita-ot | src/test/java/org/dita/dost/exception/DITAOTXMLErrorHandlerTest.java | 1226 | /*
* This file is part of the DITA Open Toolkit project.
*
* Copyright 2011 Jarno Elovirta
*
* See the accompanying LICENSE file for applicable license.
*/
package org.dita.dost.exception;
import org.junit.Test;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.dita.dost.TestUtils.TestLogger;
import org.dita.dost.log.DITAOTLogger;
public class DITAOTXMLErrorHandlerTest {

    private final DITAOTLogger logger = new TestLogger();
    // Handler and a canned parse exception shared by the rethrow tests below.
    private final DITAOTXMLErrorHandler handler = new DITAOTXMLErrorHandler("path", logger);
    private final SAXParseException parseException =
            new SAXParseException("message", "publicId", "systemId", 3, 1, new RuntimeException("msg"));

    @Test
    public void testDITAOTXMLErrorHandler() {
        // Construction must succeed both with and without a file path.
        new DITAOTXMLErrorHandler("path", logger);
        new DITAOTXMLErrorHandler(null, logger);
    }

    @Test(expected = SAXExceptionWrapper.class)
    public void testError() throws SAXException {
        // Recoverable errors are rethrown as SAXExceptionWrapper.
        handler.error(parseException);
    }

    @Test(expected = SAXExceptionWrapper.class)
    public void testFatalError() throws SAXException {
        // Fatal errors are rethrown as SAXExceptionWrapper.
        handler.fatalError(parseException);
    }

    @Test
    public void testWarning() throws SAXException {
        // Warnings must not throw.
        handler.warning(parseException);
    }
}
| apache-2.0 |
mikeweisskopf/RxNetty | rxnetty-tcp/src/main/java/io/reactivex/netty/protocol/tcp/server/TcpServer.java | 19482 | /*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.reactivex.netty.protocol.tcp.server;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.ssl.SslHandler;
import io.netty.util.concurrent.EventExecutorGroup;
import io.reactivex.netty.events.EventSource;
import io.reactivex.netty.protocol.tcp.server.events.TcpServerEventListener;
import io.reactivex.netty.protocol.tcp.server.events.TcpServerEventPublisher;
import io.reactivex.netty.protocol.tcp.ssl.SslCodec;
import rx.functions.Action1;
import rx.functions.Func0;
import rx.functions.Func1;
import javax.net.ssl.SSLEngine;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.concurrent.TimeUnit;
/**
 * A TCP server.
 *
 * <h2>Immutability</h2>
 * An instance of this server is immutable and all mutations produce a new server instance.
 *
 * @param <R> The type of objects read from this server.
 * @param <W> The type of objects written to this server.
 */
public abstract class TcpServer<R, W> implements EventSource<TcpServerEventListener> {

    /**
     * Creates a new server instance, inheriting all configurations from this server and adding a
     * {@link ChannelOption} for the server socket created by the newly created server instance.
     *
     * @param option Option to add.
     * @param value Value for the option.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <T> TcpServer<R, W> channelOption(ChannelOption<T> option, T value);

    /**
     * Creates a new server instance, inheriting all configurations from this server and adding a
     * {@link ChannelOption} for the client socket created by the newly created server instance.
     *
     * @param option Option to add.
     * @param value Value for the option.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <T> TcpServer<R, W> clientChannelOption(ChannelOption<T> option, T value);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server.
     * The specified handler is added at the first position of the pipeline as specified by
     * {@link ChannelPipeline#addFirst(String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)}
     * will be more convenient.</em>
     *
     * @param name Name of the handler.
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerFirst(String name, Func0<ChannelHandler> handlerFactory);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server. The specified
     * handler is added at the first position of the pipeline as specified by
     * {@link ChannelPipeline#addFirst(EventExecutorGroup, String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)} will be
     * more convenient.</em>
     *
     * @param group The {@link EventExecutorGroup} which will be used to execute the {@link ChannelHandler}
     *              methods
     * @param name The name of the handler to append
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerFirst(EventExecutorGroup group, String name,
                                                                      Func0<ChannelHandler> handlerFactory);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server. The specified
     * handler is added at the last position of the pipeline as specified by
     * {@link ChannelPipeline#addLast(String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)} will be
     * more convenient.</em>
     *
     * @param name Name of the handler.
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerLast(String name,
                                                                     Func0<ChannelHandler> handlerFactory);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server. The specified
     * handler is added at the last position of the pipeline as specified by
     * {@link ChannelPipeline#addLast(EventExecutorGroup, String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)} will be more
     * convenient.</em>
     *
     * @param group the {@link EventExecutorGroup} which will be used to execute the {@link ChannelHandler}
     *              methods
     * @param name the name of the handler to append
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerLast(EventExecutorGroup group, String name,
                                                                     Func0<ChannelHandler> handlerFactory);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server. The specified
     * handler is added before an existing handler with the passed {@code baseName} in the pipeline as specified by
     * {@link ChannelPipeline#addBefore(String, String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)} will be more
     * convenient.</em>
     *
     * @param baseName the name of the existing handler
     * @param name Name of the handler.
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerBefore(String baseName, String name,
                                                                       Func0<ChannelHandler> handlerFactory);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server. The specified
     * handler is added before an existing handler with the passed {@code baseName} in the pipeline as specified by
     * {@link ChannelPipeline#addBefore(EventExecutorGroup, String, String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)} will be more
     * convenient.</em>
     *
     * @param group the {@link EventExecutorGroup} which will be used to execute the {@link ChannelHandler}
     *              methods
     * @param baseName the name of the existing handler
     * @param name the name of the handler to append
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerBefore(EventExecutorGroup group, String baseName,
                                                                       String name,
                                                                       Func0<ChannelHandler> handlerFactory);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server. The specified
     * handler is added after an existing handler with the passed {@code baseName} in the pipeline as specified by
     * {@link ChannelPipeline#addAfter(String, String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)} will be more
     * convenient.</em>
     *
     * @param baseName the name of the existing handler
     * @param name Name of the handler.
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerAfter(String baseName, String name,
                                                                      Func0<ChannelHandler> handlerFactory);

    /**
     * Adds a {@link ChannelHandler} to {@link ChannelPipeline} for all connections created by this server. The specified
     * handler is added after an existing handler with the passed {@code baseName} in the pipeline as specified by
     * {@link ChannelPipeline#addAfter(EventExecutorGroup, String, String, ChannelHandler)}
     *
     * <em>For better flexibility of pipeline modification, the method {@link #pipelineConfigurator(Action1)} will be more
     * convenient.</em>
     *
     * @param group the {@link EventExecutorGroup} which will be used to execute the {@link ChannelHandler}
     *              methods
     * @param baseName the name of the existing handler
     * @param name the name of the handler to append
     * @param handlerFactory Factory to create handler instance to add.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> addChannelHandlerAfter(EventExecutorGroup group, String baseName,
                                                                      String name, Func0<ChannelHandler> handlerFactory);

    /**
     * Creates a new server instance, inheriting all configurations from this server and using the passed
     * action to configure all the connections created by the newly created server instance.
     *
     * @param pipelineConfigurator Action to configure {@link ChannelPipeline}.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract <RR, WW> TcpServer<RR, WW> pipelineConfigurator(Action1<ChannelPipeline> pipelineConfigurator);

    /**
     * Creates a new server instance, inheriting all configurations from this server and using the passed
     * {@code sslEngineFactory} for all secured connections accepted by the newly created server instance.
     *
     * If the {@link SSLEngine} instance can be created statically, {@link #secure(SSLEngine)} can be used.
     *
     * @param sslEngineFactory Factory for all secured connections created by the newly created server instance.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract TcpServer<R, W> secure(Func1<ByteBufAllocator, SSLEngine> sslEngineFactory);

    /**
     * Creates a new server instance, inheriting all configurations from this server and using the passed
     * {@code sslEngine} for all secured connections accepted by the newly created server instance.
     *
     * If the {@link SSLEngine} instance cannot be created statically, {@link #secure(Func1)} can be used.
     *
     * @param sslEngine {@link SSLEngine} for all secured connections created by the newly created server instance.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract TcpServer<R, W> secure(SSLEngine sslEngine);

    /**
     * Creates a new server instance, inheriting all configurations from this server and using the passed
     * {@code sslCodec} for all secured connections accepted by the newly created server instance.
     *
     * This is required only when the {@link SslHandler} used by {@link SslCodec} is to be modified before adding to
     * the {@link ChannelPipeline}. For most of the cases, {@link #secure(Func1)} or {@link #secure(SSLEngine)} will be
     * enough.
     *
     * @param sslCodec {@link SslCodec} for all secured connections created by the newly created server instance.
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract TcpServer<R, W> secure(SslCodec sslCodec);

    /**
     * Creates a new server instance, inheriting all configurations from this server and using a self-signed
     * certificate for all secured connections accepted by the newly created server instance.
     *
     * <b>This is only for testing and should not be used for real production servers.</b>
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract TcpServer<R, W> unsafeSecure();

    /**
     * Creates a new server instance, inheriting all configurations from this server and enabling wire logging at the
     * passed level for the newly created server instance.
     *
     * @param wireLoggingLevel Logging level at which the wire logs will be logged. The wire logging will only be done if
     *                         logging is enabled at this level for {@link io.netty.handler.logging.LoggingHandler}
     *
     * @return A new {@link TcpServer} instance.
     */
    public abstract TcpServer<R, W> enableWireLogging(LogLevel wireLoggingLevel);

    /**
     * Returns the port at which this server is running.
     *
     * For servers using ephemeral ports, this would return the actual port used, only after the server is started.
     *
     * @return The port at which this server is running.
     */
    public abstract int getServerPort();

    /**
     * Returns the address at which this server is running.
     *
     * @return The address at which this server is running.
     */
    public abstract SocketAddress getServerAddress();

    /**
     * Starts this server.
     *
     * @param connectionHandler Connection handler that will handle any new server connections to this server.
     *
     * @return This server.
     */
    public abstract TcpServer<R, W> start(ConnectionHandler<R, W> connectionHandler);

    /**
     * Shutdown this server and waits till the server socket is closed.
     */
    public abstract void shutdown();

    /**
     * Waits for the shutdown of this server.
     *
     * <b>This does not actually shutdown the server.</b> It just waits for some other action to shutdown.
     */
    public abstract void awaitShutdown();

    /**
     * Waits for the shutdown of this server, waiting a maximum of the passed duration.
     *
     * <b>This does not actually shutdown the server.</b> It just waits for some other action to shutdown.
     *
     * @param duration Duration to wait for shutdown.
     * @param timeUnit Time unit for the duration to wait for shutdown.
     */
    public abstract void awaitShutdown(long duration, TimeUnit timeUnit);

    /**
     * Returns the event publisher for this server.
     *
     * @return The event publisher for this server.
     */
    public abstract TcpServerEventPublisher getEventPublisher();

    /**
     * Creates a new server using an ephemeral port. The port used can be queried after starting this server, using
     * {@link #getServerPort()}
     *
     * @return A new {@link TcpServer}
     */
    public static TcpServer<ByteBuf, ByteBuf> newServer() {
        return newServer(0);
    }

    /**
     * Creates a new server using the passed port.
     *
     * @param port Port for the server. {@code 0} to use ephemeral port.
     * @return A new {@link TcpServer}
     */
    public static TcpServer<ByteBuf, ByteBuf> newServer(int port) {
        return new TcpServerImpl<>(new InetSocketAddress(port));
    }

    /**
     * Creates a new server using the passed port.
     *
     * @param port Port for the server. {@code 0} to use ephemeral port.
     * @param eventLoopGroup Event loop group to be used for server as well as client sockets.
     * @param channelClass The class to be used for server channel.
     *
     * @return A new {@link TcpServer}
     */
    public static TcpServer<ByteBuf, ByteBuf> newServer(int port, EventLoopGroup eventLoopGroup,
                                                        Class<? extends ServerChannel> channelClass) {
        // Same group accepts connections and serves client sockets.
        return newServer(port, eventLoopGroup, eventLoopGroup, channelClass);
    }

    /**
     * Creates a new server using the passed port.
     *
     * @param port Port for the server. {@code 0} to use ephemeral port.
     * @param acceptGroup Event loop group to be used for server sockets.
     * @param clientGroup Event loop group to be used for client sockets.
     * @param channelClass The class to be used for server channel.
     *
     * @return A new {@link TcpServer}
     */
    public static TcpServer<ByteBuf, ByteBuf> newServer(int port, EventLoopGroup acceptGroup,
                                                        EventLoopGroup clientGroup,
                                                        Class<? extends ServerChannel> channelClass) {
        return newServer(new InetSocketAddress(port), acceptGroup, clientGroup, channelClass);
    }

    /**
     * Creates a new server using the passed address.
     *
     * @param socketAddress Socket address for the server.
     * @return A new {@link TcpServer}
     */
    public static TcpServer<ByteBuf, ByteBuf> newServer(SocketAddress socketAddress) {
        return new TcpServerImpl<>(socketAddress);
    }

    /**
     * Creates a new server using the passed address.
     *
     * @param socketAddress Socket address for the server.
     * @param eventLoopGroup Event loop group to be used for server as well as client sockets.
     * @param channelClass The class to be used for server channel.
     *
     * @return A new {@link TcpServer}
     */
    public static TcpServer<ByteBuf, ByteBuf> newServer(SocketAddress socketAddress, EventLoopGroup eventLoopGroup,
                                                        Class<? extends ServerChannel> channelClass) {
        return new TcpServerImpl<>(socketAddress, eventLoopGroup, eventLoopGroup, channelClass);
    }

    /**
     * Creates a new server using the passed address.
     *
     * @param socketAddress Socket address for the server.
     * @param acceptGroup Event loop group to be used for server sockets.
     * @param clientGroup Event loop group to be used for client sockets.
     * @param channelClass The class to be used for server channel.
     *
     * @return A new {@link TcpServer}
     */
    public static TcpServer<ByteBuf, ByteBuf> newServer(SocketAddress socketAddress, EventLoopGroup acceptGroup,
                                                        EventLoopGroup clientGroup,
                                                        Class<? extends ServerChannel> channelClass) {
        return new TcpServerImpl<>(socketAddress, acceptGroup, clientGroup, channelClass);
    }
}
| apache-2.0 |
desiderantes/jgentle | src/org/jgentleframework/context/enums/RegisterAnnotationInjecting.java | 2442 | /*
* Copyright 2007-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Project: JGentleFramework
*/
package org.jgentleframework.context.enums;
import java.lang.annotation.Annotation;
import org.jgentleframework.configure.annotation.AnnotationClass;
import org.jgentleframework.configure.annotation.AnnotationValidators;
import org.jgentleframework.configure.annotation.Bean;
import org.jgentleframework.configure.annotation.Builder;
import org.jgentleframework.configure.annotation.DefaultConstructor;
import org.jgentleframework.configure.annotation.Filter;
import org.jgentleframework.configure.annotation.Inject;
import org.jgentleframework.configure.annotation.Outject;
/**
 * Enumerates the framework annotations that are registered for injection handling;
 * each constant wraps the corresponding annotation type.
 *
 * @author LE QUOC CHUNG - mailto: <a
 *         href="mailto:skydunkpro@yahoo.com">skydunkpro@yahoo.com</a>
 * @date Oct 19, 2007
 */
public enum RegisterAnnotationInjecting {
	/** Wraps {@link org.jgentleframework.configure.annotation.Inject}. */
	Inject (Inject.class),
	/** Wraps {@link org.jgentleframework.configure.annotation.Outject}. */
	Outject (Outject.class),
	/** Wraps {@link org.jgentleframework.configure.annotation.Filter}. */
	Filter (Filter.class),
	/** Wraps {@link org.jgentleframework.configure.annotation.DefaultConstructor}. */
	DefaultConstructor (DefaultConstructor.class),
	/** Wraps {@link org.jgentleframework.configure.annotation.Builder}. */
	Builder (Builder.class),
	/** Wraps {@link org.jgentleframework.configure.annotation.Bean}. */
	Bean (Bean.class),
	/** Wraps {@link org.jgentleframework.configure.annotation.AnnotationClass}. */
	AnnotationClass (AnnotationClass.class),
	/** Wraps {@link org.jgentleframework.configure.annotation.AnnotationValidators}. */
	AnnotationValidator (AnnotationValidators.class);
	// The annotation type this constant represents (package-private by design in the original).
	Class<? extends Annotation> annotationClass;
	/**
	 * The Constructor.
	 *
	 * @param clazz
	 *            the annotation type this constant wraps
	 */
	RegisterAnnotationInjecting(Class<? extends Annotation> clazz) {
		this.annotationClass = clazz;
	}
	/**
	 * Gets the annotation class wrapped by this constant.
	 *
	 * @return the annotation class
	 */
	@AnnotationClass
	public Class<? extends Annotation> getAnnotationClass() {
		return annotationClass;
	}
}
| apache-2.0 |
secondsun/maven-android-plugin | src/main/java/com/simpligility/maven/plugins/android/InclusionExclusionResolver.java | 5478 | package com.simpligility.maven.plugins.android;
import java.util.Collection;
import java.util.List;
import com.android.annotations.NonNull;
import com.android.annotations.Nullable;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Splitter;
import org.apache.maven.artifact.Artifact;
import static com.google.common.collect.FluentIterable.from;
/**
 * Filters Maven artifacts by artifact type and by "groupId[:artifactId[:version]]" qualifiers.
 */
public class InclusionExclusionResolver
{
    private InclusionExclusionResolver()
    {
        // static utility class, not instantiable
    }

    /**
     * Filters the given artifacts. Explicit includes rescue an artifact from type/qualifier
     * excludes and from the {@code skipDependencies} default.
     *
     * @param artifacts Artifacts to filter
     * @param skipDependencies Skip all dependencies, but respect {@code includeArtifactTypes}
     * @param includeArtifactTypes Artifact types to be always included even if {@code skipDependencies} is
     *                             {@code true}
     * @param excludeArtifactTypes Artifact types to be always excluded even if {@code skipDependencies} is
     *                             {@code false}
     * @param includeArtifactQualifiers Artifact qualifiers to be always included even if {@code skipDependencies} is
     *                                  {@code false}
     * @param excludeArtifactQualifiers Artifact qualifiers to be always excluded even if {@code skipDependencies} is
     *                                  {@code true}
     * @return the artifacts that survive filtering, as an immutable set
     */
    public static Collection< Artifact > filterArtifacts( @NonNull Iterable< Artifact > artifacts,
            final boolean skipDependencies, @Nullable final Collection< String > includeArtifactTypes,
            @Nullable final Collection< String > excludeArtifactTypes,
            @Nullable final Collection< String > includeArtifactQualifiers,
            @Nullable final Collection< String > excludeArtifactQualifiers )
    {
        final boolean hasIncludeTypes = includeArtifactTypes != null;
        final boolean hasExcludeTypes = excludeArtifactTypes != null;
        final boolean hasIncludeQualifier = includeArtifactQualifiers != null;
        final boolean hasExcludeQualifier = excludeArtifactQualifiers != null;
        return from( artifacts )
            .filter( new Predicate<Artifact>() {
                @Override
                public boolean apply( Artifact artifact )
                {
                    final boolean includedByType = hasIncludeTypes
                            && includeArtifactTypes.contains( artifact.getType() );
                    final boolean includedByQualifier = hasIncludeQualifier
                            && match( artifact, includeArtifactQualifiers );
                    final boolean excludedByType = hasExcludeTypes
                            && excludeArtifactTypes.contains( artifact.getType() );
                    final boolean excludedByQualifier = hasExcludeQualifier
                            && match( artifact, excludeArtifactQualifiers );
                    if ( !skipDependencies )
                    {
                        // Keep by default; drop when excluded unless rescued by an explicit include.
                        return !excludedByType && !excludedByQualifier
                            || includedByQualifier
                            || includedByType && !excludedByQualifier;
                    }
                    else
                    {
                        // Drop by default; keep only when explicitly included.
                        // NOTE(review): the original expression contained an extra clause
                        // "includedByType && hasExcludeQualifier && !excludedByQualifier" that is
                        // subsumed by the plain "includedByType" disjunct and therefore had no
                        // effect; it was removed without changing behavior. If the intent was for
                        // qualifier excludes to override type includes here, that would be a
                        // behavior change and should be confirmed with the maintainers.
                        return includedByQualifier || includedByType;
                    }
                }
            } )
            .toSet();
    }

    /**
     * Returns whether the artifact matches any of the given qualifiers; blank entries are ignored.
     */
    private static boolean match( final Artifact artifact, Iterable< String > artifactQualifiers )
    {
        return from( artifactQualifiers )
            .filter( MUST_NOT_BE_BLANK )
            .anyMatch( new Predicate< String >() {
                @Override
                public boolean apply( String artifactQualifier )
                {
                    return match( artifact, artifactQualifier );
                }
            } );
    }

    /**
     * Returns whether the artifact matches a single qualifier of the form
     * {@code groupId}, {@code groupId:artifactId} or {@code groupId:artifactId:version}.
     *
     * @throws IllegalArgumentException if the qualifier has zero or more than three segments
     */
    private static boolean match( Artifact artifact, String artifactQualifier )
    {
        final List< String > split = from( COLON_SPLITTER.split( artifactQualifier ) ).transform( TRIMMER ).toList();
        final int count = split.size();
        if ( split.isEmpty() || count > 3 )
        {
            throw new IllegalArgumentException( "Invalid artifact qualifier: " + artifactQualifier );
        }
        // check groupId
        final String groupId = split.get( 0 );
        if ( !groupId.equals( artifact.getGroupId() ) )
        {
            return false;
        }
        if ( count == 1 )
        {
            return true;
        }
        // check artifactId
        final String artifactId = split.get( 1 );
        if ( !artifactId.equals( artifact.getArtifactId() ) )
        {
            return false;
        }
        if ( count == 2 )
        {
            return true;
        }
        // check version
        final String version = split.get( 2 );
        return version.equals( artifact.getVersion() );
    }

    private static final Splitter COLON_SPLITTER = Splitter.on( ':' );

    /** Trims surrounding whitespace from each qualifier segment. */
    private static final Function< String, String > TRIMMER = new Function< String, String >()
    {
        @Override
        public String apply( String value )
        {
            return value.trim();
        }
    };

    /** Rejects qualifier entries that are empty or whitespace-only. */
    private static final Predicate< String > MUST_NOT_BE_BLANK = new Predicate< String >()
    {
        @Override
        public boolean apply( String value )
        {
            return !value.trim().isEmpty();
        }
    };
}
| apache-2.0 |
christophd/camel | dsl/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/LambdaEndpointRouteBuilder.java | 1419 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.builder.endpoint;
import org.apache.camel.CamelContext;
import org.apache.camel.util.function.ThrowingConsumer;
/**
 * Functional interface for adding routes to a context using a lambda expression. It can be used as follows:
 *
 * <pre>
 * EndpointRouteBuilder.addEndpointRoutes(context, rb ->
 *     rb.from(rb.direct("inbound")).bean(MyBean.class));
 * </pre>
 *
 * @see EndpointRouteBuilder#addEndpointRoutes(CamelContext, LambdaEndpointRouteBuilder)
 */
@FunctionalInterface
public interface LambdaEndpointRouteBuilder extends ThrowingConsumer<EndpointRouteBuilder, Exception> {
}
| apache-2.0 |
Demonsu/DogesAdventure | Doge's Adventure/Assets/Models/DynamicElements_Effects/scripts/PrefabGenerator.js | 2543 | var createThis:GameObject[]; // list of possible prefabs
private var rndNr:float; // this is for just a random number holder when we need it
var thisManyTimes:int=3;
var overThisTime:float=1.0;
var xWidth:float; // define the square where prefabs will be generated
var yWidth:float;
var zWidth:float;
var xRotMax:float; // define maximum rotation of each prefab
var yRotMax:float=180;
var zRotMax:float;
var allUseSameRotation:boolean=false;
private var allRotationDecided:boolean=false;
var detachToWorld:boolean=true;
private var x_cur:float; // these are used in the random palcement process
private var y_cur:float;
private var z_cur:float;
private var xRotCur:float; // these are used in the random protation process
private var yRotCur:float;
private var zRotCur:float;
private var timeCounter:float; // counts the time :p
private var effectCounter:int; // you will guess ti
private var trigger:float; // trigger: at which interwals should we generate a particle
function Start () {
	// Interval between generations so that thisManyTimes prefabs fit into overThisTime.
	trigger=overThisTime/thisManyTimes; //define the intervals of time of the prefab generation.
}
// Once per trigger interval, spawn one randomly chosen prefab at a random
// position inside the configured box, with a random (or shared) rotation,
// until thisManyTimes prefabs have been created.
function Update () {
	timeCounter+=Time.deltaTime;
	// FIX: the original condition was `effectCounter<=thisManyTimes`, which
	// generated one prefab too many (thisManyTimes+1 in total).
	if(timeCounter>trigger&&effectCounter<thisManyTimes)
	{
		rndNr=Mathf.Floor(Random.value*createThis.length); //decide which prefab to create
		x_cur=transform.position.x+(Random.value*xWidth)-(xWidth*0.5); // decide an actual place inside the box
		y_cur=transform.position.y+(Random.value*yWidth)-(yWidth*0.5);
		z_cur=transform.position.z+(Random.value*zWidth)-(zWidth*0.5);
		if(allUseSameRotation==false||allRotationDecided==false) // decide rotation; runs only once when allUseSameRotation is true
		{
			xRotCur=transform.rotation.x+(Random.value*xRotMax*2)-(xRotMax);
			yRotCur=transform.rotation.y+(Random.value*yRotMax*2)-(yRotMax);
			zRotCur=transform.rotation.z+(Random.value*zRotMax*2)-(zRotMax);
			allRotationDecided=true;
		}
		var justCreated:GameObject=Instantiate(createThis[rndNr], Vector3(x_cur, y_cur, z_cur), transform.rotation); //create the prefab
		justCreated.transform.Rotate(xRotCur, yRotCur, zRotCur); // apply the random rotation on top of our own
		if(detachToWorld==false) // if needed we attach the freshly generated prefab to the object that is holding this script
		{
			justCreated.transform.parent=transform;
		}
		timeCounter-=trigger; // keep the remainder so intervals stay accurate
		effectCounter+=1;
	}
}
QuantConnect/Lean | Report/ReportElements/TradesPerDayReportElement.cs | 2628 | /*
* QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
* Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System.Collections.Generic;
using System.Linq;
using QuantConnect.Orders;
using QuantConnect.Packets;
namespace QuantConnect.Report.ReportElements
{
internal sealed class TradesPerDayReportElement : ReportElement
{
private LiveResult _live;
private BacktestResult _backtest;
/// <summary>
/// Estimate the trades per day of the strategy.
/// </summary>
/// <param name="name">Name of the widget</param>
/// <param name="key">Location of injection</param>
/// <param name="backtest">Backtest result object</param>
/// <param name="live">Live result object</param>
public TradesPerDayReportElement(string name, string key, BacktestResult backtest, LiveResult live)
{
_live = live;
_backtest = backtest;
Name = name;
Key = key;
}
/// <summary>
/// Generate trades per day
/// </summary>
public override string Render()
{
var liveOrders = _live?.Orders?.Values.ToList();
if (liveOrders == null)
{
liveOrders = new List<Order>();
}
var orders = _backtest?.Orders?.Values.Concat(liveOrders).OrderBy(x => x.Time);
if (orders == null)
{
return "-";
}
if (!orders.Any())
{
return "-";
}
var days = orders.Last().Time
.Subtract(orders.First().Time)
.TotalDays;
if (days == 0)
{
days = 1;
}
var tradesPerDay = orders.Count() / days;
Result = tradesPerDay;
if (tradesPerDay > 9)
{
return $"{tradesPerDay:F0}";
}
return $"{tradesPerDay:F1}";
}
}
}
| apache-2.0 |
auduny/home-assistant | homeassistant/components/deconz/config_flow.py | 7614 | """Config flow to configure deCONZ component."""
import asyncio
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .const import CONF_BRIDGEID, DEFAULT_PORT, DOMAIN
CONF_SERIAL = 'serial'
@callback
def configured_gateways(hass):
    """Return a mapping of bridge id to config entry for all configured gateways."""
    gateways = {}
    for entry in hass.config_entries.async_entries(DOMAIN):
        gateways[entry.data[CONF_BRIDGEID]] = entry
    return gateways
@callback
def get_master_gateway(hass):
    """Return the gateway which is marked as master, or None if there is none."""
    return next(
        (gateway for gateway in hass.data[DOMAIN].values() if gateway.master),
        None,
    )
@config_entries.HANDLERS.register(DOMAIN)
class DeconzFlowHandler(config_entries.ConfigFlow):
    """Handle a deCONZ config flow."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
    # Hass.io discovery payload stashed between the hassio step and the
    # hassio_confirm step; None until a Hass.io discovery arrives.
    _hassio_discovery = None
    def __init__(self):
        """Initialize the deCONZ config flow."""
        self.bridges = []  # bridges found by pydeconz discovery
        self.deconz_config = {}  # gateway config being assembled for the entry
    async def async_step_init(self, user_input=None):
        """Needed in order to not require re-translation of strings."""
        return await self.async_step_user(user_input)
    async def async_step_user(self, user_input=None):
        """Handle a deCONZ config flow start.
        If only one bridge is found go to link step.
        If more than one bridge is found let user choose bridge to link.
        If no bridge is found allow user to manually input configuration.
        """
        from pydeconz.utils import async_discovery
        if user_input is not None:
            # A host was chosen on the multi-bridge form: reuse the full
            # discovered bridge info when the host matches one.
            for bridge in self.bridges:
                if bridge[CONF_HOST] == user_input[CONF_HOST]:
                    self.deconz_config = bridge
                    return await self.async_step_link()
            # Otherwise treat the input as a manually entered host/port.
            self.deconz_config = user_input
            return await self.async_step_link()
        session = aiohttp_client.async_get_clientsession(self.hass)
        try:
            with async_timeout.timeout(10):
                self.bridges = await async_discovery(session)
        except asyncio.TimeoutError:
            # Discovery failed; fall through to the manual-entry form below.
            self.bridges = []
        if len(self.bridges) == 1:
            self.deconz_config = self.bridges[0]
            return await self.async_step_link()
        if len(self.bridges) > 1:
            # Several bridges found: let the user pick one by host.
            hosts = []
            for bridge in self.bridges:
                hosts.append(bridge[CONF_HOST])
            return self.async_show_form(
                step_id='init',
                data_schema=vol.Schema({
                    vol.Required(CONF_HOST): vol.In(hosts)
                })
            )
        # Nothing discovered: ask for host and port manually.
        return self.async_show_form(
            step_id='init',
            data_schema=vol.Schema({
                vol.Required(CONF_HOST): str,
                vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
            }),
        )
    async def async_step_link(self, user_input=None):
        """Attempt to link with the deCONZ bridge."""
        from pydeconz.errors import ResponseError, RequestError
        from pydeconz.utils import async_get_api_key
        errors = {}
        if user_input is not None:
            session = aiohttp_client.async_get_clientsession(self.hass)
            try:
                with async_timeout.timeout(10):
                    api_key = await async_get_api_key(
                        session, **self.deconz_config)
            except (ResponseError, RequestError, asyncio.TimeoutError):
                # Bridge not unlocked or unreachable: re-show the link form.
                errors['base'] = 'no_key'
            else:
                self.deconz_config[CONF_API_KEY] = api_key
                return await self._create_entry()
        return self.async_show_form(
            step_id='link',
            errors=errors,
        )
    async def _create_entry(self):
        """Create entry for gateway."""
        from pydeconz.utils import async_get_bridgeid
        # Manually entered configs have no bridge id yet; fetch it now.
        if CONF_BRIDGEID not in self.deconz_config:
            session = aiohttp_client.async_get_clientsession(self.hass)
            try:
                with async_timeout.timeout(10):
                    self.deconz_config[CONF_BRIDGEID] = \
                        await async_get_bridgeid(
                            session, **self.deconz_config)
            except asyncio.TimeoutError:
                return self.async_abort(reason='no_bridges')
        return self.async_create_entry(
            title='deCONZ-' + self.deconz_config[CONF_BRIDGEID],
            data=self.deconz_config
        )
    async def _update_entry(self, entry, host):
        """Update existing entry."""
        # NOTE(review): mutates entry.data in place before persisting;
        # confirm this matches config_entries expectations for this HA version.
        entry.data[CONF_HOST] = host
        self.hass.config_entries.async_update_entry(entry)
    async def async_step_discovery(self, discovery_info):
        """Prepare configuration for a discovered deCONZ bridge.
        This flow is triggered by the discovery component.
        """
        bridgeid = discovery_info[CONF_SERIAL]
        gateway_entries = configured_gateways(self.hass)
        if bridgeid in gateway_entries:
            # Already configured: just refresh the stored host and bail out.
            entry = gateway_entries[bridgeid]
            await self._update_entry(entry, discovery_info[CONF_HOST])
            return self.async_abort(reason='updated_instance')
        deconz_config = {
            CONF_HOST: discovery_info[CONF_HOST],
            CONF_PORT: discovery_info[CONF_PORT],
            CONF_BRIDGEID: discovery_info[CONF_SERIAL]
        }
        return await self.async_step_import(deconz_config)
    async def async_step_import(self, import_config):
        """Import a deCONZ bridge as a config entry.
        This flow is triggered by `async_setup` for configured bridges.
        This flow is also triggered by `async_step_discovery`.
        This will execute for any bridge that does not have a
        config entry yet (based on host).
        If an API key is provided, we will create an entry.
        Otherwise we will delegate to `link` step which
        will ask user to link the bridge.
        """
        self.deconz_config = import_config
        if CONF_API_KEY not in import_config:
            return await self.async_step_link()
        return await self._create_entry()
    async def async_step_hassio(self, user_input=None):
        """Prepare configuration for a Hass.io deCONZ bridge.
        This flow is triggered by the discovery component.
        """
        bridgeid = user_input[CONF_SERIAL]
        gateway_entries = configured_gateways(self.hass)
        if bridgeid in gateway_entries:
            # Known bridge: update the stored host instead of re-adding it.
            entry = gateway_entries[bridgeid]
            await self._update_entry(entry, user_input[CONF_HOST])
            return self.async_abort(reason='updated_instance')
        # New bridge: hold the payload and ask the user to confirm.
        self._hassio_discovery = user_input
        return await self.async_step_hassio_confirm()
    async def async_step_hassio_confirm(self, user_input=None):
        """Confirm a Hass.io discovery."""
        if user_input is not None:
            self.deconz_config = {
                CONF_HOST: self._hassio_discovery[CONF_HOST],
                CONF_PORT: self._hassio_discovery[CONF_PORT],
                CONF_BRIDGEID: self._hassio_discovery[CONF_SERIAL],
                CONF_API_KEY: self._hassio_discovery[CONF_API_KEY]
            }
            return await self._create_entry()
        return self.async_show_form(
            step_id='hassio_confirm',
            description_placeholders={
                'addon': self._hassio_discovery['addon']
            }
        )
| apache-2.0 |
nissSK/Prebid.js | modules/admaticBidAdapter.js | 4504 | import * as utils from 'src/utils';
import { registerBidder } from 'src/adapters/bidderFactory';
const BIDDER_CODE = 'admatic';
// Protocol-relative URL: inherits the http/https scheme of the host page.
const ENDPOINT_URL = '//ads4.admatic.com.tr/prebid/v3/bidrequest';
export const spec = {
  code: BIDDER_CODE,
  aliases: ['admatic'], // short code
  /**
   * Determines whether or not the given bid request is valid.
   *
   * @param {BidRequest} bid The bid params to validate.
   * @return boolean True if this is a valid bid, and false otherwise.
   */
  isBidRequestValid: function (bid) {
    // pid, wid and url params are all mandatory for this adapter.
    return !!(bid.params.pid && bid.params.wid && bid.params.url);
  },
  /**
   * Make a server request from the list of BidRequests.
   *
   * @param {validBidRequests[]} - an array of bids
   * @return ServerRequest Info describing the request to the server.
   */
  buildRequests: function (validBidRequests) {
    const payload = {
      request: []
    };
    for (var i = 0; i < validBidRequests.length; i++) {
      var validBidRequest = validBidRequests[i];
      // NOTE(review): these top-level fields are overwritten on every
      // iteration, so the last bid's values win; presumably all bids in
      // one auction share them — verify against callers.
      payload.auctionId = validBidRequest.auctionId;
      payload.bidder = validBidRequest.bidder;
      payload.bidderRequestId = validBidRequest.bidderRequestId;
      payload.pid = validBidRequest.params.pid;
      payload.wid = validBidRequest.params.wid;
      payload.url = validBidRequest.params.url;
      // Per-ad-unit request entry.
      var request = {
        adUnitCode: validBidRequest.adUnitCode,
        bidId: validBidRequest.bidId,
        transactionId: validBidRequest.transactionId,
        priceType: validBidRequest.params.priceType,
        sizes: transformSizes(validBidRequest.sizes)
      }
      payload.request.push(request);
    }
    const payloadString = JSON.stringify(payload);
    return {
      method: 'POST',
      url: ENDPOINT_URL,
      data: payloadString,
      bidder: 'admatic',
      bids: validBidRequests
    };
  },
  /**
   * Unpack the response from the server into a list of bids.
   *
   * @param {ServerResponse} serverResponse A successful response from the server.
   * @return {Bid[]} An array of bids which were nested inside the server.
   */
  interpretResponse: function (serverResponse, bidRequest) {
    const serverBody = serverResponse.body;
    const bidResponses = [];
    if (serverBody) {
      if (serverBody.tags && serverBody.tags.length > 0) {
        serverBody.tags.forEach(serverBid => {
          if (serverBid != null) {
            // A zero cpm means no bid for that slot; skip it.
            if (serverBid.cpm !== 0) {
              const bidResponse = {
                requestId: serverBid.bidId,
                cpm: serverBid.cpm,
                width: serverBid.width,
                height: serverBid.height,
                creativeId: serverBid.creativeId,
                dealId: serverBid.dealId,
                currency: serverBid.currency,
                netRevenue: serverBid.netRevenue,
                ttl: serverBid.ttl,
                referrer: serverBid.referrer,
                ad: serverBid.ad
              };
              bidResponses.push(bidResponse);
            }
          }
        });
      }
    }
    return bidResponses;
  },
  /**
   * Register the user sync pixels which should be dropped after the auction.
   *
   * @param {SyncOptions} syncOptions Which user syncs are allowed?
   * @param {ServerResponse[]} serverResponses List of server's responses.
   * @return {UserSync[]} The user syncs which should be dropped.
   */
  getUserSyncs: function (syncOptions, serverResponses) {
    const syncs = [];
    if (syncOptions.iframeEnabled) {
      syncs.push({
        type: 'iframe',
        url: '//ads4.admatic.com.tr/prebid/static/usersync/v3/async_usersync.html'
      });
    }
    // Pixel sync only makes sense once the server has actually responded.
    if (syncOptions.pixelEnabled && serverResponses.length > 0) {
      syncs.push({
        type: 'image',
        url: 'https://ads5.admatic.com.tr/prebid/v3/bidrequest/usersync'
      });
    }
    return syncs;
  }
}
/* Turn bid request sizes into ut-compatible format.
 * Accepts either a single [w, h] pair (e.g. [300, 250]) or an array of
 * [w, h] pairs, and returns an array of {width, height} objects.
 * Unrecognized input yields an empty array. */
function transformSizes(requestSizes) {
  const sizes = [];
  // Native Array.isArray replaces the project utils.isArray helper.
  if (Array.isArray(requestSizes) && requestSizes.length === 2 && !Array.isArray(requestSizes[0])) {
    // Single size expressed as a flat pair.
    sizes.push({
      width: parseInt(requestSizes[0], 10),
      height: parseInt(requestSizes[1], 10)
    });
  } else if (typeof requestSizes === 'object' && requestSizes !== null) {
    // Array (or array-like) of [w, h] pairs. The null guard fixes a crash:
    // typeof null === 'object', so the original threw on null input.
    for (let i = 0; i < requestSizes.length; i++) {
      const size = requestSizes[i];
      sizes.push({
        width: parseInt(size[0], 10),
        height: parseInt(size[1], 10)
      });
    }
  }
  return sizes;
}
registerBidder(spec);
| apache-2.0 |
reaction1989/roslyn | src/Workspaces/SharedUtilitiesAndExtensions/Compiler/Core/Utilities/EditorConfigFileGenerator_NamingStyles.cs | 6451 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#nullable enable
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Linq;
using System.Text;
using Microsoft.CodeAnalysis.CodeStyle;
using Microsoft.CodeAnalysis.Diagnostics;
using Microsoft.CodeAnalysis.Diagnostics.Analyzers.NamingStyles;
namespace Microsoft.CodeAnalysis.Options
{
    internal static partial class EditorConfigFileGenerator
    {
        /// <summary>
        /// Appends the "Naming styles" section — naming rules, symbol
        /// specifications and naming styles — of
        /// <paramref name="namingStylePreferences"/> to
        /// <paramref name="editorconfig"/> in .editorconfig syntax.
        /// </summary>
        public static void AppendNamingStylePreferencesToEditorConfig(NamingStylePreferences namingStylePreferences, string language, StringBuilder editorconfig)
        {
            editorconfig.AppendLine($"#### {CompilerExtensionsResources.Naming_styles} ####");
            // Stable, human-readable names for styles/specifications, and
            // derived names for the rules that reference them.
            var serializedNameMap = AssignNamesToNamingStyleElements(namingStylePreferences);
            var ruleNameMap = AssignNamesToNamingStyleRules(namingStylePreferences, serializedNameMap);
            // Only specifications/styles referenced by at least one rule are
            // emitted in the sections below.
            var referencedElements = new HashSet<Guid>();
            editorconfig.AppendLine();
            editorconfig.AppendLine($"# {CompilerExtensionsResources.Naming_rules}");
            foreach (var namingRule in namingStylePreferences.NamingRules)
            {
                referencedElements.Add(namingRule.SymbolSpecificationID);
                referencedElements.Add(namingRule.NamingStyleID);
                editorconfig.AppendLine();
                editorconfig.AppendLine($"dotnet_naming_rule.{ruleNameMap[namingRule]}.severity = {namingRule.EnforcementLevel.ToNotificationOption(defaultSeverity: DiagnosticSeverity.Hidden).ToEditorConfigString()}");
                editorconfig.AppendLine($"dotnet_naming_rule.{ruleNameMap[namingRule]}.symbols = {serializedNameMap[namingRule.SymbolSpecificationID]}");
                editorconfig.AppendLine($"dotnet_naming_rule.{ruleNameMap[namingRule]}.style = {serializedNameMap[namingRule.NamingStyleID]}");
            }
            editorconfig.AppendLine();
            editorconfig.AppendLine($"# {CompilerExtensionsResources.Symbol_specifications}");
            foreach (var symbolSpecification in namingStylePreferences.SymbolSpecifications)
            {
                if (!referencedElements.Contains(symbolSpecification.ID))
                {
                    continue;
                }
                editorconfig.AppendLine();
                editorconfig.AppendLine($"dotnet_naming_symbols.{serializedNameMap[symbolSpecification.ID]}.applicable_kinds = {symbolSpecification.ApplicableSymbolKindList.ToEditorConfigString()}");
                editorconfig.AppendLine($"dotnet_naming_symbols.{serializedNameMap[symbolSpecification.ID]}.applicable_accessibilities = {symbolSpecification.ApplicableAccessibilityList.ToEditorConfigString(language)}");
                editorconfig.AppendLine($"dotnet_naming_symbols.{serializedNameMap[symbolSpecification.ID]}.required_modifiers = {symbolSpecification.RequiredModifierList.ToEditorConfigString(language)}");
            }
            editorconfig.AppendLine();
            editorconfig.AppendLine($"# {CompilerExtensionsResources.Naming_styles}");
            foreach (var namingStyle in namingStylePreferences.NamingStyles)
            {
                if (!referencedElements.Contains(namingStyle.ID))
                {
                    continue;
                }
                editorconfig.AppendLine();
                editorconfig.AppendLine($"dotnet_naming_style.{serializedNameMap[namingStyle.ID]}.required_prefix = {namingStyle.Prefix}");
                editorconfig.AppendLine($"dotnet_naming_style.{serializedNameMap[namingStyle.ID]}.required_suffix = {namingStyle.Suffix}");
                editorconfig.AppendLine($"dotnet_naming_style.{serializedNameMap[namingStyle.ID]}.word_separator = {namingStyle.WordSeparator}");
                editorconfig.AppendLine($"dotnet_naming_style.{serializedNameMap[namingStyle.ID]}.capitalization = {namingStyle.CapitalizationScheme.ToEditorConfigString()}");
            }
        }
        /// <summary>
        /// Maps every symbol specification and naming style to a unique
        /// serialized name: the snake_cased display name, or the element's
        /// GUID when snake-casing would collide with an earlier element.
        /// </summary>
        private static ImmutableDictionary<Guid, string> AssignNamesToNamingStyleElements(NamingStylePreferences namingStylePreferences)
        {
            var symbolSpecificationNames = new HashSet<string>();
            var builder = ImmutableDictionary.CreateBuilder<Guid, string>();
            foreach (var symbolSpecification in namingStylePreferences.SymbolSpecifications)
            {
                var name = ToSnakeCaseName(symbolSpecification.Name);
                // Duplicate snake-case name: fall back to the GUID ("n" = no dashes).
                if (!symbolSpecificationNames.Add(name))
                {
                    name = symbolSpecification.ID.ToString("n");
                }
                builder.Add(symbolSpecification.ID, name);
            }
            // Styles are deduplicated against other styles only, not against
            // symbol specifications (the two live in separate sections).
            var namingStyleNames = new HashSet<string>();
            foreach (var namingStyle in namingStylePreferences.NamingStyles)
            {
                var name = ToSnakeCaseName(namingStyle.Name);
                if (!namingStyleNames.Add(name))
                {
                    name = namingStyle.ID.ToString("n");
                }
                builder.Add(namingStyle.ID, name);
            }
            return builder.ToImmutable();
            // Lowercases letters/digits and replaces every other character with '_'.
            static string ToSnakeCaseName(string name)
            {
                return new string(name
                    .Select(ch =>
                    {
                        if (char.IsLetterOrDigit(ch))
                        {
                            return char.ToLowerInvariant(ch);
                        }
                        else
                        {
                            return '_';
                        }
                    })
                    .ToArray());
            }
        }
        /// <summary>
        /// Names each naming rule "{symbols}_should_be_{style}" from the
        /// serialized names assigned by
        /// <see cref="AssignNamesToNamingStyleElements"/>.
        /// </summary>
        private static ImmutableDictionary<SerializableNamingRule, string> AssignNamesToNamingStyleRules(NamingStylePreferences namingStylePreferences, ImmutableDictionary<Guid, string> serializedNameMap)
        {
            var builder = ImmutableDictionary.CreateBuilder<SerializableNamingRule, string>();
            foreach (var rule in namingStylePreferences.NamingRules)
            {
                builder.Add(rule, $"{serializedNameMap[rule.SymbolSpecificationID]}_should_be_{serializedNameMap[rule.NamingStyleID]}");
            }
            return builder.ToImmutable();
        }
    }
}
| apache-2.0 |
lincoln-lil/flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecExpand.java | 4352 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.exec.common;
import org.apache.flink.api.dag.Transformation;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.planner.codegen.CodeGeneratorContext;
import org.apache.flink.table.planner.codegen.ExpandCodeGenerator;
import org.apache.flink.table.planner.delegation.PlannerBase;
import org.apache.flink.table.planner.plan.nodes.exec.ExecEdge;
import org.apache.flink.table.planner.plan.nodes.exec.ExecNode;
import org.apache.flink.table.planner.plan.nodes.exec.ExecNodeBase;
import org.apache.flink.table.planner.plan.nodes.exec.InputProperty;
import org.apache.flink.table.planner.plan.nodes.exec.SingleTransformationTranslator;
import org.apache.flink.table.planner.plan.nodes.exec.utils.ExecNodeUtil;
import org.apache.flink.table.runtime.operators.CodeGenOperatorFactory;
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.calcite.rex.RexNode;
import java.util.List;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Base {@link ExecNode} that can expand one row to multiple rows based on given projects: for each
 * input row, one output row is produced per inner list of {@link #projects}.
 */
public abstract class CommonExecExpand extends ExecNodeBase<RowData>
        implements SingleTransformationTranslator<RowData> {
    // JSON field name used when (de)serializing this node's projects.
    public static final String FIELD_NAME_PROJECTS = "projects";
    // One list of projection expressions per expanded output row; all inner
    // lists have the same non-zero size (enforced in the constructor).
    @JsonProperty(FIELD_NAME_PROJECTS)
    private final List<List<RexNode>> projects;
    // Passed through to the generated operator; excluded from the JSON plan.
    @JsonIgnore private final boolean retainHeader;
    /**
     * Creates the expand node.
     *
     * @param projects one expression list per expanded output row; must be non-empty and
     *     rectangular (every inner list has the same size)
     * @param retainHeader forwarded to the code-generated expand operator
     * @param inputProperties exactly one input is required
     */
    public CommonExecExpand(
            List<List<RexNode>> projects,
            boolean retainHeader,
            int id,
            List<InputProperty> inputProperties,
            RowType outputType,
            String description) {
        super(id, inputProperties, outputType, description);
        checkArgument(inputProperties.size() == 1);
        this.projects = checkNotNull(projects);
        // Non-empty and rectangular: distinct().count() == 1 means every
        // inner list shares one arity.
        checkArgument(
                projects.size() > 0
                        && projects.get(0).size() > 0
                        && projects.stream().map(List::size).distinct().count() == 1);
        this.retainHeader = retainHeader;
    }
    @SuppressWarnings("unchecked")
    @Override
    protected Transformation<RowData> translateToPlanInternal(PlannerBase planner) {
        // Translate the single input first, then wrap it with the
        // code-generated expand operator.
        final ExecEdge inputEdge = getInputEdges().get(0);
        final Transformation<RowData> inputTransform =
                (Transformation<RowData>) inputEdge.translateToPlan(planner);
        final CodeGenOperatorFactory<RowData> operatorFactory =
                ExpandCodeGenerator.generateExpandOperator(
                        new CodeGeneratorContext(planner.getTableConfig()),
                        (RowType) inputEdge.getOutputType(),
                        (RowType) getOutputType(),
                        projects,
                        retainHeader,
                        getClass().getSimpleName());
        // Expand keeps the input's parallelism.
        return ExecNodeUtil.createOneInputTransformation(
                inputTransform,
                getOperatorName(planner.getTableConfig()),
                getOperatorDescription(planner.getTableConfig()),
                operatorFactory,
                InternalTypeInfo.of(getOutputType()),
                inputTransform.getParallelism());
    }
}
| apache-2.0 |