code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
//
// TcpRequestChannel.cs
//
// Author:
// Atsushi Enomoto <atsushi@ximian.com>
//
// Copyright (C) 2009 Novell, Inc. http://www.novell.com
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
using System;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Net.Sockets;
using System.ServiceModel;
using System.ServiceModel.Description;
using System.Threading;
using System.Xml;
namespace System.ServiceModel.Channels.NetTcp
{
// Request/reply channel over the .NET TCP binary framing protocol
// ([MC-NMF]), operating in singleton-unsized mode.
internal class TcpRequestChannel : RequestChannelBase
{
	TcpChannelInfo info;
	TcpClient client;
	TcpBinaryFrameManager frame;

	public TcpRequestChannel (ChannelFactoryBase factory, TcpChannelInfo info, EndpointAddress address, Uri via)
		: base (factory, address, via)
	{
		this.info = info;
	}

	// Message encoder configured on the transport binding element.
	public MessageEncoder Encoder {
		get { return info.MessageEncoder; }
	}

	protected override void OnAbort ()
	{
		// Abort is an immediate close: drop the connection without waiting.
		OnClose (TimeSpan.Zero);
	}

	protected override void OnClose (TimeSpan timeout)
	{
		if (client != null) {
			client.Close ();
			client = null; // make repeated Close/Abort calls harmless
		}
	}

	protected override void OnOpen (TimeSpan timeout)
	{
		CreateClient (timeout);
	}

	// Opens the TCP connection to the Via endpoint and wraps its stream in
	// a framing manager prepared for singleton-unsized message exchange.
	void CreateClient (TimeSpan timeout)
	{
		int explicitPort = Via.Port;
		client = new TcpClient (Via.Host, explicitPort <= 0 ? TcpTransportBindingElement.DefaultPort : explicitPort);

		NetworkStream ns = client.GetStream ();
		frame = new TcpBinaryFrameManager (TcpBinaryFrameManager.SingletonUnsizedMode, ns, false) {
			Encoder = this.Encoder,
			Via = this.Via };
	}

	// Sends `input` and blocks until the reply message is read, decrementing
	// the remaining timeout across the write and read phases.
	public override Message Request (Message input, TimeSpan timeout)
	{
		// Use UtcNow for elapsed-time arithmetic: DateTime.Now is affected
		// by DST transitions and wall-clock adjustments, which would corrupt
		// the remaining-timeout computation below.
		DateTime start = DateTime.UtcNow;

		// FIXME: use timeouts for the preamble exchange as well.
		frame.ProcessPreambleInitiator ();
		frame.ProcessPreambleAckInitiator ();

		if (input.Headers.To == null)
			input.Headers.To = RemoteAddress.Uri;
		if (input.Headers.MessageId == null)
			input.Headers.MessageId = new UniqueId ();

		Logger.LogMessage (MessageLogSourceKind.TransportSend, ref input, int.MaxValue); // It is not a receive buffer

		frame.WriteUnsizedMessage (input, timeout - (DateTime.UtcNow - start));
		// LAMESPEC: it contradicts the protocol described at section 3.1.1.1.1 in [MC-NMF].
		// Moving this WriteEndRecord() after ReadUnsizedMessage() causes TCP connection blocking.
		frame.WriteEndRecord ();

		var ret = frame.ReadUnsizedMessage (timeout - (DateTime.UtcNow - start));
		Logger.LogMessage (MessageLogSourceKind.TransportReceive, ref ret, info.BindingElement.MaxReceivedMessageSize);
		frame.ReadEndRecord (); // both
		return ret;
	}
}
}
| edwinspire/VSharp | class/System.ServiceModel/System.ServiceModel.Channels.NetTcp/TcpRequestChannel.cs | C# | lgpl-3.0 | 3,556 |
/* -*-c++-*- */
/* osgEarth - Dynamic map generation toolkit for OpenSceneGraph
* Copyright 2008-2010 Pelican Mapping
* http://osgearth.org
*
* osgEarth is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>
*/
#include <osgEarth/SparseTexture2DArray>
// this class is only supported in newer OSG versions.
#if OSG_VERSION_GREATER_OR_EQUAL( 2, 9, 8 )
using namespace osgEarth;
int
SparseTexture2DArray::firstValidImageIndex() const
{
for( int i=0; i<_images.size(); ++i )
if ( _images[i].valid() )
return i;
return -1;
}
osg::Image*
SparseTexture2DArray::firstValidImage() const
{
    // Convenience wrapper: resolve the first valid layer index (if any)
    // to its image pointer; NULL when every layer is empty.
    const int index = firstValidImageIndex();
    if ( index < 0 )
        return 0L;
    return _images[index].get();
}
void
SparseTexture2DArray::computeInternalFormat() const
{
    // Derive the internal format from the first available layer image;
    // fall back to type-based computation when no image is present.
    osg::Image* image = firstValidImage();
    if ( !image )
        computeInternalFormatType();
    else
        computeInternalFormatWithImage( *image );
}
// Allocates, binds and (re)uploads this texture array for the current
// graphics context. Unlike osg::Texture2DArray::apply(), NULL layer
// images are tolerated ("sparse"): only valid layers are subloaded.
// Four cases are handled in order: existing texture object, first-time
// upload from images, empty allocation from dimensions, or unbind.
void
SparseTexture2DArray::apply( osg::State& state ) const
{
    // get the contextID (user defined ID of 0 upwards) for the
    // current OpenGL context.
    const unsigned int contextID = state.getContextID();

    Texture::TextureObjectManager* tom = Texture::getTextureObjectManager(contextID).get();
    //ElapsedTime elapsedTime(&(tom->getApplyTime()));
    tom->getNumberApplied()++;

    const Extensions* extensions = getExtensions(contextID,true);

    // if not supported, then return
    if (!extensions->isTexture2DArraySupported() || !extensions->isTexture3DSupported())
    {
        OSG_WARN<<"Warning: Texture2DArray::apply(..) failed, 2D texture arrays are not support by OpenGL driver."<<std::endl;
        return;
    }

    // get the texture object for the current contextID.
    TextureObject* textureObject = getTextureObject(contextID);

    // Invalidate the existing texture object if the first valid image
    // was modified and its required dimensions no longer match.
    if (textureObject && _textureDepth>0)
    {
        const osg::Image* image = firstValidImage();
        // NOTE(review): this compares the modified count of slot 0 against
        // the first *valid* image, which may live at a layer other than 0
        // when layer 0 is NULL — confirm this is intended.
        if (image && getModifiedCount(0, contextID) != image->getModifiedCount())
        {
            // compute the internal texture format, this set the _internalFormat to an appropriate value.
            computeInternalFormat();

            GLsizei new_width, new_height, new_numMipmapLevels;
            // compute the dimensions of the texture.
            computeRequiredTextureDimensions(state, *image, new_width, new_height, new_numMipmapLevels);

            if (!textureObject->match(GL_TEXTURE_2D_ARRAY_EXT, new_numMipmapLevels, _internalFormat, new_width, new_height, 1, _borderWidth))
            {
                // Dimensions/format changed: release so a fresh object is built below.
                Texture::releaseTextureObject(contextID, _textureObjectBuffer[contextID].get());
                _textureObjectBuffer[contextID] = 0;
                textureObject = 0;
            }
        }
    }

    // if we already have an texture object, then
    if (textureObject)
    {
        // bind texture object
        textureObject->bind();

        // if texture parameters changed, then reset them
        if (getTextureParameterDirty(state.getContextID())) applyTexParameters(GL_TEXTURE_2D_ARRAY_EXT,state);

        // if subload is specified, then use it to subload the images to GPU memory
        //if (_subloadCallback.valid())
        //{
        //    _subloadCallback->subload(*this,state);
        //}
        //else
        {
            // for each image of the texture array do
            for (GLsizei n=0; n < _textureDepth; n++)
            {
                osg::Image* image = _images[n].get();

                // if image content is modified, then upload it to the GPU memory
                // GW: this means we have to "dirty" an image before setting it!
                if (image && getModifiedCount(n,contextID) != image->getModifiedCount())
                {
                    applyTexImage2DArray_subload(state, image, _textureWidth, _textureHeight, n, _internalFormat, _numMipmapLevels);
                    getModifiedCount(n,contextID) = image->getModifiedCount();
                }
            }
        }
    }
    // nothing before, but we have valid images, so do manual upload and create texture object manually
    else if ( firstValidImage() != 0L ) // if (imagesValid())
    {
        // compute the internal texture format, this set the _internalFormat to an appropriate value.
        computeInternalFormat();

        // compute the dimensions of the texture.
        osg::Image* firstImage = firstValidImage();
        computeRequiredTextureDimensions(state, *firstImage, _textureWidth, _textureHeight, _numMipmapLevels);

        // create texture object
        textureObject = generateTextureObject(
            this, contextID,GL_TEXTURE_2D_ARRAY_EXT,_numMipmapLevels,_internalFormat,_textureWidth,_textureHeight,_textureDepth,0);

        // bind texture
        textureObject->bind();
        applyTexParameters(GL_TEXTURE_2D_ARRAY_EXT, state);

        _textureObjectBuffer[contextID] = textureObject;

        // First we need to allocate the texture memory
        int sourceFormat = _sourceFormat ? _sourceFormat : _internalFormat;

        if( isCompressedInternalFormat( sourceFormat ) &&
            sourceFormat == _internalFormat &&
            extensions->isCompressedTexImage3DSupported() )
        {
            extensions->glCompressedTexImage3D( GL_TEXTURE_2D_ARRAY_EXT, 0, _internalFormat,
                _textureWidth, _textureHeight, _textureDepth, _borderWidth,
                firstImage->getImageSizeInBytes() * _textureDepth,
                0);
        }
        else
        {
            // Override compressed source format with safe GL_RGBA value which not generate error
            // We can safely do this as source format is not important when source data is NULL
            if( isCompressedInternalFormat( sourceFormat ) )
                sourceFormat = GL_RGBA;

            extensions->glTexImage3D( GL_TEXTURE_2D_ARRAY_EXT, 0, _internalFormat,
                _textureWidth, _textureHeight, _textureDepth, _borderWidth,
                sourceFormat, _sourceType ? _sourceType : GL_UNSIGNED_BYTE,
                0);
        }

        // For certain we have to manually allocate memory for mipmaps if images are compressed
        // if not allocated OpenGL will produce errors on mipmap upload.
        // I have not tested if this is neccessary for plain texture formats but
        // common sense suggests its required as well.
        if( _min_filter != LINEAR && _min_filter != NEAREST && firstImage->isMipmap() )
            allocateMipmap( state );

        // now for each layer we upload it into the memory
        for (GLsizei n=0; n<_textureDepth; n++)
        {
            // if image is valid then upload it to the texture memory
            osg::Image* image = _images[n].get();
            if (image)
            {
                // now load the image data into the memory, this will also check if image do have valid properties
                applyTexImage2DArray_subload(state, image, _textureWidth, _textureHeight, n, _internalFormat, _numMipmapLevels);
                getModifiedCount(n,contextID) = image->getModifiedCount();
            }
        }

        const Texture::Extensions* texExtensions = Texture::getExtensions(contextID,true);

        // source images have no mipmamps but we could generate them...
        if( _min_filter != LINEAR && _min_filter != NEAREST && !firstImage->isMipmap() &&
            _useHardwareMipMapGeneration && texExtensions->isGenerateMipMapSupported() )
        {
            _numMipmapLevels = osg::Image::computeNumberOfMipmapLevels( _textureWidth, _textureHeight );
            generateMipmap( state );
        }

        textureObject->setAllocated(_numMipmapLevels,_internalFormat,_textureWidth,_textureHeight,_textureDepth,0);

        // unref image data?
        if (isSafeToUnrefImageData(state))
        {
            // Drop STATIC layer images now that they live on the GPU.
            SparseTexture2DArray* non_const_this = const_cast<SparseTexture2DArray*>(this);
            for (int n=0; n<_textureDepth; n++)
            {
                if (_images[n].valid() && _images[n]->getDataVariance()==STATIC)
                {
                    non_const_this->_images[n] = NULL;
                }
            }
        }
    }
    // No images present, but dimensions are set. So create empty texture
    else if ( (_textureWidth > 0) && (_textureHeight > 0) && (_textureDepth > 0) && (_internalFormat!=0) )
    {
        // generate texture
        _textureObjectBuffer[contextID] = textureObject = generateTextureObject(
            this, contextID, GL_TEXTURE_2D_ARRAY_EXT,_numMipmapLevels,_internalFormat,_textureWidth,_textureHeight,_textureDepth,0);

        textureObject->bind();
        applyTexParameters(GL_TEXTURE_2D_ARRAY_EXT,state);

        extensions->glTexImage3D( GL_TEXTURE_2D_ARRAY_EXT, 0, _internalFormat,
            _textureWidth, _textureHeight, _textureDepth,
            _borderWidth,
            _sourceFormat ? _sourceFormat : _internalFormat,
            _sourceType ? _sourceType : GL_UNSIGNED_BYTE,
            0);
    }
    // nothing before, so just unbind the texture target
    else
    {
        glBindTexture( GL_TEXTURE_2D_ARRAY_EXT, 0 );
    }

    // if texture object is now valid and we have to allocate mipmap levels, then
    if (textureObject != 0 && _texMipmapGenerationDirtyList[contextID])
    {
        generateMipmap(state);
    }
}
// replaces the same func in the superclass
// Uploads one layer (`indepth`) of `image` into the already-allocated
// 2D-array texture via glTexSubImage3D / glCompressedTexSubImage3D,
// handling the uncompressed, compressed, and mipmapped variants.
// `numMipmapLevels` is written back with the level count actually used.
// Returns early (with a warning) when the layer index exceeds driver
// limits or the image dimensions/format no longer match the allocation.
void
SparseTexture2DArray::applyTexImage2DArray_subload(osg::State& state, osg::Image* image,
                                                   GLsizei inwidth, GLsizei inheight, GLsizei indepth,
                                                   GLint inInternalFormat, GLsizei& numMipmapLevels) const
{
    //// if we don't have a valid image we can't create a texture!
    //if (!imagesValid())
    //    return;

    // get the contextID (user defined ID of 0 upwards) for the
    // current OpenGL context.
    const unsigned int contextID = state.getContextID();

    const Extensions* extensions = getExtensions(contextID,true);
    const Texture::Extensions* texExtensions = Texture::getExtensions(contextID,true);
    GLenum target = GL_TEXTURE_2D_ARRAY_EXT;

    // compute the internal texture format, this set the _internalFormat to an appropriate value.
    computeInternalFormat();

    // select the internalFormat required for the texture.
    // bool compressed = isCompressedInternalFormat(_internalFormat);
    bool compressed_image = isCompressedInternalFormat((GLenum)image->getPixelFormat());

    // if the required layer is exceeds the maximum allowed layer sizes
    if (indepth > extensions->maxLayerCount())
    {
        // we give a warning and do nothing
        OSG_WARN<<"Warning: Texture2DArray::applyTexImage2DArray_subload(..) the given layer number exceeds the maximum number of supported layers."<<std::endl;
        return;
    }

    //Rescale if resize hint is set or NPOT not supported or dimensions exceed max size
    if( _resizeNonPowerOfTwoHint || !texExtensions->isNonPowerOfTwoTextureSupported(_min_filter)
        || inwidth > extensions->max2DSize()
        || inheight > extensions->max2DSize())
        image->ensureValidSizeForTexturing(extensions->max2DSize());

    // image size or format has changed, this is not allowed, hence return
    if (image->s()!=inwidth ||
        image->t()!=inheight ||
        image->getInternalTextureFormat()!=inInternalFormat )
    {
        OSG_WARN<<"Warning: Texture2DArray::applyTexImage2DArray_subload(..) given image do have wrong dimension or internal format."<<std::endl;
        return;
    }

    // honor the image's row packing for the upload
    glPixelStorei(GL_UNPACK_ALIGNMENT,image->getPacking());

    bool useHardwareMipmapGeneration =
        !image->isMipmap() && _useHardwareMipMapGeneration && texExtensions->isGenerateMipMapSupported();

    // if no special mipmapping is required, then
    if( _min_filter == LINEAR || _min_filter == NEAREST || useHardwareMipmapGeneration )
    {
        if( _min_filter == LINEAR || _min_filter == NEAREST )
            numMipmapLevels = 1;
        else //Hardware Mipmap Generation
            numMipmapLevels = image->getNumMipmapLevels();

        // upload non-compressed image
        if ( !compressed_image )
        {
            extensions->glTexSubImage3D( target, 0,
                0, 0, indepth,
                inwidth, inheight, 1,
                (GLenum)image->getPixelFormat(),
                (GLenum)image->getDataType(),
                image->data() );
        }
        // if we support compression and image is compressed, then
        else if (extensions->isCompressedTexImage3DSupported())
        {
            // OSG_WARN<<"glCompressedTexImage3D "<<inwidth<<", "<<inheight<<", "<<indepth<<std::endl;

            GLint blockSize, size;
            getCompressedSize(_internalFormat, inwidth, inheight, 1, blockSize,size);

            extensions->glCompressedTexSubImage3D(target, 0,
                0, 0, indepth,
                inwidth, inheight, 1,
                (GLenum)image->getPixelFormat(),
                size,
                image->data());
        }

        // we want to use mipmapping, so enable it
    }else
    {
        // image does not provide mipmaps, so we have to create them
        if( !image->isMipmap() )
        {
            numMipmapLevels = 1;
            OSG_WARN<<"Warning: Texture2DArray::applyTexImage2DArray_subload(..) mipmap layer not passed, and auto mipmap generation turned off or not available. Check texture's min/mag filters & hardware mipmap generation."<<std::endl;

            // the image object does provide mipmaps, so upload the in the certain levels of a layer
        }else
        {
            numMipmapLevels = image->getNumMipmapLevels();

            // walk the mip chain, halving dimensions per level (clamped to 1)
            int width  = image->s();
            int height = image->t();

            if( !compressed_image )
            {
                for( GLsizei k = 0 ; k < numMipmapLevels && (width || height ) ;k++)
                {
                    if (width == 0)
                        width = 1;
                    if (height == 0)
                        height = 1;

                    extensions->glTexSubImage3D( target, k, 0, 0, indepth,
                        width, height, 1,
                        (GLenum)image->getPixelFormat(),
                        (GLenum)image->getDataType(),
                        image->getMipmapData(k));

                    width >>= 1;
                    height >>= 1;
                }
            }
            else if (extensions->isCompressedTexImage3DSupported())
            {
                GLint blockSize,size;
                for( GLsizei k = 0 ; k < numMipmapLevels && (width || height) ;k++)
                {
                    if (width == 0)
                        width = 1;
                    if (height == 0)
                        height = 1;

                    // compressed size must be recomputed per level
                    getCompressedSize(image->getInternalTextureFormat(), width, height, 1, blockSize,size);

                    // state.checkGLErrors("before extensions->glCompressedTexSubImage3D(");

                    extensions->glCompressedTexSubImage3D(target, k, 0, 0, indepth,
                        width, height, 1,
                        (GLenum)image->getPixelFormat(),
                        size,
                        image->getMipmapData(k));

                    // state.checkGLErrors("after extensions->glCompressedTexSubImage3D(");

                    width >>= 1;
                    height >>= 1;
                }
            }
        }
    }
}
#endif // OSG_VERSION_GREATER_OR_EQUAL( 2, 9, 8 )
| airwzz999/osgearth-for-android | src/osgEarth/SparseTexture2DArray.cpp | C++ | lgpl-3.0 | 16,225 |
/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2017 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/
/*! \file
\ingroup CCTRIPLES
\brief Enter brief description of file here
*/
#include <cstdio>
#include <cstdlib>
#include "psi4/libciomr/libciomr.h"
#include "psi4/libpsi4util/exception.h"
#include "psi4/psifiles.h"
namespace psi { namespace cctriples {
void cache_abcd_rhf(int **cachelist);
void cache_iabc_rhf(int **cachelist);
void cache_ijab_rhf(int **cachelist);
void cache_iajb_rhf(int **cachelist);
void cache_ijka_rhf(int **cachelist);
void cache_ijkl_rhf(int **cachelist);
void cache_abcd_uhf(int **cachelist);
void cache_iabc_uhf(int **cachelist);
void cache_ijab_uhf(int **cachelist);
void cache_iajb_uhf(int **cachelist);
void cache_ijka_uhf(int **cachelist);
void cache_ijkl_uhf(int **cachelist);
int **cacheprep_uhf(int level, int *cachefiles)
{
    /* Build the DPD cache configuration for UHF references.
    ** cachefiles[] flags the CC files whose entries may be cached; the
    ** returned 32x32 matrix flags cacheable DPD pair-index patterns.
    ** Higher cache levels are strict supersets of lower ones. */

    /* The listing of CC files whose entries may be cached */
    cachefiles[PSIF_CC_AINTS] = 1;
    cachefiles[PSIF_CC_CINTS] = 1;
    cachefiles[PSIF_CC_DINTS] = 1;
    cachefiles[PSIF_CC_EINTS] = 1;
    cachefiles[PSIF_CC_DENOM] = 1;
    cachefiles[PSIF_CC_TAMPS] = 1;
    cachefiles[PSIF_CC_LAMPS] = 1;
    cachefiles[PSIF_CC_HBAR]  = 1;

    /* The listing of DPD patterns which may be cached */
    int **cachelist = init_int_matrix(32,32);

    /* Cascade: each level adds its patterns on top of the level below. */
    switch (level) {
    case 0:
        break;
    case 4:
        /* vvvv */
        cache_abcd_uhf(cachelist);
        /* fall through */
    case 3:
        /* ovvv */
        cache_iabc_uhf(cachelist);
        /* fall through */
    case 2:
        /* oovv and ovov */
        cache_ijab_uhf(cachelist);
        cache_iajb_uhf(cachelist);
        /* fall through */
    case 1:
        /* oooo and ooov */
        cache_ijkl_uhf(cachelist);
        cache_ijka_uhf(cachelist);
        break;
    default:
        throw PsiException("CCTRIPLES error", __FILE__, __LINE__);
    }

    return cachelist;
}
int **cacheprep_rhf(int level, int *cachefiles)
{
    /* Build the DPD cache configuration for RHF references.
    ** cachefiles[] flags the CC files whose entries may be cached; the
    ** returned 12x12 matrix flags cacheable DPD pair-index patterns.
    ** Higher cache levels are strict supersets of lower ones. */

    /* The listing of CC files whose entries may be cached */
    cachefiles[PSIF_CC_AINTS] = 1;
    cachefiles[PSIF_CC_CINTS] = 1;
    cachefiles[PSIF_CC_DINTS] = 1;
    cachefiles[PSIF_CC_EINTS] = 1;
    cachefiles[PSIF_CC_DENOM] = 1;
    cachefiles[PSIF_CC_TAMPS] = 1;
    cachefiles[PSIF_CC_LAMPS] = 1;
    cachefiles[PSIF_CC_HBAR]  = 1;

    /* The listing of DPD patterns which may be cached */
    int **cachelist = init_int_matrix(12,12);

    /* Cascade: each level adds its patterns on top of the level below. */
    switch (level) {
    case 0:
        break;
    case 4:
        /* vvvv */
        cache_abcd_rhf(cachelist);
        /* fall through */
    case 3:
        /* ovvv */
        cache_iabc_rhf(cachelist);
        /* fall through */
    case 2:
        /* oovv and ovov */
        cache_ijab_rhf(cachelist);
        cache_iajb_rhf(cachelist);
        /* fall through */
    case 1:
        /* oooo and ooov */
        cache_ijkl_rhf(cachelist);
        cache_ijka_rhf(cachelist);
        break;
    default:
        throw PsiException("CCTRIPLES error", __FILE__,__LINE__);
    }

    return cachelist;
}
/* Flag the vv-vv DPD pair blocks as cacheable (UHF).
** Loops replace the original 54 hand-written assignments; the ranges
** reproduce them exactly. */
void cache_abcd_uhf(int **cachelist)
{
    /* <ab|cd> */
    for(int p=5; p<=9; p++)
        for(int q=5; q<=9; q++)
            cachelist[p][q] = 1;

    /* <AB|CD> */
    for(int p=15; p<=19; p++)
        for(int q=15; q<=19; q++)
            cachelist[p][q] = 1;

    /* <Ab|Cd> */
    for(int p=28; p<=29; p++)
        for(int q=28; q<=29; q++)
            cachelist[p][q] = 1;
}
/* Flag the vv-vv DPD pair block as cacheable (RHF).
** Loop replaces 25 hand-written assignments with identical effect. */
void cache_abcd_rhf(int **cachelist)
{
    /* <ab|cd> */
    for(int p=5; p<=9; p++)
        for(int q=5; q<=9; q++)
            cachelist[p][q] = 1;
}
/* Flag the ov-vv and vv-ov DPD pair blocks as cacheable (RHF).
** Loops replace the original hand-written assignments with identical effect. */
void cache_iabc_rhf(int **cachelist)
{
    /* <ia|bc> */
    for(int p=10; p<=11; p++)
        for(int q=5; q<=9; q++)
            cachelist[p][q] = 1;

    /* <ab|ci> */
    for(int p=5; p<=9; p++)
        for(int q=10; q<=11; q++)
            cachelist[p][q] = 1;
}
/* Flag the ov-vv and vv-ov DPD pair blocks as cacheable (UHF).
** Loops replace the original hand-written assignments with identical effect. */
void cache_iabc_uhf(int **cachelist)
{
    /* <IA|BC> */
    for(int p=20; p<=21; p++)
        for(int q=5; q<=9; q++)
            cachelist[p][q] = 1;

    /* <AB|CI> */
    for(int p=5; p<=9; p++)
        for(int q=20; q<=21; q++)
            cachelist[p][q] = 1;

    /* <ia|bc> */
    for(int p=30; p<=31; p++)
        for(int q=15; q<=19; q++)
            cachelist[p][q] = 1;

    /* <ab|ci> */
    for(int p=15; p<=19; p++)
        for(int q=30; q<=31; q++)
            cachelist[p][q] = 1;

    /* <Ia|Bc> */
    for(int p=24; p<=25; p++)
        for(int q=28; q<=29; q++)
            cachelist[p][q] = 1;

    /* <Ab|Ci> */
    for(int p=28; p<=29; p++)
        for(int q=24; q<=25; q++)
            cachelist[p][q] = 1;
}
/* Flag the oo-vv and vv-oo DPD pair blocks as cacheable (RHF).
** Loops replace the original 50 hand-written assignments with identical effect. */
void cache_ijab_rhf(int **cachelist)
{
    /* <ij|ab> */
    for(int p=0; p<=4; p++)
        for(int q=5; q<=9; q++)
            cachelist[p][q] = 1;

    /* <ab|ij> */
    for(int p=5; p<=9; p++)
        for(int q=0; q<=4; q++)
            cachelist[p][q] = 1;
}
/* Flag the oo-vv and vv-oo DPD pair blocks as cacheable (UHF).
** Loops replace the original 108 hand-written assignments with identical effect. */
void cache_ijab_uhf(int **cachelist)
{
    /* <IJ|AB> */
    for(int p=0; p<=4; p++)
        for(int q=5; q<=9; q++)
            cachelist[p][q] = 1;

    /* <AB|IJ> */
    for(int p=5; p<=9; p++)
        for(int q=0; q<=4; q++)
            cachelist[p][q] = 1;

    /* <ij|ab> */
    for(int p=10; p<=14; p++)
        for(int q=15; q<=19; q++)
            cachelist[p][q] = 1;

    /* <ab|ij> */
    for(int p=15; p<=19; p++)
        for(int q=10; q<=14; q++)
            cachelist[p][q] = 1;

    /* <Ij|Ab> */
    for(int p=22; p<=23; p++)
        for(int q=28; q<=29; q++)
            cachelist[p][q] = 1;

    /* <Ab|Ij> */
    for(int p=28; p<=29; p++)
        for(int q=22; q<=23; q++)
            cachelist[p][q] = 1;
}
/* Flag the ov-ov DPD pair block as cacheable (RHF). */
void cache_iajb_rhf(int **cachelist)
{
    /* <ia|jb> */
    for(int p=10; p<=11; p++)
        for(int q=10; q<=11; q++)
            cachelist[p][q] = 1;
}
/* Flag the ov-ov DPD pair blocks as cacheable (UHF).
** Loops replace the original hand-written assignments with identical effect. */
void cache_iajb_uhf(int **cachelist)
{
    /* <IA|JB> */
    for(int p=20; p<=21; p++)
        for(int q=20; q<=21; q++)
            cachelist[p][q] = 1;

    /* <ia|jb> */
    for(int p=30; p<=31; p++)
        for(int q=30; q<=31; q++)
            cachelist[p][q] = 1;

    /* <Ia|Jb> */
    for(int p=24; p<=25; p++)
        for(int q=24; q<=25; q++)
            cachelist[p][q] = 1;
}
/* Flag the oo-ov and ov-oo DPD pair blocks as cacheable (RHF).
** Loops replace the original hand-written assignments with identical effect. */
void cache_ijka_rhf(int **cachelist)
{
    /* <ij|ka> */
    for(int p=0; p<=4; p++)
        for(int q=10; q<=11; q++)
            cachelist[p][q] = 1;

    /* <ia|jk> */
    for(int p=10; p<=11; p++)
        for(int q=0; q<=4; q++)
            cachelist[p][q] = 1;
}
/* Flag the oo-ov and ov-oo DPD pair blocks as cacheable (UHF).
** Loops replace the original hand-written assignments with identical effect. */
void cache_ijka_uhf(int **cachelist)
{
    /* <IJ|KA> */
    for(int p=0; p<=4; p++)
        for(int q=20; q<=21; q++)
            cachelist[p][q] = 1;

    /* <IA|JK> */
    for(int p=20; p<=21; p++)
        for(int q=0; q<=4; q++)
            cachelist[p][q] = 1;

    /* <ij|ka> */
    for(int p=10; p<=14; p++)
        for(int q=30; q<=31; q++)
            cachelist[p][q] = 1;

    /* <ia|jk> */
    for(int p=30; p<=31; p++)
        for(int q=10; q<=14; q++)
            cachelist[p][q] = 1;

    /* <Ij|Ka> */
    for(int p=22; p<=23; p++)
        for(int q=24; q<=25; q++)
            cachelist[p][q] = 1;

    /* <Ka|Ij> */
    for(int p=24; p<=25; p++)
        for(int q=22; q<=23; q++)
            cachelist[p][q] = 1;
}
/* Flag the oo-oo DPD pair block as cacheable (RHF).
** Loop replaces 25 hand-written assignments with identical effect. */
void cache_ijkl_rhf(int **cachelist)
{
    /* <ij|kl> */
    for(int p=0; p<=4; p++)
        for(int q=0; q<=4; q++)
            cachelist[p][q] = 1;
}
/* Flag the oo-oo DPD pair blocks as cacheable (UHF).
** Loops replace the original 54 hand-written assignments with identical effect. */
void cache_ijkl_uhf(int **cachelist)
{
    /* <IJ|KL> */
    for(int p=0; p<=4; p++)
        for(int q=0; q<=4; q++)
            cachelist[p][q] = 1;

    /* <ij|kl> */
    for(int p=10; p<=14; p++)
        for(int q=10; q<=14; q++)
            cachelist[p][q] = 1;

    /* <Ij|Kl> */
    for(int p=22; p<=23; p++)
        for(int q=22; q<=23; q++)
            cachelist[p][q] = 1;
}
/* Release the cache-flag matrix built for the UHF case.
 * Delegates to free_int_matrix (defined elsewhere in the project). */
void cachedone_uhf(int **cachelist)
{
    free_int_matrix(cachelist);
}
/* Release the cache-flag matrix built for the RHF case.
 * Delegates to free_int_matrix (defined elsewhere in the project). */
void cachedone_rhf(int **cachelist)
{
    free_int_matrix(cachelist);
}
}} // namespace psi::CCTRIPLES
| rmcgibbo/psi4public | psi4/src/psi4/cctriples/cache.cc | C++ | lgpl-3.0 | 17,267 |
<?php
class InserFeaturesInProjectMetadata extends AbstractMatecatMigration
{
    /**
     * Inserts a 'features' row in project_metadata for each project,
     * aggregating the feature codes read from owner_features.
     *
     * Intended to operate only on projects created after the owner feature was
     * added to the user, so that we avoid strange behaviour due to
     * inconsistencies with old projects receiving features that were not
     * enabled when the project was created.
     *
     * NOTE(review): the WHERE clause selects f.create_date > projects.create_date,
     * i.e. features added AFTER the project was created — the opposite of the
     * intent stated above. Confirm which direction is correct before reuse.
     */
    public $sql_up = <<<EOF
INSERT INTO project_metadata ( value, `key`, id_project )
SELECT GROUP_CONCAT( f.feature_code ), 'features', projects.id
FROM projects
JOIN users ON users.email = projects.id_customer
JOIN owner_features f ON f.uid = users.uid
WHERE
f.create_date > projects.create_date
GROUP BY projects.id ;
EOF;

    /**
     * Rollback: removes every 'features' row from project_metadata.
     */
    public $sql_down = <<<EOF
DELETE FROM project_metadata WHERE `key` = 'features' ;
EOF;
}
| Ostico/MateCat | migrations/20170113150724_inser_features_in_project_metadata.php | PHP | lgpl-3.0 | 1,022 |
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from androguard.decompiler.dad.basic_blocks import (Condition,
ShortCircuitBlock,
LoopBlock)
from androguard.decompiler.dad.graph import Graph
from androguard.decompiler.dad.node import Interval
from androguard.decompiler.dad.util import common_dom
logger = logging.getLogger('dad.control_flow')
def intervals(graph):
    '''
    Compute the intervals of the graph.

    An interval is the maximal single-entry subgraph headed by a given node.
    Returns
        interval_graph: a Graph whose nodes are the intervals of `graph`
        interv_heads: a dict mapping each header node to its Interval
    '''
    interval_graph = Graph()  # graph of intervals
    heads = set([graph.entry])  # worklist of interval header nodes
    interv_heads = {}  # interv_heads[i] = interval of header i
    processed = dict([(i, False) for i in graph])
    edges = {}  # Interval -> [header nodes of successor intervals]
    while heads:
        head = heads.pop()
        if not processed[head]:
            processed[head] = True
            interv_heads[head] = Interval(head)
            # Check if there is a node which has all its predecessors in the
            # current interval. If there is, add that node to the interval and
            # repeat until all the possible nodes have been added.
            # (Interval.add_node returns True when the node was newly added.)
            change = True
            while change:
                change = False
                for node in graph.rpo[1:]:
                    if all(p in interv_heads[head] for p in graph.preds(node)):
                        change |= interv_heads[head].add_node(node)
            # At this stage, a node which is not in the interval, but has one
            # of its predecessors in it, is the header of another interval. So
            # we add all such nodes to the header worklist.
            for node in graph:
                if node not in interv_heads[head] and node not in heads:
                    if any(p in interv_heads[head] for p in graph.preds(node)):
                        edges.setdefault(interv_heads[head], []).append(node)
                        heads.add(node)
            interval_graph.add_node(interv_heads[head])
            interv_heads[head].compute_end(graph)
    # Edges is a mapping of 'Interval -> [header nodes of interval successors]'
    # (`heads` is rebound here; the worklist above is already exhausted).
    for interval, heads in edges.items():
        for head in heads:
            interval_graph.add_edge(interval, interv_heads[head])
    interval_graph.entry = graph.entry.interval
    if graph.exit:
        interval_graph.exit = graph.exit.interval
    return interval_graph, interv_heads
def derived_sequence(graph):
    '''
    Compute the derived sequence of the graph G.

    The intervals of G are collapsed into nodes, intervals of these nodes are
    built, and the process is repeated iteratively until we obtain a single
    node (if the graph is not irreducible).

    Returns
        deriv_seq: the list of successively derived graphs (G first)
        deriv_interv: the list of interval dicts, one per derivation step

    Fix: removed a dead `if 0:` debug block that drew each derived graph to
    'tmp/dad/intervals/' (never executed, and referenced a developer-local
    path).
    '''
    deriv_seq = [graph]
    deriv_interv = []
    single_node = False
    while not single_node:
        interv_graph, interv_heads = intervals(graph)
        deriv_interv.append(interv_heads)
        single_node = len(interv_graph) == 1
        if not single_node:
            deriv_seq.append(interv_graph)
        # Continue the derivation from the graph of intervals.
        graph = interv_graph
        graph.compute_rpo()
    return deriv_seq, deriv_interv
def mark_loop_rec(graph, node, s_num, e_num, interval, nodes_in_loop):
    """Walk predecessors backwards from `node`, appending to nodes_in_loop
    every unvisited node whose number lies in (s_num, e_num] and which
    belongs to `interval`. Mutates nodes_in_loop in place.
    """
    if node in nodes_in_loop:
        return
    nodes_in_loop.append(node)
    # Only follow predecessors that are inside the candidate loop body.
    candidates = (pred for pred in graph.preds(node)
                  if s_num < pred.num <= e_num and pred in interval)
    for pred in candidates:
        mark_loop_rec(graph, pred, s_num, e_num, interval, nodes_in_loop)
def mark_loop(graph, start, end, interval):
    """Mark the loop whose header interval starts at `start` and whose latch
    (back-edge source) is in `end`.

    Collects the loop body by walking predecessors from the latch back to the
    head (see mark_loop_rec), flags the head as a loop start and records its
    latch. Returns the list of nodes in the loop.
    """
    logger.debug('MARKLOOP : %s END : %s', start, end)
    head = start.get_head()
    latch = end.get_end()
    nodes_in_loop = [head]
    mark_loop_rec(graph, latch, head.num, latch.num, interval, nodes_in_loop)
    head.startloop = True
    head.latch = latch
    return nodes_in_loop
def loop_type(start, end, nodes_in_loop):
    """Classify the loop headed by `start` with latch `end`.

    Sets exactly one of the flags on start.looptype:
      - is_pretest:  the header condition can leave the loop (while-style)
      - is_posttest: only the latch condition can leave the loop (do-while)
      - is_endless:  neither header nor latch can leave the loop
    """
    head_is_cond = start.type.is_cond
    head_stays_inside = (head_is_cond
                         and start.true in nodes_in_loop
                         and start.false in nodes_in_loop)
    if head_is_cond and not head_stays_inside:
        # One of the header's branches exits the loop: pre-test loop.
        start.looptype.is_pretest = True
    elif end.type.is_cond:
        # Exit is decided at the latch: post-test loop.
        start.looptype.is_posttest = True
    else:
        # No conditional exit at either end.
        start.looptype.is_endless = True
def loop_follow(start, end, nodes_in_loop):
    """Compute the follow node of the loop: the first node executed once the
    loop exits.

    For pre-test loops it is the header branch that leaves the loop; for
    post-test loops the latch branch that leaves the loop. For endless loops,
    pick the lowest-numbered node reachable from a conditional inside the
    loop body (may remain None if no such exit exists). The result is stored
    in follow['loop'] of the header and of every node in the loop.
    """
    follow = None
    if start.looptype.is_pretest:
        if start.true in nodes_in_loop:
            follow = start.false
        else:
            follow = start.true
    elif start.looptype.is_posttest:
        if end.true in nodes_in_loop:
            follow = end.false
        else:
            follow = end.true
    else:
        # Endless loop: search all conditionals in the body for the exit
        # target with the smallest node number.
        num_next = float('inf')
        for node in nodes_in_loop:
            if node.type.is_cond:
                if (node.true.num < num_next
                        and node.true not in nodes_in_loop):
                    follow = node.true
                    num_next = follow.num
                elif (node.false.num < num_next
                        and node.false not in nodes_in_loop):
                    follow = node.false
                    num_next = follow.num
    start.follow['loop'] = follow
    for node in nodes_in_loop:
        node.follow['loop'] = follow
    logger.debug('Start of loop %s', start)
    logger.debug('Follow of loop: %s', start.follow['loop'])
def loop_struct(graphs_list, intervals_list):
    """Identify the loops of the original graph.

    Iterates over every graph of the derived sequence together with its
    interval dict; a back edge from a node to an interval header inside the
    same interval marks a loop, whose body is collected by mark_loop on the
    FIRST (original) graph. The union of body nodes is stored on the header.
    """
    first_graph = graphs_list[0]
    for i, graph in enumerate(graphs_list):
        interval = intervals_list[i]
        # Process headers in ascending node number for deterministic results.
        for head in sorted(interval.keys(), key=lambda x: x.num):
            loop_nodes = set()
            for node in graph.preds(head):
                # A predecessor in the same interval means a back edge.
                if node.interval is head.interval:
                    lnodes = mark_loop(first_graph, head, node, head.interval)
                    loop_nodes.update(lnodes)
            head.get_head().loop_nodes = loop_nodes
def if_struct(graph, idoms):
    """Determine the follow node of each 2-way conditional.

    The follow of a conditional is the highest-numbered node it immediately
    dominates that has more than one predecessor (where both branches join).
    Conditionals with no such node yet are kept in `unresolved` and inherit
    the follow of an enclosing conditional when one is found later.
    Returns the set of conditionals that remain unresolved.
    """
    unresolved = set()
    # post_order: inner conditionals are visited before outer ones.
    for node in graph.post_order():
        if node.type.is_cond:
            ldominates = []
            for n, idom in idoms.iteritems():
                if node is idom and len(graph.preds(n)) > 1:
                    ldominates.append(n)
            if len(ldominates) > 0:
                n = max(ldominates, key=lambda x: x.num)
                node.follow['if'] = n
                # Resolve pending conditionals nested between node and n.
                for x in unresolved.copy():
                    if node.num < x.num < n.num:
                        x.follow['if'] = n
                        unresolved.remove(x)
            else:
                unresolved.add(node)
    return unresolved
def switch_struct(graph, idoms):
    """Determine the follow node of each switch and order its cases.

    For every switch node, find the common dominator `m` of the switch and
    any successor not immediately dominated by it, then take the
    highest-numbered node dominated by `m` with multiple predecessors as the
    follow. Switches without a follow stay unresolved until one is found.
    """
    unresolved = set()
    for node in graph.post_order():
        if node.type.is_switch:
            m = node
            for suc in graph.sucs(node):
                # An abnormal entry into a case: widen to the common dominator.
                if idoms[suc] is not node:
                    m = common_dom(idoms, node, suc)
            ldominates = []
            for n, dom in idoms.iteritems():
                if m is dom and len(graph.preds(n)) > 1:
                    ldominates.append(n)
            if len(ldominates) > 0:
                n = max(ldominates, key=lambda x: x.num)
                node.follow['switch'] = n
                # All pending switches share this follow.
                for x in unresolved:
                    x.follow['switch'] = n
                unresolved = set()
            else:
                unresolved.add(node)
            node.order_cases()
def short_circuit_struct(graph, idom, node_map):
    """Collapse pairs of conditional nodes that form short-circuit
    expressions (&&, ||) into single ShortCircuitBlock nodes.

    Mutates `graph` and `idom` in place; records old-node -> merged-node
    substitutions in `node_map` so callers can remap their references.
    Repeats until no more pairs can be merged.
    """
    def MergeNodes(node1, node2, is_and, is_not):
        # Replace node1/node2 with a single ShortCircuitBlock carrying the
        # combined condition, rewiring all external edges to the new node.
        lpreds = set()
        ldests = set()
        for node in (node1, node2):
            lpreds.update(graph.preds(node))
            ldests.update(graph.sucs(node))
            graph.remove_node(node)
            done.add(node)
        # Edges internal to the pair disappear with the merge.
        lpreds.difference_update((node1, node2))
        ldests.difference_update((node1, node2))
        entry = graph.entry in (node1, node2)
        new_name = '%s+%s' % (node1.name, node2.name)
        condition = Condition(node1, node2, is_and, is_not)
        new_node = ShortCircuitBlock(new_name, condition)
        # Redirect earlier substitutions that pointed at either merged node.
        for old_n, new_n in node_map.iteritems():
            if new_n in (node1, node2):
                node_map[old_n] = new_node
        node_map[node1] = new_node
        node_map[node2] = new_node
        idom[new_node] = idom[node1]
        idom.pop(node1)
        idom.pop(node2)
        new_node.copy_from(node1)
        graph.add_node(new_node)
        for pred in lpreds:
            pred.update_attribute_with(node_map)
            graph.add_edge(node_map.get(pred, pred), new_node)
        for dest in ldests:
            graph.add_edge(new_node, node_map.get(dest, dest))
        if entry:
            graph.entry = new_node
        return new_node
    change = True
    while change:
        change = False
        done = set()
        for node in graph.post_order():
            if node.type.is_cond and node not in done:
                then = node.true
                els = node.false
                # Self-loops cannot be merged.
                if node in (then, els):
                    continue
                # The second conditional must have no other entry point.
                if then.type.is_cond and len(graph.preds(then)) == 1:
                    if then.false is els:  # node && t
                        change = True
                        merged_node = MergeNodes(node, then, True, False)
                        merged_node.true = then.true
                        merged_node.false = els
                    elif then.true is els:  # !node || t
                        change = True
                        merged_node = MergeNodes(node, then, False, True)
                        merged_node.true = els
                        merged_node.false = then.false
                elif els.type.is_cond and len(graph.preds(els)) == 1:
                    if els.false is then:  # !node && e
                        change = True
                        merged_node = MergeNodes(node, els, True, True)
                        merged_node.true = els.true
                        merged_node.false = then
                    elif els.true is then:  # node || e
                        change = True
                        merged_node = MergeNodes(node, els, False, False)
                        merged_node.true = then
                        merged_node.false = els.false
                done.add(node)
        if change:
            graph.reset_rpo()
def while_block_struct(graph, node_map):
    """Wrap every loop-header node in a LoopBlock.

    Each node flagged with startloop is replaced in the graph by a LoopBlock
    wrapping it; edges and (for conditionals) branch targets are carried
    over, and the substitution is recorded in `node_map`.
    """
    change = False
    # Iterate over a copy since nodes are added/removed during the walk.
    for node in graph.rpo[:]:
        if node.startloop:
            change = True
            new_node = LoopBlock(node.name, node)
            node_map[node] = new_node
            new_node.copy_from(node)
            entry = node is graph.entry
            lpreds = graph.preds(node)
            lsuccs = graph.sucs(node)
            for pred in lpreds:
                graph.add_edge(node_map.get(pred, pred), new_node)
            for suc in lsuccs:
                graph.add_edge(new_node, node_map.get(suc, suc))
            if entry:
                graph.entry = new_node
            if node.type.is_cond:
                new_node.true = node.true
                new_node.false = node.false
            graph.add_node(new_node)
            graph.remove_node(node)
    if change:
        graph.reset_rpo()
def update_dom(idoms, node_map):
    """Remap every immediate dominator through node_map (identity when the
    dominator has no replacement)."""
    for node, dominator in idoms.iteritems():
        idoms[node] = node_map.get(dominator, dominator)
def identify_structures(graph, idoms):
    """Run the full control-flow structuring pipeline on `graph`.

    Order matters: switches are resolved first, loops are found on the
    derived sequence, short-circuit conditions are collapsed (updating the
    dominator map), then if-follows are computed and loop headers wrapped in
    LoopBlocks. Finally loop types/follows are assigned and any conditionals
    left without an if-follow fall back to the nearest loop/switch follow.
    """
    Gi, Li = derived_sequence(graph)
    switch_struct(graph, idoms)
    loop_struct(Gi, Li)
    node_map = {}
    short_circuit_struct(graph, idoms, node_map)
    update_dom(idoms, node_map)
    if_unresolved = if_struct(graph, idoms)
    while_block_struct(graph, node_map)
    update_dom(idoms, node_map)
    loop_starts = []
    for node in graph.rpo:
        # Remap references to nodes replaced by merged/wrapped blocks.
        node.update_attribute_with(node_map)
        if node.startloop:
            loop_starts.append(node)
    for node in loop_starts:
        loop_type(node, node.latch, node.loop_nodes)
        loop_follow(node, node.latch, node.loop_nodes)
    # Conditionals with no if-follow inherit the closest enclosing
    # loop/switch follow (smallest node number wins).
    for node in if_unresolved:
        follows = [n for n in (node.follow['loop'],
                               node.follow['switch']) if n]
        if len(follows) >= 1:
            follow = min(follows, key=lambda x: x.num)
            node.follow['if'] = follow
| flamableconcrete/androguard | androguard/decompiler/dad/control_flow.py | Python | lgpl-3.0 | 13,166 |
/*******************************************************************************************************
This file was created/modified by Hervé PHILIPPE alchiweb[at]gmail.com
********************************************************************************************************
Copyright (C) 2013 Hervé PHILIPPE, Web: http://xmadevlab.net
Project based on the files auto generated with the tool "WebIDLParser"
Copyright (C) 2013 Sebastian Loncar, Web: http://loncar.de
Copyright (C) 2009 Apple Inc. All Rights Reserved.
MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************************************/
using System;
using System.ComponentModel;
namespace SharpKit.DotNet.Html
{
using SharpKit.JavaScript;
using SharpKit.DotNet.JavaScript;
using SharpKit.DotNet.Html.fileapi;
using SharpKit.DotNet.Html.html.shadow;
using SharpKit.DotNet.Html.html.track;
using SharpKit.DotNet.Html.inspector;
using SharpKit.DotNet.Html.loader.appcache;
using SharpKit.DotNet.Html.battery;
using SharpKit.DotNet.Html.gamepad;
using SharpKit.DotNet.Html.geolocation;
using SharpKit.DotNet.Html.indexeddb;
using SharpKit.DotNet.Html.intents;
using SharpKit.DotNet.Html.mediasource;
using SharpKit.DotNet.Html.mediastream;
using SharpKit.DotNet.Html.networkinfo;
using SharpKit.DotNet.Html.notifications;
using SharpKit.DotNet.Html.proximity;
using SharpKit.DotNet.Html.quota;
using SharpKit.DotNet.Html.speech;
using SharpKit.DotNet.Html.webaudio;
using SharpKit.DotNet.Html.webdatabase;
using SharpKit.DotNet.Html.plugins;
using SharpKit.DotNet.Html.storage;
using SharpKit.DotNet.Html.svg;
using SharpKit.DotNet.Html.workers;
/// <summary>
/// DOM event type names for device-orientation events. Exported to
/// JavaScript as plain string values (JsEnum with ValuesAsNames).
/// </summary>
[JsType(JsMode.Json, Export = false)]
[JsEnum(ValuesAsNames = true)]
public enum DeviceOrientationEventType
{
    /// <summary>The "deviceorientation" DOM event.</summary>
    [JsField(Name = "deviceorientation")]
    DeviceOrientation,
    /// <summary>The "orientationchange" DOM event.</summary>
    [JsField(Name = "orientationchange")]
    OrientationChange
}
} | hultqvist/SharpKit-SDK | Defs/Xdk.Html/dom/DeviceOrientationEventType.cs | C# | lgpl-3.0 | 3,094 |
/**
* Copyright © 2002 Instituto Superior Técnico
*
* This file is part of FenixEdu Academic.
*
* FenixEdu Academic is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* FenixEdu Academic is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FenixEdu Academic. If not, see <http://www.gnu.org/licenses/>.
*/
package org.fenixedu.academic.domain.accessControl;
import java.util.stream.Stream;
import org.fenixedu.academic.domain.degreeStructure.ProgramConclusion;
import org.fenixedu.academic.domain.student.Student;
import org.fenixedu.academic.domain.student.registrationStates.RegistrationStateType;
import org.fenixedu.academic.util.Bundle;
import org.fenixedu.bennu.core.annotation.GroupOperator;
import org.fenixedu.bennu.core.domain.Bennu;
import org.fenixedu.bennu.core.domain.User;
import org.fenixedu.bennu.core.groups.GroupStrategy;
import org.fenixedu.bennu.core.i18n.BundleUtil;
import org.joda.time.DateTime;
@GroupOperator("allAlumni")
public class AllAlumniGroup extends GroupStrategy {

    private static final long serialVersionUID = -2926898164196025354L;

    @Override
    public String getPresentationName() {
        return BundleUtil.getString(Bundle.GROUP, "label.name.AllAlumniGroup");
    }

    /**
     * Returns true if any of the student registrations has a curriculum group
     * with a conclusion process associated of a program conclusion that provides alumni
     *
     * @param student the student to check
     * @return true when a conclusion-processed, alumni-providing group exists
     */
    private boolean isAlumni(Student student) {
        return student
                .getRegistrationsSet()
                .stream()
                .anyMatch(
                        registration -> ProgramConclusion
                                .conclusionsFor(registration)
                                .filter(ProgramConclusion::isAlumniProvider)
                                .anyMatch(
                                        conclusion -> conclusion.groupFor(registration).isPresent()
                                                && conclusion.groupFor(registration).get().isConclusionProcessed()));
    }

    /**
     * Single alumni predicate, previously duplicated between
     * {@link #getMembers()} and {@link #isMember(User)}: a student is an
     * alumnus when an Alumni record exists, a registration is concluded
     * (or its study plan is), or {@link #isAlumni(Student)} holds.
     */
    private boolean hasAlumniStatus(Student student) {
        return student.getAlumni() != null
                || student.hasAnyRegistrationInState(RegistrationStateType.CONCLUDED)
                || student.hasAnyRegistrationInState(RegistrationStateType.STUDYPLANCONCLUDED)
                || isAlumni(student);
    }

    @Override
    public Stream<User> getMembers() {
        return Bennu
                .getInstance()
                .getStudentsSet()
                .stream()
                .filter(this::hasAlumniStatus)
                .map(student -> student.getPerson().getUser());
    }

    @Override
    public Stream<User> getMembers(DateTime when) {
        // Membership is not tracked historically; always the current set.
        return getMembers();
    }

    @Override
    public boolean isMember(User user) {
        if (user == null || user.getPerson() == null) {
            return false;
        }
        Student student = user.getPerson().getStudent();
        return student != null && hasAlumniStatus(student);
    }

    @Override
    public boolean isMember(User user, DateTime when) {
        // Membership is not tracked historically; delegate to current state.
        return isMember(user);
    }
}
| gil-l/fenix | src/main/java/org/fenixedu/academic/domain/accessControl/AllAlumniGroup.java | Java | lgpl-3.0 | 3,863 |
/*
* Copyright 2010, 2011, 2012, 2013 mapsforge.org
* Copyright 2016 Andrey Novikov
* Copyright 2017 Gustl22
*
* This program is free software: you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.mapsforge.map.writer.model;
/**
* Represents an OSM entity which is defined by a tag/value pair. Each OSM entity is attributed with the zoom level on
* which it should appear first.
*/
/**
 * Represents an OSM entity which is defined by a tag/value pair. Each OSM entity is attributed with the zoom level on
 * which it should appear first.
 *
 * Instances are identified solely by their internal id: equals/hashCode
 * compare only the id field.
 */
public class OSMTag {
    private static final String KEY_VALUE_SEPARATOR = "=";

    /**
     * Convenience method that constructs a new OSMTag with a new id from another OSMTag.
     *
     * @param otherTag the OSMTag to copy
     * @param newID the new id
     * @return a newly constructed OSMTag with the attributes of otherTag
     */
    public static OSMTag fromOSMTag(OSMTag otherTag, short newID) {
        return new OSMTag(newID, otherTag.getKey(), otherTag.getValue(), otherTag.getZoomAppear(),
                otherTag.isRenderable(), otherTag.isForcePolygonLine(), otherTag.isLabelPosition());
    }

    /**
     * Convenience method for generating a string representation of a key/value pair.
     *
     * @param key the key of the tag
     * @param value the value of the tag
     * @return a string representation of the key/Value pair ("key=value")
     */
    public static String tagKey(String key, String value) {
        return key + KEY_VALUE_SEPARATOR + value;
    }

    private final boolean forcePolygonLine;
    private final short id;
    private final String key;
    private final boolean labelPosition;
    // TODO is the renderable attribute still needed?
    private final boolean renderable;
    private final String value;
    private final byte zoomAppear;

    /**
     * @param id the internal id of the tag
     * @param key the key of the tag
     * @param value the value of the tag
     * @param zoomAppear the minimum zoom level the tag appears first
     * @param renderable flag if the tag represents a renderable entity
     * @param forcePolygonLine flag if polygon line instead of area is forced with closed polygons
     * @param labelPosition flag if label position should be computed for polygons with such tag
     */
    public OSMTag(short id, String key, String value, byte zoomAppear, boolean renderable, boolean forcePolygonLine, boolean labelPosition) {
        super();
        this.id = id;
        this.key = key;
        this.value = value;
        this.zoomAppear = zoomAppear;
        this.renderable = renderable;
        this.forcePolygonLine = forcePolygonLine;
        this.labelPosition = labelPosition;
    }

    @Override
    public final boolean equals(Object obj) {
        // Equality is based on the internal id only, not on key/value.
        if (this == obj) {
            return true;
        } else if (!(obj instanceof OSMTag)) {
            return false;
        }
        OSMTag other = (OSMTag) obj;
        return this.id == other.id;
    }

    /**
     * @return the id
     */
    public final short getId() {
        return this.id;
    }

    /**
     * @return the key
     */
    public final String getKey() {
        return this.key;
    }

    /**
     * @return the value
     */
    public final String getValue() {
        return this.value;
    }

    /**
     * @return the zoomAppear
     */
    public final byte getZoomAppear() {
        return this.zoomAppear;
    }

    @Override
    public final int hashCode() {
        // Consistent with equals: derived from the id only.
        final int prime = 31;
        int result = 1;
        result = prime * result + this.id;
        return result;
    }

    /**
     * @return whether the tag represents a building
     */
    public final boolean isBuilding() {
        return this.key.equals("building");
    }

    /**
     * @return whether the tag represents a building part
     */
    public final boolean isBuildingPart() {
        return this.key.equals("building:part");
    }

    /**
     * @return whether the tag represents a coastline
     */
    public final boolean isCoastline() {
        return this.key.equals("natural") && this.value.equals("coastline");
    }

    /**
     * @return the forcePolygonLine
     */
    public final boolean isForcePolygonLine() {
        return this.forcePolygonLine;
    }

    /**
     * @return the labelPosition
     */
    public boolean isLabelPosition() {
        return this.labelPosition;
    }

    /**
     * @return the renderable
     */
    public final boolean isRenderable() {
        return this.renderable;
    }

    /**
     * @return the string representation of the OSMTag ("key=value")
     */
    public final String tagKey() {
        return this.key + KEY_VALUE_SEPARATOR + this.value;
    }

    @Override
    public final String toString() {
        // Note: forcePolygonLine is not included in this representation.
        return "OSMTag [id=" + this.id + ", key=" + this.key + ", value=" + this.value + ", zoomAppear="
                + this.zoomAppear + ", renderable=" + this.renderable + ", labelPosition=" + this.labelPosition + "]";
    }
}
| usrusr/mapsforge | mapsforge-map-writer/src/main/java/org/mapsforge/map/writer/model/OSMTag.java | Java | lgpl-3.0 | 5,490 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.repo.workflow.jbpm;
import java.util.Collection;
import java.util.Map;
import org.jbpm.graph.def.ActionHandler;
import org.jbpm.graph.exe.ExecutionContext;
import org.jbpm.graph.exe.Token;
import org.jbpm.taskmgmt.exe.TaskInstance;
import org.jbpm.taskmgmt.exe.TaskMgmtInstance;
/**
* Action Handler for ending child tokens / tasks
*
* @author davidc
*/
/**
 * Action Handler for ending child tokens / tasks.
 *
 * Recursively ends every active child token of the token that triggered
 * this action, and cancels any unfinished tasks attached to those tokens.
 *
 * @author davidc
 */
public class JoinEndForkedTokens implements ActionHandler
{
    private static final long serialVersionUID = 8679390550752208189L;

    /**
     * Constructor
     */
    public JoinEndForkedTokens()
    {
        super();
    }

    /**
     * {@inheritDoc}
     */
    public void execute(ExecutionContext executionContext)
    {
        Token token = executionContext.getToken();
        // getActiveChildren returns a raw/wildcard map, so elements are cast.
        Map<?, ?> childTokens = token.getActiveChildren();
        for (Object childToken : childTokens.values())
        {
            cancelToken(executionContext, (Token) childToken);
        }
    }

    /**
     * Cancel token: ends the token's subtree depth-first, then the token
     * itself, then any tasks still attached to it.
     *
     * @param executionContext ExecutionContext
     * @param token Token
     */
    protected void cancelToken(ExecutionContext executionContext, Token token)
    {
        // visit child tokens first so descendants end before their parent
        Map<?, ?> childTokens = token.getActiveChildren();
        for (Object childToken : childTokens.values())
        {
            cancelToken(executionContext, (Token) childToken);
        }
        // end token (false: do not verify parent termination)
        if (!token.hasEnded())
        {
            token.end(false);
        }
        // end any associated tasks
        cancelTokenTasks(executionContext, token);
    }

    /**
     * Cancel tasks associated with a token. Blocking/signalling flags are
     * cleared first so that cancelling does not block or signal the process.
     *
     * Fix: iterate the typed collection directly instead of looping over
     * Object and re-casting each element.
     *
     * @param executionContext ExecutionContext
     * @param token Token
     */
    protected void cancelTokenTasks(ExecutionContext executionContext, Token token)
    {
        TaskMgmtInstance tms = executionContext.getTaskMgmtInstance();
        Collection<TaskInstance> tasks = tms.getUnfinishedTasks(token);
        for (TaskInstance taskInstance : tasks)
        {
            if (taskInstance.isBlocking())
            {
                taskInstance.setBlocking(false);
            }
            if (taskInstance.isSignalling())
            {
                taskInstance.setSignalling(false);
            }
            if (!taskInstance.hasEnded())
            {
                taskInstance.cancel();
            }
        }
    }
}
| Alfresco/community-edition | projects/repository/source/java/org/alfresco/repo/workflow/jbpm/JoinEndForkedTokens.java | Java | lgpl-3.0 | 3,578 |
package org.molgenis.data.validation.meta;
import org.molgenis.data.AbstractRepositoryDecorator;
import org.molgenis.data.Repository;
import org.molgenis.data.meta.model.Attribute;
import org.molgenis.data.validation.meta.AttributeValidator.ValidationMode;
import java.util.stream.Stream;
import static java.util.Objects.requireNonNull;
public class AttributeRepositoryValidationDecorator extends AbstractRepositoryDecorator<Attribute>
{
private final Repository<Attribute> decoratedRepo;
private final AttributeValidator attributeValidator;
public AttributeRepositoryValidationDecorator(Repository<Attribute> decoratedRepo,
AttributeValidator attributeValidator)
{
this.decoratedRepo = requireNonNull(decoratedRepo);
this.attributeValidator = requireNonNull(attributeValidator);
}
@Override
protected Repository<Attribute> delegate()
{
return decoratedRepo;
}
@Override
public void update(Attribute attr)
{
attributeValidator.validate(attr, ValidationMode.UPDATE);
decoratedRepo.update(attr);
}
@Override
public void update(Stream<Attribute> attrs)
{
decoratedRepo.update(attrs.filter(attr ->
{
attributeValidator.validate(attr, ValidationMode.UPDATE);
return true;
}));
}
@Override
public void add(Attribute attr)
{
attributeValidator.validate(attr, ValidationMode.ADD);
decoratedRepo.add(attr);
}
@Override
public Integer add(Stream<Attribute> attrs)
{
return decoratedRepo.add(attrs.filter(attr ->
{
attributeValidator.validate(attr, ValidationMode.ADD);
return true;
}));
}
} | jjettenn/molgenis | molgenis-data-validation/src/main/java/org/molgenis/data/validation/meta/AttributeRepositoryValidationDecorator.java | Java | lgpl-3.0 | 1,556 |
package com.javabeast.teltonikia;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.annotation.Transient;
import java.io.Serializable;
import java.util.List;
// A decoded Teltonika tracker message: transport header plus the AVL
// packet header and its position/IO records. Lombok generates the
// accessors, builder and constructors.
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class TeltonikaMessage implements Serializable {

    private static final long serialVersionUID = -4557304960075040713L;

    // Persistence identifier (Spring Data @Id).
    @Id
    private Long id;

    // Transport-level UDP header; excluded from persistence (@Transient).
    @Transient
    private UDPChannelHeader udpChannelHeader;

    // Header of the AVL packet contained in this message.
    private AVLPacketHeader avlPacketHeader;

    // The AVL records carried by the packet.
    private List<AVLData> avlData;
}
| AJauffre/trackr | domain/src/main/java/com/javabeast/teltonikia/TeltonikaMessage.java | Java | unlicense | 660 |
/// Copyright (c) 2009 Microsoft Corporation
///
/// Redistribution and use in source and binary forms, with or without modification, are permitted provided
/// that the following conditions are met:
/// * Redistributions of source code must retain the above copyright notice, this list of conditions and
/// the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
/// the following disclaimer in the documentation and/or other materials provided with the distribution.
/// * Neither the name of Microsoft nor the names of its contributors may be used to
/// endorse or promote products derived from this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
/// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
/// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
/// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ES5 conformance test: Object.create must read a property descriptor's
// 'value' field via the ordinary [[Get]] (8.10.5 step 5.a). Math is used
// as the descriptor object; the 'value' property is added to it for the
// duration of the test and removed in the finally block.
ES5Harness.registerTest({
    id: "15.2.3.5-4-170",

    path: "TestCases/chapter15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-170.js",

    description: "Object.create - one property in 'Properties' is the Math object that uses Object's [[Get]] method to access the 'value' property (8.10.5 step 5.a)",

    test: function testcase() {
        try {
            Math.value = "MathValue";

            // Math acts as the property descriptor for 'prop'; its 'value'
            // member must be picked up through [[Get]].
            var newObj = Object.create({}, {
                prop: Math
            });
            return newObj.prop === "MathValue";
        } finally {
            // Always restore Math to its original state.
            delete Math.value;
        }
    },

    precondition: function prereq() {
        return fnExists(Object.create);
    }
});
| hnafar/IronJS | Src/Tests/ietestcenter/chapter15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-170.js | JavaScript | apache-2.0 | 2,234 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2014, Red Hat, Inc. and/or its affiliates, and individual
* contributors by the @authors tag. See the copyright.txt in the
* distribution for a full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.as.quickstarts.deltaspike.exceptionhandling.jsf;
import java.util.Iterator;
import javax.enterprise.inject.spi.BeanManager;
import javax.faces.FacesException;
import javax.faces.context.ExceptionHandler;
import javax.faces.context.ExceptionHandlerWrapper;
import javax.faces.event.ExceptionQueuedEvent;
import org.apache.deltaspike.core.api.exception.control.event.ExceptionToCatchEvent;
import org.apache.deltaspike.core.api.provider.BeanManagerProvider;
/**
 * JSF ExceptionHandler that forwards every unhandled exception to the CDI
 * event bus as a DeltaSpike {@code ExceptionToCatchEvent}, so that
 * DeltaSpike exception-control handlers can process it, then delegates to
 * the wrapped handler.
 */
public class DeltaSpikeExceptionHandler extends ExceptionHandlerWrapper {

    private final BeanManager beanManager;

    private final ExceptionHandler wrapped;

    public DeltaSpikeExceptionHandler(final ExceptionHandler wrapped) {
        this.wrapped = wrapped;
        // Resolve the BeanManager eagerly; this handler is created outside
        // of CDI injection, hence the BeanManagerProvider lookup.
        this.beanManager = BeanManagerProvider.getInstance().getBeanManager();
    }

    @Override
    public ExceptionHandler getWrapped() {
        return this.wrapped;
    }

    @Override
    public void handle() throws FacesException {
        Iterator<ExceptionQueuedEvent> it = getUnhandledExceptionQueuedEvents().iterator();
        while (it.hasNext()) {
            try {
                ExceptionQueuedEvent evt = it.next();
                // Fires the Event with the Exception (with expected Qualifier) to
                // be handled
                ExceptionToCatchEvent etce = new ExceptionToCatchEvent(evt.getContext().getException(),
                        FacesRequestLiteral.INSTANCE);
                beanManager.fireEvent(etce);
            } finally {
                // Always remove the event from the queue, even if a CDI
                // handler rethrows, so it is not processed twice.
                it.remove();
            }
        }
        getWrapped().handle();
    }
}
} | jboss-developer/jboss-wfk-quickstarts | deltaspike-exception-handling/src/main/java/org/jboss/as/quickstarts/deltaspike/exceptionhandling/jsf/DeltaSpikeExceptionHandler.java | Java | apache-2.0 | 2,430 |
# -*- coding: utf-8 -*-
"""This file contains the airport plist plugin in Plaso."""
from plaso.events import plist_event
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'
class AirportPlugin(interface.PlistPlugin):
  """Plist plugin extracting remembered WiFi networks from Airport preferences."""

  NAME = u'airport'
  DESCRIPTION = u'Parser for Airport plist files.'

  PLIST_PATH = u'com.apple.airport.preferences.plist'
  PLIST_KEYS = frozenset([u'RememberedNetworks'])

  def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Airport entries.

    One event is produced per remembered network, carrying the SSID, the
    security type and the last-connected timestamp.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      match: Optional dictionary containing keys extracted from PLIST_KEYS.
          The default is None.
    """
    if u'RememberedNetworks' not in match:
      return

    for network in match[u'RememberedNetworks']:
      # Fall back to explicit UNKNOWN markers when a field is absent.
      ssid = network.get(u'SSIDString', u'UNKNOWN_SSID')
      security = network.get(u'SecurityType', u'UNKNOWN_SECURITY_TYPE')
      last_connected = network.get(u'LastConnected', 0)

      description = (
          u'[WiFi] Connected to network: <{0:s}> using security {1:s}').format(
              ssid, security)
      parser_mediator.ProduceEvent(plist_event.PlistEvent(
          u'/RememberedNetworks', u'item', last_connected, description))

plist.PlistParser.RegisterPlugin(AirportPlugin)
| ostree/plaso | plaso/parsers/plist_plugins/airport.py | Python | apache-2.0 | 1,452 |
/*
* ******************************************************************************
* Copyright 2014-2017 Spectra Logic Corporation. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use
* this file except in compliance with the License. A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file.
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
* ****************************************************************************
*/
// This code is auto-generated, do not modify
using Ds3.Models;
namespace Ds3.Calls
{
    /// <summary>
    /// Response wrapper for the Spectra S3 "get S3 target read preference" call;
    /// carries the deserialized <see cref="S3TargetReadPreference"/> payload.
    /// </summary>
    public class GetS3TargetReadPreferenceSpectraS3Response
    {
        /// <summary>The parsed read preference returned by the server.</summary>
        public S3TargetReadPreference ResponsePayload { get; private set; }

        /// <param name="responsePayload">The deserialized response body.</param>
        public GetS3TargetReadPreferenceSpectraS3Response(S3TargetReadPreference responsePayload)
        {
            this.ResponsePayload = responsePayload;
        }
    }
}
| RachelTucker/ds3_net_sdk | Ds3/Calls/GetS3TargetReadPreferenceSpectraS3Response.cs | C# | apache-2.0 | 1,167 |
# HAL-style resource template for the /usagedata/toptenants endpoint.
# The embedded "tenants" list is empty here and is populated at request time.
toptenants_resource = {
    "_links": {
        "self": {
            "href": "/usagedata/toptenants"
        }
    },
    "_embedded": {
        "tenants": [
        ]
    },
}
tenant_resource = {
"ranking": 0,
"tenantId": 0,
"vmsActiveNum": 0,
"ramAllocatedTot": 0,
"vcpuAllocatedTot": 0,
"ramUsedPct": 0,
"cpuUsedPct": 0,
"tmpSumCpuPct": 0,
"tmpSumRamPct": 0,
"regions": [
]
} | attybro/FIWARELab-monitoringAPI | monitoringProxy/model/usagedata_resources.py | Python | apache-2.0 | 424 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dilation operators"""
import tvm
from tvm import te
from .. import utils
from .. import tag
@te.tag_scope(tag=tag.INJECTIVE + ",dilate")
def dilate(data, strides, dilation_value=0.0, name="DilatedInput"):
    """Dilate data with given dilation value (0 by default).

    Inserts ``strides[i] - 1`` copies of ``dilation_value`` between
    neighboring elements along dimension ``i``.

    Parameters
    ----------
    data : tvm.te.Tensor
        n-D, can be any layout.

    strides : list / tuple of n ints
        Dilation stride on each dimension, 1 means no dilation.

    dilation_value : int/float, optional
        Value used to dilate the input.

    name : str, optional
        The name prefix operators generated

    Returns
    -------
    Output : tvm.te.Tensor
        n-D, the same layout as data.

    Raises
    ------
    ValueError
        If ``strides`` does not have exactly one entry per dimension of ``data``.
    """
    n = len(data.shape)
    if len(strides) != n:
        # Fixed typo in diagnostic: "dismatch" -> "mismatch".
        raise ValueError("data dimension and strides size mismatch : %d vs %d" % (n, len(strides)))
    ana = tvm.arith.Analyzer()
    # Dilated extent along dimension i: (extent - 1) * stride + 1.
    out_shape = tuple(ana.simplify((data.shape[i] - 1) * strides[i] + 1) for i in range(n))

    def _dilate(*indices):
        # An output position maps back to a data element only when every
        # coordinate is an exact multiple of its stride; otherwise the
        # position holds the fill value.
        not_zero = []
        index_tuple = []
        idxdiv = tvm.tir.indexdiv
        idxmod = tvm.tir.indexmod
        for i in range(n):
            if not utils.equal_const_int(strides[i], 1):
                index_tuple.append(idxdiv(indices[i], strides[i]))
                not_zero.append(idxmod(indices[i], strides[i]).equal(0))
            else:
                # Stride 1: identity mapping, no divisibility condition needed.
                index_tuple.append(indices[i])
        if not_zero:
            not_zero = tvm.tir.all(*not_zero)
            return tvm.tir.if_then_else(
                not_zero, data(*index_tuple), tvm.tir.const(dilation_value, data.dtype)
            )
        # All strides were 1: pure copy.
        return data(*index_tuple)

    return te.compute(out_shape, _dilate, name=name)
| dmlc/tvm | python/tvm/topi/nn/dilate.py | Python | apache-2.0 | 2,530 |
/*
* Copyright 2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Module factory: receives the kernel promise and returns the Transformer
// remote-proxy class. All methods are thin RPC shims built via Utils.generate.
module.exports = function(kernelP) {
  return (function() {
    var Utils = require('../utils.js');

    var PipelineStage = require('./PipelineStage')();

    /**
     * @classdesc
     * Abstract class for transformers that transform one dataset into another.
     * @class
     * @memberof module:eclairjs/ml
     * @extends module:eclairjs/ml.PipelineStage
     */
    function Transformer() {
      // Abstract base: delegate construction to the shared helper so direct
      // and proxy instantiation are handled uniformly.
      Utils.handleAbstractConstructor(this, arguments);
    }

    Transformer.prototype = Object.create(PipelineStage.prototype);
    Transformer.prototype.constructor = Transformer;

    /**
     * Transforms the dataset with optional parameters
     * @param {module:eclairjs/sql.Dataset} dataset input dataset
     * @param {module:eclairjs/ml/param.ParamMap | module:eclairjs/ml/param.ParamPair} [params] additional parameters, overwrite embedded params, overwrite embedded params
     * @param {...module:eclairjs/ml/param.ParamPair} [otherParamPairs] other param pairs, Only used if argument two is {@link module:eclairjs/ml/param.ParamPair}. Overwrite embedded params
     * @returns {module:eclairjs/sql.Dataset} transformed dataset
     */
    Transformer.prototype.transform = function() {
      // Required lazily to avoid a circular dependency at module load time --
      // TODO confirm; this mirrors the pattern used elsewhere in this codebase.
      var Dataset = require('../sql/Dataset');

      // Forward all arguments to the JVM-side transform; wrap result as Dataset.
      var args = {
        target: this,
        method: 'transform',
        args: Utils.wrapArguments(arguments),
        returnType: Dataset
      };

      return Utils.generate(args);
    };

    /**
     * @param {module:eclairjs/ml/param.ParamMap} extra
     * @returns {module:eclairjs/ml.Transformer}
     */
    Transformer.prototype.copy = function(extra) {
      // Return type is the concrete subclass of the receiver, not Transformer.
      var args = {
        target: this,
        method: 'copy',
        args: Utils.wrapArguments(arguments),
        returnType: Utils.getContextClass(this)
      };

      return Utils.generate(args);
    };

    return Transformer;
  })();
};
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/discovery/model/CreateTagsRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::ApplicationDiscoveryService::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Default-constructs the request with both members flagged as unset so that
// SerializePayload() omits them from the JSON body until they are assigned.
CreateTagsRequest::CreateTagsRequest() :
    m_configurationIdsHasBeenSet(false),
    m_tagsHasBeenSet(false)
{
}
// Builds the JSON request body; only fields whose "HasBeenSet" flag is true
// ("configurationIds" and "tags") are emitted.
Aws::String CreateTagsRequest::SerializePayload() const
{
  JsonValue payload;

  if(m_configurationIdsHasBeenSet)
  {
   // Serialize configuration IDs as a JSON array of strings.
   Array<JsonValue> configurationIdsJsonList(m_configurationIds.size());
   for(unsigned configurationIdsIndex = 0; configurationIdsIndex < configurationIdsJsonList.GetLength(); ++configurationIdsIndex)
   {
     configurationIdsJsonList[configurationIdsIndex].AsString(m_configurationIds[configurationIdsIndex]);
   }
   payload.WithArray("configurationIds", std::move(configurationIdsJsonList));
  }

  if(m_tagsHasBeenSet)
  {
   // Each tag serializes itself via Jsonize().
   Array<JsonValue> tagsJsonList(m_tags.size());
   for(unsigned tagsIndex = 0; tagsIndex < tagsJsonList.GetLength(); ++tagsIndex)
   {
     tagsJsonList[tagsIndex].AsObject(m_tags[tagsIndex].Jsonize());
   }
   payload.WithArray("tags", std::move(tagsJsonList));
  }

  return payload.View().WriteReadable();
}
// Supplies the X-Amz-Target header identifying the JSON-RPC operation
// (Application Discovery Service CreateTags) for this request.
Aws::Http::HeaderValueCollection CreateTagsRequest::GetRequestSpecificHeaders() const
{
  Aws::Http::HeaderValueCollection headers;
  headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "AWSPoseidonService_V2015_11_01.CreateTags"));
  return headers;
}
| awslabs/aws-sdk-cpp | aws-cpp-sdk-discovery/source/model/CreateTagsRequest.cpp | C++ | apache-2.0 | 1,606 |
<?php
/**
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace Google\Cloud\Spanner\Tests\System;
use Google\Auth\Cache\MemoryCacheItemPool;
use Google\Cloud\Spanner\Session\CacheSessionPool;
use Google\Cloud\Spanner\Session\Session;
use Psr\Cache\CacheItemPoolInterface;
/**
* @group spanner
* @group spanner-session
*/
class SessionTest extends SpannerTestCase
{
    /**
     * End-to-end exercise of CacheSessionPool against a live Spanner database:
     * warmup, acquire/release accounting, pool exhaustion, and clearing.
     */
    public function testCacheSessionPool()
    {
        $identity = self::$database->identity();
        // Cache key under which CacheSessionPool persists its state.
        $cacheKey = sprintf(
            CacheSessionPool::CACHE_KEY_TEMPLATE,
            $identity['projectId'],
            $identity['instance'],
            $identity['database']
        );
        $cache = new MemoryCacheItemPool;
        $pool = new CacheSessionPool($cache, [
            'maxSessions' => 10,
            'minSessions' => 5,
            'shouldWaitForSession' => false
        ]);
        $pool->setDatabase(self::$database);

        // Nothing is cached until warmup() creates the minimum sessions.
        $this->assertNull($cache->getItem($cacheKey)->get());
        $pool->warmup();
        $this->assertPoolCounts($cache, $cacheKey, 5, 0, 0);

        // Acquiring moves one session from the queue to the in-use set.
        $session = $pool->acquire();
        $this->assertInstanceOf(Session::class, $session);
        $this->assertTrue($session->exists());
        $this->assertPoolCounts($cache, $cacheKey, 4, 1, 0);
        $this->assertEquals($session->name(), current($cache->getItem($cacheKey)->get()['inUse'])['name']);
        $pool->release($session);

        // Drain the pool completely (maxSessions = 10).
        $inUse = [];
        for ($i = 0; $i < 10; $i++) {
            $inUse[] = $pool->acquire();
        }
        $this->assertPoolCounts($cache, $cacheKey, 0, 10, 0);

        // With shouldWaitForSession disabled, an exhausted pool must throw.
        $exception = null;
        try {
            $pool->acquire();
        } catch (\RuntimeException $exception) {
            // no-op
        }
        $this->assertInstanceOf(
            \RuntimeException::class,
            $exception,
            'Should catch a RuntimeException when pool is exhausted.'
        );

        foreach ($inUse as $i) {
            $pool->release($i);
        }
        // Brief pause so released sessions land in the cached queue state.
        sleep(1);
        $this->assertPoolCounts($cache, $cacheKey, 10, 0, 0);

        // clear() removes the cached state and deletes the backing sessions.
        $pool->clear();
        sleep(1);
        $this->assertNull($cache->getItem($cacheKey)->get());
        $this->assertFalse($inUse[0]->exists());
    }

    /**
     * Asserts the queued / in-use / pending-create session counts recorded in
     * the pool's cache entry.
     *
     * @param CacheItemPoolInterface $cache
     * @param string $key Cache key of the pool state.
     * @param int $queue Expected sessions waiting in the queue.
     * @param int $inUse Expected sessions currently checked out.
     * @param int $toCreate Expected sessions scheduled for creation.
     */
    private function assertPoolCounts(CacheItemPoolInterface $cache, $key, $queue, $inUse, $toCreate)
    {
        $item = $cache->getItem($key)->get();
        $this->assertCount($queue, $item['queue'], 'Sessions In Queue');
        $this->assertCount($inUse, $item['inUse'], 'Sessions In Use');
        $this->assertCount($toCreate, $item['toCreate'], 'Sessions To Create');
    }
}
| googleapis/google-cloud-php-spanner | tests/System/SessionTest.php | PHP | apache-2.0 | 3,173 |
<?php
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/compute/v1/compute.proto
namespace Google\Cloud\Compute\V1;
use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
* A request message for Instances.GetIamPolicy. See the method description for details.
*
* Generated from protobuf message <code>google.cloud.compute.v1.GetIamPolicyInstanceRequest</code>
*/
class GetIamPolicyInstanceRequest extends \Google\Protobuf\Internal\Message
{
/**
* Requested IAM Policy version.
*
* Generated from protobuf field <code>optional int32 options_requested_policy_version = 499220029;</code>
*/
private $options_requested_policy_version = null;
/**
* Project ID for this request.
*
* Generated from protobuf field <code>string project = 227560217 [(.google.api.field_behavior) = REQUIRED];</code>
*/
private $project = '';
/**
* Name or id of the resource for this request.
*
* Generated from protobuf field <code>string resource = 195806222 [(.google.api.field_behavior) = REQUIRED];</code>
*/
private $resource = '';
/**
* The name of the zone for this request.
*
* Generated from protobuf field <code>string zone = 3744684 [(.google.api.field_behavior) = REQUIRED];</code>
*/
private $zone = '';
/**
* Constructor.
*
* @param array $data {
* Optional. Data for populating the Message object.
*
* @type int $options_requested_policy_version
* Requested IAM Policy version.
* @type string $project
* Project ID for this request.
* @type string $resource
* Name or id of the resource for this request.
* @type string $zone
* The name of the zone for this request.
* }
*/
public function __construct($data = NULL) {
\GPBMetadata\Google\Cloud\Compute\V1\Compute::initOnce();
parent::__construct($data);
}
/**
* Requested IAM Policy version.
*
* Generated from protobuf field <code>optional int32 options_requested_policy_version = 499220029;</code>
* @return int
*/
public function getOptionsRequestedPolicyVersion()
{
return isset($this->options_requested_policy_version) ? $this->options_requested_policy_version : 0;
}
public function hasOptionsRequestedPolicyVersion()
{
return isset($this->options_requested_policy_version);
}
public function clearOptionsRequestedPolicyVersion()
{
unset($this->options_requested_policy_version);
}
/**
* Requested IAM Policy version.
*
* Generated from protobuf field <code>optional int32 options_requested_policy_version = 499220029;</code>
* @param int $var
* @return $this
*/
public function setOptionsRequestedPolicyVersion($var)
{
GPBUtil::checkInt32($var);
$this->options_requested_policy_version = $var;
return $this;
}
/**
* Project ID for this request.
*
* Generated from protobuf field <code>string project = 227560217 [(.google.api.field_behavior) = REQUIRED];</code>
* @return string
*/
public function getProject()
{
return $this->project;
}
/**
* Project ID for this request.
*
* Generated from protobuf field <code>string project = 227560217 [(.google.api.field_behavior) = REQUIRED];</code>
* @param string $var
* @return $this
*/
public function setProject($var)
{
GPBUtil::checkString($var, True);
$this->project = $var;
return $this;
}
/**
* Name or id of the resource for this request.
*
* Generated from protobuf field <code>string resource = 195806222 [(.google.api.field_behavior) = REQUIRED];</code>
* @return string
*/
public function getResource()
{
return $this->resource;
}
/**
* Name or id of the resource for this request.
*
* Generated from protobuf field <code>string resource = 195806222 [(.google.api.field_behavior) = REQUIRED];</code>
* @param string $var
* @return $this
*/
public function setResource($var)
{
GPBUtil::checkString($var, True);
$this->resource = $var;
return $this;
}
/**
* The name of the zone for this request.
*
* Generated from protobuf field <code>string zone = 3744684 [(.google.api.field_behavior) = REQUIRED];</code>
* @return string
*/
public function getZone()
{
return $this->zone;
}
/**
* The name of the zone for this request.
*
* Generated from protobuf field <code>string zone = 3744684 [(.google.api.field_behavior) = REQUIRED];</code>
* @param string $var
* @return $this
*/
public function setZone($var)
{
GPBUtil::checkString($var, True);
$this->zone = $var;
return $this;
}
}
| googleapis/google-cloud-php-compute | src/V1/GetIamPolicyInstanceRequest.php | PHP | apache-2.0 | 5,095 |
package net.compitek.javakit.database.dao;/**
* Created by Evgene on 04.06.2015.
*/
import net.compitek.javakit.database.domain.News;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Repository;
/**
 * Spring data-access object for {@link News} entities; all generic persistence
 * behaviour is inherited from {@link AbstractDao}.
 */
@Repository("NewsDao")
public class NewsDao extends AbstractDao<Long,News>{
    // Class-scoped logger; not used in this class yet, kept for future DAO-specific logging.
    private static final Logger log = Logger.getLogger(NewsDao.class);
}
| Javakit/Javakit | src/main/java/net/compitek/javakit/database/dao/NewsDao.java | Java | apache-2.0 | 370 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.core.metamodel.facetdecorator.help;
import org.apache.isis.applib.Identifier;
import org.apache.isis.core.metamodel.facetapi.Facet;
import org.apache.isis.core.metamodel.facetapi.FacetHolder;
import org.apache.isis.core.metamodel.facetapi.IdentifiedHolder;
import org.apache.isis.core.metamodel.facetdecorator.FacetDecoratorAbstract;
import org.apache.isis.core.metamodel.facets.all.help.HelpFacet;
/**
 * Decorates {@link HelpFacet}s so that help text is looked up through a
 * {@link HelpManager} (keyed by the member's {@link Identifier}) instead of
 * being taken from the facet itself.
 */
public class HelpFacetDecoratorUsingHelpManager extends FacetDecoratorAbstract implements HelpFacetDecorator {

    private final HelpManager helpManager;

    public HelpFacetDecoratorUsingHelpManager(final HelpManager manager) {
        helpManager = manager;
    }

    @Override
    public Facet decorate(final Facet facet, final FacetHolder facetHolder) {
        // Only HelpFacets are candidates; every other facet type passes through unchanged.
        if (facet.facetType() != HelpFacet.class) {
            return facet;
        }
        // Help text is keyed by Identifier, so holders without one cannot be decorated.
        if (!(facetHolder instanceof IdentifiedHolder)) {
            return null;
        }
        final IdentifiedHolder identifiedHolder = (IdentifiedHolder) facetHolder;
        return decorateWithHelpFacet(facet, identifiedHolder);
    }

    /**
     * Looks up help text for the holder's identifier; if found, installs and
     * returns a replacement facet carrying that text, otherwise returns null.
     */
    private Facet decorateWithHelpFacet(final Facet facet, final IdentifiedHolder identifiedHolder) {
        final Identifier identifier = identifiedHolder.getIdentifier();
        final String helpText = helpManager.getHelpText(identifier);
        if (helpText != null) {
            final HelpFacetLookedUpViaHelpManager decoratingFacet = new HelpFacetLookedUpViaHelpManager(helpText, facet.getFacetHolder());
            identifiedHolder.addFacet(decoratingFacet);
            return decoratingFacet;
        }
        // No help text registered for this identifier; no decoration performed.
        return null;
    }

    @Override
    @SuppressWarnings("unchecked")
    public Class<? extends Facet>[] getFacetTypes() {
        return new Class[] { HelpFacet.class };
    }
}
| howepeng/isis | core/metamodel/src/main/java/org/apache/isis/core/metamodel/facetdecorator/help/HelpFacetDecoratorUsingHelpManager.java | Java | apache-2.0 | 2,651 |
<?php
/**
* Copyright 2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// [START tasks_quickstart]
require __DIR__ . '/vendor/autoload.php';
if (php_sapi_name() != 'cli') {
throw new Exception('This application must be run on the command line.');
}
/**
 * Returns an authorized API client.
 *
 * Reads OAuth client configuration from credentials.json; the resulting user
 * token is cached in token.json and is refreshed, or re-requested via an
 * interactive console flow, when expired.
 *
 * @return Google_Client the authorized client object
 * @throws Exception if the token exchange returns an error response
 */
function getClient()
{
    $client = new Google_Client();
    $client->setApplicationName('Google Tasks API PHP Quickstart');
    // Read-only scope: this quickstart never mutates tasks.
    $client->setScopes(Google_Service_Tasks::TASKS_READONLY);
    $client->setAuthConfig('credentials.json');
    // "offline" access yields a refresh token on first authorization.
    $client->setAccessType('offline');
    $client->setPrompt('select_account consent');

    // Load previously authorized token from a file, if it exists.
    // The file token.json stores the user's access and refresh tokens, and is
    // created automatically when the authorization flow completes for the first
    // time.
    $tokenPath = 'token.json';
    if (file_exists($tokenPath)) {
        $accessToken = json_decode(file_get_contents($tokenPath), true);
        $client->setAccessToken($accessToken);
    }

    // If there is no previous token or it's expired.
    if ($client->isAccessTokenExpired()) {
        // Refresh the token if possible, else fetch a new one.
        if ($client->getRefreshToken()) {
            $client->fetchAccessTokenWithRefreshToken($client->getRefreshToken());
        } else {
            // Request authorization from the user.
            $authUrl = $client->createAuthUrl();
            printf("Open the following link in your browser:\n%s\n", $authUrl);
            print 'Enter verification code: ';
            $authCode = trim(fgets(STDIN));

            // Exchange authorization code for an access token.
            $accessToken = $client->fetchAccessTokenWithAuthCode($authCode);
            $client->setAccessToken($accessToken);

            // Check to see if there was an error.
            if (array_key_exists('error', $accessToken)) {
                throw new Exception(join(', ', $accessToken));
            }
        }
        // Save the token to a file.
        if (!file_exists(dirname($tokenPath))) {
            mkdir(dirname($tokenPath), 0700, true);
        }
        file_put_contents($tokenPath, json_encode($client->getAccessToken()));
    }
    return $client;
}
// Get the API client and construct the service object.
$client = getClient();
$service = new Google_Service_Tasks($client);
// Print the first 10 task lists.
$optParams = array(
'maxResults' => 10,
);
$results = $service->tasklists->listTasklists($optParams);
if (count($results->getItems()) == 0) {
print "No task lists found.\n";
} else {
print "Task lists:\n";
foreach ($results->getItems() as $tasklist) {
printf("%s (%s)\n", $tasklist->getTitle(), $tasklist->getId());
}
}
// [END tasks_quickstart]
| googleworkspace/php-samples | tasks/quickstart/quickstart.php | PHP | apache-2.0 | 3,388 |
/*
* Copyright (C) 2004-2017 ZNC, see the NOTICE file for details.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! @author prozac@rottenboy.com
//
// The encryption here was designed to be compatible with mircryption's CBC
// mode.
//
// Latest tested against:
// MircryptionSuite - Mircryption ver 1.19.01 (dll v1.15.01 , mirc v7.32) CBC
// loaded and ready.
//
// TODO:
//
// 1) Encrypt key storage file
// 2) Some way of notifying the user that the current channel is in "encryption
// mode" verses plain text
// 3) Temporarily disable a target (nick/chan)
//
// NOTE: This module is currently NOT intended to secure you from your shell
// admin.
// The keys are currently stored in plain text, so anyone with access to
// your account (or root) can obtain them.
// It is strongly suggested that you enable SSL between znc and your
// client otherwise the encryption stops at znc and gets sent to your
// client in plain text.
//
#include <znc/Chan.h>
#include <znc/User.h>
#include <znc/IRCNetwork.h>
#include <openssl/dh.h>
#include <openssl/bn.h>
#include <znc/SHA256.h>
#define REQUIRESSL 1
// To be removed in future versions
#define NICK_PREFIX_OLD_KEY "[nick-prefix]"
#define NICK_PREFIX_KEY "@nick-prefix@"
class CCryptMod : public CModule {
private:
/*
* As used in other implementations like KVIrc, fish10, Quassel, FiSH-irssi, ...
* all the way back to the original located at http://mircryption.sourceforge.net/Extras/McpsFishDH.zip
*/
const char* m_sPrime1080 = "FBE1022E23D213E8ACFA9AE8B9DFADA3EA6B7AC7A7B7E95AB5EB2DF858921FEADE95E6AC7BE7DE6ADBAB8A783E7AF7A7FA6A2B7BEB1E72EAE2B72F9FA2BFB2A2EFBEFAC868BADB3E828FA8BADFADA3E4CC1BE7E8AFE85E9698A783EB68FA07A77AB6AD7BEB618ACF9CA2897EB28A6189EFA07AB99A8A7FA9AE299EFA7BA66DEAFEFBEFBF0B7D8B";
/* Generate our keys once and reuse, just like ssh keys */
std::unique_ptr<DH, decltype(&DH_free)> m_pDH;
CString m_sPrivKey;
CString m_sPubKey;
#if OPENSSL_VERSION_NUMBER < 0X10100000L
static int DH_set0_pqg(DH* dh, BIGNUM* p, BIGNUM* q, BIGNUM* g) {
/* If the fields p and g in dh are nullptr, the corresponding input
* parameters MUST be non-nullptr. q may remain nullptr.
*/
if (dh == nullptr || (dh->p == nullptr && p == nullptr) || (dh->g == nullptr && g == nullptr))
return 0;
if (p != nullptr) {
BN_free(dh->p);
dh->p = p;
}
if (g != nullptr) {
BN_free(dh->g);
dh->g = g;
}
if (q != nullptr) {
BN_free(dh->q);
dh->q = q;
dh->length = BN_num_bits(q);
}
return 1;
}
static void DH_get0_key(const DH* dh, const BIGNUM** pub_key, const BIGNUM** priv_key) {
if (dh != nullptr) {
if (pub_key != nullptr)
*pub_key = dh->pub_key;
if (priv_key != nullptr)
*priv_key = dh->priv_key;
}
}
#endif
bool DH1080_gen() {
/* Generate our keys on first call */
if (m_sPrivKey.empty() || m_sPubKey.empty()) {
int len;
const BIGNUM* bPrivKey = nullptr;
const BIGNUM* bPubKey = nullptr;
BIGNUM* bPrime = nullptr;
BIGNUM* bGen = nullptr;
if (!BN_hex2bn(&bPrime, m_sPrime1080) || !BN_dec2bn(&bGen, "2") || !DH_set0_pqg(m_pDH.get(), bPrime, nullptr, bGen) || !DH_generate_key(m_pDH.get())) {
/* one of them failed */
if (bPrime != nullptr)
BN_clear_free(bPrime);
if (bGen != nullptr)
BN_clear_free(bGen);
return false;
}
/* Get our keys */
DH_get0_key(m_pDH.get(), &bPubKey, &bPrivKey);
/* Get our private key */
len = BN_num_bytes(bPrivKey);
m_sPrivKey.resize(len);
BN_bn2bin(bPrivKey, (unsigned char*)m_sPrivKey.data());
m_sPrivKey.Base64Encode();
/* Get our public key */
len = BN_num_bytes(bPubKey);
m_sPubKey.resize(len);
BN_bn2bin(bPubKey, (unsigned char*)m_sPubKey.data());
m_sPubKey.Base64Encode();
}
return true;
}
bool DH1080_comp(CString& sOtherPubKey, CString& sSecretKey) {
unsigned long len;
unsigned char* key = nullptr;
BIGNUM* bOtherPubKey = nullptr;
/* Prepare other public key */
len = sOtherPubKey.Base64Decode();
bOtherPubKey = BN_bin2bn((unsigned char*)sOtherPubKey.data(), len, nullptr);
/* Generate secret key */
key = (unsigned char*)calloc(DH_size(m_pDH.get()), 1);
if ((len = DH_compute_key(key, bOtherPubKey, m_pDH.get())) == -1) {
sSecretKey = "";
if (bOtherPubKey != nullptr)
BN_clear_free(bOtherPubKey);
if (key != nullptr)
free(key);
return false;
}
/* Get our secret key */
sSecretKey.resize(SHA256_DIGEST_SIZE);
sha256(key, len, (unsigned char*)sSecretKey.data());
sSecretKey.Base64Encode();
sSecretKey.TrimRight("=");
if (bOtherPubKey != nullptr)
BN_clear_free(bOtherPubKey);
if (key != nullptr)
free(key);
return true;
}
    /* Returns the prefix prepended to nicks of decrypted messages.
     * Uses the user-configured prefix from the NV store unless it would
     * collide with ZNC's status prefix; otherwise falls back to "*", or to
     * "." when the status prefix itself starts with "*". */
    CString NickPrefix() {
        MCString::iterator it = FindNV(NICK_PREFIX_KEY);
        /*
         * Check for different Prefixes to not confuse modules with nicknames
         * Also check for overlap for rare cases like:
         * SP = "*"; NP = "*s"; "tatus" sends an encrypted message appearing at "*status"
         */
        CString sStatusPrefix = GetUser()->GetStatusPrefix();
        if (it != EndNV()) {
            size_t sp = sStatusPrefix.size();
            size_t np = it->second.size();
            int min = std::min(sp, np);
            // Accept the stored prefix only when it does not overlap the
            // status prefix (case-insensitive compare over the shorter length).
            if (min == 0 || sStatusPrefix.CaseCmp(it->second, min) != 0)
                return it->second;
        }
        return sStatusPrefix.StartsWith("*") ? "." : "*";
    }
public:
/* MODCONSTRUCTOR(CLASS) is of form "CLASS(...) : CModule(...)" */
MODCONSTRUCTOR(CCryptMod) , m_pDH(DH_new(), DH_free) {
AddHelpCommand();
AddCommand("DelKey", static_cast<CModCommand::ModCmdFunc>(
&CCryptMod::OnDelKeyCommand),
"<#chan|Nick>", "Remove a key for nick or channel");
AddCommand("SetKey", static_cast<CModCommand::ModCmdFunc>(
&CCryptMod::OnSetKeyCommand),
"<#chan|Nick> <Key>", "Set a key for nick or channel");
AddCommand("ListKeys", static_cast<CModCommand::ModCmdFunc>(
&CCryptMod::OnListKeysCommand),
"", "List all keys");
AddCommand("KeyX", static_cast<CModCommand::ModCmdFunc>(
&CCryptMod::OnKeyXCommand),
"<Nick>", "Start a DH1080 key exchange with nick");
AddCommand("GetNickPrefix", static_cast<CModCommand::ModCmdFunc>(
&CCryptMod::OnGetNickPrefixCommand),
"", "Get the nick prefix");
AddCommand("SetNickPrefix", static_cast<CModCommand::ModCmdFunc>(
&CCryptMod::OnSetNickPrefixCommand),
"[Prefix]", "Set the nick prefix, with no argument it's disabled.");
}
~CCryptMod() override {
}
bool OnLoad(const CString& sArgsi, CString& sMessage) override {
MCString::iterator it = FindNV(NICK_PREFIX_KEY);
if (it == EndNV()) {
/* Don't have the new prefix key yet */
it = FindNV(NICK_PREFIX_OLD_KEY);
if (it != EndNV()) {
SetNV(NICK_PREFIX_KEY, it->second);
DelNV(NICK_PREFIX_OLD_KEY);
}
}
return true;
}
EModRet OnUserMsg(CString& sTarget, CString& sMessage) override {
return FilterOutgoing(sTarget, sMessage, "PRIVMSG", "", "");
}
EModRet OnUserNotice(CString& sTarget, CString& sMessage) override {
return FilterOutgoing(sTarget, sMessage, "NOTICE", "", "");
}
EModRet OnUserAction(CString& sTarget, CString& sMessage) override {
return FilterOutgoing(sTarget, sMessage, "PRIVMSG", "\001ACTION ", "\001");
}
EModRet OnUserTopic(CString& sTarget, CString& sMessage) override {
sTarget.TrimPrefix(NickPrefix());
if (sMessage.TrimPrefix("``")) {
return CONTINUE;
}
MCString::iterator it = FindNV(sTarget.AsLower());
if (it != EndNV()) {
sMessage = MakeIvec() + sMessage;
sMessage.Encrypt(it->second);
sMessage.Base64Encode();
sMessage = "+OK *" + sMessage;
}
return CONTINUE;
}
EModRet OnPrivMsg(CNick& Nick, CString& sMessage) override {
FilterIncoming(Nick.GetNick(), Nick, sMessage);
return CONTINUE;
}
EModRet OnPrivNotice(CNick& Nick, CString& sMessage) override {
CString sCommand = sMessage.Token(0);
CString sOtherPubKey = sMessage.Token(1);
if ((sCommand.Equals("DH1080_INIT") || sCommand.Equals("DH1080_INIT_CBC")) && !sOtherPubKey.empty()) {
CString sSecretKey;
CString sTail = sMessage.Token(2); /* For fish10 */
/* remove trailing A */
if (sOtherPubKey.TrimSuffix("A") && DH1080_gen() && DH1080_comp(sOtherPubKey, sSecretKey)) {
PutModule("Received DH1080 public key from " + Nick.GetNick() + ", sending mine...");
PutIRC("NOTICE " + Nick.GetNick() + " :DH1080_FINISH " + m_sPubKey + "A" + (sTail.empty()?"":(" " + sTail)));
SetNV(Nick.GetNick().AsLower(), sSecretKey);
PutModule("Key for " + Nick.GetNick() + " successfully set.");
return HALT;
}
PutModule("Error in " + sCommand + " with " + Nick.GetNick() + ": " + (sSecretKey.empty()?"no secret key computed":sSecretKey));
return CONTINUE;
} else if (sCommand.Equals("DH1080_FINISH") && !sOtherPubKey.empty()) {
/*
* In theory we could get a DH1080_FINISH without us having sent a DH1080_INIT first,
* but then to have any use for the other user, they'd already have our pub key
*/
CString sSecretKey;
/* remove trailing A */
if (sOtherPubKey.TrimSuffix("A") && DH1080_gen() && DH1080_comp(sOtherPubKey, sSecretKey)) {
SetNV(Nick.GetNick().AsLower(), sSecretKey);
PutModule("Key for " + Nick.GetNick() + " successfully set.");
return HALT;
}
PutModule("Error in " + sCommand + " with " + Nick.GetNick() + ": " + (sSecretKey.empty()?"no secret key computed":sSecretKey));
return CONTINUE;
}
FilterIncoming(Nick.GetNick(), Nick, sMessage);
return CONTINUE;
}
EModRet OnPrivAction(CNick& Nick, CString& sMessage) override {
FilterIncoming(Nick.GetNick(), Nick, sMessage);
return CONTINUE;
}
EModRet OnChanMsg(CNick& Nick, CChan& Channel, CString& sMessage) override {
FilterIncoming(Channel.GetName(), Nick, sMessage);
return CONTINUE;
}
EModRet OnChanNotice(CNick& Nick, CChan& Channel,
CString& sMessage) override {
FilterIncoming(Channel.GetName(), Nick, sMessage);
return CONTINUE;
}
EModRet OnChanAction(CNick& Nick, CChan& Channel,
CString& sMessage) override {
FilterIncoming(Channel.GetName(), Nick, sMessage);
return CONTINUE;
}
EModRet OnTopic(CNick& Nick, CChan& Channel, CString& sMessage) override {
FilterIncoming(Channel.GetName(), Nick, sMessage);
return CONTINUE;
}
EModRet OnRaw(CString& sLine) override {
if (!sLine.Token(1).Equals("332")) {
return CONTINUE;
}
CChan* pChan = GetNetwork()->FindChan(sLine.Token(3));
if (pChan) {
CNick* Nick = pChan->FindNick(sLine.Token(2));
CString sTopic = sLine.Token(4, true);
sTopic.TrimPrefix(":");
FilterIncoming(pChan->GetName(), *Nick, sTopic);
sLine = sLine.Token(0) + " " + sLine.Token(1) + " " +
sLine.Token(2) + " " + pChan->GetName() + " :" + sTopic;
}
return CONTINUE;
}
EModRet FilterOutgoing(CString& sTarget, CString& sMessage, const CString& sType, const CString& sPreMsg, const CString& sPostMsg) {
sTarget.TrimPrefix(NickPrefix());
if (sMessage.TrimPrefix("``")) {
return CONTINUE;
}
MCString::iterator it = FindNV(sTarget.AsLower());
if (it != EndNV()) {
CChan* pChan = GetNetwork()->FindChan(sTarget);
CString sNickMask = GetNetwork()->GetIRCNick().GetNickMask();
if (pChan) {
if (!pChan->AutoClearChanBuffer())
pChan->AddBuffer(":" + NickPrefix() + _NAMEDFMT(sNickMask) +
" " + sType + " " + _NAMEDFMT(sTarget) +
" :" + sPreMsg + "{text}" + sPostMsg,
sMessage);
GetUser()->PutUser(":" + NickPrefix() + sNickMask +
" " + sType + " " + sTarget + " :" +
sPreMsg + sMessage + sPostMsg,
nullptr, GetClient());
}
CString sMsg = MakeIvec() + sMessage;
sMsg.Encrypt(it->second);
sMsg.Base64Encode();
sMsg = "+OK *" + sMsg;
PutIRC(sType + " " + sTarget + " :" + sPreMsg + sMsg + sPostMsg);
return HALTCORE;
}
return CONTINUE;
}
// Decrypt an incoming "+OK *"-prefixed message in place when a key is
// stored for sTarget, and tag the sender's nick with the module's prefix
// so the user can tell the message was encrypted.
void FilterIncoming(const CString& sTarget, CNick& Nick,
CString& sMessage) {
if (sMessage.TrimPrefix("+OK *")) {
MCString::iterator it = FindNV(sTarget.AsLower());
if (it != EndNV()) {
sMessage.Base64Decode();
sMessage.Decrypt(it->second);
// Drop the 8-byte IV prepended by MakeIvec() on the sending side.
sMessage.LeftChomp(8);
// Truncate at the first NUL byte left over from block padding.
sMessage = sMessage.c_str();
Nick.SetNick(NickPrefix() + Nick.GetNick());
}
}
}
// "DelKey <#chan|Nick>" command handler: remove the stored encryption
// key for the given target from the module's NV registry.
void OnDelKeyCommand(const CString& sCommand) {
    CString sTarget = sCommand.Token(1);
    if (!sTarget.empty()) {
        // Keys are stored lower-cased (see OnSetKeyCommand).
        if (DelNV(sTarget.AsLower())) {
            PutModule("Target [" + sTarget + "] deleted");
        } else {
            PutModule("Target [" + sTarget + "] not found");
        }
    } else {
        // "Usage:" (with colon) for consistency with SetKey/KeyX usage text.
        PutModule("Usage: DelKey <#chan|Nick>");
    }
}
// "SetKey <#chan|Nick> <Key>" command handler: store an encryption key
// for a target.  The target is lower-cased so lookups are case-insensitive.
void OnSetKeyCommand(const CString& sCommand) {
    CString sTarget = sCommand.Token(1);
    CString sKey = sCommand.Token(2, true);
    // Strip "cbc:" from beginning of string incase someone pastes directly
    // from mircryption
    sKey.TrimPrefix("cbc:");
    if (sKey.empty()) {
        PutModule("Usage: SetKey <#chan|Nick> <Key>");
        return;
    }
    SetNV(sTarget.AsLower(), sKey);
    PutModule("Set encryption key for [" + sTarget + "] to [" + sKey +
              "]");
}
// "KeyX <Nick>" command handler: start a DH1080 key exchange by sending
// our public key to the peer via NOTICE.
// NOTE(review): DH1080_gen() and m_sPubKey are defined elsewhere in this
// module; assumed to generate/hold the DH1080 keypair — confirm there.
void OnKeyXCommand(const CString& sCommand) {
CString sTarget = sCommand.Token(1);
if (!sTarget.empty()) {
if (DH1080_gen()) {
// The trailing "A" marks a CBC-mode exchange in the DH1080 protocol.
PutIRC("NOTICE " + sTarget + " :DH1080_INIT " + m_sPubKey + "A");
PutModule("Sent my DH1080 public key to " + sTarget + ", waiting for reply ...");
} else {
PutModule("Error generating our keys, nothing sent.");
}
} else {
PutModule("Usage: KeyX <Nick>");
}
}
// "GetNickPrefix" command handler: report the configured nick prefix,
// or that the feature is disabled when the prefix is empty.
void OnGetNickPrefixCommand(const CString& sCommand) {
    const CString sPrefix = NickPrefix();
    if (sPrefix.empty()) {
        PutModule("Nick Prefix disabled.");
    } else {
        PutModule("Nick Prefix: " + sPrefix);
    }
}
void OnSetNickPrefixCommand(const CString& sCommand) {
CString sPrefix = sCommand.Token(1);
if (sPrefix.StartsWith(":")) {
PutModule("You cannot use :, even followed by other symbols, as Nick Prefix.");
} else {
CString sStatusPrefix = GetUser()->GetStatusPrefix();
size_t sp = sStatusPrefix.size();
size_t np = sPrefix.size();
int min = std::min(sp, np);
if (min > 0 && sStatusPrefix.CaseCmp(sPrefix, min) == 0)
PutModule("Overlap with Status Prefix (" + sStatusPrefix + "), this Nick Prefix will not be used!");
else {
SetNV(NICK_PREFIX_KEY, sPrefix);
if (sPrefix.empty())
PutModule("Disabling Nick Prefix.");
else
PutModule("Setting Nick Prefix to " + sPrefix);
}
}
}
// "ListKeys" command handler: print a table of all stored target/key
// pairs, skipping the internal nick-prefix setting that shares the store.
void OnListKeysCommand(const CString& sCommand) {
    CTable Table;
    Table.AddColumn("Target");
    Table.AddColumn("Key");
    for (MCString::iterator it = BeginNV(); it != EndNV(); ++it) {
        // The nick prefix lives in the same NV registry; hide it here.
        if (it->first.Equals(NICK_PREFIX_KEY)) {
            continue;
        }
        Table.AddRow();
        Table.SetCell("Target", it->first);
        Table.SetCell("Key", it->second);
    }
    if (Table.empty()) {
        PutModule("You have no encryption keys set.");
    } else {
        PutModule(Table);
    }
}
// Build the 8-byte initialization vector prepended to each plain-text
// message before encryption: 4 bytes of the current time followed by
// 4 bytes from rand().  The receiving side strips these 8 bytes after
// decryption (see FilterIncoming's LeftChomp(8)).
// NOTE(review): rand() is not a cryptographically secure source and only
// the low 4 bytes of time_t are copied (endianness-dependent).  A CSPRNG
// would be preferable — confirm wire-format compatibility before changing.
CString MakeIvec() {
CString sRet;
time_t t;
time(&t);
int r = rand();
sRet.append((char*)&t, 4);
sRet.append((char*)&r, 4);
return sRet;
}
};
// Module metadata hook: point ZNC's web UI at the "crypt" wiki page.
template <>
void TModInfo<CCryptMod>(CModInfo& Info) {
Info.SetWikiPage("crypt");
}
NETWORKMODULEDEFS(CCryptMod, "Encryption for channel/private messages")
| evilnet/znc | modules/crypt.cpp | C++ | apache-2.0 | 18,628 |
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
// This source code is also licensed under the GPLv2 license found in the
// COPYING file in the root directory of this source tree.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// Introduction of SyncPoint effectively disabled building and running this test
// in Release build.
// which is a pity, it is a good test
#if !defined(ROCKSDB_LITE)
#include "db/db_test_util.h"
#include "port/port.h"
#include "port/stack_trace.h"
namespace rocksdb {
// Test fixture: every test in this file runs against a fresh database
// rooted at "/db_dynamic_level_test" (path handling done by DBTestBase).
class DBTestDynamicLevel : public DBTestBase {
public:
DBTestDynamicLevel() : DBTestBase("/db_dynamic_level_test") {}
};
// End-to-end check of level_compaction_dynamic_level_bytes: inserts keys
// (ordered and shuffled) with per-level compression, verifies reads after
// reopen, then verifies a full CompactRange pushes all data to the last
// level.  Exercised with 1 and 3 background compaction threads.
TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) {
if (!Snappy_Supported() || !LZ4_Supported()) {
return;
}
// Use InMemoryEnv, or it would be too slow.
// NOTE(review): this MockEnv is assigned to options.env below but then
// overwritten with env_ before DestroyAndReopen, so the mock env is never
// actually used — confirm which env this test is meant to run against.
unique_ptr<Env> env(new MockEnv(env_));
const int kNKeys = 1000;
int keys[kNKeys];
// Reads back three key ranges written below; keys[i]/10 for small i were
// deleted during the write loop and must stay absent.
auto verify_func = [&]() {
for (int i = 0; i < kNKeys; i++) {
ASSERT_NE("NOT_FOUND", Get(Key(i)));
ASSERT_NE("NOT_FOUND", Get(Key(kNKeys * 2 + i)));
if (i < kNKeys / 10) {
ASSERT_EQ("NOT_FOUND", Get(Key(kNKeys + keys[i])));
} else {
ASSERT_NE("NOT_FOUND", Get(Key(kNKeys + keys[i])));
}
}
};
Random rnd(301);
for (int ordered_insert = 0; ordered_insert <= 1; ordered_insert++) {
for (int i = 0; i < kNKeys; i++) {
keys[i] = i;
}
if (ordered_insert == 0) {
std::random_shuffle(std::begin(keys), std::end(keys));
}
for (int max_background_compactions = 1; max_background_compactions < 4;
max_background_compactions += 2) {
Options options;
options.env = env.get();
options.create_if_missing = true;
options.write_buffer_size = 2048;
options.max_write_buffer_number = 2;
options.level0_file_num_compaction_trigger = 2;
options.level0_slowdown_writes_trigger = 2;
options.level0_stop_writes_trigger = 2;
options.target_file_size_base = 2048;
options.level_compaction_dynamic_level_bytes = true;
options.max_bytes_for_level_base = 10240;
options.max_bytes_for_level_multiplier = 4;
options.soft_rate_limit = 1.1;
options.max_background_compactions = max_background_compactions;
options.num_levels = 5;
options.compression_per_level.resize(3);
options.compression_per_level[0] = kNoCompression;
options.compression_per_level[1] = kLZ4Compression;
options.compression_per_level[2] = kSnappyCompression;
// Overrides the options.env = env.get() assignment above.
options.env = env_;
DestroyAndReopen(options);
for (int i = 0; i < kNKeys; i++) {
int key = keys[i];
ASSERT_OK(Put(Key(kNKeys + key), RandomString(&rnd, 102)));
ASSERT_OK(Put(Key(key), RandomString(&rnd, 102)));
ASSERT_OK(Put(Key(kNKeys * 2 + key), RandomString(&rnd, 102)));
ASSERT_OK(Delete(Key(kNKeys + keys[i / 10])));
env_->SleepForMicroseconds(5000);
}
uint64_t int_prop;
ASSERT_TRUE(db_->GetIntProperty("rocksdb.background-errors", &int_prop));
ASSERT_EQ(0U, int_prop);
// Verify DB
for (int j = 0; j < 2; j++) {
verify_func();
if (j == 0) {
Reopen(options);
}
}
// Test compact range works
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
// All data should be in the last level.
ColumnFamilyMetaData cf_meta;
db_->GetColumnFamilyMetaData(&cf_meta);
ASSERT_EQ(5U, cf_meta.levels.size());
for (int i = 0; i < 4; i++) {
ASSERT_EQ(0U, cf_meta.levels[i].files.size());
}
ASSERT_GT(cf_meta.levels[4U].files.size(), 0U);
verify_func();
Close();
}
}
env_->SetBackgroundThreads(1, Env::LOW);
env_->SetBackgroundThreads(1, Env::HIGH);
}
// Test specific cases in dynamic max bytes
// Test specific cases in dynamic max bytes
// Verifies that the dynamic base level shifts 4 -> 3 -> 2 -> 1 as data
// volume grows, including while parallel compactions and a concurrent
// CompactRange are in flight (coordinated via SyncPoint dependencies).
TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
Random rnd(301);
int kMaxKey = 1000000;
Options options = CurrentOptions();
options.create_if_missing = true;
options.write_buffer_size = 20480;
options.max_write_buffer_number = 2;
options.level0_file_num_compaction_trigger = 2;
options.level0_slowdown_writes_trigger = 9999;
options.level0_stop_writes_trigger = 9999;
options.target_file_size_base = 9102;
options.level_compaction_dynamic_level_bytes = true;
options.max_bytes_for_level_base = 40960;
options.max_bytes_for_level_multiplier = 4;
options.max_background_compactions = 2;
options.num_levels = 5;
options.max_compaction_bytes = 0;  // Force not expanding in compactions
BlockBasedTableOptions table_options;
table_options.block_size = 1024;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
DestroyAndReopen(options);
// Writes below are batched behind disable_auto_compactions so each Flush
// produces one controlled burst of compaction work.
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "true"},
}));
uint64_t int_prop;
std::string str_prop;
// Initial base level is the last level
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(4U, int_prop);
// Put about 28K to L0
for (int i = 0; i < 70; i++) {
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 380)));
}
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "false"},
}));
Flush();
dbfull()->TEST_WaitForCompact();
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(4U, int_prop);
// Insert extra about 28K to L0. After they are compacted to L4, base level
// should be changed to L3.
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "true"},
}));
for (int i = 0; i < 70; i++) {
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 380)));
}
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "false"},
}));
Flush();
dbfull()->TEST_WaitForCompact();
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(3U, int_prop);
ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop));
ASSERT_EQ("0", str_prop);
ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level2", &str_prop));
ASSERT_EQ("0", str_prop);
// Trigger parallel compaction, and the first one would change the base
// level.
// Hold compaction jobs to make sure
rocksdb::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():Start",
[&](void* arg) { env_->SleepForMicroseconds(100000); });
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "true"},
}));
// Write about 40K more
for (int i = 0; i < 100; i++) {
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 380)));
}
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "false"},
}));
Flush();
// Wait for 200 milliseconds before proceeding compactions to make sure two
// parallel ones are executed.
env_->SleepForMicroseconds(200000);
dbfull()->TEST_WaitForCompact();
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(3U, int_prop);
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
// Trigger a condition that the compaction changes base level and L0->Lbase
// happens at the same time.
// We try to make last levels' targets to be 40K, 160K, 640K, add triggers
// another compaction from 40K->160K.
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "true"},
}));
// Write about 650K more.
// Each file is about 11KB, with 9KB of data.
for (int i = 0; i < 1300; i++) {
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 380)));
}
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "false"},
}));
Flush();
dbfull()->TEST_WaitForCompact();
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(2U, int_prop);
// A manual compaction will trigger the base level to become L2
// Keep Writing data until base level changed 2->1. There will be L0->L2
// compaction going on at the same time.
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
// Dependencies force: compaction starts -> foreground writes -> compaction
// ends, and CompactRange finishes before the flush writes its L0 table.
rocksdb::SyncPoint::GetInstance()->LoadDependency({
{"CompactionJob::Run():Start", "DynamicLevelMaxBytesBase2:0"},
{"DynamicLevelMaxBytesBase2:1", "CompactionJob::Run():End"},
{"DynamicLevelMaxBytesBase2:compact_range_finish",
"FlushJob::WriteLevel0Table"},
});
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
rocksdb::port::Thread thread([this] {
TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_start");
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_finish");
});
TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:0");
for (int i = 0; i < 2; i++) {
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 380)));
}
TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:1");
Flush();
thread.join();
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(1U, int_prop);
}
// Test specific cases in dynamic max bytes
// Test specific cases in dynamic max bytes
// Verifies CompactRange behavior with dynamic levels: it must produce
// compactions into exactly the base level (3) and the last level (4),
// empty L0/L3 afterwards, and leave the base level unchanged.
TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
Random rnd(301);
int kMaxKey = 1000000;
Options options = CurrentOptions();
options.create_if_missing = true;
options.write_buffer_size = 2048;
options.max_write_buffer_number = 2;
options.level0_file_num_compaction_trigger = 2;
options.level0_slowdown_writes_trigger = 9999;
options.level0_stop_writes_trigger = 9999;
options.target_file_size_base = 2;
options.level_compaction_dynamic_level_bytes = true;
options.max_bytes_for_level_base = 10240;
options.max_bytes_for_level_multiplier = 4;
options.max_background_compactions = 1;
const int kNumLevels = 5;
options.num_levels = kNumLevels;
options.max_compaction_bytes = 1;  // Force not expanding in compactions
BlockBasedTableOptions table_options;
table_options.block_size = 1024;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
DestroyAndReopen(options);
// Compact against empty DB
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
uint64_t int_prop;
std::string str_prop;
// Initial base level is the last level
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(4U, int_prop);
// Put about 7K to L0
for (int i = 0; i < 140; i++) {
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 80)));
}
Flush();
dbfull()->TEST_WaitForCompact();
if (NumTableFilesAtLevel(0) == 0) {
// Make sure level 0 is not empty
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 80)));
Flush();
}
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(3U, int_prop);
ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop));
ASSERT_EQ("0", str_prop);
ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level2", &str_prop));
ASSERT_EQ("0", str_prop);
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
// Record which output levels CompactRange's picked compactions target.
std::set<int> output_levels;
rocksdb::SyncPoint::GetInstance()->SetCallBack(
"CompactionPicker::CompactRange:Return", [&](void* arg) {
Compaction* compaction = reinterpret_cast<Compaction*>(arg);
output_levels.insert(compaction->output_level());
});
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
ASSERT_EQ(output_levels.size(), 2);
ASSERT_TRUE(output_levels.find(3) != output_levels.end());
ASSERT_TRUE(output_levels.find(4) != output_levels.end());
ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level0", &str_prop));
ASSERT_EQ("0", str_prop);
ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level3", &str_prop));
ASSERT_EQ("0", str_prop);
// Base level is still level 3.
ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
ASSERT_EQ(3U, int_prop);
}
// Verifies that compacting ordered, non-overlapping writes with dynamic
// levels uses only trivial moves (the NonTrivial sync-point counter stays
// zero) and that every key/value survives the compactions intact.
TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) {
Options options = CurrentOptions();
options.create_if_missing = true;
options.write_buffer_size = 2048;
options.max_write_buffer_number = 2;
options.level0_file_num_compaction_trigger = 2;
options.level0_slowdown_writes_trigger = 2;
options.level0_stop_writes_trigger = 2;
options.target_file_size_base = 2048;
options.level_compaction_dynamic_level_bytes = true;
options.max_bytes_for_level_base = 10240;
options.max_bytes_for_level_multiplier = 4;
options.soft_rate_limit = 1.1;
options.max_background_compactions = 2;
options.num_levels = 5;
options.max_compaction_bytes = 100000000;
DestroyAndReopen(options);
// Counts compactions that actually rewrite data (non-trivial moves).
int non_trivial = 0;
rocksdb::SyncPoint::GetInstance()->SetCallBack(
"DBImpl::BackgroundCompaction:NonTrivial",
[&](void* arg) { non_trivial++; });
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
Random rnd(301);
const int total_keys = 3000;
const int random_part_size = 100;
// Each value is random bytes followed by the key index, so reads can be
// checked for integrity below.
for (int i = 0; i < total_keys; i++) {
std::string value = RandomString(&rnd, random_part_size);
PutFixed32(&value, static_cast<uint32_t>(i));
ASSERT_OK(Put(Key(i), value));
}
Flush();
dbfull()->TEST_WaitForCompact();
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
ASSERT_EQ(non_trivial, 0);
for (int i = 0; i < total_keys; i++) {
std::string value = Get(Key(i));
ASSERT_EQ(DecodeFixed32(value.c_str() + random_part_size),
static_cast<uint32_t>(i));
}
env_->SetBackgroundThreads(1, Env::LOW);
env_->SetBackgroundThreads(1, Env::HIGH);
}
// Migration test (currently disabled): writes with the classic level
// scheme, reopens with level_compaction_dynamic_level_bytes=true, then
// verifies reads stay correct while a change_level CompactRange runs in
// a background thread, and that new writes keep L1/L2 empty afterwards.
TEST_F(DBTestDynamicLevel, DISABLED_MigrateToDynamicLevelMaxBytesBase) {
Random rnd(301);
const int kMaxKey = 2000;
Options options;
options.create_if_missing = true;
options.write_buffer_size = 2048;
options.max_write_buffer_number = 8;
options.level0_file_num_compaction_trigger = 4;
options.level0_slowdown_writes_trigger = 4;
options.level0_stop_writes_trigger = 8;
options.target_file_size_base = 2048;
options.level_compaction_dynamic_level_bytes = false;
options.max_bytes_for_level_base = 10240;
options.max_bytes_for_level_multiplier = 4;
options.soft_rate_limit = 1.1;
options.num_levels = 8;
DestroyAndReopen(options);
// Checks the first num_keys of both key ranges; keys i/10 were deleted
// during the write loops and must stay absent.
auto verify_func = [&](int num_keys, bool if_sleep) {
for (int i = 0; i < num_keys; i++) {
ASSERT_NE("NOT_FOUND", Get(Key(kMaxKey + i)));
if (i < num_keys / 10) {
ASSERT_EQ("NOT_FOUND", Get(Key(i)));
} else {
ASSERT_NE("NOT_FOUND", Get(Key(i)));
}
if (if_sleep && i % 1000 == 0) {
// Without it, valgrind may choose not to give another
// thread a chance to run before finishing the function,
// causing the test to be extremely slow.
env_->SleepForMicroseconds(1);
}
}
};
int total_keys = 1000;
for (int i = 0; i < total_keys; i++) {
ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
ASSERT_OK(Delete(Key(i / 10)));
}
verify_func(total_keys, false);
dbfull()->TEST_WaitForCompact();
// Reopen with the dynamic-level scheme enabled to start the migration.
options.level_compaction_dynamic_level_bytes = true;
options.disable_auto_compactions = true;
Reopen(options);
verify_func(total_keys, false);
std::atomic_bool compaction_finished;
compaction_finished = false;
// Issue manual compaction in one thread and still verify DB state
// in main thread.
rocksdb::port::Thread t([&]() {
CompactRangeOptions compact_options;
compact_options.change_level = true;
compact_options.target_level = options.num_levels - 1;
dbfull()->CompactRange(compact_options, nullptr, nullptr);
compaction_finished.store(true);
});
do {
verify_func(total_keys, true);
} while (!compaction_finished.load());
t.join();
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "false"},
}));
int total_keys2 = 2000;
for (int i = total_keys; i < total_keys2; i++) {
ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
ASSERT_OK(Delete(Key(i / 10)));
}
verify_func(total_keys2, false);
dbfull()->TEST_WaitForCompact();
verify_func(total_keys2, false);
// Base level is not level 1
ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 0);
}
} // namespace rocksdb
#endif // !defined(ROCKSDB_LITE)
// Test entry point.  Under ROCKSDB_LITE the whole suite is compiled out
// (SyncPoint is unavailable there), so main trivially succeeds.
int main(int argc, char** argv) {
#if !defined(ROCKSDB_LITE)
rocksdb::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
#else
return 0;
#endif
}
| hkernbach/arangodb | 3rdParty/rocksdb/v5.6.X/db/db_dynamic_level_test.cc | C++ | apache-2.0 | 17,448 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
import java.nio.file.AccessDeniedException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.jar.Attributes;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import java.util.zip.ZipInputStream;
import org.apache.commons.collections.map.CaseInsensitiveMap;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A collection of file-processing util methods
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class FileUtil {
// Class-wide logger for warnings emitted by the delete/copy helpers.
private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class);
/* The error code is defined in winutils to indicate insufficient
* privilege to create symbolic links. This value need to keep in
* sync with the constant of the same name in:
* "src\winutils\common.h"
* */
public static final int SYMLINK_NO_PRIVILEGE = 2;
/**
* Buffer size for copy the content of compressed file to new file.
*/
private static final int BUFFER_SIZE = 8_192;
/**
* convert an array of FileStatus to an array of Path
*
* @param stats
* an array of FileStatus objects
* @return an array of paths corresponding to the input
*/
public static Path[] stat2Paths(FileStatus[] stats) {
    // Mirror the input's null-ness: null in, null out.
    if (stats == null) {
        return null;
    }
    final Path[] paths = new Path[stats.length];
    for (int idx = 0; idx < paths.length; idx++) {
        paths[idx] = stats[idx].getPath();
    }
    return paths;
}
/**
* convert an array of FileStatus to an array of Path.
* If stats if null, return path
* @param stats
* an array of FileStatus objects
* @param path
* default path to return in stats is null
* @return an array of paths corresponding to the input
*/
public static Path[] stat2Paths(FileStatus[] stats, Path path) {
    // Fall back to the supplied default path when no statuses were given.
    return (stats == null) ? new Path[] { path } : stat2Paths(stats);
}
/**
* Register all files recursively to be deleted on exit.
* @param file File/directory to be deleted
*/
public static void fullyDeleteOnExit(final File file) {
    file.deleteOnExit();
    if (!file.isDirectory()) {
        return;
    }
    // listFiles() returns null on I/O error or permission problems;
    // nothing more we can register in that case.
    final File[] children = file.listFiles();
    if (children == null) {
        return;
    }
    for (final File child : children) {
        fullyDeleteOnExit(child);
    }
}
/**
* Delete a directory and all its contents. If
* we return false, the directory may be partially-deleted.
* (1) If dir is symlink to a file, the symlink is deleted. The file pointed
* to by the symlink is not deleted.
* (2) If dir is symlink to a directory, symlink is deleted. The directory
* pointed to by symlink is not deleted.
* (3) If dir is a normal file, it is deleted.
* (4) If dir is a normal directory, then dir and all its contents recursively
* are deleted.
*/
public static boolean fullyDelete(final File dir) {
// Delegate without trying to grant permissions first.
return fullyDelete(dir, false);
}
/**
* Delete a directory and all its contents. If
* we return false, the directory may be partially-deleted.
* (1) If dir is symlink to a file, the symlink is deleted. The file pointed
* to by the symlink is not deleted.
* (2) If dir is symlink to a directory, symlink is deleted. The directory
* pointed to by symlink is not deleted.
* (3) If dir is a normal file, it is deleted.
* (4) If dir is a normal directory, then dir and all its contents recursively
* are deleted.
* @param dir the file or directory to be deleted
* @param tryGrantPermissions true if permissions should be modified to delete a file.
* @return true on success false on failure.
*/
public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) {
    if (tryGrantPermissions) {
        // try to chmod +rwx the parent folder of the 'dir':
        File parent = dir.getParentFile();
        // getParentFile() returns null for filesystem roots and for
        // single-name relative paths; skip the chmod in that case rather
        // than passing null into grantPermissions().
        if (parent != null) {
            grantPermissions(parent);
        }
    }
    if (deleteImpl(dir, false)) {
        // dir is (a) normal file, (b) symlink to a file, (c) empty directory or
        // (d) symlink to a directory
        return true;
    }
    // handle nonempty directory deletion
    if (!fullyDeleteContents(dir, tryGrantPermissions)) {
        return false;
    }
    // Contents gone; now remove the (empty) directory itself, logging
    // on failure this time.
    return deleteImpl(dir, true);
}
/**
* Returns the target of the given symlink. Returns the empty string if
* the given path does not refer to a symlink or there is an error
* accessing the symlink.
* @param f File representing the symbolic link.
* @return The target of the symbolic link, empty string on error or if not
* a symlink.
*/
public static String readLink(File f) {
/* NB: Use readSymbolicLink in java.nio.file.Path once available. Could
* use getCanonicalPath in File to get the target of the symlink but that
* does not indicate if the given path refers to a symlink.
*/
if (f == null) {
LOG.warn("Can not read a null symLink");
return "";
}
try {
// Shell out to the platform's readlink equivalent; a non-symlink or
// inaccessible path makes the command fail, caught below.
return Shell.execCommand(
Shell.getReadlinkCommand(f.toString())).trim();
} catch (IOException x) {
// Per contract: empty string on any error.
return "";
}
}
/*
* Pure-Java implementation of "chmod +rwx f".
*/
private static void grantPermissions(final File f) {
// Uses FileUtil's own wrappers (not File's methods directly) so the
// platform-specific behavior defined elsewhere in this class applies.
FileUtil.setExecutable(f, true);
FileUtil.setReadable(f, true);
FileUtil.setWritable(f, true);
}
// Single-item delete. Returns true when the path is gone afterwards
// (deleted now, or it never existed); optionally logs on failure.
private static boolean deleteImpl(final File f, final boolean doLog) {
    if (f == null) {
        LOG.warn("null file argument.");
        return false;
    }
    if (f.delete()) {
        return true;
    }
    // delete() failed: treat "already absent" as success.
    final boolean stillExists = f.exists();
    if (doLog && stillExists) {
        LOG.warn("Failed to delete file or dir ["
            + f.getAbsolutePath() + "]: it still exists.");
    }
    return !stillExists;
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
*/
public static boolean fullyDeleteContents(final File dir) {
// Delegate without trying to grant permissions first.
return fullyDeleteContents(dir, false);
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
* @param tryGrantPermissions if 'true', try grant +rwx permissions to this
* and all the underlying directories before trying to delete their contents.
*/
public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) {
    if (tryGrantPermissions) {
        // to be able to list the dir and delete files from it
        // we must grant the dir rwx permissions:
        grantPermissions(dir);
    }
    boolean allDeleted = true;
    final File[] entries = dir.listFiles();
    if (entries == null) {
        return allDeleted;
    }
    for (final File entry : entries) {
        if (entry.isFile()) {
            // Normal file or symlink to another file.
            if (!deleteImpl(entry, true)) {
                allDeleted = false;  // keep going; delete what we can
            }
            continue;
        }
        // Directory or symlink to a directory: a plain delete succeeds
        // for symlinks and empty directories.
        if (deleteImpl(entry, false)) {
            continue;
        }
        // Non-empty real directory: recurse into it.
        if (!fullyDelete(entry, tryGrantPermissions)) {
            allDeleted = false;
            // continue deletion of other files/dirs under dir
        }
    }
    return allDeleted;
}
/**
* Recursively delete a directory.
*
* @param fs {@link FileSystem} on which the path is present
* @param dir directory to recursively delete
* @throws IOException
* @deprecated Use {@link FileSystem#delete(Path, boolean)}
*/
@Deprecated
public static void fullyDelete(FileSystem fs, Path dir)
throws IOException {
// Thin deprecated wrapper around the recursive FileSystem delete.
fs.delete(dir, true);
}
//
// If the destination is a subdirectory of the source, then
// generate exception
//
// Reject copies where dst equals src or lies inside src (same FS only).
private static void checkDependencies(FileSystem srcFS,
                                      Path src,
                                      FileSystem dstFS,
                                      Path dst)
                                      throws IOException {
    if (srcFS != dstFS) {
        return;  // paths on different filesystems can never nest
    }
    // Qualify both paths and append a separator so prefix matching works
    // on whole path components.
    final String qualifiedSrc = srcFS.makeQualified(src).toString() + Path.SEPARATOR;
    final String qualifiedDst = dstFS.makeQualified(dst).toString() + Path.SEPARATOR;
    if (!qualifiedDst.startsWith(qualifiedSrc)) {
        return;
    }
    if (qualifiedSrc.length() == qualifiedDst.length()) {
        throw new IOException("Cannot copy " + src + " to itself.");
    }
    throw new IOException("Cannot copy " + src + " to its subdirectory " +
        dst);
}
/** Copy files between FileSystems. */
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, Path src,
FileSystem dstFS, Path dst,
boolean deleteSource,
Configuration conf) throws IOException {
// Overwrite defaults to true for this overload.
return copy(srcFS, src, dstFS, dst, deleteSource, true, conf);
}
// Copy several sources into a destination directory. Per-source
// IOExceptions are collected and rethrown together at the end; a false
// return means some copy reported failure without throwing.
public static boolean copy(FileSystem srcFS, Path[] srcs,
                           FileSystem dstFS, Path dst,
                           boolean deleteSource,
                           boolean overwrite, Configuration conf)
                           throws IOException {
    // Single source: plain single-path copy, dst need not be a directory.
    if (srcs.length == 1) {
        return copy(srcFS, srcs[0], dstFS, dst, deleteSource, overwrite, conf);
    }
    // Multiple sources require the destination to be an existing directory.
    try {
        FileStatus sdst = dstFS.getFileStatus(dst);
        if (!sdst.isDirectory()) {
            throw new IOException("copying multiple files, but last argument `" +
                dst + "' is not a directory");
        }
    } catch (FileNotFoundException e) {
        throw new IOException(
            "`" + dst + "': specified destination directory " +
            "does not exist", e);
    }
    boolean allSucceeded = true;
    boolean sawException = false;
    final StringBuilder errorMessages = new StringBuilder();
    for (Path src : srcs) {
        try {
            if (!copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf)) {
                allSucceeded = false;
            }
        } catch (IOException e) {
            sawException = true;
            errorMessages.append(e.getMessage()).append("\n");
        }
    }
    if (sawException) {
        throw new IOException(errorMessages.toString());
    }
    return allSucceeded;
}
/** Copy files between FileSystems. */
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, Path src,
FileSystem dstFS, Path dst,
boolean deleteSource,
boolean overwrite,
Configuration conf) throws IOException {
// Resolve the source's status once, then delegate to the status-based
// overload (throws FileNotFoundException if src does not exist).
FileStatus fileStatus = srcFS.getFileStatus(src);
return copy(srcFS, fileStatus, dstFS, dst, deleteSource, overwrite, conf);
}
/** Copy files between FileSystems. */
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, FileStatus srcStatus,
FileSystem dstFS, Path dst,
boolean deleteSource,
boolean overwrite,
Configuration conf) throws IOException {
Path src = srcStatus.getPath();
// May rewrite dst to dst/srcName when dst is an existing directory.
dst = checkDest(src.getName(), dstFS, dst, overwrite);
if (srcStatus.isDirectory()) {
// Refuse to copy a directory into itself or a subdirectory of itself.
checkDependencies(srcFS, src, dstFS, dst);
if (!dstFS.mkdirs(dst)) {
return false;
}
FileStatus contents[] = srcFS.listStatus(src);
for (int i = 0; i < contents.length; i++) {
// NOTE(review): the recursive copy()'s boolean result is ignored
// here, so a failed mkdirs deeper in the tree does not propagate
// to the caller — confirm whether that is intended.
copy(srcFS, contents[i], dstFS,
new Path(dst, contents[i].getPath().getName()),
deleteSource, overwrite, conf);
}
} else {
InputStream in=null;
OutputStream out = null;
try {
in = srcFS.open(src);
out = dstFS.create(dst, overwrite);
// copyBytes with close=true closes both streams on success.
IOUtils.copyBytes(in, out, conf, true);
} catch (IOException e) {
// On failure, close both explicitly before rethrowing.
IOUtils.closeStream(out);
IOUtils.closeStream(in);
throw e;
}
}
if (deleteSource) {
return srcFS.delete(src, true);
} else {
return true;
}
}
/**
 * Copy local files to a FileSystem, recursing into directories.
 *
 * @param src the local file or directory to copy
 * @param dstFS the destination FileSystem
 * @param dst the path to copy to
 * @param deleteSource whether to delete the source after a successful copy
 * @param conf configuration supplying the copy buffer size
 * @return true if the copy (and optional source delete) succeeded
 * @throws IOException if the source is unreadable, missing, or the copy fails
 */
public static boolean copy(File src,
                           FileSystem dstFS, Path dst,
                           boolean deleteSource,
                           Configuration conf) throws IOException {
  dst = checkDest(src.getName(), dstFS, dst, false);
  if (src.isDirectory()) {
    if (!dstFS.mkdirs(dst)) {
      return false;
    }
    for (File content : listFiles(src)) {
      copy(content, dstFS, new Path(dst, content.getName()),
          deleteSource, conf);
    }
  } else if (src.isFile()) {
    InputStream in = null;
    OutputStream out = null;
    try {
      in = new FileInputStream(src);
      out = dstFS.create(dst);
      IOUtils.copyBytes(in, out, conf);
    } catch (IOException e) {
      // Close both explicitly before propagating the failure.
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
      throw e;
    }
  } else if (!src.canRead()) {
    // Neither a directory nor a regular file: diagnose why.
    throw new IOException(src.toString() +
                          ": Permission denied");
  } else {
    throw new IOException(src.toString() +
                          ": No such file or directory");
  }
  return deleteSource ? FileUtil.fullyDelete(src) : true;
}
/**
 * Copy FileSystem files to local files.
 *
 * @param srcFS the source FileSystem
 * @param src the path to copy from
 * @param dst the local destination file
 * @param deleteSource whether to delete the source after a successful copy
 * @param conf configuration supplying the copy buffer size
 * @return true if the copy (and optional source delete) succeeded
 * @throws IOException if the source does not exist or the copy fails
 */
public static boolean copy(FileSystem srcFS, Path src,
                           File dst, boolean deleteSource,
                           Configuration conf) throws IOException {
  // Resolve the status once and delegate to the private recursive overload.
  return copy(srcFS, srcFS.getFileStatus(src), dst, deleteSource, conf);
}
/**
 * Copy FileSystem files to local files, recursing into directories.
 *
 * @param srcFS the source FileSystem
 * @param srcStatus status of the file or directory to copy
 * @param dst the local destination file
 * @param deleteSource whether to delete the source after a successful copy
 * @param conf configuration supplying the copy buffer size
 * @return true if the copy (and optional source delete) succeeded
 * @throws IOException if the copy fails
 */
private static boolean copy(FileSystem srcFS, FileStatus srcStatus,
                            File dst, boolean deleteSource,
                            Configuration conf) throws IOException {
  Path src = srcStatus.getPath();
  if (srcStatus.isDirectory()) {
    if (!dst.mkdirs()) {
      return false;
    }
    for (FileStatus content : srcFS.listStatus(src)) {
      copy(srcFS, content,
          new File(dst, content.getPath().getName()),
          deleteSource, conf);
    }
  } else {
    InputStream in = srcFS.open(src);
    OutputStream out;
    try {
      out = new FileOutputStream(dst);
    } catch (IOException e) {
      // Fix: previously the opened source stream leaked when the local
      // destination file could not be created.
      IOUtils.closeStream(in);
      throw e;
    }
    // copyBytes closes both streams when it finishes or fails.
    IOUtils.copyBytes(in, out, conf);
  }
  return deleteSource ? srcFS.delete(src, true) : true;
}
/**
 * Validate the copy destination and resolve the final target path.
 * If {@code dst} is an existing directory, the copy goes under it using
 * {@code srcName}; if it is an existing file, {@code overwrite} must be set.
 *
 * @param srcName name of the source entry, or null when dst must be a file
 * @param dstFS the destination FileSystem
 * @param dst the requested destination path
 * @param overwrite whether an existing destination file may be replaced
 * @return the resolved destination path
 * @throws IOException if the destination is invalid or already exists
 */
private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
    boolean overwrite) throws IOException {
  FileStatus sdst = null;
  try {
    sdst = dstFS.getFileStatus(dst);
  } catch (FileNotFoundException ignored) {
    // Destination does not exist yet: nothing more to validate.
  }
  if (sdst == null) {
    return dst;
  }
  if (sdst.isDirectory()) {
    if (srcName == null) {
      throw new PathIsDirectoryException(dst.toString());
    }
    // Re-check the resolved child path inside the directory.
    return checkDest(null, dstFS, new Path(dst, srcName), overwrite);
  }
  if (!overwrite) {
    throw new PathExistsException(dst.toString(),
        "Target " + dst + " already exists");
  }
  return dst;
}
/**
 * Convert a os-native filename to a path that works for the shell.
 * @param filename The filename to convert
 * @return The unix pathname
 * @throws IOException on windows, there can be problems with the subprocess
 */
public static String makeShellPath(String filename) throws IOException {
  // On Unix platforms the native path is already shell-usable,
  // so it is returned unchanged.
  return filename;
}
/**
 * Convert a os-native filename to a path that works for the shell.
 * Equivalent to {@code makeShellPath(file, false)} (no canonicalization).
 * @param file The filename to convert
 * @return The unix pathname
 * @throws IOException on windows, there can be problems with the subprocess
 */
public static String makeShellPath(File file) throws IOException {
  return makeShellPath(file, false);
}
/**
 * Convert a os-native filename to a path that works for the shell
 * and avoids script injection attacks.
 * @param file The filename to convert
 * @return The unix pathname
 * @throws IOException on windows, there can be problems with the subprocess
 */
public static String makeSecureShellPath(File file) throws IOException {
  if (Shell.WINDOWS) {
    // Currently it is never called, but it might be helpful in the future.
    throw new UnsupportedOperationException("Not implemented for Windows");
  }
  // Escape embedded single quotes (' -> '\'') so the path is safe
  // inside a single-quoted shell argument.
  return makeShellPath(file, false).replace("'", "'\\''");
}
/**
 * Convert a os-native filename to a path that works for the shell.
 * @param file The filename to convert
 * @param makeCanonicalPath
 *          Whether to make canonical path for the file passed
 * @return The unix pathname
 * @throws IOException on windows, there can be problems with the subprocess
 */
public static String makeShellPath(File file, boolean makeCanonicalPath)
    throws IOException {
  String name =
      makeCanonicalPath ? file.getCanonicalPath() : file.toString();
  return makeShellPath(name);
}
/**
 * Takes an input dir and returns the du on that local directory. Very basic
 * implementation.
 *
 * @param dir
 *          The input dir to get the disk space of this local dir
 * @return The total disk space of the input local directory
 */
public static long getDU(File dir) {
  if (!dir.exists()) {
    return 0;
  }
  if (!dir.isDirectory()) {
    return dir.length();
  }
  long size = 0;
  File[] allFiles = dir.listFiles();
  // listFiles() returns null on I/O error; treat that as an empty dir.
  if (allFiles != null) {
    for (File child : allFiles) {
      boolean isSymLink;
      try {
        isSymLink = org.apache.commons.io.FileUtils.isSymlink(child);
      } catch (IOException ioe) {
        // If we cannot tell, err on the side of skipping the entry so a
        // symlink cycle can never cause unbounded recursion.
        isSymLink = true;
      }
      if (!isSymLink) {
        size += getDU(child);
      }
    }
  }
  return size;
}
/**
 * Given a stream input it will unzip the it in the unzip directory.
 * passed as the second parameter
 * @param inputStream The zip file as input
 * @param toDir The unzip directory where to unzip the zip file.
 * @throws IOException an exception occurred
 */
public static void unZip(InputStream inputStream, File toDir)
    throws IOException {
  try (ZipInputStream zip = new ZipInputStream(inputStream)) {
    String targetDirPath = toDir.getCanonicalPath() + File.separator;
    int numOfFailedLastModifiedSet = 0;
    ZipEntry entry;
    while ((entry = zip.getNextEntry()) != null) {
      if (entry.isDirectory()) {
        // Directories are created lazily when their files are extracted.
        continue;
      }
      File file = new File(toDir, entry.getName());
      // Zip-slip guard: the canonical target must stay inside toDir.
      if (!file.getCanonicalPath().startsWith(targetDirPath)) {
        throw new IOException("expanding " + entry.getName()
            + " would create file outside of " + toDir);
      }
      File parent = file.getParentFile();
      if (!parent.mkdirs() && !parent.isDirectory()) {
        throw new IOException("Mkdirs failed to create " +
            parent.getAbsolutePath());
      }
      try (OutputStream out = new FileOutputStream(file)) {
        IOUtils.copyBytes(zip, out, BUFFER_SIZE);
      }
      // Preserve the archive's timestamp; count (don't fail) when we can't.
      if (!file.setLastModified(entry.getTime())) {
        numOfFailedLastModifiedSet++;
      }
    }
    if (numOfFailedLastModifiedSet > 0) {
      LOG.warn("Could not set last modfied time for {} file(s)",
          numOfFailedLastModifiedSet);
    }
  }
}
/**
 * Given a File input it will unzip it in the unzip directory.
 * passed as the second parameter
 * @param inFile The zip file as input
 * @param unzipDir The unzip directory where to unzip the zip file.
 * @throws IOException An I/O exception has occurred
 */
public static void unZip(File inFile, File unzipDir) throws IOException {
  // try-with-resources replaces the previous nested try/finally close
  // chains, matching the stream-based unZip above; close errors are no
  // longer able to mask an extraction failure silently.
  try (ZipFile zipFile = new ZipFile(inFile)) {
    Enumeration<? extends ZipEntry> entries = zipFile.entries();
    String targetDirPath = unzipDir.getCanonicalPath() + File.separator;
    while (entries.hasMoreElements()) {
      ZipEntry entry = entries.nextElement();
      if (entry.isDirectory()) {
        // Directories are created lazily when their files are extracted.
        continue;
      }
      File file = new File(unzipDir, entry.getName());
      // Zip-slip guard: the canonical target must stay inside unzipDir.
      if (!file.getCanonicalPath().startsWith(targetDirPath)) {
        throw new IOException("expanding " + entry.getName()
            + " would create file outside of " + unzipDir);
      }
      if (!file.getParentFile().mkdirs() &&
          !file.getParentFile().isDirectory()) {
        throw new IOException("Mkdirs failed to create " +
            file.getParentFile().toString());
      }
      try (InputStream in = zipFile.getInputStream(entry);
           OutputStream out = new FileOutputStream(file)) {
        byte[] buffer = new byte[8192];
        int n;
        while ((n = in.read(buffer)) != -1) {
          out.write(buffer, 0, n);
        }
      }
    }
  }
}
/**
 * Run a command and send the contents of an input stream to it.
 * The command's stdout and stderr are drained on two pool threads so the
 * child process can never block on a full pipe while we are still writing
 * its stdin.
 * @param inputStream Input stream to forward to the shell command
 * @param command shell command to run
 * @throws IOException read or write failed, or the command exited non-zero
 * @throws InterruptedException command interrupted
 * @throws ExecutionException task submit failed
 */
private static void runCommandOnStream(
    InputStream inputStream, String command)
    throws IOException, InterruptedException, ExecutionException {
  ExecutorService executor = null;
  ProcessBuilder builder = new ProcessBuilder();
  // Delegate to the platform shell so pipelines/quoting in `command` work.
  builder.command(
      Shell.WINDOWS ? "cmd" : "bash",
      Shell.WINDOWS ? "/c" : "-c",
      command);
  Process process = builder.start();
  int exitCode;
  try {
    // Consume stdout and stderr, to avoid blocking the command
    executor = Executors.newFixedThreadPool(2);
    Future output = executor.submit(() -> {
      try {
        // Read until the output stream receives an EOF and closed.
        if (LOG.isDebugEnabled()) {
          // Log directly to avoid out of memory errors
          try (BufferedReader reader =
                   new BufferedReader(
                       new InputStreamReader(process.getInputStream(),
                           Charset.forName("UTF-8")))) {
            String line;
            while((line = reader.readLine()) != null) {
              LOG.debug(line);
            }
          }
        } else {
          // Not debugging: discard the child's stdout entirely.
          org.apache.commons.io.IOUtils.copy(
              process.getInputStream(),
              new IOUtils.NullOutputStream());
        }
      } catch (IOException e) {
        LOG.debug(e.getMessage());
      }
    });
    Future error = executor.submit(() -> {
      try {
        // Read until the error stream receives an EOF and closed.
        if (LOG.isDebugEnabled()) {
          // Log directly to avoid out of memory errors
          try (BufferedReader reader =
                   new BufferedReader(
                       new InputStreamReader(process.getErrorStream(),
                           Charset.forName("UTF-8")))) {
            String line;
            while((line = reader.readLine()) != null) {
              LOG.debug(line);
            }
          }
        } else {
          // Not debugging: discard the child's stderr entirely.
          org.apache.commons.io.IOUtils.copy(
              process.getErrorStream(),
              new IOUtils.NullOutputStream());
        }
      } catch (IOException e) {
        LOG.debug(e.getMessage());
      }
    });
    // Pass the input stream to the command to process
    try {
      org.apache.commons.io.IOUtils.copy(
          inputStream, process.getOutputStream());
    } finally {
      // Closing stdin signals EOF to the child so it can terminate.
      process.getOutputStream().close();
    }
    // Wait for both stdout and stderr futures to finish
    error.get();
    output.get();
  } finally {
    // Clean up the threads
    if (executor != null) {
      executor.shutdown();
    }
    // Wait to avoid leaking the child process
    exitCode = process.waitFor();
  }
  if (exitCode != 0) {
    throw new IOException(
        String.format(
            "Error executing command. %s " +
                "Process exited with exit code %d.",
            command, exitCode));
  }
}
/**
 * Given a Tar File as input it will untar the file in a the untar directory
 * passed as the second parameter
 *
 * This utility will untar ".tar" files and ".tar.gz","tgz" files.
 *
 * @param inputStream The tar file as input.
 * @param untarDir The untar directory where to untar the tar file.
 * @param gzipped The input stream is gzipped
 *                TODO Use magic number and PusbackInputStream to identify
 * @throws IOException an exception occurred
 * @throws InterruptedException command interrupted
 * @throws ExecutionException task submit failed
 */
public static void unTar(InputStream inputStream, File untarDir,
    boolean gzipped)
    throws IOException, InterruptedException, ExecutionException {
  // mkdirs returning false is fine when the directory already exists.
  if (!untarDir.mkdirs() && !untarDir.isDirectory()) {
    throw new IOException("Mkdirs failed to create " + untarDir);
  }
  if (Shell.WINDOWS) {
    // Tar is not native to Windows. Use simple Java based implementation for
    // tests and simple tar archives
    unTarUsingJava(inputStream, untarDir, gzipped);
  } else {
    // spawn tar utility to untar archive for full fledged unix behavior such
    // as resolving symlinks in tar archives
    unTarUsingTar(inputStream, untarDir, gzipped);
  }
}
/**
 * Given a Tar File as input it will untar the file in a the untar directory
 * passed as the second parameter
 *
 * This utility will untar ".tar" files and ".tar.gz","tgz" files.
 *
 * @param inFile The tar file as input.
 * @param untarDir The untar directory where to untar the tar file.
 * @throws IOException an exception occurred
 */
public static void unTar(File inFile, File untarDir) throws IOException {
  // mkdirs returning false is fine when the directory already exists.
  if (!untarDir.mkdirs() && !untarDir.isDirectory()) {
    throw new IOException("Mkdirs failed to create " + untarDir);
  }
  // Filename-based detection covers both ".tar.gz" and ".tgz".
  boolean gzipped = inFile.toString().endsWith("gz");
  if (Shell.WINDOWS) {
    // Tar is not native to Windows. Use simple Java based implementation for
    // tests and simple tar archives
    unTarUsingJava(inFile, untarDir, gzipped);
  } else {
    // spawn tar utility to untar archive for full fledged unix behavior such
    // as resolving symlinks in tar archives
    unTarUsingTar(inFile, untarDir, gzipped);
  }
}
/**
 * Untar a stream by piping it through the system tar utility.
 * When gzipped, the stream is first decompressed with gzip and the
 * extraction runs in a subshell rooted at the target directory.
 *
 * @param inputStream tar (optionally gzip-compressed) data
 * @param untarDir directory to extract into
 * @param gzipped whether the stream is gzip-compressed
 * @throws IOException the command failed
 * @throws InterruptedException command interrupted
 * @throws ExecutionException task submit failed
 */
private static void unTarUsingTar(InputStream inputStream, File untarDir,
    boolean gzipped)
    throws IOException, InterruptedException, ExecutionException {
  // Shell-quoted cd && tar reading the archive from stdin.
  String extract = "cd '" + FileUtil.makeSecureShellPath(untarDir)
      + "' && " + "tar -x ";
  String command = gzipped ? "gzip -dc | (" + extract + ")" : extract;
  runCommandOnStream(inputStream, command);
}
/**
 * Untar a file by invoking the system tar utility via bash.
 * When gzipped, the archive is decompressed with gzip and piped into tar
 * running in a subshell rooted at the target directory.
 *
 * @param inFile the (optionally gzip-compressed) tar file
 * @param untarDir directory to extract into
 * @param gzipped whether the archive is gzip-compressed
 * @throws IOException the tar process exited with a non-zero code
 */
private static void unTarUsingTar(File inFile, File untarDir,
    boolean gzipped) throws IOException {
  // StringBuilder replaces StringBuffer: no synchronization is needed for
  // a method-local buffer, and it matches the stream-based overload above.
  StringBuilder untarCommand = new StringBuilder();
  if (gzipped) {
    untarCommand.append(" gzip -dc '");
    untarCommand.append(FileUtil.makeSecureShellPath(inFile));
    untarCommand.append("' | (");
  }
  untarCommand.append("cd '");
  untarCommand.append(FileUtil.makeSecureShellPath(untarDir));
  untarCommand.append("' && ");
  untarCommand.append("tar -xf ");
  if (gzipped) {
    // Read the archive from stdin (the gzip pipe) inside the subshell.
    untarCommand.append(" -)");
  } else {
    untarCommand.append(FileUtil.makeSecureShellPath(inFile));
  }
  String[] shellCmd = { "bash", "-c", untarCommand.toString() };
  ShellCommandExecutor shexec = new ShellCommandExecutor(shellCmd);
  shexec.execute();
  int exitcode = shexec.getExitCode();
  if (exitcode != 0) {
    throw new IOException("Error untarring file " + inFile +
        ". Tar process exited with exit code " + exitcode);
  }
}
/**
 * Untar a file using the pure-Java commons-compress implementation.
 * Used on platforms without a native tar utility; does not resolve
 * symlink targets the way system tar does.
 *
 * @param inFile the (optionally gzip-compressed) tar file
 * @param untarDir directory to extract into
 * @param gzipped whether the archive is gzip-compressed
 * @throws IOException extraction failed
 */
static void unTarUsingJava(File inFile, File untarDir,
    boolean gzipped) throws IOException {
  InputStream inputStream = null;
  TarArchiveInputStream tis = null;
  try {
    InputStream fileIn = new FileInputStream(inFile);
    inputStream = new BufferedInputStream(
        gzipped ? new GZIPInputStream(fileIn) : fileIn);
    tis = new TarArchiveInputStream(inputStream);
    TarArchiveEntry entry;
    while ((entry = tis.getNextTarEntry()) != null) {
      unpackEntries(tis, entry, untarDir);
    }
  } finally {
    // Close both streams, logging (not throwing) any close failures.
    IOUtils.cleanupWithLogger(LOG, tis, inputStream);
  }
}
/**
 * Untar a stream using the pure-Java commons-compress implementation.
 *
 * @param inputStream tar (optionally gzip-compressed) data
 * @param untarDir directory to extract into
 * @param gzipped whether the stream is gzip-compressed
 * @throws IOException extraction failed
 */
private static void unTarUsingJava(InputStream inputStream, File untarDir,
    boolean gzipped) throws IOException {
  TarArchiveInputStream tis = null;
  try {
    inputStream = new BufferedInputStream(
        gzipped ? new GZIPInputStream(inputStream) : inputStream);
    tis = new TarArchiveInputStream(inputStream);
    TarArchiveEntry entry;
    while ((entry = tis.getNextTarEntry()) != null) {
      unpackEntries(tis, entry, untarDir);
    }
  } finally {
    // Close both streams, logging (not throwing) any close failures.
    IOUtils.cleanupWithLogger(LOG, tis, inputStream);
  }
}
/**
 * Unpack a single tar entry (recursing into directory entries) under
 * outputDir. Handles, in order: path-traversal validation, directories,
 * symbolic links, parent creation, hard links, and regular file data.
 *
 * @param tis the archive stream positioned at this entry's data
 * @param entry the entry to unpack
 * @param outputDir directory the entry is extracted under
 * @throws IOException on traversal attempts or extraction failure
 */
private static void unpackEntries(TarArchiveInputStream tis,
    TarArchiveEntry entry, File outputDir) throws IOException {
  // Tar-slip guard: the canonical target must stay inside outputDir.
  String targetDirPath = outputDir.getCanonicalPath() + File.separator;
  File outputFile = new File(outputDir, entry.getName());
  if (!outputFile.getCanonicalPath().startsWith(targetDirPath)) {
    throw new IOException("expanding " + entry.getName()
        + " would create entry outside of " + outputDir);
  }
  if (entry.isDirectory()) {
    File subDir = new File(outputDir, entry.getName());
    if (!subDir.mkdirs() && !subDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create tar internal dir "
          + outputDir);
    }
    // Recurse into the entries recorded under this directory entry.
    for (TarArchiveEntry e : entry.getDirectoryEntries()) {
      unpackEntries(tis, e, subDir);
    }
    return;
  }
  if (entry.isSymbolicLink()) {
    // Create symbolic link relative to tar parent dir
    Files.createSymbolicLink(FileSystems.getDefault()
        .getPath(outputDir.getPath(), entry.getName()),
        FileSystems.getDefault().getPath(entry.getLinkName()));
    return;
  }
  if (!outputFile.getParentFile().exists()) {
    if (!outputFile.getParentFile().mkdirs()) {
      throw new IOException("Mkdirs failed to create tar internal dir "
          + outputDir);
    }
  }
  if (entry.isLink()) {
    // Hard link: the link target is another path inside the archive.
    File src = new File(outputDir, entry.getLinkName());
    HardLink.createHardLink(src, outputFile);
    return;
  }
  // Regular file: stream the entry's bytes out in 2 KiB chunks.
  int count;
  byte data[] = new byte[2048];
  try (BufferedOutputStream outputStream = new BufferedOutputStream(
      new FileOutputStream(outputFile));) {
    while ((count = tis.read(data)) != -1) {
      outputStream.write(data, 0, count);
    }
    outputStream.flush();
  }
}
/**
 * Class for creating hardlinks.
 * Supports Unix, WindXP.
 * @deprecated Use {@link org.apache.hadoop.fs.HardLink}
 */
@Deprecated
public static class HardLink extends org.apache.hadoop.fs.HardLink {
  // This is a stub to assist with coordinated change between
  // COMMON and HDFS projects. It will be removed after the
  // corresponding change is committed to HDFS.
  // Intentionally empty: all behavior comes from the superclass.
}
/**
 * Create a soft link between a src and destination
 * only on a local disk. HDFS does not support this.
 * On Windows, when symlink creation fails due to security
 * setting, we will log a warning. The return code in this
 * case is 2.
 *
 * @param target the target for symlink
 * @param linkname the symlink
 * @return 0 on success
 * @throws IOException if executing the symlink command fails
 */
public static int symLink(String target, String linkname) throws IOException {
  if (target == null || linkname == null) {
    LOG.warn("Can not create a symLink with a target = " + target
        + " and link =" + linkname);
    return 1;
  }
  // Run the input paths through Java's File so that they are converted to the
  // native OS form
  File targetFile = new File(
      Path.getPathWithoutSchemeAndAuthority(new Path(target)).toString());
  File linkFile = new File(
      Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString());
  String[] cmd = Shell.getSymlinkCommand(
      targetFile.toString(),
      linkFile.toString());
  ShellCommandExecutor shExec;
  try {
    boolean relativeWindowsLink = Shell.WINDOWS
        && linkFile.getParentFile() != null
        && !new Path(target).isAbsolute();
    if (relativeWindowsLink) {
      // Relative links on Windows must be resolvable at the time of
      // creation. To ensure this we run the shell command in the directory
      // of the link.
      shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile());
    } else {
      shExec = new ShellCommandExecutor(cmd);
    }
    shExec.execute();
  } catch (Shell.ExitCodeException ec) {
    int returnVal = ec.getExitCode();
    if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) {
      LOG.warn("Fail to create symbolic links on Windows. "
          + "The default security settings in Windows disallow non-elevated "
          + "administrators and all non-administrators from creating symbolic links. "
          + "This behavior can be changed in the Local Security Policy management console");
    } else if (returnVal != 0) {
      LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed "
          + returnVal + " with: " + ec.getMessage());
    }
    return returnVal;
  } catch (IOException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Error while create symlink " + linkname + " to " + target
          + "." + " Exception: " + StringUtils.stringifyException(e));
    }
    throw e;
  }
  return shExec.getExitCode();
}
/**
 * Change the permissions on a filename.
 * Non-recursive convenience overload of
 * {@code chmod(String, String, boolean)}.
 * @param filename the name of the file to change
 * @param perm the permission string
 * @return the exit code from the command
 * @throws IOException if executing the command fails
 * @throws InterruptedException if the command is interrupted
 */
public static int chmod(String filename, String perm
                        ) throws IOException, InterruptedException {
  return chmod(filename, perm, false);
}
/**
 * Change the permissions on a file / directory, recursively, if
 * needed.
 * @param filename name of the file whose permissions are to change
 * @param perm permission string
 * @param recursive true, if permissions should be changed recursively
 * @return the exit code from the command.
 * @throws IOException if executing the command fails
 */
public static int chmod(String filename, String perm, boolean recursive)
    throws IOException {
  String[] cmd = Shell.getSetPermissionCommand(perm, recursive);
  // Append the target path as the final command argument.
  String[] args = new String[cmd.length + 1];
  System.arraycopy(cmd, 0, args, 0, cmd.length);
  args[cmd.length] = new File(filename).getPath();
  ShellCommandExecutor shExec = new ShellCommandExecutor(args);
  try {
    shExec.execute();
  } catch (IOException e) {
    // Swallow and report via the exit code; log the details when debugging.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Error while changing permission : " + filename
          + " Exception: " + StringUtils.stringifyException(e));
    }
  }
  return shExec.getExitCode();
}
/**
 * Set the ownership on a file / directory. User name and group name
 * cannot both be null.
 * @param file the file to change
 * @param username the new user owner name
 * @param groupname the new group owner name
 * @throws IOException if both names are null or the command fails
 */
public static void setOwner(File file, String username,
    String groupname) throws IOException {
  if (username == null && groupname == null) {
    throw new IOException("username == null && groupname == null");
  }
  // Build "user", ":group", or "user:group" as chown expects.
  StringBuilder arg = new StringBuilder();
  if (username != null) {
    arg.append(username);
  }
  if (groupname != null) {
    arg.append(':').append(groupname);
  }
  String[] cmd = Shell.getSetOwnerCommand(arg.toString());
  execCommand(file, cmd);
}
/**
 * Platform independent implementation for {@link File#setReadable(boolean)}
 * File#setReadable does not work as expected on Windows.
 * @param f input file
 * @param readable whether the owner may read the file
 * @return true on success, false otherwise
 */
public static boolean setReadable(File f, boolean readable) {
  if (!Shell.WINDOWS) {
    return f.setReadable(readable);
  }
  // On Windows, fall back to chmod semantics on the owner read bit.
  try {
    FileUtil.chmod(f.getCanonicalPath(), readable ? "u+r" : "u-r", false);
    return true;
  } catch (IOException ex) {
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#setWritable(boolean)}
 * File#setWritable does not work as expected on Windows.
 * @param f input file
 * @param writable whether the owner may write the file
 * @return true on success, false otherwise
 */
public static boolean setWritable(File f, boolean writable) {
  if (!Shell.WINDOWS) {
    return f.setWritable(writable);
  }
  // On Windows, fall back to chmod semantics on the owner write bit.
  try {
    FileUtil.chmod(f.getCanonicalPath(), writable ? "u+w" : "u-w", false);
    return true;
  } catch (IOException ex) {
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#setExecutable(boolean)}
 * File#setExecutable does not work as expected on Windows.
 * Note: revoking execute permission on folders does not have the same
 * behavior on Windows as on Unix platforms. Creating, deleting or renaming
 * a file within that folder will still succeed on Windows.
 * @param f input file
 * @param executable whether the owner may execute the file
 * @return true on success, false otherwise
 */
public static boolean setExecutable(File f, boolean executable) {
  if (!Shell.WINDOWS) {
    return f.setExecutable(executable);
  }
  // On Windows, fall back to chmod semantics on the owner execute bit.
  try {
    FileUtil.chmod(f.getCanonicalPath(), executable ? "u+x" : "u-x", false);
    return true;
  } catch (IOException ex) {
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#canRead()}
 * @param f input file
 * @return On Unix, same as {@link File#canRead()}
 *         On Windows, true if process has read access on the path
 */
public static boolean canRead(File f) {
  if (!Shell.WINDOWS) {
    return f.canRead();
  }
  // Windows ACLs are not reflected by File#canRead; ask the native layer.
  try {
    return NativeIO.Windows.access(f.getCanonicalPath(),
        NativeIO.Windows.AccessRight.ACCESS_READ);
  } catch (IOException e) {
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#canWrite()}
 * @param f input file
 * @return On Unix, same as {@link File#canWrite()}
 *         On Windows, true if process has write access on the path
 */
public static boolean canWrite(File f) {
  if (!Shell.WINDOWS) {
    return f.canWrite();
  }
  // Windows ACLs are not reflected by File#canWrite; ask the native layer.
  try {
    return NativeIO.Windows.access(f.getCanonicalPath(),
        NativeIO.Windows.AccessRight.ACCESS_WRITE);
  } catch (IOException e) {
    return false;
  }
}
/**
 * Platform independent implementation for {@link File#canExecute()}
 * @param f input file
 * @return On Unix, same as {@link File#canExecute()}
 *         On Windows, true if process has execute access on the path
 */
public static boolean canExecute(File f) {
  if (!Shell.WINDOWS) {
    return f.canExecute();
  }
  // Windows ACLs are not reflected by File#canExecute; ask the native layer.
  try {
    return NativeIO.Windows.access(f.getCanonicalPath(),
        NativeIO.Windows.AccessRight.ACCESS_EXECUTE);
  } catch (IOException e) {
    return false;
  }
}
/**
 * Set permissions to the required value. Uses the java primitives instead
 * of forking if group == other.
 * @param f the file to change
 * @param permission the new permissions
 * @throws IOException if the permissions cannot be applied
 */
public static void setPermission(File f, FsPermission permission
                                ) throws IOException {
  FsAction user = permission.getUserAction();
  FsAction group = permission.getGroupAction();
  FsAction other = permission.getOtherAction();
  // use the native/fork if the group/other permissions are different
  // or if the native is available or on Windows
  if (group != other || NativeIO.isAvailable() || Shell.WINDOWS) {
    execSetPermission(f, permission);
    return;
  }
  // Pure-Java path. java.io.File setters take (value, ownerOnly):
  // ownerOnly=false sets the bit for everybody (valid here since
  // group == other), then ownerOnly=true narrows/widens the owner bit
  // only when it differs from the group bit.
  boolean rv = true;
  // read perms
  rv = f.setReadable(group.implies(FsAction.READ), false);
  checkReturnValue(rv, f, permission);
  if (group.implies(FsAction.READ) != user.implies(FsAction.READ)) {
    rv = f.setReadable(user.implies(FsAction.READ), true);
    checkReturnValue(rv, f, permission);
  }
  // write perms
  rv = f.setWritable(group.implies(FsAction.WRITE), false);
  checkReturnValue(rv, f, permission);
  if (group.implies(FsAction.WRITE) != user.implies(FsAction.WRITE)) {
    rv = f.setWritable(user.implies(FsAction.WRITE), true);
    checkReturnValue(rv, f, permission);
  }
  // exec perms
  rv = f.setExecutable(group.implies(FsAction.EXECUTE), false);
  checkReturnValue(rv, f, permission);
  if (group.implies(FsAction.EXECUTE) != user.implies(FsAction.EXECUTE)) {
    rv = f.setExecutable(user.implies(FsAction.EXECUTE), true);
    checkReturnValue(rv, f, permission);
  }
}
/**
 * Fail with a descriptive IOException when a File permission setter
 * returned false.
 *
 * @param rv the setter's return value
 * @param p the file being modified
 * @param permission the permissions being applied (for the message)
 * @throws IOException when rv is false
 */
private static void checkReturnValue(boolean rv, File p,
    FsPermission permission) throws IOException {
  if (rv) {
    return;
  }
  throw new IOException("Failed to set permissions of path: " + p +
      " to " +
      String.format("%04o", permission.toShort()));
}
/**
 * Apply permissions via the native library when loaded, otherwise by
 * forking the platform's chmod command.
 *
 * @param f the file to change
 * @param permission the permissions to apply
 * @throws IOException if the change fails
 */
private static void execSetPermission(File f,
    FsPermission permission) throws IOException {
  if (NativeIO.isAvailable()) {
    // Native chmod avoids the cost of forking a shell.
    NativeIO.POSIX.chmod(f.getCanonicalPath(), permission.toShort());
    return;
  }
  execCommand(f, Shell.getSetPermissionCommand(
      String.format("%04o", permission.toShort()), false));
}
/**
 * Run a shell command with the file's canonical path appended as the
 * final argument, returning the command's output.
 *
 * @param f file whose canonical path is appended to the command
 * @param cmd the command and its leading arguments
 * @return the command's output
 * @throws IOException if the command fails
 */
static String execCommand(File f, String... cmd) throws IOException {
  String[] args = new String[cmd.length + 1];
  for (int i = 0; i < cmd.length; i++) {
    args[i] = cmd[i];
  }
  args[cmd.length] = f.getCanonicalPath();
  return Shell.execCommand(args);
}
/**
 * Create a tmp file for a base file.
 * The temp file is created in the same directory as the base file and its
 * name starts with {@code prefix + basefile.getName()}.
 * @param basefile the base file of the tmp
 * @param prefix file name prefix of tmp
 * @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
 * @return a newly created tmp file
 * @exception IOException If a tmp file cannot created
 * @see java.io.File#createTempFile(String, String, File)
 * @see java.io.File#deleteOnExit()
 */
public static final File createLocalTempFile(final File basefile,
    final String prefix,
    final boolean isDeleteOnExit)
    throws IOException {
  final File tmp = File.createTempFile(prefix + basefile.getName(), "",
      basefile.getParentFile());
  if (isDeleteOnExit) {
    tmp.deleteOnExit();
  }
  return tmp;
}
/**
 * Move the src file to the name specified by target.
 * Retries (up to ~6 seconds) deleting a pre-existing target before giving
 * up, to cope with Windows rename limitations.
 * @param src the source file
 * @param target the target file
 * @exception IOException If this operation fails or is interrupted
 */
public static void replaceFile(File src, File target) throws IOException {
  /* renameTo() has two limitations on Windows platform.
   * src.renameTo(target) fails if
   * 1) If target already exists OR
   * 2) If target is already open for reading/writing.
   */
  if (!src.renameTo(target)) {
    int retries = 5;
    while (target.exists() && !target.delete() && retries-- >= 0) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // Fix: restore the interrupt status before translating to
        // IOException so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new IOException("replaceFile interrupted.");
      }
    }
    if (!src.renameTo(target)) {
      throw new IOException("Unable to rename " + src +
                            " to " + target);
    }
  }
}
/**
 * A wrapper for {@link File#listFiles()}. This java.io API returns null
 * when a dir is not a directory or for any I/O error. Instead of having
 * null check everywhere File#listFiles() is used, we will add utility API
 * to get around this problem. For the majority of cases where we prefer
 * an IOException to be thrown.
 * @param dir directory for which listing should be performed
 * @return list of files or empty list
 * @exception IOException for invalid directory or for a bad disk.
 */
public static File[] listFiles(File dir) throws IOException {
  final File[] files = dir.listFiles();
  if (files != null) {
    return files;
  }
  throw new IOException("Invalid directory or I/O error occurred for dir: "
      + dir.toString());
}
/**
 * A wrapper for {@link File#list()}. This java.io API returns null
 * when a dir is not a directory or for any I/O error. Instead of having
 * null check everywhere File#list() is used, we will add utility API
 * to get around this problem. For the majority of cases where we prefer
 * an IOException to be thrown.
 * @param dir directory for which listing should be performed
 * @return list of file names or empty string list
 * @exception AccessDeniedException for unreadable directory
 * @exception IOException for invalid directory or for bad disk
 */
public static String[] list(File dir) throws IOException {
  // Distinguish "unreadable" from "invalid/IO error" up front.
  if (!canRead(dir)) {
    throw new AccessDeniedException(dir.toString(), null,
        FSExceptionMessages.PERMISSION_DENIED);
  }
  final String[] fileNames = dir.list();
  if (fileNames != null) {
    return fileNames;
  }
  throw new IOException("Invalid directory or I/O error occurred for dir: "
      + dir.toString());
}
/**
 * Convenience overload of
 * {@code createJarWithClassPath(String, Path, Path, Map)} that uses
 * {@code pwd} as both the jar's save directory and the execution
 * working directory.
 *
 * @param inputClassPath String input classpath to bundle into the jar manifest
 * @param pwd Path to working directory to save jar
 * @param callerEnv caller's environment variables to use for expansion
 * @return String[] with absolute path to new jar in position 0 and
 *         unexpanded wild card entry path in position 1
 * @throws IOException if there is an I/O error while writing the jar file
 */
public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
    Map<String, String> callerEnv) throws IOException {
  return createJarWithClassPath(inputClassPath, pwd, pwd, callerEnv);
}
/**
* Create a jar file at the given path, containing a manifest with a classpath
* that references all specified entries.
*
* Some platforms may have an upper limit on command line length. For example,
* the maximum command line length on Windows is 8191 characters, but the
* length of the classpath may exceed this. To work around this limitation,
* use this method to create a small intermediate jar with a manifest that
* contains the full classpath. It returns the absolute path to the new jar,
* which the caller may set as the classpath for a new process.
*
* Environment variable evaluation is not supported within a jar manifest, so
* this method expands environment variables before inserting classpath entries
* to the manifest. The method parses environment variables according to
* platform-specific syntax (%VAR% on Windows, or $VAR otherwise). On Windows,
* environment variables are case-insensitive. For example, %VAR% and %var%
* evaluate to the same value.
*
* Specifying the classpath in a jar manifest does not support wildcards, so
* this method expands wildcards internally. Any classpath entry that ends
* with * is translated to all files at that path with extension .jar or .JAR.
*
* @param inputClassPath String input classpath to bundle into the jar manifest
* @param pwd Path to working directory to save jar
* @param targetDir path to where the jar execution will have its working dir
* @param callerEnv Map<String, String> caller's environment variables to use
* for expansion
* @return String[] with absolute path to new jar in position 0 and
* unexpanded wild card entry path in position 1
* @throws IOException if there is an I/O error while writing the jar file
*/
public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
Path targetDir,
Map<String, String> callerEnv) throws IOException {
// Replace environment variables, case-insensitive on Windows
@SuppressWarnings("unchecked")
Map<String, String> env = Shell.WINDOWS ? new CaseInsensitiveMap(callerEnv) :
callerEnv;
String[] classPathEntries = inputClassPath.split(File.pathSeparator);
for (int i = 0; i < classPathEntries.length; ++i) {
classPathEntries[i] = StringUtils.replaceTokens(classPathEntries[i],
StringUtils.ENV_VAR_PATTERN, env);
}
File workingDir = new File(pwd.toString());
if (!workingDir.mkdirs()) {
// If mkdirs returns false because the working directory already exists,
// then this is acceptable. If it returns false due to some other I/O
// error, then this method will fail later with an IOException while saving
// the jar.
LOG.debug("mkdirs false for " + workingDir + ", execution will continue");
}
StringBuilder unexpandedWildcardClasspath = new StringBuilder();
// Append all entries
List<String> classPathEntryList = new ArrayList<String>(
classPathEntries.length);
for (String classPathEntry: classPathEntries) {
if (classPathEntry.length() == 0) {
continue;
}
if (classPathEntry.endsWith("*")) {
// Append all jars that match the wildcard
List<Path> jars = getJarsInDirectory(classPathEntry);
if (!jars.isEmpty()) {
for (Path jar: jars) {
classPathEntryList.add(jar.toUri().toURL().toExternalForm());
}
} else {
unexpandedWildcardClasspath.append(File.pathSeparator);
unexpandedWildcardClasspath.append(classPathEntry);
}
} else {
// Append just this entry
File fileCpEntry = null;
if(!new Path(classPathEntry).isAbsolute()) {
fileCpEntry = new File(targetDir.toString(), classPathEntry);
}
else {
fileCpEntry = new File(classPathEntry);
}
String classPathEntryUrl = fileCpEntry.toURI().toURL()
.toExternalForm();
// File.toURI only appends trailing '/' if it can determine that it is a
// directory that already exists. (See JavaDocs.) If this entry had a
// trailing '/' specified by the caller, then guarantee that the
// classpath entry in the manifest has a trailing '/', and thus refers to
// a directory instead of a file. This can happen if the caller is
// creating a classpath jar referencing a directory that hasn't been
// created yet, but will definitely be created before running.
if (classPathEntry.endsWith(Path.SEPARATOR) &&
!classPathEntryUrl.endsWith(Path.SEPARATOR)) {
classPathEntryUrl = classPathEntryUrl + Path.SEPARATOR;
}
classPathEntryList.add(classPathEntryUrl);
}
}
String jarClassPath = StringUtils.join(" ", classPathEntryList);
// Create the manifest
Manifest jarManifest = new Manifest();
jarManifest.getMainAttributes().putValue(
Attributes.Name.MANIFEST_VERSION.toString(), "1.0");
jarManifest.getMainAttributes().putValue(
Attributes.Name.CLASS_PATH.toString(), jarClassPath);
// Write the manifest to output JAR file
File classPathJar = File.createTempFile("classpath-", ".jar", workingDir);
try (FileOutputStream fos = new FileOutputStream(classPathJar);
BufferedOutputStream bos = new BufferedOutputStream(fos)) {
JarOutputStream jos = new JarOutputStream(bos, jarManifest);
jos.close();
}
String[] jarCp = {classPathJar.getCanonicalPath(),
unexpandedWildcardClasspath.toString()};
return jarCp;
}
  /**
   * Returns all jars that are in the directory. It is useful in expanding a
   * wildcard path to return all jars from the directory to use in a classpath.
   * It operates only on local paths.
   *
   * @param path the path to the directory. The path may include the wildcard.
   * @return the list of jars as URLs, or an empty list if there are no jars, or
   *         the directory does not exist locally
   */
  public static List<Path> getJarsInDirectory(String path) {
    // Convenience overload: always resolve against the local file system.
    return getJarsInDirectory(path, true);
  }
/**
* Returns all jars that are in the directory. It is useful in expanding a
* wildcard path to return all jars from the directory to use in a classpath.
*
* @param path the path to the directory. The path may include the wildcard.
* @return the list of jars as URLs, or an empty list if there are no jars, or
* the directory does not exist
*/
public static List<Path> getJarsInDirectory(String path, boolean useLocal) {
List<Path> paths = new ArrayList<>();
try {
// add the wildcard if it is not provided
if (!path.endsWith("*")) {
path += File.separator + "*";
}
Path globPath = new Path(path).suffix("{.jar,.JAR}");
FileContext context = useLocal ?
FileContext.getLocalFSFileContext() :
FileContext.getFileContext(globPath.toUri());
FileStatus[] files = context.util().globStatus(globPath);
if (files != null) {
for (FileStatus file: files) {
paths.add(file.getPath());
}
}
} catch (IOException ignore) {} // return the empty list
return paths;
}
public static boolean compareFs(FileSystem srcFs, FileSystem destFs) {
if (srcFs==null || destFs==null) {
return false;
}
URI srcUri = srcFs.getUri();
URI dstUri = destFs.getUri();
if (srcUri.getScheme()==null) {
return false;
}
if (!srcUri.getScheme().equals(dstUri.getScheme())) {
return false;
}
String srcHost = srcUri.getHost();
String dstHost = dstUri.getHost();
if ((srcHost!=null) && (dstHost!=null)) {
if (srcHost.equals(dstHost)) {
return srcUri.getPort()==dstUri.getPort();
}
try {
srcHost = InetAddress.getByName(srcHost).getCanonicalHostName();
dstHost = InetAddress.getByName(dstHost).getCanonicalHostName();
} catch (UnknownHostException ue) {
if (LOG.isDebugEnabled()) {
LOG.debug("Could not compare file-systems. Unknown host: ", ue);
}
return false;
}
if (!srcHost.equals(dstHost)) {
return false;
}
} else if (srcHost==null && dstHost!=null) {
return false;
} else if (srcHost!=null) {
return false;
}
// check for ports
return srcUri.getPort()==dstUri.getPort();
}
}
| GeLiXin/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java | Java | apache-2.0 | 58,538 |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
TABLES_012 = ['resource', 'sourceassoc', 'user',
              'project', 'meter', 'source', 'alarm']
TABLES_027 = ['user', 'project', 'alarm']


def _drop_tables(meta, prefix, table_names):
    """Drop each ``prefix + name`` table if it exists.

    Tables that cannot be reflected (sa.exc.NoSuchTableError) are silently
    skipped, which keeps the migration idempotent.
    """
    for table_name in table_names:
        try:
            sa.Table(prefix + table_name, meta, autoload=True)\
                .drop(checkfirst=True)
        except sa.exc.NoSuchTableError:
            pass


def upgrade(migrate_engine):
    """Remove leftover dump tables created by migrations 027 and 012."""
    meta = sa.MetaData(bind=migrate_engine)
    # Same order as before: the 'dump027_' tables first, then 'dump_'.
    _drop_tables(meta, 'dump027_', TABLES_027)
    _drop_tables(meta, 'dump_', TABLES_012)
def downgrade(migrate_engine):
    """No-op: the dropped dump tables are not recreated on downgrade."""
    pass
| NeCTAR-RC/ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py | Python | apache-2.0 | 1,278 |
package com.mossle.api.process;
import java.util.Map;
import com.mossle.api.form.FormDTO;
import com.mossle.core.page.Page;
/**
 * A no-op {@link ProcessConnector} implementation: every method returns
 * {@code null}. Serves as a placeholder/test double when no workflow engine
 * is wired in.
 */
public class MockProcessConnector implements ProcessConnector {
    /**
     * Get the start form of a process definition.
     */
    public FormDTO findStartForm(String processDefinitionId) {
        return null;
    }

    /**
     * Get the process configuration.
     */
    public ProcessDTO findProcess(String processId) {
        return null;
    }

    /**
     * Get the process configuration by process definition id.
     */
    public ProcessDTO findProcessByProcessDefinitionId(
            String processDefinitionId) {
        return null;
    }

    /**
     * Start a process instance.
     */
    public String startProcess(String userId, String businessKey,
            String processDefinitionId, Map<String, Object> processParemeters) {
        return null;
    }

    /**
     * Running (open) process instances.
     */
    public Page findRunningProcessInstances(String userId, String tenantId,
            Page page) {
        return null;
    }

    /**
     * Completed process instances.
     */
    public Page findCompletedProcessInstances(String userId, String tenantId,
            Page page) {
        return null;
    }

    /**
     * Process instances the user participated in.
     */
    public Page findInvolvedProcessInstances(String userId, String tenantId,
            Page page) {
        return null;
    }

    /**
     * To-do tasks (personal tasks).
     */
    public Page findPersonalTasks(String userId, String tenantId, Page page) {
        return null;
    }

    /**
     * Claimable tasks (group tasks).
     */
    public Page findGroupTasks(String userId, String tenantId, Page page) {
        return null;
    }

    /**
     * Completed tasks (history tasks).
     */
    public Page findHistoryTasks(String userId, String tenantId, Page page) {
        return null;
    }

    /**
     * Tasks being delegated (the delegate has not finished the task yet).
     */
    public Page findDelegatedTasks(String userId, String tenantId, Page page) {
        return null;
    }

    /**
     * Returns both claimed and unclaimed tasks.
     */
    public Page findCandidateOrAssignedTasks(String userId, String tenantId,
            Page page) {
        return null;
    }

    /**
     * Process definitions.
     */
    public Page findProcessDefinitions(String tenantId, Page page) {
        return null;
    }

    /**
     * Process instances.
     */
    public Page findProcessInstances(String tenantId, Page page) {
        return null;
    }

    /**
     * Tasks.
     */
    public Page findTasks(String tenantId, Page page) {
        return null;
    }

    /**
     * Deployments.
     */
    public Page findDeployments(String tenantId, Page page) {
        return null;
    }

    /**
     * Historic process instances.
     */
    public Page findHistoricProcessInstances(String tenantId, Page page) {
        return null;
    }

    /**
     * Historic activity instances.
     */
    public Page findHistoricActivityInstances(String tenantId, Page page) {
        return null;
    }

    /**
     * Historic task instances.
     */
    public Page findHistoricTaskInstances(String tenantId, Page page) {
        return null;
    }

    /**
     * Jobs.
     */
    public Page findJobs(String tenantId, Page page) {
        return null;
    }

    /**
     * Get the businessKey by processInstanceId.
     */
    public String findBusinessKeyByProcessInstanceId(String processInstanceId) {
        return null;
    }
}
| vigosser/lemon | src/main/java/com/mossle/api/process/MockProcessConnector.java | Java | apache-2.0 | 3,368 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.wicket.markup.html.border;
import org.apache.wicket.Component;
import org.apache.wicket.DequeueContext;
import org.apache.wicket.DequeueTagAction;
import org.apache.wicket.IQueueRegion;
import org.apache.wicket.MarkupContainer;
import org.apache.wicket.markup.ComponentTag;
import org.apache.wicket.markup.IMarkupFragment;
import org.apache.wicket.markup.MarkupElement;
import org.apache.wicket.markup.MarkupException;
import org.apache.wicket.markup.MarkupFragment;
import org.apache.wicket.markup.MarkupStream;
import org.apache.wicket.markup.TagUtils;
import org.apache.wicket.markup.WicketTag;
import org.apache.wicket.markup.html.MarkupUtil;
import org.apache.wicket.markup.html.WebMarkupContainer;
import org.apache.wicket.markup.html.panel.BorderMarkupSourcingStrategy;
import org.apache.wicket.markup.html.panel.IMarkupSourcingStrategy;
import org.apache.wicket.markup.parser.XmlTag.TagType;
import org.apache.wicket.markup.resolver.IComponentResolver;
import org.apache.wicket.model.IModel;
import org.apache.wicket.util.lang.Args;
/**
* A border component has associated markup which is drawn and determines placement of markup and/or
* components nested within the border component.
* <p>
* The portion of the border's associated markup file which is to be used in rendering the border is
* denoted by a <wicket:border> tag. The children of the border component instance are then
* inserted into this markup, replacing the first <wicket:body> tag in the border's associated
* markup.
* <p>
* For example, if a border's associated markup looked like this:
*
* <pre>
* <html>
* <body>
* <wicket:border>
* First <wicket:body/> Last
* </wicket:border>
* </body>
* </html>
* </pre>
*
* And the border was used on a page like this:
*
* <pre>
* <html>
* <body>
* <span wicket:id = "myBorder">
* Middle
* </span>
* </body>
* </html>
* </pre>
*
* Then the resulting HTML would look like this:
*
* <pre>
* <html>
* <body>
* First Middle Last
* </body>
* </html>
* </pre>
*
* In other words, the body of the myBorder component is substituted into the border's associated
* markup at the position indicated by the <wicket:body> tag.
* <p>
* Regarding <wicket:body/> you have two options. Either use <wicket:body/> (open-close
* tag) which will automatically be expanded to <wicket:body>body content</wicket:body>
* or use <wicket:body>preview region</wicket:body> in your border's markup. The preview
* region (everything in between the open and close tag) will automatically be removed.
* <p>
* The border body container will automatically be created for you and added to the border
* container. It is accessible via {@link #getBodyContainer()}. In case the body markup is not an
* immediate child of border (see the example below), then you must use code such as
* <code>someContainer.add(getBodyContainer())</code> to add the body component to the correct
* container.
*
* <pre>
* <html>
* <body>
* <wicket:border>
* <span wicket:id="someContainer">
* <wicket:body/>
* </span>
* </wicket:border>
* </body>
* </html>
* </pre>
*
* The component "someContainer" in the previous example must be added to the border, and not the
* body, which is achieved via {@link #addToBorder(Component...)}.
* <p/>
* {@link #add(Component...)} is an alias to {@code getBodyContainer().add(Component...)} and will
* add a child component to the border body as shown in the example below.
*
* <pre>
* <html>
* <body>
* <span wicket:id = "myBorder">
* <input wicket:id="name"/>
* </span>
* </body>
* </html>
* </pre>
*
* This implementation does not apply any magic with respect to component handling. In doubt think
* simple. But everything you can do with a MarkupContainer or Component, you can do with a Border
* or its Body as well.
* <p/>
*
* Other methods like {@link #remove()}, {@link #get(String)}, {@link #iterator()}, etc. are not
* aliased to work on the border's body and attention must be paid when they need to be used.
*
* @see BorderPanel An alternative implementation based on Panel
* @see BorderBehavior A behavior which adds (raw) markup before and after the component
*
* @author Jonathan Locke
* @author Juergen Donnerstag
*/
public abstract class Border extends WebMarkupContainer implements IComponentResolver, IQueueRegion
{
	private static final long serialVersionUID = 1L;

	/** Id suffix / wicket tag name for the border body. */
	public static final String BODY = "body";

	/** Wicket tag name marking the border's own markup region. */
	public static final String BORDER = "border";

	/** The body component associated with &lt;wicket:body&gt; */
	private final BorderBodyContainer body;

	/**
	 * @see org.apache.wicket.Component#Component(String)
	 */
	public Border(final String id)
	{
		this(id, null);
	}

	/**
	 * @see org.apache.wicket.Component#Component(String, IModel)
	 */
	public Border(final String id, final IModel<?> model)
	{
		super(id, model);

		// The body id is derived from the border id so it is unique within the page.
		body = new BorderBodyContainer(id + "_" + BODY);
		queueToBorder(body);
	}

	/**
	 * Returns the border body container.
	 *
	 * NOTE: this component is NOT meant to be directly handled by users, meaning that you
	 * can not explicitly add it to an arbitrary container or remove it from its original parent container.
	 *
	 * @return The border body container
	 */
	public final BorderBodyContainer getBodyContainer()
	{
		return body;
	}

	/**
	 * This is for all components which have been added to the markup like this:
	 *
	 * <pre>
	 * &lt;span wicket:id="myBorder"&gt;
	 *   &lt;input wicket:id="text1" .. /&gt;
	 *   ...
	 * &lt;/span&gt;
	 *
	 * </pre>
	 *
	 * Whereas {@link #addToBorder(Component...)} will add a component associated with the following
	 * markup:
	 *
	 * <pre>
	 * &lt;wicket:border&gt;
	 *   &lt;form wicket:id="myForm" .. &gt;
	 *      &lt;wicket:body/&gt;
	 *   &lt;/form&gt;
	 * &lt;/wicket:border&gt;
	 *
	 * </pre>
	 *
	 * @see org.apache.wicket.MarkupContainer#add(org.apache.wicket.Component[])
	 */
	@Override
	public Border add(final Component... children)
	{
		for (Component component : children)
		{
			// The body itself and auto-components belong to the border, not to the body.
			if (component == body || component.isAuto())
			{
				addToBorder(component);
			}
			else
			{
				getBodyContainer().add(component);
			}
		}
		return this;
	}

	/**
	 * Adds or replaces children in the border body; the body container itself is
	 * handled by the border directly.
	 */
	@Override
	public Border addOrReplace(final Component... children)
	{
		for (Component component : children)
		{
			if (component == body)
			{
				// in this case we do not want to redirect to body
				// container but to border's old remove.
				super.addOrReplace(component);
			}
			else
			{
				getBodyContainer().addOrReplace(component);
			}
		}
		return this;
	}

	/**
	 * Removes a child from the border body; removing the body container itself is
	 * delegated to the border.
	 */
	@Override
	public Border remove(final Component component)
	{
		if (component == body)
		{
			// in this case we do not want to redirect to body
			// container but to border's old remove.
			removeFromBorder(component);
		}
		else
		{
			getBodyContainer().remove(component);
		}
		return this;
	}

	/**
	 * Removes a child by id from the border body; the body container's own id is
	 * handled by the border.
	 */
	@Override
	public Border remove(final String id)
	{
		if (body.getId().equals(id))
		{
			// in this case we do not want to redirect to body
			// container but to border's old remove.
			super.remove(id);
		}
		else
		{
			getBodyContainer().remove(id);
		}
		return this;
	}

	/**
	 * Removes all children from the border body (not from the border itself).
	 */
	@Override
	public Border removeAll()
	{
		getBodyContainer().removeAll();
		return this;
	}

	/**
	 * Replaces a child in the border body; replacing the body container itself is
	 * handled by the border.
	 */
	@Override
	public Border replace(final Component replacement)
	{
		if (body.getId().equals(replacement.getId()))
		{
			// in this case we do not want to redirect to body
			// container but to border's old remove.
			replaceInBorder(replacement);
		}
		else
		{
			getBodyContainer().replace(replacement);
		}
		return this;
	}

	/**
	 * Adds children components to the Border itself
	 *
	 * @param children
	 *            the children components to add
	 * @return this
	 */
	public Border addToBorder(final Component... children)
	{
		super.add(children);
		return this;
	}

	/**
	 * Queues children components to the border body.
	 */
	@Override
	public Border queue(Component... components)
	{
		getBodyContainer().queue(components);
		return this;
	}

	@Override
	protected void onConfigure()
	{
		super.onConfigure();

		// Dequeue any queued children before rendering.
		dequeue();
	}

	/**
	 * Queues children components to the Border itself
	 *
	 * @param children
	 *            the children components to queue
	 * @return this
	 */
	public Border queueToBorder(final Component... children)
	{
		super.queue(children);
		return this;
	}

	/**
	 * Removes child from the Border itself
	 *
	 * @param child
	 * @return {@code this}
	 */
	public Border removeFromBorder(final Component child)
	{
		super.remove(child);
		return this;
	}

	/**
	 * Replaces component in the Border itself
	 *
	 * @param component
	 * @return {@code this}
	 */
	public Border replaceInBorder(final Component component)
	{
		super.replace(component);
		return this;
	}

	/**
	 * {@inheritDoc}
	 */
	@Override
	public Component resolve(final MarkupContainer container, final MarkupStream markupStream,
		final ComponentTag tag)
	{
		// make sure nested borders are resolved properly
		if (body.rendering == false)
		{
			// We are only interested in border body tags. The tag ID actually is irrelevant since
			// always preset with the same default
			if (TagUtils.isWicketBodyTag(tag))
			{
				return body;
			}
		}

		return null;
	}

	/**
	 * {@inheritDoc}
	 */
	@Override
	protected IMarkupSourcingStrategy newMarkupSourcingStrategy()
	{
		return new BorderMarkupSourcingStrategy();
	}

	/**
	 * Search for the child markup in the file associated with the Border. The child markup must in
	 * between the &lt;wicket:border&gt; tags.
	 */
	@Override
	public IMarkupFragment getMarkup(final Component child)
	{
		// Border require an associated markup resource file
		IMarkupFragment markup = getAssociatedMarkup();
		if (markup == null)
		{
			throw new MarkupException("Unable to find associated markup file for Border: " +
				this.toString());
		}

		// Find <wicket:border>
		IMarkupFragment borderMarkup = null;
		for (int i = 0; i < markup.size(); i++)
		{
			MarkupElement elem = markup.get(i);
			if (TagUtils.isWicketBorderTag(elem))
			{
				borderMarkup = new MarkupFragment(markup, i);
				break;
			}
		}

		if (borderMarkup == null)
		{
			throw new MarkupException(markup.getMarkupResourceStream(),
				"Unable to find <wicket:border> tag in associated markup file for Border: " +
					this.toString());
		}

		// If child == null, return the markup fragment starting with the <wicket:border> tag
		if (child == null)
		{
			return borderMarkup;
		}

		// Is child == BorderBody?
		if (child == body)
		{
			// Get the <wicket:body> markup
			return body.getMarkup();
		}

		// Find the markup for the child component
		IMarkupFragment childMarkup = borderMarkup.find(child.getId());
		if (childMarkup != null)
		{
			return childMarkup;
		}

		// Fall back to searching the associated markup file's header section.
		return ((BorderMarkupSourcingStrategy)getMarkupSourcingStrategy()).findMarkupInAssociatedFileHeader(
			this, child);
	}

	/**
	 * The container to be associated with the &lt;wicket:body&gt; tag
	 */
	public class BorderBodyContainer extends WebMarkupContainer implements IQueueRegion
	{
		private static final long serialVersionUID = 1L;

		/** The markup */
		private transient IMarkupFragment markup;

		// properly resolve borders added to borders
		protected boolean rendering;

		/**
		 * Constructor
		 *
		 * @param id
		 */
		public BorderBodyContainer(final String id)
		{
			super(id);
		}

		@Override
		protected void onComponentTag(final ComponentTag tag)
		{
			// Convert open-close to open-body-close
			if (tag.isOpenClose())
			{
				tag.setType(TagType.OPEN);
				tag.setModified(true);
			}

			super.onComponentTag(tag);
		}

		@Override
		public void onComponentTagBody(final MarkupStream markupStream, final ComponentTag openTag)
		{
			// skip the <wicket:body> body
			if (markupStream.getPreviousTag().isOpen())
			{
				// Only RawMarkup is allowed within the preview region,
				// which gets stripped from output
				markupStream.skipRawMarkup();
			}

			// Get the <span wicket:id="myBorder"> markup and render that instead
			IMarkupFragment markup = Border.this.getMarkup();
			MarkupStream stream = new MarkupStream(markup);
			ComponentTag tag = stream.getTag();
			stream.next();

			super.onComponentTagBody(stream, tag);
		}

		@Override
		protected void onRender()
		{
			// Flag guards resolve() in the outer border while this body renders.
			rendering = true;

			try
			{
				super.onRender();
			}
			finally
			{
				rendering = false;
			}
		}

		/**
		 * Get the &lt;wicket:body&gt; markup from the body's parent container
		 */
		@Override
		public IMarkupFragment getMarkup()
		{
			if (markup == null)
			{
				markup = findByName(getParent().getMarkup(null), BODY);
			}
			return markup;
		}

		/**
		 * Search for &lt;wicket:'name' ...&gt; on the same level, but ignoring other "transparent"
		 * tags such as &lt;wicket:enclosure&gt; etc.
		 *
		 * @param markup
		 * @param name
		 * @return null, if not found
		 */
		private IMarkupFragment findByName(final IMarkupFragment markup, final String name)
		{
			Args.notEmpty(name, "name");

			MarkupStream stream = new MarkupStream(markup);

			// Skip any raw markup
			stream.skipUntil(ComponentTag.class);

			// Skip <wicket:border>
			stream.next();

			while (stream.skipUntil(ComponentTag.class))
			{
				ComponentTag tag = stream.getTag();
				if (tag.isOpen() || tag.isOpenClose())
				{
					if (TagUtils.isWicketBodyTag(tag))
					{
						return stream.getMarkupFragment();
					}
				}
				stream.next();
			}

			return null;
		}

		/**
		 * Get the child markup which must be in between the &lt;span wicktet:id="myBorder"&gt; tags
		 */
		@Override
		public IMarkupFragment getMarkup(final Component child)
		{
			IMarkupFragment markup = Border.this.getMarkup();
			if (markup == null)
			{
				return null;
			}

			if (child == null)
			{
				return markup;
			}

			return markup.find(child.getId());
		}

		@Override
		public DequeueContext newDequeueContext()
		{
			// The body dequeues against the enclosing border's markup region.
			Border border = findParent(Border.class);
			IMarkupFragment fragment = border.getMarkup();

			if (fragment == null)
			{
				return null;
			}

			return new DequeueContext(fragment, this, true);
		}

		@Override
		public Component findComponentToDequeue(ComponentTag tag)
		{
			/*
			 * the body container is allowed to search for queued components all
			 * the way to the page even though it is an IQueueRegion so it can
			 * find components queued below the border
			 */

			Component component = super.findComponentToDequeue(tag);
			if (component != null)
			{
				return component;
			}

			MarkupContainer cursor = getParent();
			while (cursor != null)
			{
				component = cursor.findComponentToDequeue(tag);
				if (component != null)
				{
					return component;
				}
				if (cursor instanceof BorderBodyContainer)
				{
					// optimization - find call above would've already recursed
					// to page
					break;
				}
				cursor = cursor.getParent();
			}
			return null;
		}
	}

	/**
	 * Body tags are dequeued by the border itself rather than delegated further.
	 */
	@Override
	protected DequeueTagAction canDequeueTag(ComponentTag tag)
	{
		if (canDequeueBody(tag))
		{
			return DequeueTagAction.DEQUEUE;
		}

		return super.canDequeueTag(tag);
	}

	/**
	 * For &lt;wicket:body&gt; tags, synchronizes the tag id with the body component's id
	 * before the normal lookup.
	 */
	@Override
	public Component findComponentToDequeue(ComponentTag tag)
	{
		if (canDequeueBody(tag))
		{
			// synch the tag id with the one of the body component
			tag.setId(body.getId());
		}

		return super.findComponentToDequeue(tag);
	}

	// True when the tag is a <wicket:body> tag.
	private boolean canDequeueBody(ComponentTag tag)
	{
		boolean isBodyTag = (tag instanceof WicketTag) && ((WicketTag)tag).isBodyTag();

		return isBodyTag;
	}

	@Override
	protected void addDequeuedComponent(Component component, ComponentTag tag)
	{
		// components queued in border get dequeued into the border not into the body container
		super.add(component);
	}

	/**
	 * Returns the markup inside &lt;wicket:border&gt; tag.
	 * If such tag is not found, all the markup is returned.
	 *
	 * @see IQueueRegion#getRegionMarkup()
	 */
	@Override
	public IMarkupFragment getRegionMarkup()
	{
		IMarkupFragment markup = super.getRegionMarkup();

		if (markup == null)
		{
			return markup;
		}

		IMarkupFragment borderMarkup = MarkupUtil.findStartTag(markup, BORDER);

		return borderMarkup != null ? borderMarkup : markup;
	}
}
| mosoft521/wicket | wicket-core/src/main/java/org/apache/wicket/markup/html/border/Border.java | Java | apache-2.0 | 17,337 |
package cluster // import "github.com/docker/docker/daemon/cluster"
import (
"context"
"fmt"
"path/filepath"
"strings"
"sync"
"time"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/executor/container"
lncluster "github.com/docker/libnetwork/cluster"
swarmapi "github.com/docker/swarmkit/api"
swarmallocator "github.com/docker/swarmkit/manager/allocator/cnmallocator"
swarmnode "github.com/docker/swarmkit/node"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// nodeRunner implements a manager for continuously running swarmkit node, restarting them with backoff delays if needed.
type nodeRunner struct {
	nodeState
	mu              sync.RWMutex
	done            chan struct{}   // closed when swarmNode exits
	ready           chan struct{}   // closed when swarmNode becomes active
	reconnectDelay  time.Duration   // current backoff delay before the next restart attempt
	config          nodeStartConfig // config used for the last (re)start; reused on reconnect
	repeatedRun     bool            // true once the node has exited at least once
	cancelReconnect func()          // cancels a pending delayed restart, if any
	stopping        bool            // set by Stop; suppresses further restarts
	cluster         *Cluster        // only for accessing config helpers, never call any methods. TODO: change to config struct
}
// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
	// LocalAddr is this machine's local IP or hostname, if specified.
	LocalAddr string
	// RemoteAddr is the address that was given to "swarm join". It is used
	// to find LocalAddr if necessary.
	RemoteAddr string
	// ListenAddr is the address we bind to, including a port.
	ListenAddr string
	// AdvertiseAddr is the address other nodes should connect to,
	// including a port.
	AdvertiseAddr string
	// DataPathAddr is the address that has to be used for the data path
	DataPathAddr string
	// DefaultAddressPool contains list of subnets
	DefaultAddressPool []string
	// SubnetSize contains subnet size of DefaultAddressPool
	SubnetSize uint32
	// DataPathPort contains Data path port (VXLAN UDP port) number that is used for data traffic.
	DataPathPort uint32
	// JoinInProgress is set to true if a join operation has started, but
	// not completed yet.
	JoinInProgress bool

	joinAddr        string // address of an existing cluster member to join
	forceNewCluster bool   // force-recover a single-manager cluster from this node's state
	joinToken       string // token required to join the cluster
	lockKey         []byte // key used to unlock an autolocked manager
	autolock        bool   // whether manager autolocking is enabled
	availability    types.NodeAvailability // desired node availability (e.g. active/pause/drain)
}
// Ready returns a channel that reports the startup outcome: it yields nil once
// the node becomes active, or the node's exit error if the node stopped before
// ever becoming ready; the channel is closed afterwards.
func (n *nodeRunner) Ready() chan error {
	c := make(chan error, 1)
	n.mu.RLock()
	ready, done := n.ready, n.done
	n.mu.RUnlock()
	go func() {
		// Wait until the node is either ready or has exited.
		select {
		case <-ready:
		case <-done:
		}
		// If it exited without ever becoming ready, surface the exit error.
		select {
		case <-ready:
		default:
			n.mu.RLock()
			c <- n.err
			n.mu.RUnlock()
		}
		close(c)
	}()
	return c
}
// Start launches the swarm node with the given config, resetting the
// reconnect backoff to its initial value.
func (n *nodeRunner) Start(conf nodeStartConfig) error {
	n.mu.Lock()
	defer n.mu.Unlock()

	n.reconnectDelay = initialReconnectDelay

	return n.start(conf)
}
// start builds the swarmkit node configuration, launches the node, persists the
// start config, and spawns the goroutines tracking its lifecycle (exit, ready,
// control socket). Callers must hold n.mu.
func (n *nodeRunner) start(conf nodeStartConfig) error {
	var control string
	if isWindows {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
	}

	joinAddr := conf.joinAddr
	if joinAddr == "" && conf.JoinInProgress {
		// We must have been restarted while trying to join a cluster.
		// Continue trying to join instead of forming our own cluster.
		joinAddr = conf.RemoteAddr
	}

	// Hostname is not set here. Instead, it is obtained from
	// the node description that is reported periodically
	swarmnodeConfig := swarmnode.Config{
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		NetworkConfig: &swarmallocator.NetworkConfig{
			DefaultAddrPool: conf.DefaultAddressPool,
			SubnetSize:      conf.SubnetSize,
			VXLANUDPPort:    conf.DataPathPort,
		},
		JoinAddr:  joinAddr,
		StateDir:  n.cluster.root,
		JoinToken: conf.joinToken,
		Executor: container.NewExecutor(
			n.cluster.config.Backend,
			n.cluster.config.PluginBackend,
			n.cluster.config.ImageBackend,
			n.cluster.config.VolumeBackend,
		),
		HeartbeatTick: n.cluster.config.RaftHeartbeatTick,
		// Recommended value in etcd/raft is 10 x (HeartbeatTick).
		// Lower values were seen to have caused instability because of
		// frequent leader elections when running on flakey networks.
		ElectionTick:     n.cluster.config.RaftElectionTick,
		UnlockKey:        conf.lockKey,
		AutoLockManagers: conf.autolock,
		PluginGetter:     n.cluster.config.Backend.PluginGetter(),
	}
	if conf.availability != "" {
		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
		if !ok {
			return fmt.Errorf("invalid Availability: %q", conf.availability)
		}
		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
	}
	node, err := swarmnode.New(&swarmnodeConfig)
	if err != nil {
		return err
	}
	if err := node.Start(context.Background()); err != nil {
		return err
	}

	n.done = make(chan struct{})
	n.ready = make(chan struct{})
	n.swarmNode = node
	if conf.joinAddr != "" {
		// Mark the join as in progress so a daemon restart mid-join resumes
		// joining instead of bootstrapping a new cluster (see above).
		conf.JoinInProgress = true
	}
	n.config = conf
	// Persist the start config so the node can be restored after a daemon restart.
	savePersistentState(n.cluster.root, conf)

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		n.handleNodeExit(node)
		cancel()
	}()
	go n.handleReadyEvent(ctx, node, n.ready)
	go n.handleControlSocketChange(ctx, node)

	return nil
}
// handleControlSocketChange refreshes the cached grpc connection and the
// control/logs clients whenever the node's control socket connection changes,
// and notifies the cluster of each socket change.
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
	for conn := range node.ListenControlSocket(ctx) {
		n.mu.Lock()
		if n.grpcConn != conn {
			if conn == nil {
				n.controlClient = nil
				n.logsClient = nil
			} else {
				n.controlClient = swarmapi.NewControlClient(conn)
				n.logsClient = swarmapi.NewLogsClient(conn)
				// push store changes to daemon
				go n.watchClusterEvents(ctx, conn)
			}
		}
		n.grpcConn = conn
		n.mu.Unlock()
		n.cluster.SendClusterEvent(lncluster.EventSocketChange)
	}
}
// watchClusterEvents subscribes to swarmkit store changes for nodes, services,
// networks, secrets and configs, forwarding every change message to the
// cluster's watch stream until the watch breaks or ctx is cancelled.
func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) {
	client := swarmapi.NewWatchClient(conn)
	watch, err := client.Watch(ctx, &swarmapi.WatchRequest{
		Entries: []*swarmapi.WatchRequest_WatchEntry{
			{
				Kind:   "node",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
			{
				Kind:   "service",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
			{
				Kind:   "network",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
			{
				Kind:   "secret",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
			{
				Kind:   "config",
				Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
			},
		},
		IncludeOldObject: true,
	})
	if err != nil {
		logrus.WithError(err).Error("failed to watch cluster store")
		return
	}
	for {
		msg, err := watch.Recv()
		if err != nil {
			// store watch is broken
			errStatus, ok := status.FromError(err)
			if !ok || errStatus.Code() != codes.Canceled {
				logrus.WithError(err).Error("failed to receive changes from store watch API")
			}
			return
		}
		select {
		case <-ctx.Done():
			return
		case n.cluster.watchStream <- msg:
		}
	}
}
// handleReadyEvent waits for the node to become ready (or for ctx
// cancellation); on readiness it clears the recorded error and the
// join-in-progress marker, closes the ready channel, and always emits a
// node-ready cluster event.
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
	select {
	case <-node.Ready():
		n.mu.Lock()
		n.err = nil
		if n.config.JoinInProgress {
			n.config.JoinInProgress = false
			savePersistentState(n.cluster.root, n.config)
		}
		n.mu.Unlock()
		close(ready)
	case <-ctx.Done():
	}
	n.cluster.SendClusterEvent(lncluster.EventNodeReady)
}
// handleNodeExit records the node's exit error, closes the done channel, and
// schedules a delayed reconnect when the node had become ready at least once
// (or has already failed repeatedly).
func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
	err := detectLockedError(node.Err(context.Background()))
	if err != nil {
		logrus.Errorf("cluster exited with error: %v", err)
	}
	n.mu.Lock()
	n.swarmNode = nil
	n.err = err
	close(n.done)
	select {
	case <-n.ready:
		n.enableReconnectWatcher()
	default:
		if n.repeatedRun {
			n.enableReconnectWatcher()
		}
	}
	n.repeatedRun = true
	n.mu.Unlock()
}
// Stop stops the current swarm node if it is running.
func (n *nodeRunner) Stop() error {
	n.mu.Lock()
	if n.cancelReconnect != nil { // between restarts
		n.cancelReconnect()
		n.cancelReconnect = nil
	}
	if n.swarmNode == nil {
		// even though the swarm node is nil we still may need
		// to send a node leave event to perform any cleanup required.
		if n.cluster != nil {
			n.cluster.SendClusterEvent(lncluster.EventNodeLeave)
		}
		n.mu.Unlock()
		return nil
	}
	n.stopping = true
	// Bound the shutdown to 15 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	n.mu.Unlock()
	if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
		return err
	}
	n.cluster.SendClusterEvent(lncluster.EventNodeLeave)
	// Wait for handleNodeExit to finish its bookkeeping before returning.
	<-n.done
	return nil
}
// State returns a snapshot of the current node state. It is safe to call on a
// nil receiver, which reports an inactive node.
func (n *nodeRunner) State() nodeState {
	if n == nil {
		return nodeState{status: types.LocalNodeStateInactive}
	}
	n.mu.RLock()
	defer n.mu.RUnlock()
	ns := n.nodeState
	if ns.err != nil || n.cancelReconnect != nil {
		// Either an error is recorded or a reconnect is pending.
		if errors.Cause(ns.err) == errSwarmLocked {
			ns.status = types.LocalNodeStateLocked
		} else {
			ns.status = types.LocalNodeStateError
		}
	} else {
		// Non-blocking readiness probe: n.ready is closed once the node is up.
		select {
		case <-n.ready:
			ns.status = types.LocalNodeStateActive
		default:
			ns.status = types.LocalNodeStatePending
		}
	}
	return ns
}
// enableReconnectWatcher schedules a restart of the swarm node after an
// exponentially growing delay, capped at maxReconnectDelay. The pending
// restart can be aborted through n.cancelReconnect.
// NOTE(review): mutation of n.reconnectDelay/n.cancelReconnect appears to rely
// on the caller holding n.mu (handleNodeExit does) — confirm all call sites.
func (n *nodeRunner) enableReconnectWatcher() {
	if n.stopping {
		return
	}
	// Exponential backoff with a ceiling.
	n.reconnectDelay *= 2
	if n.reconnectDelay > maxReconnectDelay {
		n.reconnectDelay = maxReconnectDelay
	}
	logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
	delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
	n.cancelReconnect = cancel
	go func() {
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			// The delay was cancelled rather than elapsed; do not restart.
			return
		}
		n.mu.Lock()
		defer n.mu.Unlock()
		if n.stopping {
			return
		}
		if err := n.start(n.config); err != nil {
			n.err = err
		}
	}()
}
// nodeState represents information about the current state of the cluster and
// provides access to the grpc clients.
type nodeState struct {
	swarmNode       *swarmnode.Node
	grpcConn        *grpc.ClientConn
	controlClient   swarmapi.ControlClient // non-nil only on an active manager
	logsClient      swarmapi.LogsClient
	status          types.LocalNodeState
	actualLocalAddr string
	err             error // last error observed for this node, if any
}
// IsActiveManager returns true if node is a manager ready to accept control requests. It is safe to access the client properties if this returns true.
func (ns nodeState) IsActiveManager() bool {
	return ns.controlClient != nil
}
// IsManager returns true if node is a manager (whether or not it is currently
// active/ready to serve control requests).
func (ns nodeState) IsManager() bool {
	return ns.swarmNode != nil && ns.swarmNode.Manager() != nil
}
// NodeID returns node's ID or empty string if node is inactive.
func (ns nodeState) NodeID() string {
	if ns.swarmNode == nil {
		// Inactive node: no identity to report.
		return ""
	}
	return ns.swarmNode.NodeID()
}
| SvenDowideit/docker | daemon/cluster/noderunner.go | GO | apache-2.0 | 11,055 |
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
// Copyright (c) 2011, 2012 Open Networking Foundation
// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
// This library was generated by the LoxiGen Compiler.
// See the file LICENSE.txt which should have been included in the source distribution
// Automatically generated by LOXI from template const_serializer.java
// Do not modify
package org.projectfloodlight.openflow.protocol.ver14;
import org.projectfloodlight.openflow.protocol.*;
import org.projectfloodlight.openflow.protocol.action.*;
import org.projectfloodlight.openflow.protocol.actionid.*;
import org.projectfloodlight.openflow.protocol.bsntlv.*;
import org.projectfloodlight.openflow.protocol.errormsg.*;
import org.projectfloodlight.openflow.protocol.meterband.*;
import org.projectfloodlight.openflow.protocol.instruction.*;
import org.projectfloodlight.openflow.protocol.instructionid.*;
import org.projectfloodlight.openflow.protocol.match.*;
import org.projectfloodlight.openflow.protocol.stat.*;
import org.projectfloodlight.openflow.protocol.oxm.*;
import org.projectfloodlight.openflow.protocol.oxs.*;
import org.projectfloodlight.openflow.protocol.queueprop.*;
import org.projectfloodlight.openflow.types.*;
import org.projectfloodlight.openflow.util.*;
import org.projectfloodlight.openflow.exceptions.*;
import org.projectfloodlight.openflow.protocol.OFPortModFailedCode;
import io.netty.buffer.ByteBuf;
import com.google.common.hash.PrimitiveSink;
/**
 * Serializer for {@code OFPortModFailedCode} in OpenFlow 1.4: maps enum
 * constants to/from their 16-bit wire values (0x0-0x4).
 * Generated code (LoxiGen) — keep structure in sync with the generator.
 */
public class OFPortModFailedCodeSerializerVer14 {

    // Wire values as defined for this protocol version.
    public final static short BAD_PORT_VAL = (short) 0x0;
    public final static short BAD_HW_ADDR_VAL = (short) 0x1;
    public final static short BAD_CONFIG_VAL = (short) 0x2;
    public final static short BAD_ADVERTISE_VAL = (short) 0x3;
    public final static short EPERM_VAL = (short) 0x4;

    /** Reads one u16 from the buffer; unknown values surface as OFParseError. */
    public static OFPortModFailedCode readFrom(ByteBuf bb) throws OFParseError {
        try {
            return ofWireValue(bb.readShort());
        } catch (IllegalArgumentException e) {
            // Translate the mapping failure into the protocol parse exception.
            throw new OFParseError(e);
        }
    }

    /** Writes the enum's u16 wire value to the buffer. */
    public static void writeTo(ByteBuf bb, OFPortModFailedCode e) {
        bb.writeShort(toWireValue(e));
    }

    /** Feeds the wire value into a hash sink (used for message hashing). */
    public static void putTo(OFPortModFailedCode e, PrimitiveSink sink) {
        sink.putShort(toWireValue(e));
    }

    /** Wire value to enum; throws IllegalArgumentException for unknown values. */
    public static OFPortModFailedCode ofWireValue(short val) {
        switch(val) {
            case BAD_PORT_VAL:
                return OFPortModFailedCode.BAD_PORT;
            case BAD_HW_ADDR_VAL:
                return OFPortModFailedCode.BAD_HW_ADDR;
            case BAD_CONFIG_VAL:
                return OFPortModFailedCode.BAD_CONFIG;
            case BAD_ADVERTISE_VAL:
                return OFPortModFailedCode.BAD_ADVERTISE;
            case EPERM_VAL:
                return OFPortModFailedCode.EPERM;
            default:
                throw new IllegalArgumentException("Illegal wire value for type OFPortModFailedCode in version 1.4: " + val);
        }
    }

    /** Enum to wire value; throws IllegalArgumentException for unmapped constants. */
    public static short toWireValue(OFPortModFailedCode e) {
        switch(e) {
            case BAD_PORT:
                return BAD_PORT_VAL;
            case BAD_HW_ADDR:
                return BAD_HW_ADDR_VAL;
            case BAD_CONFIG:
                return BAD_CONFIG_VAL;
            case BAD_ADVERTISE:
                return BAD_ADVERTISE_VAL;
            case EPERM:
                return EPERM_VAL;
            default:
                throw new IllegalArgumentException("Illegal enum value for type OFPortModFailedCode in version 1.4: " + e);
        }
    }
}
| floodlight/loxigen-artifacts | openflowj/gen-src/main/java/org/projectfloodlight/openflow/protocol/ver14/OFPortModFailedCodeSerializerVer14.java | Java | apache-2.0 | 3,612 |
<meta name="description" content="Letsliftgh is a Multi-level Marketing Company in Ghana.">
<meta name="keywords" content="mlm, matrix, multi-level, marketing, multi-level marketing, business, commission, lets, lift, ghana" />
<link href="img/letsliftgh-fav.png" rel="shortcut icon"> | marthyns/mlmapp | mlm/includes/headtag.php | PHP | apache-2.0 | 290 |
# Migration: renames the +objects+ table to +chorus_objects+.
# rename_table is automatically reversible, so +change+ suffices.
class RenameObjectToChorusObject < ActiveRecord::Migration
  def change
    rename_table :objects, :chorus_objects
  end
end
| jamesblunt/chorus | db/migrate/20150317192400_rename_object_to_chorus_object.rb | Ruby | apache-2.0 | 125 |
/*******************************************************************************
* Copyright 2015 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
var Probe = require('../lib/probe.js');
var aspect = require('../lib/aspect.js');
var request = require('../lib/request.js');
var util = require('util');
var am = require('..');
// Probe for the 'socket.io' module. Instruments server broadcast/emit and
// per-socket emit/receive paths for metrics ('socketio' events) and request
// tracing. Inherits the probe lifecycle from Probe.
function SocketioProbe() {
  Probe.call(this, 'socket.io');
}
util.inherits(SocketioProbe, Probe);
SocketioProbe.prototype.attach = function(name, target) {
var that = this;
if(name != "socket.io") return target;
/*
* Don't set __ddProbeAttached__ = true as we need to probe and return
* the constructor each time
*/
/*
* Patch the constructor so that we can patch io.sockets.emit() calls
* to broadcast to clients. This also picks up calls to io.emit() as
* they map down to io.socket.emit()
*/
var newtarget = aspect.afterConstructor(target, {},
function(target, methodName, methodArgs, context, server) {
var broadcast = 'broadcast';
aspect.around(server.sockets, 'emit',
function(target, methodName, methodArgs, context){
that.metricsProbeStart(context, broadcast, methodArgs);
that.requestProbeStart(context, broadcast, methodArgs);
},
function(target, methodName, methodArgs, context, rc){
that.metricsProbeEnd(context, broadcast, methodArgs);
that.requestProbeEnd(context, broadcast, methodArgs);
}
);
return server;
}
);
/*
* Remap the listen API to point to new constructor
*/
newtarget.listen = newtarget;
/*
* We patch the constructor every time, but only want to patch prototype
* functions once otherwise we'll generate multiple events
*/
if (!target.__prototypeProbeAttached__) {
target.__prototypeProbeAttached__ = true;
aspect.before(target.prototype, ['on', 'addListener'],
function(target, methodName, methodArgs, context) {
if(methodArgs[0] !== 'connection') return;
if (aspect.findCallbackArg(methodArgs) != undefined) {
aspect.aroundCallback(methodArgs, context, function(target, methodArgs, context) {
var socket = methodArgs[0];
/*
* Patch Socket#emit() calls
*/
aspect.around(socket, 'emit',
function(target, methodName, methodArgs, context){
that.metricsProbeStart(context, methodName, methodArgs);
that.requestProbeStart(context, methodName, methodArgs);
},
function(target, methodName, methodArgs, context, rc){
//Call the transaction link with a name and the callback for strong trace
var callbackPosition = aspect.findCallbackArg(methodArgs);
if (typeof(callbackPosition) != 'undefined') {
aspect.strongTraceTransactionLink('socket.io: ', methodName, methodArgs[callbackPosition]);
}
that.metricsProbeEnd(context, methodName, methodArgs);
that.requestProbeEnd(context, methodName, methodArgs);
return rc;
}
);
/*
* Patch socket.on incoming events
*/
var receive = 'receive';
aspect.before(socket, ['on', 'addListener'],
function(target, methodName, methodArgs, context) {
aspect.aroundCallback(methodArgs, context,
function(target, callbackArgs, context){
that.metricsProbeStart(context, receive, methodArgs);
that.requestProbeStart(context, receive, methodArgs);
},
function (target, callbackArgs, context, rc) {
//Call the transaction link with a name and the callback for strong trace
var callbackPosition = aspect.findCallbackArg(methodArgs);
if (typeof(callbackPosition) != 'undefined') {
aspect.strongTraceTransactionLink('socket.io: ', methodName, methodArgs[callbackPosition]);
}
that.metricsProbeEnd(context, receive, methodArgs);
that.requestProbeEnd(context, receive, methodArgs);
return rc;
}
);
}
);
}
);
}
});
}
return newtarget;
};
/*
 * Lightweight metrics probe for Socket.io websocket connections.
 *
 * Each emitted 'socketio' event carries:
 *   time:     timestamp at which the action started
 *   method:   the type of socket.io action
 *   event:    the event broadcast/emitted/received
 *   duration: time taken for the action to complete
 */
SocketioProbe.prototype.metricsEnd = function(context, methodName, methodArgs) {
  if (!context || !context.timer) {
    return;
  }
  var timer = context.timer;
  timer.stop();
  am.emit('socketio', {
    time: timer.startTimeMillis,
    method: methodName,
    event: methodArgs[0],
    duration: timer.timeDelta
  });
};
/*
 * Heavyweight request probes for Socket.io websocket connections.
 */
SocketioProbe.prototype.requestStart = function (context, methodName, methodArgs) {
  // Incoming 'receive' events start a root (top-level) request; the outbound
  // 'broadcast' and 'emit' actions are recorded as nested requests.
  var isRootRequest = (methodName === 'receive');
  context.req = request.startRequest('socketio', methodName, isRootRequest, context.timer);
};
// Closes the request started in requestStart, annotating it with the action
// type and the event name. Safe to call when no request was started.
SocketioProbe.prototype.requestEnd = function (context, methodName, methodArgs) {
  if (!context || !context.req) {
    return;
  }
  context.req.stop({method: methodName, event: methodArgs[0]});
};
module.exports = SocketioProbe;
| kgriesh/appmetrics | probes/socketio-probe.js | JavaScript | apache-2.0 | 5,878 |
package test;
import junit.framework.Test;
import junit.framework.TestSuite;
/**
 * JUnit 3 style aggregate suite bundling the file-repository tests.
 */
public class FileRepoTests {
	/** Builds the suite; discovered by name convention by JUnit runners. */
	public static Test suite() {
		TestSuite suite = new TestSuite(FileRepoTests.class.getName());
		// $JUnit-BEGIN$
		suite.addTestSuite(test.deployer.FileRepoTest.class);
		// $JUnit-END$
		return suite;
	}
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.query.vo;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.network.Network;
import com.cloud.network.Networks;
import com.cloud.offering.NetworkOffering;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name = "network_offering_view")
/**
 * Read-only JPA view entity mapping the {@code network_offering_view} database
 * view. Denormalizes a network offering together with its domain and zone
 * scoping columns so list/search API queries avoid joins at runtime.
 * Mutators exist only for the view-populated scoping columns and state.
 */
@Entity
@Table(name = "network_offering_view")
public class NetworkOfferingJoinVO extends BaseViewVO implements NetworkOffering {

    // --- Identity ---
    @Id
    @Column(name = "id", updatable = false, nullable = false)
    private long id;

    @Column(name = "uuid")
    private String uuid;

    @Column(name = "name")
    private String name;

    @Column(name = "unique_name")
    private String uniqueName;

    @Column(name = "display_text")
    private String displayText;

    // --- Offering attributes (mirrors of the offering table columns) ---
    @Column(name = "nw_rate")
    private Integer rateMbps;

    @Column(name = "mc_rate")
    private Integer multicastRateMbps;

    @Column(name = "traffic_type")
    @Enumerated(value = EnumType.STRING)
    private Networks.TrafficType trafficType;

    @Column(name = "tags", length = 4096)
    private String tags;

    @Column(name = "system_only")
    private boolean systemOnly;

    @Column(name = "specify_vlan")
    private boolean specifyVlan;

    @Column(name = "service_offering_id")
    private Long serviceOfferingId;

    @Column(name = "conserve_mode")
    private boolean conserveMode;

    @Column(name = GenericDao.REMOVED_COLUMN)
    private Date removed;

    @Column(name = GenericDao.CREATED_COLUMN)
    private Date created;

    @Column(name = "default")
    private boolean isDefault;

    @Column(name = "availability")
    @Enumerated(value = EnumType.STRING)
    NetworkOffering.Availability availability;

    @Column(name = "dedicated_lb_service")
    private boolean dedicatedLB;

    @Column(name = "shared_source_nat_service")
    private boolean sharedSourceNat;

    @Column(name = "sort_key")
    private int sortKey;

    @Column(name = "redundant_router_service")
    private boolean redundantRouter;

    @Column(name = "state")
    @Enumerated(value = EnumType.STRING)
    private NetworkOffering.State state = NetworkOffering.State.Disabled;

    @Column(name = "guest_type")
    @Enumerated(value = EnumType.STRING)
    private Network.GuestType guestType;

    @Column(name = "elastic_ip_service")
    private boolean elasticIp;

    @Column(name = "eip_associate_public_ip")
    private boolean eipAssociatePublicIp;

    @Column(name = "elastic_lb_service")
    private boolean elasticLb;

    @Column(name = "specify_ip_ranges")
    private boolean specifyIpRanges = false;

    @Column(name = "inline")
    private boolean inline;

    @Column(name = "is_persistent")
    private boolean persistent;

    @Column(name = "internal_lb")
    private boolean internalLb;

    @Column(name = "public_lb")
    private boolean publicLb;

    @Column(name = "egress_default_policy")
    private boolean egressdefaultpolicy;

    @Column(name = "concurrent_connections")
    private Integer concurrentConnections;

    @Column(name = "keep_alive_enabled")
    private boolean keepAliveEnabled = false;

    @Column(name = "supports_streched_l2")
    private boolean supportsStrechedL2 = false;

    @Column(name = "supports_public_access")
    private boolean supportsPublicAccess = false;

    @Column(name = "for_vpc")
    private boolean forVpc;

    @Column(name = "service_package_id")
    private String servicePackageUuid = null;

    // --- Domain/zone scoping columns denormalized into the view ---
    @Column(name = "domain_id")
    private String domainId = null;

    @Column(name = "domain_uuid")
    private String domainUuid = null;

    @Column(name = "domain_name")
    private String domainName = null;

    @Column(name = "domain_path")
    private String domainPath = null;

    @Column(name = "zone_id")
    private String zoneId = null;

    @Column(name = "zone_uuid")
    private String zoneUuid = null;

    @Column(name = "zone_name")
    private String zoneName = null;

    // Required by JPA.
    public NetworkOfferingJoinVO() {
    }

    @Override
    public long getId() {
        return id;
    }

    @Override
    public String getUuid() {
        return uuid;
    }

    public String getName() {
        return name;
    }

    public String getUniqueName() {
        return uniqueName;
    }

    public String getDisplayText() {
        return displayText;
    }

    public Integer getRateMbps() {
        return rateMbps;
    }

    public Integer getMulticastRateMbps() {
        return multicastRateMbps;
    }

    public Networks.TrafficType getTrafficType() {
        return trafficType;
    }

    public String getTags() {
        return tags;
    }

    public boolean isSystemOnly() {
        return systemOnly;
    }

    public boolean isSpecifyVlan() {
        return specifyVlan;
    }

    public Long getServiceOfferingId() {
        return serviceOfferingId;
    }

    public boolean isConserveMode() {
        return conserveMode;
    }

    public Date getCreated() {
        return created;
    }

    public Date getRemoved() {
        return removed;
    }

    public boolean isDefault() {
        return isDefault;
    }

    public NetworkOffering.Availability getAvailability() {
        return availability;
    }

    public boolean isDedicatedLB() {
        return dedicatedLB;
    }

    public boolean isSharedSourceNat() {
        return sharedSourceNat;
    }

    public int getSortKey() {
        return sortKey;
    }

    public boolean isRedundantRouter() {
        return redundantRouter;
    }

    public NetworkOffering.State getState() {
        return state;
    }

    // Only mutator for offering attributes; required by the NetworkOffering
    // interface.
    @Override
    public void setState(State state) {
        this.state = state;
    }

    public Network.GuestType getGuestType() {
        return guestType;
    }

    public boolean isElasticIp() {
        return elasticIp;
    }

    public boolean isAssociatePublicIP() {
        return eipAssociatePublicIp;
    }

    public boolean isElasticLb() {
        return elasticLb;
    }

    public boolean isSpecifyIpRanges() {
        return specifyIpRanges;
    }

    public boolean isInline() {
        return inline;
    }

    public boolean isPersistent() {
        return persistent;
    }

    public boolean isInternalLb() {
        return internalLb;
    }

    public boolean isPublicLb() {
        return publicLb;
    }

    public boolean isEgressDefaultPolicy() {
        return egressdefaultpolicy;
    }

    public Integer getConcurrentConnections() {
        return this.concurrentConnections;
    }

    public boolean isKeepAliveEnabled() {
        return keepAliveEnabled;
    }

    public boolean isSupportingStrechedL2() {
        return supportsStrechedL2;
    }

    public boolean isSupportingPublicAccess() {
        return supportsPublicAccess;
    }

    public boolean isForVpc() {
        return forVpc;
    }

    public void setForVpc(boolean forVpc) { this.forVpc = forVpc; }

    // Returns the service package UUID (column service_package_id).
    public String getServicePackage() {
        return servicePackageUuid;
    }

    public String getDomainId() {
        return domainId;
    }

    public void setDomainId(String domainId) {
        this.domainId = domainId;
    }

    public String getDomainUuid() {
        return domainUuid;
    }

    public void setDomainUuid(String domainUuid) {
        this.domainUuid = domainUuid;
    }

    public String getDomainName() {
        return domainName;
    }

    public void setDomainName(String domainName) {
        this.domainName = domainName;
    }

    public String getDomainPath() {
        return domainPath;
    }

    public void setDomainPath(String domainPath) {
        this.domainPath = domainPath;
    }

    public String getZoneId() {
        return zoneId;
    }

    public void setZoneId(String zoneId) {
        this.zoneId = zoneId;
    }

    public String getZoneUuid() {
        return zoneUuid;
    }

    public void setZoneUuid(String zoneUuid) {
        this.zoneUuid = zoneUuid;
    }

    public String getZoneName() {
        return zoneName;
    }

    public void setZoneName(String zoneName) {
        this.zoneName = zoneName;
    }
}
| GabrielBrascher/cloudstack | server/src/main/java/com/cloud/api/query/vo/NetworkOfferingJoinVO.java | Java | apache-2.0 | 8,979 |
package ecs
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// CreateImagePipeline invokes the ecs.CreateImagePipeline API synchronously
// CreateImagePipeline invokes the ecs.CreateImagePipeline API synchronously.
// The returned response is always non-nil; check err before using its fields.
func (client *Client) CreateImagePipeline(request *CreateImagePipelineRequest) (response *CreateImagePipelineResponse, err error) {
	response = CreateCreateImagePipelineResponse()
	err = client.DoAction(request, response)
	return
}
// CreateImagePipelineWithChan invokes the ecs.CreateImagePipeline API asynchronously
func (client *Client) CreateImagePipelineWithChan(request *CreateImagePipelineRequest) (<-chan *CreateImagePipelineResponse, <-chan error) {
responseChan := make(chan *CreateImagePipelineResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.CreateImagePipeline(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// CreateImagePipelineWithCallback invokes the ecs.CreateImagePipeline API asynchronously
func (client *Client) CreateImagePipelineWithCallback(request *CreateImagePipelineRequest, callback func(response *CreateImagePipelineResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *CreateImagePipelineResponse
var err error
defer close(result)
response, err = client.CreateImagePipeline(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// CreateImagePipelineRequest is the request struct for api CreateImagePipeline
type CreateImagePipelineRequest struct {
*requests.RpcRequest
BaseImageType string `position:"Query" name:"BaseImageType"`
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
ClientToken string `position:"Query" name:"ClientToken"`
ToRegionId *[]string `position:"Query" name:"ToRegionId" type:"Repeated"`
InternetMaxBandwidthOut requests.Integer `position:"Query" name:"InternetMaxBandwidthOut"`
Description string `position:"Query" name:"Description"`
ResourceGroupId string `position:"Query" name:"ResourceGroupId"`
ImageName string `position:"Query" name:"ImageName"`
SystemDiskSize requests.Integer `position:"Query" name:"SystemDiskSize"`
InstanceType string `position:"Query" name:"InstanceType"`
Tag *[]CreateImagePipelineTag `position:"Query" name:"Tag" type:"Repeated"`
ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"`
OwnerAccount string `position:"Query" name:"OwnerAccount"`
OwnerId requests.Integer `position:"Query" name:"OwnerId"`
BaseImage string `position:"Query" name:"BaseImage"`
VSwitchId string `position:"Query" name:"VSwitchId"`
AddAccount *[]string `position:"Query" name:"AddAccount" type:"Repeated"`
DeleteInstanceOnFailure requests.Boolean `position:"Query" name:"DeleteInstanceOnFailure"`
Name string `position:"Query" name:"Name"`
BuildContent string `position:"Query" name:"BuildContent"`
}
// CreateImagePipelineTag is a repeated param struct in CreateImagePipelineRequest
type CreateImagePipelineTag struct {
Key string `name:"Key"`
Value string `name:"Value"`
}
// CreateImagePipelineResponse is the response struct for api CreateImagePipeline
type CreateImagePipelineResponse struct {
*responses.BaseResponse
ImagePipelineId string `json:"ImagePipelineId" xml:"ImagePipelineId"`
RequestId string `json:"RequestId" xml:"RequestId"`
}
// CreateCreateImagePipelineRequest creates a request to invoke CreateImagePipeline API
func CreateCreateImagePipelineRequest() (request *CreateImagePipelineRequest) {
request = &CreateImagePipelineRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Ecs", "2014-05-26", "CreateImagePipeline", "ecs", "openAPI")
request.Method = requests.POST
return
}
// CreateCreateImagePipelineResponse creates a response to parse from CreateImagePipeline response
func CreateCreateImagePipelineResponse() (response *CreateImagePipelineResponse) {
response = &CreateImagePipelineResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| tklauser/cilium | vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/ecs/create_image_pipeline.go | GO | apache-2.0 | 5,505 |
// Copyright (C) 2015 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
es6id: 25.2.1.1
description: Assignment of function `name` attribute
info: >
[...]
3. Return CreateDynamicFunction(C, NewTarget, "generator", args).
ES6 19.2.1.1.1
RuntimeSemantics: CreateDynamicFunction(constructor, newTarget, kind, args)
[...]
29. Perform SetFunctionName(F, "anonymous").
includes: [propertyHelper.js]
---*/
// The GeneratorFunction constructor is not a global; reach it via the
// prototype chain of a generator function expression.
var GeneratorFunction = Object.getPrototypeOf(function* () {}).constructor;
// Per CreateDynamicFunction step 29, SetFunctionName(F, "anonymous").
assert.sameValue(GeneratorFunction().name, 'anonymous');
// A fresh instance is created for each check — the property helpers
// presumably mutate the object under test (e.g. attempted writes/deletes).
verifyNotEnumerable(GeneratorFunction(), 'name');
verifyNotWritable(GeneratorFunction(), 'name');
verifyConfigurable(GeneratorFunction(), 'name');
| m0ppers/arangodb | 3rdParty/V8/V8-5.0.71.39/test/test262/data/test/built-ins/GeneratorFunction/instance-name.js | JavaScript | apache-2.0 | 794 |
/*
* Copyright 2012-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.actuate.autoconfigure.metrics.export.kairos;
import org.junit.Test;
import org.springframework.boot.actuate.autoconfigure.metrics.export.properties.StepRegistryPropertiesConfigAdapterTests;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests for {@link KairosPropertiesConfigAdapter}.
*
* @author Stephane Nicoll
*/
public class KairosPropertiesConfigAdapterTests extends
		StepRegistryPropertiesConfigAdapterTests<KairosProperties, KairosPropertiesConfigAdapter> {

	@Override
	protected KairosProperties createProperties() {
		return new KairosProperties();
	}

	@Override
	protected KairosPropertiesConfigAdapter createConfigAdapter(
			KairosProperties properties) {
		return new KairosPropertiesConfigAdapter(properties);
	}

	// Each test verifies that a property set on KairosProperties is surfaced
	// unchanged through the corresponding adapter accessor.

	@Test
	public void whenPropertiesUrisIsSetAdapterUriReturnsIt() {
		KairosProperties properties = createProperties();
		properties.setUri("https://kairos.example.com:8080/api/v1/datapoints");
		assertThat(createConfigAdapter(properties).uri())
				.isEqualTo("https://kairos.example.com:8080/api/v1/datapoints");
	}

	@Test
	public void whenPropertiesUserNameIsSetAdapterUserNameReturnsIt() {
		KairosProperties properties = createProperties();
		properties.setUserName("alice");
		assertThat(createConfigAdapter(properties).userName()).isEqualTo("alice");
	}

	@Test
	public void whenPropertiesPasswordIsSetAdapterPasswordReturnsIt() {
		KairosProperties properties = createProperties();
		properties.setPassword("secret");
		assertThat(createConfigAdapter(properties).password()).isEqualTo("secret");
	}

}
| hello2009chen/spring-boot | spring-boot-project/spring-boot-actuator-autoconfigure/src/test/java/org/springframework/boot/actuate/autoconfigure/metrics/export/kairos/KairosPropertiesConfigAdapterTests.java | Java | apache-2.0 | 2,222 |
// ES5 class pattern (compiled output): constructor C with an instance
// method x that always produces the constant 1.
var C = (function () {
    function C() {
    }
    C.prototype.x = function () {
        var result = 1;
        return result;
    };
    return C;
}());
// ES5 class pattern (compiled output): constructor D with an instance
// method x that takes one argument and intentionally does nothing.
var D = (function () {
    function D() {
    }
    D.prototype.x = function (v) {
        return undefined;
    };
    return D;
}());
#!/usr/bin/env node_modules/mocha/bin/mocha
/* jshint node:true */
/* global describe */
/**
* ares.spec.js -- ARES server test suite
*/
var path = require("path"),
fs = require("graceful-fs"),
npmlog = require('npmlog'),
temp = require("temp"),
mkdirp = require("mkdirp"),
rimraf = require("rimraf");
// Command-line option schema for nopt: long names and their value types.
var knownOpts = {
    "config": path,
    "help": Boolean,
    "level": ['silly', 'verbose', 'info', 'http', 'warn', 'error']
};
// Single-letter aliases for the long options above.
var shortHands = {
    "c": "--config",
    "h": "--help",
    "l": "--level",
    "v": "--level verbose"
};
var helpString = [
    "",
    "Ares server tester.",
    "Usage: '" + process.argv[0] + " " + process.argv[1] + " [OPTIONS]",
    "",
    "Options:",
    " -c, --config path to ide.json [default: '" + path.resolve(__dirname, "..", "..", "ide.json") + "]",
    " -h, --help help message [boolean]",
    " -v, --verbose verbose execution mode [boolean]",
    " -q, --quiet really quiet [boolean]",
    ""
];
var argv = require('nopt')(knownOpts, shortHands, process.argv, 2 /*drop 'node' & basename*/);
// Default config location is ../../ide.json relative to this test file.
argv.config = argv.config || path.resolve(__dirname, "..", "..", "ide.json");
if (argv.help) {
    helpString.forEach(function(s) { console.log(s); });
    process.exit(0);
}

/**********************************************************************/

// Logging setup: npmlog with this suite's heading; default level 'error'.
var log = npmlog;
log.heading = 'ares.spec';
log.level = argv.level || 'error';

/**********************************************************************/

log.verbose("main", "running in verbose mode");
log.verbose("main", "argv:", argv);
var myPort = 9019;
log.verbose("main", "loading " + argv.config);
// Synchronous load is acceptable here: this runs once at suite startup.
var config = JSON.parse(fs.readFileSync(argv.config, 'utf8'));
log.verbose("main", "config:", config);
var myTestDir = "_test";
/*
* test suite
*/
// Resolves the current user's home directory from the environment:
// USERPROFILE on Windows, HOME elsewhere.
function getHome() {
    var envVar = process.platform === 'win32' ? 'USERPROFILE' : 'HOME';
    return process.env[envVar];
}
// Mocha suite: runs the shared filesystem spec against each configured
// filesystem backend (Dropbox when credentials are present, always local).
describe("Testing filesystems", function() {
    var FsSpec = require("./fs.spec.js");
    // Pick the 'dropbox' service entry from ide.json, if any.
    var dropbox = config.services.filter(function(service) {
        return service.id === 'dropbox';
    })[0];
    // Dropbox backend is exercised only when an app key is configured.
    if (dropbox && dropbox.auth && dropbox.auth.appKey) {
        describe("fsDropbox", function(done) {
            var myDropboxApp = 'com.enyojs.ares';
            // Assume a user's account grip in the local file-system.
            var myTestDirPath = path.join(getHome(), 'Dropbox', 'Apps', myDropboxApp, myTestDir);
            // Start from a clean, empty test directory.
            rimraf.sync(myTestDirPath);
            mkdirp.sync(myTestDirPath);
            new FsSpec({
                filesystem: "./../../hermes/fsDropbox.js",
                pathname: "/",
                port: myPort,
                dir: myTestDir,
                level: argv.level,
                auth: dropbox.auth
            });
        });
    }
    describe("fsLocal", function() {
        // Local backend runs against a throwaway temp directory.
        var myFsPath = temp.mkdirSync({prefix: 'com.palm.ares.test.fs'});
        new FsSpec({
            filesystem: "./../../hermes/fsLocal.js",
            pathname: "/",
            port: myPort,
            dir: myTestDir,
            level: argv.level,
            root: myFsPath
        });
    });
});
| recurve/ares-ecosystem | test/server/ares.spec.js | JavaScript | apache-2.0 | 2,853 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.hbase.index.covered;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.hbase.index.covered.data.IndexMemStore;
import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
import org.apache.phoenix.hbase.index.covered.update.ColumnTracker;
import org.apache.phoenix.hbase.index.covered.update.IndexedColumnGroup;
import org.apache.phoenix.hbase.index.scanner.Scanner;
import org.apache.phoenix.hbase.index.scanner.ScannerBuilder;
/**
 * Manage the state of the HRegion's view of the table, for the single row.
 * <p>
 * Currently, this is a single-use object - you need to create a new one for each row that you need
 * to manage. In the future, we could make this object reusable, but for the moment its easier to
 * manage as a throw-away object.
 * <p>
 * This class is <b>not</b> thread-safe - it requires external synchronization if accessed
 * concurrently.
 */
public class LocalTableState implements TableState {
  // Timestamp of the mutation currently being indexed; see setCurrentTimestamp.
  private long ts;
  private RegionCoprocessorEnvironment env;
  // In-memory view of the row: persisted state plus any applied pending updates.
  private KeyValueStore memstore;
  // Accessor for the row's persisted state in the local region.
  private LocalHBaseState table;
  // The client mutation this state was created for.
  private Mutation update;
  // Column trackers handed out so far (one per distinct indexed column set).
  private Set<ColumnTracker> trackedColumns = new HashSet<ColumnTracker>();
  private ScannerBuilder scannerBuilder;
  // KeyValues of the pending update (set via setPendingUpdates, applied via applyPendingUpdates).
  private List<KeyValue> kvs = new ArrayList<KeyValue>();
  private List<? extends IndexedColumnGroup> hints;
  // Tracks which columns have already been loaded from the persisted row state.
  private CoveredColumns columnSet;
  public LocalTableState(RegionCoprocessorEnvironment environment, LocalHBaseState table, Mutation update) {
    this.env = environment;
    this.table = table;
    this.update = update;
    this.memstore = new IndexMemStore();
    this.scannerBuilder = new ScannerBuilder(memstore, update);
    this.columnSet = new CoveredColumns();
  }
  /**
   * Record the given KeyValues as the pending update AND apply them to the in-memory store.
   * @param kvs update to apply; no-op when <tt>null</tt>
   */
  public void addPendingUpdates(KeyValue... kvs) {
    if (kvs == null) return;
    addPendingUpdates(Arrays.asList(kvs));
  }
  /** List form of {@link #addPendingUpdates(KeyValue...)}. */
  public void addPendingUpdates(List<KeyValue> kvs) {
    if(kvs == null) return;
    setPendingUpdates(kvs);
    addUpdate(kvs);
  }
  // Apply to the memstore, overwriting any entries with the same key.
  private void addUpdate(List<KeyValue> list) {
    addUpdate(list, true);
  }
  /**
   * Add the given KeyValues to the in-memory store.
   * @param overwrite when <tt>true</tt>, an existing entry with the same key is replaced
   */
  private void addUpdate(List<KeyValue> list, boolean overwrite) {
    if (list == null) return;
    for (KeyValue kv : list) {
      this.memstore.add(kv, overwrite);
    }
  }
  @Override
  public RegionCoprocessorEnvironment getEnvironment() {
    return this.env;
  }
  @Override
  public long getCurrentTimestamp() {
    return this.ts;
  }
  @Override
  public void setCurrentTimestamp(long timestamp) {
    this.ts = timestamp;
  }
  /** Forget all column trackers handed out so far. */
  public void resetTrackedColumns() {
    this.trackedColumns.clear();
  }
  /** @return the live (mutable) set of trackers handed out by this state */
  public Set<ColumnTracker> getTrackedColumns() {
    return this.trackedColumns;
  }
  @Override
  public Pair<Scanner, IndexUpdate> getIndexedColumnsTableState(
      Collection<? extends ColumnReference> indexedColumns) throws IOException {
    // lazily pull the persisted state for any columns we haven't loaded yet
    ensureLocalStateInitialized(indexedColumns);
    // filter out things with a newer timestamp and track the column references to which it applies
    ColumnTracker tracker = new ColumnTracker(indexedColumns);
    synchronized (this.trackedColumns) {
      // we haven't seen this set of columns before, so we need to create a new tracker
      if (!this.trackedColumns.contains(tracker)) {
        this.trackedColumns.add(tracker);
      }
    }
    Scanner scanner =
        this.scannerBuilder.buildIndexedColumnScanner(indexedColumns, tracker, ts);
    return new Pair<Scanner, IndexUpdate>(scanner, new IndexUpdate(tracker));
  }
  /**
   * Initialize the managed local state. Generally, this will only be called by
   * {@link #getNonIndexedColumnsTableState(List)}, which is unlikely to be called concurrently from the outside.
   * Even then, there is still fairly low contention as each new Put/Delete will have its own table
   * state.
   */
  @SuppressWarnings("deprecation")
  private synchronized void ensureLocalStateInitialized(
      Collection<? extends ColumnReference> columns) throws IOException {
    // check to see if we haven't initialized any columns yet
    Collection<? extends ColumnReference> toCover = this.columnSet.findNonCoveredColumns(columns);
    // we have all the columns loaded, so we are good to go.
    if (toCover.isEmpty()) {
      return;
    }
    // add the current state of the row
    this.addUpdate(this.table.getCurrentRowState(update, toCover).list(), false);
    // add the covered columns to the set
    for (ColumnReference ref : toCover) {
      this.columnSet.addColumn(ref);
    }
  }
  @Override
  public Map<String, byte[]> getUpdateAttributes() {
    return this.update.getAttributesMap();
  }
  @Override
  public byte[] getCurrentRowKey() {
    return this.update.getRow();
  }
  /** @return the full in-memory view of the row (stored state plus applied updates) */
  public Result getCurrentRowState() {
    KeyValueScanner scanner = this.memstore.getScanner();
    List<Cell> kvs = new ArrayList<Cell>();
    while (scanner.peek() != null) {
      try {
        kvs.add(scanner.next());
      } catch (IOException e) {
        // this should never happen - something has gone terribly awry if it has
        throw new RuntimeException("Local MemStore threw IOException!");
      }
    }
    return Result.create(kvs);
  }
  /**
   * Helper to add a {@link Mutation} to the values stored for the current row
   * @param pendingUpdate update to apply
   */
  public void addUpdateForTesting(Mutation pendingUpdate) {
    for (Map.Entry<byte[], List<Cell>> e : pendingUpdate.getFamilyCellMap().entrySet()) {
      List<KeyValue> edits = KeyValueUtil.ensureKeyValues(e.getValue());
      addUpdate(edits);
    }
  }
  /**
   * @param hints index column hints for the current update; retrieved via
   *          {@link #getIndexColumnHints()}
   */
  public void setHints(List<? extends IndexedColumnGroup> hints) {
    this.hints = hints;
  }
  @Override
  public List<? extends IndexedColumnGroup> getIndexColumnHints() {
    return this.hints;
  }
  @Override
  public Collection<KeyValue> getPendingUpdate() {
    return this.kvs;
  }
  /**
   * Set the {@link KeyValue}s in the update for which we are currently building an index update,
   * but don't actually apply them.
   * @param update pending {@link KeyValue}s
   */
  public void setPendingUpdates(Collection<KeyValue> update) {
    this.kvs.clear();
    this.kvs.addAll(update);
  }
  /**
   * Apply the {@link KeyValue}s set in {@link #setPendingUpdates(Collection)}.
   */
  public void applyPendingUpdates() {
    this.addUpdate(kvs);
  }
  /**
   * Rollback all the given values from the underlying state.
   * @param values
   */
  public void rollback(Collection<KeyValue> values) {
    for (KeyValue kv : values) {
      this.memstore.rollback(kv);
    }
  }
} | cloudera-labs/phoenix | phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java | Java | apache-2.0 | 7,986 |
package org.apereo.cas.services;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import java.io.File;
import java.io.IOException;
import static org.junit.Assert.*;
/**
 * @author Misagh Moayyed
 * @since 5.0.0
 */
@RunWith(JUnit4.class)
@Slf4j
public class ReturnAllAttributeReleasePolicyTests {
    private static final ObjectMapper MAPPER = new ObjectMapper();
    private static final File JSON_FILE = new File(FileUtils.getTempDirectoryPath(), "returnAllAttributeReleasePolicy.json");

    @Test
    public void verifySerializeAReturnAllAttributeReleasePolicyToJson() throws IOException {
        // Round-trip the policy through its JSON representation and expect an
        // instance equal to the one that was written out.
        final ReturnAllAttributeReleasePolicy written = new ReturnAllAttributeReleasePolicy();
        MAPPER.writeValue(JSON_FILE, written);
        final RegisteredServiceAttributeReleasePolicy read = MAPPER.readValue(JSON_FILE, ReturnAllAttributeReleasePolicy.class);
        assertEquals(written, read);
    }
}
| dodok1/cas | core/cas-server-core-authentication-attributes/src/test/java/org/apereo/cas/services/ReturnAllAttributeReleasePolicyTests.java | Java | apache-2.0 | 1,114 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/ec2/model/CapacityReservationSpecificationResponse.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <utility>
using namespace Aws::Utils::Xml;
using namespace Aws::Utils;
namespace Aws
{
namespace EC2
{
namespace Model
{
// NOTE: AWS SDK code generated from the EC2 service model - prefer regenerating
// over hand-editing. The m_*HasBeenSet flags gate which fields are serialized.
CapacityReservationSpecificationResponse::CapacityReservationSpecificationResponse() :
    m_capacityReservationPreference(CapacityReservationPreference::NOT_SET),
    m_capacityReservationPreferenceHasBeenSet(false),
    m_capacityReservationTargetHasBeenSet(false)
{
}
// Construct directly from a parsed EC2 XML response node.
CapacityReservationSpecificationResponse::CapacityReservationSpecificationResponse(const XmlNode& xmlNode) :
    m_capacityReservationPreference(CapacityReservationPreference::NOT_SET),
    m_capacityReservationPreferenceHasBeenSet(false),
    m_capacityReservationTargetHasBeenSet(false)
{
  *this = xmlNode;
}
// Deserialize: read each known child element, set its value and HasBeenSet flag.
CapacityReservationSpecificationResponse& CapacityReservationSpecificationResponse::operator =(const XmlNode& xmlNode)
{
  XmlNode resultNode = xmlNode;
  if(!resultNode.IsNull())
  {
    XmlNode capacityReservationPreferenceNode = resultNode.FirstChild("capacityReservationPreference");
    if(!capacityReservationPreferenceNode.IsNull())
    {
      m_capacityReservationPreference = CapacityReservationPreferenceMapper::GetCapacityReservationPreferenceForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(capacityReservationPreferenceNode.GetText()).c_str()).c_str());
      m_capacityReservationPreferenceHasBeenSet = true;
    }
    XmlNode capacityReservationTargetNode = resultNode.FirstChild("capacityReservationTarget");
    if(!capacityReservationTargetNode.IsNull())
    {
      m_capacityReservationTarget = capacityReservationTargetNode;
      m_capacityReservationTargetHasBeenSet = true;
    }
  }
  return *this;
}
// Serialize to EC2 query-string form using an indexed location prefix
// (used when this object is an element of a list).
void CapacityReservationSpecificationResponse::OutputToStream(Aws::OStream& oStream, const char* location, unsigned index, const char* locationValue) const
{
  if(m_capacityReservationPreferenceHasBeenSet)
  {
      oStream << location << index << locationValue << ".CapacityReservationPreference=" << CapacityReservationPreferenceMapper::GetNameForCapacityReservationPreference(m_capacityReservationPreference) << "&";
  }
  if(m_capacityReservationTargetHasBeenSet)
  {
      Aws::StringStream capacityReservationTargetLocationAndMemberSs;
      capacityReservationTargetLocationAndMemberSs << location << index << locationValue << ".CapacityReservationTarget";
      m_capacityReservationTarget.OutputToStream(oStream, capacityReservationTargetLocationAndMemberSs.str().c_str());
  }
}
// Serialize to EC2 query-string form using a plain (non-indexed) location prefix.
void CapacityReservationSpecificationResponse::OutputToStream(Aws::OStream& oStream, const char* location) const
{
  if(m_capacityReservationPreferenceHasBeenSet)
  {
      oStream << location << ".CapacityReservationPreference=" << CapacityReservationPreferenceMapper::GetNameForCapacityReservationPreference(m_capacityReservationPreference) << "&";
  }
  if(m_capacityReservationTargetHasBeenSet)
  {
      Aws::String capacityReservationTargetLocationAndMember(location);
      capacityReservationTargetLocationAndMember += ".CapacityReservationTarget";
      m_capacityReservationTarget.OutputToStream(oStream, capacityReservationTargetLocationAndMember.c_str());
  }
}
} // namespace Model
} // namespace EC2
} // namespace Aws
| aws/aws-sdk-cpp | aws-cpp-sdk-ec2/source/model/CapacityReservationSpecificationResponse.cpp | C++ | apache-2.0 | 3,523 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.Mvc;
namespace postalcodefinder.Controllers
{
    /// <summary>
    /// Serves the application's landing pages.
    /// </summary>
    public class HomeController : Controller
    {
        /// <summary>Renders the default home page.</summary>
        /// <returns>The default view for this action, titled via <c>ViewBag</c>.</returns>
        public ActionResult Index()
        {
            ViewBag.Title = "Home Page";
            return View();
        }
    }
}
| TeamArachne/postalcodefinder | postalcodefinder/postalcodefinder/Controllers/HomeController.cs | C# | apache-2.0 | 336 |
// 24-hour mode flag (cleared when a meridian column is built) and the set of
// inputs this widget decorates.
var twintyfour=true, $inputbox=$('.timepicker');
// Build the dropdown DOM after each matched input: hour/min/sec columns, each
// a prev button, a text field and a next button. A truthy `meridian` argument
// also adds an AM/PM column and switches the widget into 12-hour mode.
var setTimearea=function(meridian){
	var $div=$('<div/>'), $input=$('<input type="text"/>');
	var lists=['hour', 'min', 'sec'];
	$div.clone().addClass('timepicker_wrap').insertAfter($inputbox);
	for(var i=0; i< lists.length; i++){
		$div.clone().addClass(lists[i]).appendTo('.timepicker_wrap');
		$div.clone().addClass('btn prev').appendTo('.'+lists[i]);
		$div.clone().addClass('ti_tx').append($input.clone().addClass('in_txt')).appendTo('.'+lists[i]);
		$div.clone().addClass('btn next').appendTo('.'+lists[i]);
	}
	if(meridian){
		twintyfour=false;
		$div.clone().addClass('meridian').appendTo('.timepicker_wrap');
		$div.clone().addClass('btn prev').appendTo('.meridian');
		$div.clone().addClass('ti_tx').append($input.clone().addClass('in_txt')).appendTo('.meridian');
		$div.clone().addClass('btn next').appendTo('.meridian');
	}
};
// Handle one prev/next arrow click. `tnum` is the affected field's current
// value (a string read from its input); `place` is the clicked .btn element,
// whose parent's class name (hour/min/sec/meridian) identifies the field.
// The stepped value is wrapped/zero-padded by addZero and written back into
// the matching .in_txt input of this picker.
var checkTime=function(tnum, place){
	// BUGFIX: `sec` was assigned below without being declared, leaking a global.
	var $area=$(place.parentElement.parentElement).find('.in_txt'), m, h, sec;
	switch(place.parentElement.className){
	case 'hour':
		if(place.classList[1] === 'prev') {
			h=resuceNum(tnum);
			$area.eq(0).val(addZero(h, true));
		}
		else if(place.classList[1] === 'next'){
			h=addNum(tnum);
			$area.eq(0).val(addZero(h, true));
		}
		break;
	case 'min':
		if(place.classList[1] === 'prev') {
			m=resuceNum(tnum);
			$area.eq(1).val(addZero(m));
		}
		else if(place.classList[1] === 'next'){
			m=addNum(tnum);
			$area.eq(1).val(addZero(m));
		}
		break;
	case 'sec':
		if(place.classList[1] === 'prev') {
			sec=resuceNum(tnum);
			$area.eq(2).val(addZero(sec));
		}
		else if(place.classList[1] === 'next'){
			sec=addNum(tnum);
			$area.eq(2).val(addZero(sec));
		}
		break;
	case 'meridian':
		// AM/PM has no numeric step; just toggle the value.
		if($area.eq(3).val() === 'AM') $area.eq(3).val('PM');
		else $area.eq(3).val('AM');
		break;
	default:
		alert('get fail');
	}
};
// Wrap a field value into its valid range - hours run 1..24 (mapped down to
// 1..12 when the widget is in meridian mode), minutes/seconds run 0..59 -
// then left-pad single digits with a zero (returning a string in that case).
function addZero(i, hours) {
	if (hours) {
		if (i > 24) {
			i = 1;
		} else if (i < 1) {
			i = 24;
		}
		if (!twintyfour && i > 12) {
			i -= 12;
		}
	} else {
		if (i > 59) {
			i = 0;
		} else if (i < 0) {
			i = 59;
		}
	}
	return (i < 10) ? "0" + i : i;
}
// Seed a picker's dropdown fields: from the input's existing "hh:mm:ss[:TZ]"
// value when present, otherwise from the current wall-clock time (and push
// that fresh time back into the input).
function setInit(inputbox){
	var $area=$(inputbox[0].nextElementSibling).find('.in_txt');
	var date=new Date(), tz='AM';
	var list=[addZero(date.getHours(), true), addZero(date.getMinutes()), addZero(date.getSeconds()), tz];
	if(inputbox.val().length===0){
		for(var i=0; i<$area.length; i++) $($area[i]).val(list[i]);
		setValue(inputbox, $area);
	}else {
		var formateTime=inputbox.val().split(':');
		for(var i=0; i<$area.length; i++) $($area[i]).val(formateTime[i]);
	}
}
// True when `dom` sits inside one of the picker dropdowns currently in the
// document (used to decide whether an outside click should close them).
function isSetTimeArea(dom){
	var open=false;
	if($('body').find('.timepicker_wrap')[1] !==undefined)
		open=$.contains($('body').find('.timepicker_wrap')[0],dom)|| $.contains($('body').find('.timepicker_wrap')[1],dom)
	else open=$.contains($('body').find('.timepicker_wrap')[0],dom)
	return open;
}
// Join the dropdown's field values back into the input as "hh:mm:ss" or
// "hh:mm:ss:TZ" when a meridian field exists.
function setValue(inputbox, area){
	area.eq(3).val()===undefined ?
		inputbox.val(area.eq(0).val()+':'+area.eq(1).val()+':'+area.eq(2).val()) :
		inputbox.val(area.eq(0).val()+':'+area.eq(1).val()+':'+area.eq(2).val()+':'+area.eq(3).val());
}
// Next-button step: coerce to a number (inputs hand us strings) and add one.
function addNum(i){
	i = +i;
	return i + 1;
}
// Prev-button step: subtract one (the `-` operator coerces string input).
// Name is a historical misspelling of "reduceNum"; kept because callers use it.
function resuceNum(i){
	return i - 1;
}
// Fade out every picker dropdown on the page.
function closeIt() {
	// BUGFIX: `$tab` was assigned without `var`, leaking a global.
	var $tab = $('.timepicker_wrap');
	$tab.stop().fadeOut(1000);
}
// NOTE(review): 'onLoad' is not a DOM event property ('onload' is); this line
// actually invokes setTimearea(false) immediately and stores its undefined
// result on window - confirm before "fixing", since the immediate call is
// what builds the widget.
window.onLoad=setTimearea(false); //show merdian or not; Empty to hide merdian select
// Wire up all event handlers for the picker widgets.
!function (){
	'use strict';
	var $submit=$('input[type=submit]');
	// Focusing a decorated input reveals its dropdown and seeds its fields.
	$inputbox.on('focus', function(){
		var input = $(this),$tab=$(this.nextElementSibling);
		if (input.is($inputbox)) input.select();
		$tab.stop().fadeIn(1000);
		setInit(input);
	});
	// Any click landing outside a picker normalizes the visible fields,
	// writes the combined value back to the input, then closes the dropdowns.
	$(document).on('click', function(e){
		var _this=e.target;
		setTimeout(function(){
			var focused_element = $(document.activeElement);
			if (!focused_element.is(':input') && !isSetTimeArea(_this)){
				for(var i= 0, l=focused_element.find('.in_txt:visible').length; i<l; i++){
					if(focused_element.find('.in_txt:visible')[i].value!== 'AM' && focused_element.find('.in_txt:visible')[i].value!=='PM'){
						if(focused_element.find('.in_txt:visible')[i].value!==undefined){
							$(focused_element.find('.in_txt:visible')[i]).val((addZero(parseInt(focused_element.find('.in_txt:visible')[i].value))));
						}
					}
				}
				focused_element.find('.timepicker_wrap:visible')[0] !==undefined ? setValue($(focused_element.find('.timepicker_wrap:visible')[0].parentElement).find('.timepicker'), $(focused_element.find('.in_txt:visible'))): '';
				closeIt();
			}
		}, 0);
	});
	// Arrow buttons: step the adjacent field, then push the result to the input.
	$('.prev').on('click', function(e){
		var $area=$(this.parentElement.parentElement).find('.in_txt');
		checkTime($(e.target.nextElementSibling.children).val(), e.target);
		setValue($(this.parentNode.parentElement.previousElementSibling), $area);
	});
	$('.next').on('click', function(e){
		var $area=$(this.parentElement.parentElement).find('.in_txt');
		checkTime($(e.target.previousElementSibling.children).val(), e.target);
		setValue($(this.parentNode.parentElement.previousElementSibling), $area);
	});
}(window, document);
| sunitha-ramesh/CodeDeployGitHubDemo | codedeploydemopro/scripts/pickerjs.js | JavaScript | apache-2.0 | 5,874 |
=begin
vcenter_create_folder_check.rb
Author: Kevin Morey <kevin@redhat.com>
Description: This method checks to ensure that vcenter_folder_path has been created
-------------------------------------------------------------------------------
Copyright 2016 Kevin Morey <kevin@redhat.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------
=end
# Write a message to the automate log; mirror it onto the task message when
# requested (always mirrored for 'error' level).
def log(level, msg, update_message=false)
  $evm.log(level, msg.to_s)
  @task.message = msg if @task && (update_message || level == 'error')
end
# Schedule this automate method to re-run after +retry_time+ and stop the
# current invocation. Does not return (exits with MIQ_OK).
# NOTE(review): the log message has a stray '}' after 'seconds' - likely a
# typo, but fixing it would change the emitted log text.
def retry_method(retry_time=10.seconds, msg='INFO', update_message=false)
  log(:info, "#{msg} - retrying in #{retry_time} seconds}", update_message)
  $evm.root['ae_result'] = 'retry'
  $evm.root['ae_retry_interval'] = retry_time
  exit MIQ_OK
end
# Look up a provisioning option, preferring the :ws_values hash over the
# task's own options. Returns nil when no task is in scope or nothing is set.
def get_task_option(task_option, result=nil)
  return nil unless @task
  key = task_option.to_sym
  ws_values = @task.options.fetch(:ws_values, {})
  result = ws_values[key] || @task.get_option(key)
  log(:info, "Found task option: {#{task_option}=>#{result}}") unless result.nil?
  result
end
begin
  # Resolve the provisioning task (or VM) we were invoked against.
  case $evm.root['vmdb_object_type']
  when 'miq_provision'
    @task = $evm.root['miq_provision']
    log(:info, "Task: #{@task.id} Request: #{@task.miq_provision_request.id} Type: #{@task.type}")
    unless @task.get_option(:placement_folder_name).nil?
      log(:info, "Provisioning object {:placement_folder_name=>#{@task.options[:placement_folder_name]}} already set")
      exit MIQ_OK
    end
    # BUGFIX: was `prov.vm_template` - `prov` is never defined in this method
    # and would raise a NameError; the provisioning task holds the template.
    vm = @task.vm_template
  when 'vm'
    vm = $evm.root['vm']
  end
  if @task
    # Resolve the requested folder path from task options, dialog, or schema.
    vcenter_folder_path = get_task_option(:vcenter_folder_path)
    vcenter_folder_path ||= $evm.root['dialog_vcenter_folder_path']
    vcenter_folder_path ||= $evm.object['vcenter_folder_path']
    unless vcenter_folder_path.blank?
      log(:info, "vcenter_folder_path: #{vcenter_folder_path}")
      vsphere_folder_path_obj = @task.get_folder_paths.detect {|key, path| vcenter_folder_path == path }
      unless vsphere_folder_path_obj.blank?
        @task.set_option(:placement_folder_name, vsphere_folder_path_obj)
        log(:info, "Provisioning object :placement_folder_name updated with #{@task.options[:placement_folder_name]}")
      else
        # Folder inventory has not refreshed yet - poll until it appears.
        retry_method(15.seconds, "Waiting for vcenter_folder_path #{vcenter_folder_path} to be created", true )
      end
    end
  else
    # Waiting on BZ1302082 to get checked in before we can cleanly check folder path creation
    # so for now we are going to use the vm custom attribute
    vcenter_folder_path = vm.custom_get(:vcenter_folder_path)
    vcenter_folder_ref = vm.custom_get(:last_vcenter_folder_ref)
    log(:info, "vcenter_folder_path: #{vcenter_folder_path} \t vcenter_folder_ref: #{vcenter_folder_ref}")
    unless vcenter_folder_ref.nil?
      provider = vm.ext_management_system
      vsphere_folder_path_obj = provider.ems_folders.detect {|ef| ef[:ems_ref] == vcenter_folder_ref }
      if vsphere_folder_path_obj.nil?
        retry_method(15.seconds, "Waiting for vcenter_folder_path #{vcenter_folder_path} to be created" )
      else
        log(:info, "vcenter_folder_path: #{vcenter_folder_path} successfully created")
      end
    end
  end
rescue => err
  log(:error, "[#{err}]\n#{err.backtrace.join("\n")}")
  exit MIQ_ABORT
end
| tjyang/ManageIQ_Essentials | automate/CloudForms_Essentials/Integration/VMware/vCenter/Methods.class/__methods__/vcenter_create_folder_check.rb | Ruby | apache-2.0 | 3,836 |
package com.cardshifter.api.incoming;
import com.cardshifter.api.messages.Message;
/**
 * Request to start a new game.
 * <p>
 * This is sent from the Client to the Server when this player invites another player (including AI) to start a new game of a chosen type.
 * <p>
 * Note: this message is (de)serialized; field names and declaration order should
 * not be changed without considering the wire format.
 */
public class StartGameRequest extends Message {
	private final int opponent;
	private final String gameType;
	/** Constructor. (no params) - produces an "empty" request with no opponent (-1) and a blank game type. */
	public StartGameRequest() {
		this(-1, "");
	}
	/**
	 * Constructor.
	 * @param opponent The Id of the player entity being invited by this player
	 * @param gameType The type / mod of the game chosen by this player
	 */
	public StartGameRequest(int opponent, String gameType) {
		super("startgame");
		this.opponent = opponent;
		this.gameType = gameType;
	}
	/** @return The Id of the player entity being invited by this player */
	public int getOpponent() {
		return opponent;
	}
	/** @return The type / mod of the game chosen by this player */
	public String getGameType() {
		return gameType;
	}
}
| June92/Cardshifter | cardshifter-api/src/main/java/com/cardshifter/api/incoming/StartGameRequest.java | Java | apache-2.0 | 1,025 |
/**
* @license
* Copyright 2019 The FOAM Authors. All Rights Reserved.
* http://www.apache.org/licenses/LICENSE-2.0
*/
// FOAM model definition: a basic modal dialog (header / body / action buttons).
foam.CLASS({
  package: 'foam.u2',
  name: 'Dialog',
  extends: 'foam.u2.Element',
  documentation: `This class is a basic dialog container: it has a heading,
      a body, and a set of actions. Generally, use
      $$DOC{ref:"foam.u2.EasyDialog"} to easily handle simple cases. For
      more complex cases, you can put any Element you like into a
      $$DOC{ref:"foam.u2.ModalOverlay"}.`,
  requires: [
    'Action'
  ],
  imports: [
    'overlay'
  ],
  properties: [
    'title',
    'body',
    {
      type: 'Array',
      name: 'buttons',
      documentation: `An array of buttons. Each is a [function, label] pair
          or an Action. These will be displayed in <em>reverse</em> order
          as MD buttons at the bottom of the dialog. The default is a
          single "OK" button that closes the dialog.`,
      factory: function() {
        return [[function() { this.overlay.close(); }.bind(this), 'OK']];
      },
      // Normalize [function, label] pairs into Action instances.
      adapt: function(old, nu) {
        if ( nu ) {
          for ( var i = 0 ; i < nu.length ; i++ ) {
            if ( ! this.Action.isInstance(nu[i]) ) {
              nu[i] = this.Action.create({
                name: nu[i][1],
                label: nu[i][1],
                code: nu[i][0]
              });
            }
          }
        }
        return nu;
      }
    },
    {
      class: 'Boolean',
      name: 'padding',
      // NOTE(review): key below is misspelled ('documetation' vs
      // 'documentation'), so FOAM presumably ignores it - verify against the
      // property spec before renaming, since keys are consumed at runtime.
      documetation: 'Controls the padding inside the dialog.',
      attribute: true,
      value: true
    },
  ],
  methods: [
    // Render: optional header, body, then the button row.
    function initE() {
      this.SUPER();
      this.addClass(this.myClass());
      if ( this.title ) {
        this.start()
            .addClass(this.myClass('header'))
            .enableClass(this.myClass('padding'), this.padding$)
            .add(this.title)
            .end();
      }
      this.start()
          .addClass(this.myClass('body'))
          .enableClass(this.myClass('padding'), this.padding$)
          .add(this.body)
          .end();
      this.start().addClass(this.myClass('buttons')).add(this.buttons).end();
    }
  ],
  css: `
    ^ {
      background-color: #fff;
      display: block;
      margin: 10px;
      overflow: hidden;
    }
    ^header {
      font-size: 20px;
      font-weight: 500;
    }
    ^padding {
      margin: 24px;
    }
    ^buttons {
      display: flex;
      flex-direction: row-reverse;
    }
  `
});
| jacksonic/vjlofvhjfgm | src/foam/u2/Dialog.js | JavaScript | apache-2.0 | 2,464 |
import orjson
from zerver.lib.send_email import FromAddress
from zerver.lib.test_classes import WebhookTestCase
from zerver.models import Recipient, get_realm, get_user_by_delivery_email
from zerver.webhooks.teamcity.view import MISCONFIGURED_PAYLOAD_TYPE_ERROR_MESSAGE
class TeamCityHookTests(WebhookTestCase):
    """Exercises the TeamCity webhook: one test per build-status fixture,
    asserting the exact rendered Zulip message, topic, and recipient type.
    The expected-message strings must match the renderer byte-for-byte.
    """

    STREAM_NAME = "teamcity"
    URL_TEMPLATE = "/api/v1/external/teamcity?stream={stream}&api_key={api_key}"
    TOPIC = "Project :: Compile"
    WEBHOOK_DIR_NAME = "teamcity"

    def test_teamcity_success(self) -> None:
        """Successful build posts a thumbs-up message to the stream."""
        expected_message = "Project :: Compile build 5535 - CL 123456 was successful! :thumbs_up: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
        self.check_webhook("success", self.TOPIC, expected_message)

    def test_teamcity_success_branch(self) -> None:
        """A branch build appends the branch name to the topic."""
        expected_message = "Project :: Compile build 5535 - CL 123456 was successful! :thumbs_up: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
        expected_topic = "Project :: Compile (MyBranch)"
        self.check_webhook("success_branch", expected_topic, expected_message)

    def test_teamcity_broken(self) -> None:
        """A newly-broken build is reported with '(new)' in the status."""
        expected_message = "Project :: Compile build 5535 - CL 123456 is broken with status Exit code 1 (new)! :thumbs_down: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
        self.check_webhook("broken", self.TOPIC, expected_message)

    def test_teamcity_failure(self) -> None:
        """A still-failing build is reported as 'still broken'."""
        expected_message = "Project :: Compile build 5535 - CL 123456 is still broken with status Exit code 1! :thumbs_down: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
        self.check_webhook("failure", self.TOPIC, expected_message)

    def test_teamcity_fixed(self) -> None:
        """A build recovering from failure is reported as fixed."""
        expected_message = "Project :: Compile build 5535 - CL 123456 has been fixed! :thumbs_up: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
        self.check_webhook("fixed", self.TOPIC, expected_message)

    def test_teamcity_personal(self) -> None:
        """A personal build result is delivered as a direct message, not to the stream."""
        expected_message = "Your personal build for Project :: Compile build 5535 - CL 123456 is broken with status Exit code 1 (new)! :thumbs_down: See [changes](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952&tab=buildChangesDiv) and [build log](http://teamcity/viewLog.html?buildTypeId=Project_Compile&buildId=19952)."
        payload = orjson.dumps(
            orjson.loads(self.webhook_fixture_data(self.WEBHOOK_DIR_NAME, "personal"))
        )
        self.client_post(self.url, payload, content_type="application/json")
        msg = self.get_last_message()
        self.assertEqual(msg.content, expected_message)
        self.assertEqual(msg.recipient.type, Recipient.PERSONAL)

    def test_non_generic_payload_ignore_pm_notification(self) -> None:
        """A Slack-format payload triggers the misconfiguration DM instead of a stream post."""
        expected_message = MISCONFIGURED_PAYLOAD_TYPE_ERROR_MESSAGE.format(
            bot_name=get_user_by_delivery_email(
                "webhook-bot@zulip.com", get_realm("zulip")
            ).full_name,
            support_email=FromAddress.SUPPORT,
        ).strip()
        payload = self.get_body("slack_non_generic_payload")
        self.client_post(self.url, payload, content_type="application/json")
        msg = self.get_last_message()
        self.assertEqual(msg.content, expected_message)
        self.assertEqual(msg.recipient.type, Recipient.PERSONAL)
| eeshangarg/zulip | zerver/webhooks/teamcity/tests.py | Python | apache-2.0 | 4,058 |
<?php
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/channel/v1/service.proto
namespace Google\Cloud\Channel\V1;
use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
* Request message for [CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements]
*
* Generated from protobuf message <code>google.cloud.channel.v1.ListEntitlementsRequest</code>
*/
class ListEntitlementsRequest extends \Google\Protobuf\Internal\Message
{
/**
* Required. The resource name of the reseller's customer account to list
* entitlements for.
* Parent uses the format: accounts/{account_id}/customers/{customer_id}
*
* Generated from protobuf field <code>string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code>
*/
private $parent = '';
/**
* Optional. Requested page size. Server might return fewer results than requested.
* If unspecified, return at most 50 entitlements.
* The maximum value is 100; the server will coerce values above 100.
*
* Generated from protobuf field <code>int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
*/
private $page_size = 0;
/**
* Optional. A token for a page of results other than the first page.
* Obtained using
* [ListEntitlementsResponse.next_page_token][google.cloud.channel.v1.ListEntitlementsResponse.next_page_token] of the previous
* [CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements] call.
*
* Generated from protobuf field <code>string page_token = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
*/
private $page_token = '';
/**
* Constructor.
*
* @param array $data {
* Optional. Data for populating the Message object.
*
* @type string $parent
* Required. The resource name of the reseller's customer account to list
* entitlements for.
* Parent uses the format: accounts/{account_id}/customers/{customer_id}
* @type int $page_size
* Optional. Requested page size. Server might return fewer results than requested.
* If unspecified, return at most 50 entitlements.
* The maximum value is 100; the server will coerce values above 100.
* @type string $page_token
* Optional. A token for a page of results other than the first page.
* Obtained using
* [ListEntitlementsResponse.next_page_token][google.cloud.channel.v1.ListEntitlementsResponse.next_page_token] of the previous
* [CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements] call.
* }
*/
public function __construct($data = NULL) {
\GPBMetadata\Google\Cloud\Channel\V1\Service::initOnce();
parent::__construct($data);
}
/**
* Required. The resource name of the reseller's customer account to list
* entitlements for.
* Parent uses the format: accounts/{account_id}/customers/{customer_id}
*
* Generated from protobuf field <code>string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code>
* @return string
*/
public function getParent()
{
return $this->parent;
}
/**
* Required. The resource name of the reseller's customer account to list
* entitlements for.
* Parent uses the format: accounts/{account_id}/customers/{customer_id}
*
* Generated from protobuf field <code>string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code>
* @param string $var
* @return $this
*/
public function setParent($var)
{
GPBUtil::checkString($var, True);
$this->parent = $var;
return $this;
}
/**
* Optional. Requested page size. Server might return fewer results than requested.
* If unspecified, return at most 50 entitlements.
* The maximum value is 100; the server will coerce values above 100.
*
* Generated from protobuf field <code>int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
* @return int
*/
public function getPageSize()
{
return $this->page_size;
}
/**
* Optional. Requested page size. Server might return fewer results than requested.
* If unspecified, return at most 50 entitlements.
* The maximum value is 100; the server will coerce values above 100.
*
* Generated from protobuf field <code>int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
* @param int $var
* @return $this
*/
public function setPageSize($var)
{
GPBUtil::checkInt32($var);
$this->page_size = $var;
return $this;
}
/**
 * Optional. A token for a page of results other than the first page.
 * Obtained using
 * [ListEntitlementsResponse.next_page_token][google.cloud.channel.v1.ListEntitlementsResponse.next_page_token] of the previous
 * [CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements] call.
 *
 * Generated from protobuf field <code>string page_token = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
 * @return string
 */
public function getPageToken()
{
    // Plain accessor for the optional `page_token` field.
    return $this->page_token;
}
/**
 * Optional. A token for a page of results other than the first page.
 * Obtained using
 * [ListEntitlementsResponse.next_page_token][google.cloud.channel.v1.ListEntitlementsResponse.next_page_token] of the previous
 * [CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements] call.
 *
 * Generated from protobuf field <code>string page_token = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
 * @param string $var
 * @return $this
 */
public function setPageToken($var)
{
    // Validate before storing; second argument presumably enables the UTF-8
    // check — confirm against GPBUtil. Lowercased boolean keyword per PSR-12
    // (`True` and `true` are identical constants in PHP).
    GPBUtil::checkString($var, true);
    $this->page_token = $var;
    return $this;
}
}
| googleapis/google-cloud-php-channel | src/V1/ListEntitlementsRequest.php | PHP | apache-2.0 | 6,332 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value."""

    def __init__(self, pivot, **backends):
        # Backend registry: name -> module path, or (module path, fromlist).
        self.__backends = backends
        # Option name that would select a backend. NOTE(review): currently
        # unused — __get_backend always picks 'sqlalchemy'; confirm intent.
        self.__pivot = pivot
        # Cached module object, populated on first attribute access.
        self.__backend = None

    def __get_backend(self):
        """Import the configured backend module on first use and cache it."""
        if self.__backend is None:
            backend = self.__backends['sqlalchemy']
            if isinstance(backend, tuple):
                name, fromlist = backend
            else:
                name = fromlist = backend
            self.__backend = __import__(name, None, None, fromlist)
        return self.__backend

    def __getattr__(self, key):
        # Delegate unknown attribute lookups to the lazily imported module.
        return getattr(self.__get_backend(), key)


IMPL = LazyPluggable('backend', sqlalchemy='senlin.db.sqlalchemy.api')
| openstack/senlin | senlin/db/utils.py | Python | apache-2.0 | 1,415 |
// Copyright 2011 Traceur Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Unit tests for traceur's parser, focused on (pre-ES6) module syntax.
suite('parser.js', function() {

  // Minimal ErrorReporter stand-in: any reported parse error becomes a
  // failing chai assertion carrying the message and source position.
  var errorReporter = {
    reportError: function(position, message) {
      throw new chai.AssertionError({message: message + ', ' + position});
    }
  };

  test('Module', function() {
    // Exercises export, `module ... from`, and several import forms in one
    // program; the test passes iff parseProgram reports no errors.
    var program = 'module Foo { export var x = 42; ' +
        'module M from \'url\'; ' +
        'import z from \'x\'.y; ' +
        'import * from M; ' +
        'import {a:b,c} from M.x;' +
        '};\n';
    var sourceFile = new traceur.syntax.SourceFile('Name', program);
    var parser = new traceur.syntax.Parser(errorReporter, sourceFile);
    parser.parseProgram(true);
  });
}); | rwaldron/traceur-todomvc | test/unit/syntax/parser.js | JavaScript | apache-2.0 | 1,265
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.query.groupby;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.MoreExecutors;
import io.druid.collections.BlockingPool;
import io.druid.collections.ReferenceCountingResourceHolder;
import io.druid.collections.StupidPool;
import io.druid.data.input.Row;
import io.druid.java.util.common.granularity.Granularities;
import io.druid.query.DruidProcessingConfig;
import io.druid.query.InsufficientResourcesException;
import io.druid.query.QueryContextKeys;
import io.druid.query.QueryDataSource;
import io.druid.query.QueryInterruptedException;
import io.druid.query.QueryRunner;
import io.druid.query.QueryRunnerTestHelper;
import io.druid.query.ResourceLimitExceededException;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.LongSumAggregatorFactory;
import io.druid.query.dimension.DefaultDimensionSpec;
import io.druid.query.dimension.DimensionSpec;
import io.druid.query.groupby.strategy.GroupByStrategySelector;
import io.druid.query.groupby.strategy.GroupByStrategyV1;
import io.druid.query.groupby.strategy.GroupByStrategyV2;
import org.bouncycastle.util.Integers;
import org.hamcrest.CoreMatchers;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeoutException;
/**
 * Failure-path tests for the groupBy engine (v2 strategy). The processing
 * config below is deliberately sized with a single merge buffer so nested
 * (subquery) groupBys contend for merge buffers and hit timeout /
 * resource-limit errors.
 */
@RunWith(Parameterized.class)
public class GroupByQueryRunnerFailureTest
{
  // Shared config: 10MB intermediate buffers, ONE merge buffer, two threads.
  private static final DruidProcessingConfig DEFAULT_PROCESSING_CONFIG = new DruidProcessingConfig()
  {

    @Override
    public String getFormatString()
    {
      return null;
    }

    @Override
    public int intermediateComputeSizeBytes()
    {
      return 10 * 1024 * 1024;
    }

    @Override
    public int getNumMergeBuffers()
    {
      // A single merge buffer: queries that need more than one must wait,
      // which is what the timeout-based tests below rely on.
      return 1;
    }

    @Override
    public int getNumThreads()
    {
      return 2;
    }
  };

  @Rule
  public ExpectedException expectedException = ExpectedException.none();

  // Builds a groupBy factory wired to DEFAULT_PROCESSING_CONFIG, a fresh
  // intermediate buffer pool, and the static mergeBufferPool below.
  private static GroupByQueryRunnerFactory makeQueryRunnerFactory(
      final ObjectMapper mapper,
      final GroupByQueryConfig config
  )
  {
    final Supplier<GroupByQueryConfig> configSupplier = Suppliers.ofInstance(config);
    final StupidPool<ByteBuffer> bufferPool = new StupidPool<>(
        "GroupByQueryEngine-bufferPool",
        new Supplier<ByteBuffer>()
        {
          @Override
          public ByteBuffer get()
          {
            return ByteBuffer.allocateDirect(DEFAULT_PROCESSING_CONFIG.intermediateComputeSizeBytes());
          }
        }
    );
    final GroupByStrategySelector strategySelector = new GroupByStrategySelector(
        configSupplier,
        new GroupByStrategyV1(
            configSupplier,
            new GroupByQueryEngine(configSupplier, bufferPool),
            QueryRunnerTestHelper.NOOP_QUERYWATCHER,
            bufferPool
        ),
        new GroupByStrategyV2(
            DEFAULT_PROCESSING_CONFIG,
            configSupplier,
            bufferPool,
            mergeBufferPool,
            mapper,
            QueryRunnerTestHelper.NOOP_QUERYWATCHER
        )
    );
    final GroupByQueryQueryToolChest toolChest = new GroupByQueryQueryToolChest(
        strategySelector,
        QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()
    );
    return new GroupByQueryRunnerFactory(
        strategySelector,
        toolChest
    );
  }

  // Blocking pool of merge buffers; its capacity comes from
  // DEFAULT_PROCESSING_CONFIG.getNumMergeBuffers() (i.e. exactly one buffer).
  private final static BlockingPool<ByteBuffer> mergeBufferPool = new BlockingPool<>(
      new Supplier<ByteBuffer>()
      {
        @Override
        public ByteBuffer get ()
        {
          return ByteBuffer.allocateDirect(DEFAULT_PROCESSING_CONFIG.intermediateComputeSizeBytes());
        }
      },
      DEFAULT_PROCESSING_CONFIG.getNumMergeBuffers()
  );

  // Factory shared by all parameterized runs; forces the "v2" strategy.
  private static final GroupByQueryRunnerFactory factory = makeQueryRunnerFactory(
      GroupByQueryRunnerTest.DEFAULT_MAPPER,
      new GroupByQueryConfig()
      {
        public String getDefaultStrategy()
        {
          return "v2";
        }
      }
  );

  private QueryRunner<Row> runner;

  // One parameterized test instance per runner produced by the helper.
  @Parameters(name = "{0}")
  public static Collection<Object[]> constructorFeeder() throws IOException
  {
    final List<Object[]> args = Lists.newArrayList();
    for (QueryRunner<Row> runner : QueryRunnerTestHelper.makeQueryRunners(factory)) {
      args.add(new Object[]{runner});
    }
    return args;
  }

  public GroupByQueryRunnerFailureTest(QueryRunner<Row> runner)
  {
    this.runner = factory.mergeRunners(MoreExecutors.sameThreadExecutor(), ImmutableList.of(runner));
  }

  // A single-level subquery with only one merge buffer available: the
  // 500ms merge-buffer acquisition times out and surfaces as a
  // QueryInterruptedException caused by TimeoutException.
  @Test(timeout = 10000)
  public void testNotEnoughMergeBuffersOnQueryable() throws IOException
  {
    expectedException.expect(QueryInterruptedException.class);
    expectedException.expectCause(CoreMatchers.<Throwable>instanceOf(TimeoutException.class));

    final GroupByQuery query = GroupByQuery
        .builder()
        .setDataSource(
            new QueryDataSource(
                GroupByQuery.builder()
                            .setDataSource(QueryRunnerTestHelper.dataSource)
                            .setInterval(QueryRunnerTestHelper.firstToThird)
                            .setGranularity(Granularities.ALL)
                            .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "alias")))
                            .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(QueryRunnerTestHelper.rowsCount))
                            .build()
            )
        )
        .setGranularity(Granularities.ALL)
        .setInterval(QueryRunnerTestHelper.firstToThird)
        .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
        .setContext(ImmutableMap.<String, Object>of(QueryContextKeys.TIMEOUT, Integers.valueOf(500)))
        .build();

    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  }

  // A doubly-nested subquery needs more merge buffers than the broker can
  // ever provide with this config, so it fails fast with
  // ResourceLimitExceededException rather than timing out.
  @Test(timeout = 10000)
  public void testResourceLimitExceededOnBroker()
  {
    expectedException.expect(ResourceLimitExceededException.class);

    final GroupByQuery query = GroupByQuery
        .builder()
        .setDataSource(
            new QueryDataSource(
                GroupByQuery.builder()
                            .setDataSource(
                                GroupByQuery.builder()
                                            .setDataSource(QueryRunnerTestHelper.dataSource)
                                            .setInterval(QueryRunnerTestHelper.firstToThird)
                                            .setGranularity(Granularities.ALL)
                                            .setDimensions(Lists.<DimensionSpec>newArrayList(
                                                new DefaultDimensionSpec("quality", "alias"),
                                                new DefaultDimensionSpec("market", null)
                                            ))
                                            .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(QueryRunnerTestHelper.rowsCount))
                                            .build()
                            )
                            .setInterval(QueryRunnerTestHelper.firstToThird)
                            .setGranularity(Granularities.ALL)
                            .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "alias")))
                            .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(QueryRunnerTestHelper.rowsCount))
                            .build()
            )
        )
        .setGranularity(Granularities.ALL)
        .setInterval(QueryRunnerTestHelper.firstToThird)
        .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
        .setContext(ImmutableMap.<String, Object>of(QueryContextKeys.TIMEOUT, Integers.valueOf(500)))
        .build();

    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  }

  // Pre-claims the pool's only merge buffer, so the broker cannot acquire
  // one at all and throws InsufficientResourcesException; the claimed
  // buffer is released in the finally block either way.
  @Test(timeout = 10000, expected = InsufficientResourcesException.class)
  public void testInsufficientResourcesOnBroker() throws IOException
  {
    final ReferenceCountingResourceHolder<List<ByteBuffer>> holder = mergeBufferPool.takeBatch(1, 10);
    final GroupByQuery query = GroupByQuery
        .builder()
        .setDataSource(
            new QueryDataSource(
                GroupByQuery.builder()
                            .setDataSource(QueryRunnerTestHelper.dataSource)
                            .setInterval(QueryRunnerTestHelper.firstToThird)
                            .setGranularity(Granularities.ALL)
                            .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "alias")))
                            .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(QueryRunnerTestHelper.rowsCount))
                            .build()
            )
        )
        .setGranularity(Granularities.ALL)
        .setInterval(QueryRunnerTestHelper.firstToThird)
        .setAggregatorSpecs(Lists.<AggregatorFactory>newArrayList(new LongSumAggregatorFactory("rows", "rows")))
        .setContext(ImmutableMap.<String, Object>of(QueryContextKeys.TIMEOUT, Integers.valueOf(500)))
        .build();

    try {
      GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
    } finally {
      holder.close();
    }
  }
}
| zhihuij/druid | processing/src/test/java/io/druid/query/groupby/GroupByQueryRunnerFailureTest.java | Java | apache-2.0 | 10,590 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Data.Custom import *
from QuantConnect.Algorithm import *
### <summary>
### Basic template algorithm simply initializes the date range and cash. This is a skeleton
### framework you can use for designing an algorithm.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class IndicatorSuiteAlgorithm(QCAlgorithm):
    '''Demonstration algorithm of popular indicators and plotting them.'''

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        self.symbol = "SPY"
        self.customSymbol = "WIKI/FB"
        # Last close price of self.symbol; None until the first OnData call.
        self.price = None

        self.SetStartDate(2013, 1, 1)    #Set Start Date
        self.SetEndDate(2014, 12, 31)    #Set End Date
        self.SetCash(25000)              #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddEquity(self.symbol, Resolution.Daily)
        self.AddData(Quandl, self.customSymbol, Resolution.Daily)

        # Set up default Indicators, these indicators are defined on the Value property of incoming data (except ATR and AROON which use the full TradeBar object)
        self.indicators = {
            'BB': self.BB(self.symbol, 20, 1, MovingAverageType.Simple, Resolution.Daily),
            'RSI': self.RSI(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily),
            'EMA': self.EMA(self.symbol, 14, Resolution.Daily),
            'SMA': self.SMA(self.symbol, 14, Resolution.Daily),
            'MACD': self.MACD(self.symbol, 12, 26, 9, MovingAverageType.Simple, Resolution.Daily),
            'MOM': self.MOM(self.symbol, 20, Resolution.Daily),
            'MOMP': self.MOMP(self.symbol, 20, Resolution.Daily),
            'STD': self.STD(self.symbol, 20, Resolution.Daily),
            # by default if the symbol is a tradebar type then it will be the min of the low property
            'MIN': self.MIN(self.symbol, 14, Resolution.Daily),
            # by default if the symbol is a tradebar type then it will be the max of the high property
            'MAX': self.MAX(self.symbol, 14, Resolution.Daily),
            'ATR': self.ATR(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily),
            'AROON': self.AROON(self.symbol, 20, Resolution.Daily)
        }

        # Here we're going to define indicators using 'selector' functions. These 'selector' functions will define what data gets sent into the indicator
        # These functions have a signature like the following: decimal Selector(BaseData baseData), and can be defined like: baseData => baseData.Value
        # We'll define these 'selector' functions to select the Low value
        #
        # For more information on 'anonymous functions' see: http:#en.wikipedia.org/wiki/Anonymous_function
        #                                                    https:#msdn.microsoft.com/en-us/library/bb397687.aspx
        #
        self.selectorIndicators = {
            'BB': self.BB(self.symbol, 20, 1, MovingAverageType.Simple, Resolution.Daily, Field.Low),
            'RSI': self.RSI(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily, Field.Low),
            'EMA': self.EMA(self.symbol, 14, Resolution.Daily, Field.Low),
            'SMA': self.SMA(self.symbol, 14, Resolution.Daily, Field.Low),
            'MACD': self.MACD(self.symbol, 12, 26, 9, MovingAverageType.Simple, Resolution.Daily, Field.Low),
            'MOM': self.MOM(self.symbol, 20, Resolution.Daily, Field.Low),
            'MOMP': self.MOMP(self.symbol, 20, Resolution.Daily, Field.Low),
            'STD': self.STD(self.symbol, 20, Resolution.Daily, Field.Low),
            'MIN': self.MIN(self.symbol, 14, Resolution.Daily, Field.High),
            'MAX': self.MAX(self.symbol, 14, Resolution.Daily, Field.Low),
            # ATR and AROON are special in that they accept a TradeBar instance instead of a decimal, we could easily project and/or transform the input TradeBar
            # before it gets sent to the ATR/AROON indicator, here we use a function that will multiply the input trade bar by a factor of two
            'ATR': self.ATR(self.symbol, 14, MovingAverageType.Simple, Resolution.Daily, Func[IBaseData, IBaseDataBar](self.selector_double_TradeBar)),
            'AROON': self.AROON(self.symbol, 20, Resolution.Daily, Func[IBaseData, IBaseDataBar](self.selector_double_TradeBar))
        }

        # Custom Data Indicator:
        self.rsiCustom = self.RSI(self.customSymbol, 14, MovingAverageType.Simple, Resolution.Daily)
        self.minCustom = self.MIN(self.customSymbol, 14, Resolution.Daily)
        self.maxCustom = self.MAX(self.customSymbol, 14, Resolution.Daily)

        # in addition to defining indicators on a single security, you can all define 'composite' indicators.
        # these are indicators that require multiple inputs. the most common of which is a ratio.
        # suppose we seek the ratio of BTC to SPY, we could write the following:
        spyClose = Identity(self.symbol)
        fbClose = Identity(self.customSymbol)
        # this will create a new indicator whose value is FB/SPY
        self.ratio = IndicatorExtensions.Over(fbClose, spyClose)
        # we can also easily plot our indicators each time they update using th PlotIndicator function
        self.PlotIndicator("Ratio", self.ratio)

        # The following methods will add multiple charts to the algorithm output.
        # Those chatrs names will be used later to plot different series in a particular chart.
        # For more information on Lean Charting see: https://www.quantconnect.com/docs#Charting
        Chart('BB')
        Chart('STD')
        Chart('ATR')
        Chart('AROON')
        Chart('MACD')
        Chart('Averages')
        # Here we make use of the Schelude method to update the plots once per day at market close.
        self.Schedule.On(self.DateRules.EveryDay(), self.TimeRules.BeforeMarketClose(self.symbol), self.update_plots)

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
        Arguments:
            data: Slice object keyed by symbol containing the stock data
        '''
        # Wait until the warm-up-sensitive indicators are ready before trading.
        if (#not data.Bars.ContainsKey(self.symbol) or
            not self.indicators['BB'].IsReady or
            not self.indicators['RSI'].IsReady):
            return

        self.price = data[self.symbol].Close
        # Buy-and-hold: invest all available cash on the first ready bar.
        if not self.Portfolio.HoldStock:
            quantity = int(self.Portfolio.Cash / self.price)
            self.Order(self.symbol, quantity)
            self.Debug('Purchased SPY on ' + self.Time.strftime('%Y-%m-%d'))

    def update_plots(self):
        '''Scheduled once per day before market close; pushes current indicator values to the charts.'''
        if not self.indicators['BB'].IsReady or not self.indicators['STD'].IsReady:
            return

        # Plots can also be created just with this one line command.
        self.Plot('RSI', self.indicators['RSI'])
        # Custom data indicator
        self.Plot('RSI-FB', self.rsiCustom)

        # Here we make use of the chats decalred in the Initialize method, plotting multiple series
        # in each chart.
        self.Plot('STD', 'STD', self.indicators['STD'].Current.Value)

        self.Plot('BB', 'Price', self.price)
        self.Plot('BB', 'BollingerUpperBand', self.indicators['BB'].UpperBand.Current.Value)
        self.Plot('BB', 'BollingerMiddleBand', self.indicators['BB'].MiddleBand.Current.Value)
        self.Plot('BB', 'BollingerLowerBand', self.indicators['BB'].LowerBand.Current.Value)

        self.Plot('AROON', 'Aroon', self.indicators['AROON'].Current.Value)
        self.Plot('AROON', 'AroonUp', self.indicators['AROON'].AroonUp.Current.Value)
        self.Plot('AROON', 'AroonDown', self.indicators['AROON'].AroonDown.Current.Value)

        # The following Plot method calls are commented out because of the 10 series limit for backtests
        #self.Plot('ATR', 'ATR', self.indicators['ATR'].Current.Value)
        #self.Plot('ATR', 'ATRDoubleBar', self.selectorIndicators['ATR'].Current.Value)
        #self.Plot('Averages', 'SMA', self.indicators['SMA'].Current.Value)
        #self.Plot('Averages', 'EMA', self.indicators['EMA'].Current.Value)
        #self.Plot('MOM', self.indicators['MOM'].Current.Value)
        #self.Plot('MOMP', self.indicators['MOMP'].Current.Value)
        #self.Plot('MACD', 'MACD', self.indicators['MACD'].Current.Value)
        #self.Plot('MACD', 'MACDSignal', self.indicators['MACD'].Signal.Current.Value)

    def selector_double_TradeBar(self, bar):
        '''Selector used by the ATR/AROON variants above: returns a copy of the incoming bar with all price fields doubled.'''
        trade_bar = TradeBar()
        trade_bar.Close = 2 * bar.Close
        trade_bar.DataType = bar.DataType
        trade_bar.High = 2 * bar.High
        trade_bar.Low = 2 * bar.Low
        trade_bar.Open = 2 * bar.Open
        trade_bar.Symbol = bar.Symbol
        trade_bar.Time = bar.Time
        trade_bar.Value = 2 * bar.Value
        trade_bar.Period = bar.Period
        return trade_bar | AnshulYADAV007/Lean | Algorithm.Python/IndicatorSuiteAlgorithm.py | Python | apache-2.0 | 10805
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.parse.repl.load.message;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.ddl.database.alter.owner.AlterDatabaseSetOwnerDesc;
import org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPropertiesDesc;
import org.apache.hadoop.hive.ql.ddl.database.create.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.parse.EximUtil;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
import java.io.IOException;
import java.io.Serializable;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
/**
 * Replays a CREATE DATABASE event during REPL LOAD. Reads the database
 * definition dumped in the event's metadata file and produces a chain of
 * DDL tasks: create the database, then (when present in the dump) re-apply
 * its properties and its ownership.
 */
public class CreateDatabaseHandler extends AbstractMessageHandler {
  @Override
  public List<Task<?>> handle(Context context)
      throws SemanticException {
    // Load the dumped metadata from the event location.
    final MetaData metaData;
    try {
      Path eventPath = new Path(context.location);
      FileSystem fs = FileSystem.get(eventPath.toUri(), context.hiveConf);
      metaData = EximUtil.readMetaData(fs, new Path(context.location, EximUtil.METADATA_NAME));
    } catch (IOException e) {
      throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
    }

    Database db = metaData.getDatabase();
    // Replication may map the event onto a different database name; fall
    // back to the name recorded in the dump when no override is given.
    String targetDbName = (context.dbName == null) ? db.getName() : context.dbName;

    CreateDatabaseDesc createDatabaseDesc =
        new CreateDatabaseDesc(targetDbName, db.getDescription(), null, null, true, db.getParameters());
    Task<DDLWork> createDbTask = TaskFactory.get(
        new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc), context.hiveConf);

    if (!db.getParameters().isEmpty()) {
      // Re-apply DB properties, guarded by the event-only replication spec.
      AlterDatabaseSetPropertiesDesc propsDesc = new AlterDatabaseSetPropertiesDesc(
          targetDbName, db.getParameters(), context.eventOnlyReplicationSpec());
      createDbTask.addDependentTask(
          TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), propsDesc), context.hiveConf));
    }

    if (StringUtils.isNotEmpty(db.getOwnerName())) {
      // Restore the owner recorded at the source cluster.
      AlterDatabaseSetOwnerDesc ownerDesc = new AlterDatabaseSetOwnerDesc(
          targetDbName,
          new PrincipalDesc(db.getOwnerName(), db.getOwnerType()),
          context.eventOnlyReplicationSpec());
      createDbTask.addDependentTask(
          TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), ownerDesc), context.hiveConf));
    }

    updatedMetadata.set(context.dmd.getEventTo().toString(), targetDbName, null, null);
    return Collections.singletonList(createDbTask);
  }
}
| vineetgarg02/hive | ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java | Java | apache-2.0 | 3,808 |
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
import (
"bytes"
"encoding/json"
"strings"
)
// TreeCache is a hierarchical cache node: Entries holds the leaf values
// stored directly at this node, and ChildNodes holds the named subtrees
// forming the next level of the tree.
type TreeCache struct {
	ChildNodes map[string]*TreeCache
	Entries    map[string]interface{}
}
// NewTreeCache returns an empty cache node with both maps initialized.
func NewTreeCache() *TreeCache {
	node := &TreeCache{}
	node.ChildNodes = map[string]*TreeCache{}
	node.Entries = map[string]interface{}{}
	return node
}
// Serialize renders the whole cache tree as tab-indented JSON.
func (cache *TreeCache) Serialize() (string, error) {
	raw, err := json.Marshal(cache)
	if err != nil {
		return "", err
	}
	var pretty bytes.Buffer
	if err = json.Indent(&pretty, raw, "", "\t"); err != nil {
		return "", err
	}
	return pretty.String(), nil
}
// setEntry stores val under key at the node addressed by path, creating
// any missing intermediate nodes along the way.
func (cache *TreeCache) setEntry(key string, val interface{}, path ...string) {
	cache.ensureChildNode(path...).Entries[key] = val
}
// getSubCache walks path from this node and returns the node it reaches,
// or nil if any segment is missing.
func (cache *TreeCache) getSubCache(path ...string) *TreeCache {
	node := cache
	for _, segment := range path {
		if node = node.ChildNodes[segment]; node == nil {
			return nil
		}
	}
	return node
}
// setSubCache attaches subCache as the child named key under the node
// addressed by path, creating intermediate nodes as needed.
func (cache *TreeCache) setSubCache(key string, subCache *TreeCache, path ...string) {
	cache.ensureChildNode(path...).ChildNodes[key] = subCache
}
// getEntry looks up key in the node addressed by path and reports whether
// it was found.
//
// Fix: the previous version dereferenced the result of getSubCache without
// a nil check, so looking up a non-existent path panicked instead of
// returning (nil, false). deleteEntry already guards against a nil subtree;
// getEntry now behaves consistently.
func (cache *TreeCache) getEntry(key string, path ...string) (interface{}, bool) {
	childNode := cache.getSubCache(path...)
	if childNode == nil {
		return nil, false
	}
	val, ok := childNode.Entries[key]
	return val, ok
}
// getValuesForPathWithWildcards collects entry values reachable by path,
// where any segment may be the literal wildcard "*". A wildcard fans out to
// all children except those whose name starts with "_" (presumably reserved
// names — confirm against callers). If the final segment names an entry
// directly, that entry's value is collected; otherwise all entries of the
// nodes matched by the full path are collected.
func (cache *TreeCache) getValuesForPathWithWildcards(path ...string) []interface{} {
	retval := []interface{}{}
	nodesToExplore := []*TreeCache{cache}
	for idx, subpath := range path {
		nextNodesToExplore := []*TreeCache{}
		if idx == len(path)-1 {
			// if path ends on an entry, instead of a child node, add the entry
			for _, node := range nodesToExplore {
				if subpath == "*" {
					nextNodesToExplore = append(nextNodesToExplore, node)
				} else {
					if val, ok := node.Entries[subpath]; ok {
						retval = append(retval, val)
					} else {
						childNode := node.ChildNodes[subpath]
						if childNode != nil {
							nextNodesToExplore = append(nextNodesToExplore, childNode)
						}
					}
				}
			}
			nodesToExplore = nextNodesToExplore
			break
		}
		if subpath == "*" {
			// Wildcard on an intermediate segment: fan out to every child
			// whose name does not start with "_".
			for _, node := range nodesToExplore {
				for subkey, subnode := range node.ChildNodes {
					if !strings.HasPrefix(subkey, "_") {
						nextNodesToExplore = append(nextNodesToExplore, subnode)
					}
				}
			}
		} else {
			for _, node := range nodesToExplore {
				childNode := node.ChildNodes[subpath]
				if childNode != nil {
					nextNodesToExplore = append(nextNodesToExplore, childNode)
				}
			}
		}
		nodesToExplore = nextNodesToExplore
	}
	// Collect the entries of every node still matched after consuming path.
	for _, node := range nodesToExplore {
		for _, val := range node.Entries {
			retval = append(retval, val)
		}
	}
	return retval
}
// deletePath removes the subtree addressed by path, reporting whether
// anything was removed. An empty path deletes nothing.
func (cache *TreeCache) deletePath(path ...string) bool {
	if len(path) == 0 {
		return false
	}
	leaf := path[len(path)-1]
	parent := cache.getSubCache(path[:len(path)-1]...)
	if parent == nil {
		return false
	}
	if _, present := parent.ChildNodes[leaf]; !present {
		return false
	}
	delete(parent.ChildNodes, leaf)
	return true
}
// deleteEntry removes the entry named key from the node addressed by path,
// reporting whether an entry was actually removed.
func (cache *TreeCache) deleteEntry(key string, path ...string) bool {
	node := cache.getSubCache(path...)
	if node == nil {
		return false
	}
	if _, present := node.Entries[key]; !present {
		return false
	}
	delete(node.Entries, key)
	return true
}
// appendValues appends this node's entry values to ref[0]; when recursive
// is true it also descends into every child node. The accumulator slice is
// passed via ref[0] so the append result is visible to the caller.
func (cache *TreeCache) appendValues(recursive bool, ref [][]interface{}) {
	for _, entry := range cache.Entries {
		ref[0] = append(ref[0], entry)
	}
	if !recursive {
		return
	}
	for _, child := range cache.ChildNodes {
		child.appendValues(recursive, ref)
	}
}
// ensureChildNode walks path from this node, creating any missing nodes,
// and returns the node at the end of the path.
func (cache *TreeCache) ensureChildNode(path ...string) *TreeCache {
	node := cache
	for _, segment := range path {
		child, exists := node.ChildNodes[segment]
		if !exists {
			child = NewTreeCache()
			node.ChildNodes[segment] = child
		}
		node = child
	}
	return node
}
// unused function. keeping it around in commented-fashion
// in the future, we might need some form of this function so that
// we can serialize to a file in a mounted empty dir..
//const (
// dataFile = "data.dat"
// crcFile = "data.crc"
//)
//func (cache *TreeCache) Serialize(dir string) (string, error) {
// cache.m.RLock()
// defer cache.m.RUnlock()
// b, err := json.Marshal(cache)
// if err != nil {
// return "", err
// }
//
// if err := ensureDir(dir, os.FileMode(0755)); err != nil {
// return "", err
// }
// if err := ioutil.WriteFile(path.Join(dir, dataFile), b, 0644); err != nil {
// return "", err
// }
// if err := ioutil.WriteFile(path.Join(dir, crcFile), getMD5(b), 0644); err != nil {
// return "", err
// }
// return string(b), nil
//}
//func ensureDir(path string, perm os.FileMode) error {
// s, err := os.Stat(path)
// if err != nil || !s.IsDir() {
// return os.Mkdir(path, perm)
// }
// return nil
//}
//func getMD5(b []byte) []byte {
// h := md5.New()
// h.Write(b)
// return []byte(fmt.Sprintf("%x", h.Sum(nil)))
//}
// unused function. keeping it around in commented-fashion
// in the future, we might need some form of this function so that
// we can restart kube-dns, deserialize the tree and have a cache
// without having to wait for kube-dns to reach out to API server.
//func Deserialize(dir string) (*TreeCache, error) {
// b, err := ioutil.ReadFile(path.Join(dir, dataFile))
// if err != nil {
// return nil, err
// }
//
// hash, err := ioutil.ReadFile(path.Join(dir, crcFile))
// if err != nil {
// return nil, err
// }
// if !reflect.DeepEqual(hash, getMD5(b)) {
// return nil, fmt.Errorf("Checksum failed")
// }
//
// var cache TreeCache
// err = json.Unmarshal(b, &cache)
// if err != nil {
// return nil, err
// }
// cache.m = &sync.RWMutex{}
// return &cache, nil
//}
| rajdeepd/kubernetes | pkg/dns/treecache.go | GO | apache-2.0 | 6,249 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid.layers as layers
from paddle.fluid.contrib.decoder.beam_search_decoder import *
def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim,
                   target_dict_dim, is_generating, beam_size, max_length):
    """Build an encoder-decoder (no attention) translation network.

    Args:
        embedding_dim: word-embedding size for source and target vocabularies.
        encoder_size: hidden size of the encoder LSTM.
        decoder_size: hidden size of the decoder state.
        source_dict_dim: source vocabulary size.
        target_dict_dim: target vocabulary size.
        is_generating: False builds the training graph, True the inference graph.
        beam_size: beam width used by the beam-search decoder (inference only).
        max_length: maximum decoded sequence length (inference only).

    Returns:
        Training mode:  (avg_cost, feeding_list)
        Inference mode: (translation_ids, translation_scores, feeding_list)
    """
    def encoder():
        # Encoder implementation of RNN translation: embed the source words,
        # run them through a dynamic LSTM and keep only the last step's hidden
        # state as the sentence context.
        src_word = layers.data(
            name="src_word", shape=[1], dtype='int64', lod_level=1)
        src_embedding = layers.embedding(
            input=src_word,
            size=[source_dict_dim, embedding_dim],
            dtype='float32',
            is_sparse=True)
        # NOTE: dynamic_lstm expects its input projected to 4x the hidden size
        # (gates are fused), hence encoder_size * 4 here and below.
        fc1 = layers.fc(input=src_embedding, size=encoder_size * 4, act='tanh')
        lstm_hidden0, lstm_0 = layers.dynamic_lstm(
            input=fc1, size=encoder_size * 4)
        encoder_out = layers.sequence_last_step(input=lstm_hidden0)
        return encoder_out
    def decoder_state_cell(context):
        # Decoder state cell, specifies the hidden state variable and its updater.
        # The encoder context initializes the hidden state 'h'.
        h = InitState(init=context, need_reorder=True)
        state_cell = StateCell(
            inputs={'x': None}, states={'h': h}, out_state='h')
        @state_cell.state_updater
        def updater(state_cell):
            # One decoder step: combine the previous hidden state with the
            # current input word to produce the next hidden state.
            current_word = state_cell.get_input('x')
            prev_h = state_cell.get_state('h')
            # make sure the LoD of h is inherited from prev_h
            h = layers.fc(input=[prev_h, current_word],
                          size=decoder_size,
                          act='tanh')
            state_cell.set_state('h', h)
        return state_cell
    def decoder_train(state_cell):
        # Decoder for training implementation of RNN translation:
        # teacher-forced, consuming the gold target words.
        trg_word = layers.data(
            name="target_word", shape=[1], dtype='int64', lod_level=1)
        trg_embedding = layers.embedding(
            input=trg_word,
            size=[target_dict_dim, embedding_dim],
            dtype='float32',
            is_sparse=True)
        # A training decoder
        decoder = TrainingDecoder(state_cell)
        # Define the computation in each RNN step done by decoder
        with decoder.block():
            current_word = decoder.step_input(trg_embedding)
            decoder.state_cell.compute_state(inputs={'x': current_word})
            # Softmax over the target vocabulary from the updated hidden state.
            current_score = layers.fc(input=decoder.state_cell.get_state('h'),
                                      size=target_dict_dim,
                                      act='softmax')
            decoder.state_cell.update_states()
            decoder.output(current_score)
        return decoder()
    def decoder_infer(state_cell):
        # Decoder for inference implementation: beam search over the target
        # vocabulary, starting from the provided initial ids/scores.
        init_ids = layers.data(
            name="init_ids", shape=[1], dtype="int64", lod_level=2)
        init_scores = layers.data(
            name="init_scores", shape=[1], dtype="float32", lod_level=2)
        # A beam search decoder for inference
        decoder = BeamSearchDecoder(
            state_cell=state_cell,
            init_ids=init_ids,
            init_scores=init_scores,
            target_dict_dim=target_dict_dim,
            word_dim=embedding_dim,
            input_var_dict={},
            topk_size=50,
            sparse_emb=True,
            max_len=max_length,
            beam_size=beam_size,
            end_id=1,
            name=None)
        decoder.decode()
        translation_ids, translation_scores = decoder()
        return translation_ids, translation_scores
    context = encoder()
    state_cell = decoder_state_cell(context)
    if not is_generating:
        # Training graph: cross-entropy against the next target word.
        label = layers.data(
            name="target_next_word", shape=[1], dtype='int64', lod_level=1)
        rnn_out = decoder_train(state_cell)
        cost = layers.cross_entropy(input=rnn_out, label=label)
        avg_cost = layers.mean(x=cost)
        feeding_list = ['src_word', 'target_word', 'target_next_word']
        return avg_cost, feeding_list
    else:
        # Inference graph: beam-search decoding only needs the source words.
        translation_ids, translation_scores = decoder_infer(state_cell)
        feeding_list = ['src_word']
        return translation_ids, translation_scores, feeding_list
| lcy-seso/models | fluid/neural_machine_translation/rnn_search/no_attention_model.py | Python | apache-2.0 | 4,829 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tamaya.core.internal.converters;
import org.apache.tamaya.spi.PropertyConverter;
import java.util.Locale;
import java.util.Objects;
import java.util.logging.Logger;
/**
 * Converter, converting from String to Float, using the Java number syntax:
 * (-)?[0-9]*\.[0-9]*. In case of error the value given also is tried being parsed as integral number using
 * {@link IntegerConverter}. Additionally the following values are supported:
 * <ul>
 * <li>NaN (ignoring case)</li>
 * <li>POSITIVE_INFINITY (ignoring case)</li>
 * <li>NEGATIVE_INFINITY (ignoring case)</li>
 * <li>MIN_VALUE / MIN (ignoring case)</li>
 * <li>MAX_VALUE / MAX (ignoring case)</li>
 * </ul>
 */
public class FloatConverter implements PropertyConverter<Float> {
    /**
     * The logger.
     */
    private static final Logger LOG = Logger.getLogger(FloatConverter.class.getName());
    /**
     * The converter used, when floating point parse failed.
     */
    private IntegerConverter integerConverter = new IntegerConverter();
    /**
     * Converts the given String to a Float, or returns {@code null} when the
     * value is unparseable. Throws {@link NullPointerException} for a null value.
     */
    @Override
    public Float convert(String value) {
        String trimmed = Objects.requireNonNull(value).trim();
        // Special symbolic values are matched case-insensitively.
        switch(trimmed.toUpperCase(Locale.ENGLISH)){
            case "POSITIVE_INFINITY":
                return Float.POSITIVE_INFINITY;
            case "NEGATIVE_INFINITY":
                return Float.NEGATIVE_INFINITY;
            case "NAN":
                return Float.NaN;
            case "MIN_VALUE":
            case "MIN":
                return Float.MIN_VALUE;
            case "MAX_VALUE":
            case "MAX":
                return Float.MAX_VALUE;
            default:
                try {
                    return Float.valueOf(trimmed);
                } catch(Exception e){
                    // OK perhaps we have an integral number that must be converted to the double type...
                    LOG.finest("Parsing of float as floating number failed, trying parsing integral" +
                            " number/hex instead...");
                }
                // Fall back to the integer converter (presumably handles hex etc.;
                // see IntegerConverter for the exact syntax it accepts).
                Integer val = integerConverter.convert(trimmed);
                if(val!=null) {
                    return val.floatValue();
                }
                LOG.finest("Unparseable float value: " + trimmed);
                return null;
        }
    }
}
| syzer/incubator-tamaya | java7/core/src/main/java/org/apache/tamaya/core/internal/converters/FloatConverter.java | Java | apache-2.0 | 3,020 |
package com.google.maps.android.data.kml;
import com.google.android.gms.maps.model.LatLng;
import junit.framework.TestCase;
import java.util.ArrayList;
public class KmlLineStringTest extends TestCase {
KmlLineString kmlLineString;
public KmlLineString createSimpleLineString() {
ArrayList<LatLng> coordinates = new ArrayList<LatLng>();
coordinates.add(new LatLng(0, 0));
coordinates.add(new LatLng(50, 50));
coordinates.add(new LatLng(100, 100));
return new KmlLineString(coordinates);
}
public KmlLineString createLoopedLineString() {
ArrayList<LatLng> coordinates = new ArrayList<LatLng>();
coordinates.add(new LatLng(0, 0));
coordinates.add(new LatLng(50, 50));
coordinates.add(new LatLng(0, 0));
return new KmlLineString(coordinates);
}
    public void testGetType() throws Exception {
        // Every KmlLineString must report the KML geometry type "LineString",
        // for an open path as well as a closed loop.
        kmlLineString = createSimpleLineString();
        assertNotNull(kmlLineString);
        assertNotNull(kmlLineString.getGeometryType());
        assertEquals("LineString", kmlLineString.getGeometryType());
        kmlLineString = createLoopedLineString();
        assertNotNull(kmlLineString);
        assertNotNull(kmlLineString.getGeometryType());
        assertEquals("LineString", kmlLineString.getGeometryType());
    }
    public void testGetKmlGeometryObject() throws Exception {
        // The geometry object should expose the backing coordinate list.
        // NOTE(review): these assertEquals calls pass (actual, expected) — the
        // reverse of the (expected, actual) order used in testGetType. The
        // comparison still works, but failure messages will be misleading.
        kmlLineString = createSimpleLineString();
        assertNotNull(kmlLineString);
        assertNotNull(kmlLineString.getGeometryObject());
        assertEquals(kmlLineString.getGeometryObject().size(), 3);
        assertEquals(kmlLineString.getGeometryObject().get(0).latitude, 0.0);
        assertEquals(kmlLineString.getGeometryObject().get(1).latitude, 50.0);
        // The third point was created as (100, 100); expecting 90.0 here
        // presumably relies on LatLng clamping latitude to [-90, 90] — TODO confirm.
        assertEquals(kmlLineString.getGeometryObject().get(2).latitude, 90.0);
        kmlLineString = createLoopedLineString();
        assertNotNull(kmlLineString);
        assertNotNull(kmlLineString.getGeometryObject());
        assertEquals(kmlLineString.getGeometryObject().size(), 3);
        assertEquals(kmlLineString.getGeometryObject().get(0).latitude, 0.0);
        assertEquals(kmlLineString.getGeometryObject().get(1).latitude, 50.0);
        assertEquals(kmlLineString.getGeometryObject().get(2).latitude, 0.0);
    }
} | stephenmcd/android-maps-utils | library/tests/src/com/google/maps/android/data/kml/KmlLineStringTest.java | Java | apache-2.0 | 2,297 |
/*!
* gulp-csscomb | https://github.com/koistya/gulp-csscomb
* Copyright (c) Konstantin Tarkus (@koistya). See LICENSE.txt
*/
'use strict';
var Comb = require('csscomb');
var fs = require('fs');
var gutil = require('gulp-util');
var path = require('path');
var through = require('through2');
var PluginError = gutil.PluginError;
// Constants
var PLUGIN_NAME = 'gulp-csscomb';
var SUPPORTED_EXTENSIONS = ['.css', '.sass', '.scss', '.less'];
// Plugin level function (dealing with files).
//
// Usage: Plugin([configPath], [options]) or Plugin(options) where options may
// carry { configPath, verbose, syntax }. Returns an object-mode transform
// stream that runs csscomb over each supported style-sheet buffer.
function Plugin(configPath, options) {
  if (arguments.length == 1 && typeof configPath === 'object') {
    options = configPath;
    configPath = options.configPath;
  } else if (arguments.length == 2 && typeof options === 'boolean') {
    options = { verbose: options }; // for backward compatibility
  }
  options = options || {};
  configPath = configPath || null;
  var verbose = options.verbose || false;
  //var lint = options.lint || false; // TODO: Report about found issues in style sheets
  // Create a stream through which each file will pass
  var stream = through.obj(function(file, enc, cb) {
    if (file.isNull()) {
      // Do nothing
    } else if (file.isStream()) {
      this.emit('error', new PluginError(PLUGIN_NAME, 'Streams are not supported!'));
      return cb();
    } else if (file.isBuffer() && SUPPORTED_EXTENSIONS.indexOf(path.extname(file.path)) !== -1) {
      if (verbose) {
        gutil.log(PLUGIN_NAME, 'Processing ' + gutil.colors.magenta(file.path));
      }
      if (configPath && !fs.existsSync(configPath)) {
        this.emit('error', new PluginError(PLUGIN_NAME, 'Configuration file not found: ' + gutil.colors.magenta(configPath)));
        return cb();
      }
      // Resolve the configuration for THIS file into a local variable.
      // The original code assigned the result back to `configPath`, which is
      // shared by every file in the stream: after the first file, its resolved
      // config path was silently reused for all subsequent files, breaking
      // per-directory .csscomb.json lookup.
      var resolvedConfigPath = Comb.getCustomConfigPath(configPath || path.join(path.dirname(file.path), '.csscomb.json'));
      var config = Comb.getCustomConfig(resolvedConfigPath);
      if (verbose) {
        gutil.log(PLUGIN_NAME, 'Using configuration file ' + gutil.colors.magenta(resolvedConfigPath));
      }
      var comb = new Comb(config || 'csscomb');
      // Prefer an explicit syntax option; otherwise infer it from the extension.
      var syntax = options.syntax || file.path.split('.').pop();
      try {
        var output = comb.processString(
          file.contents.toString('utf8'), {
            syntax: syntax,
            filename: file.path
          });
        // NOTE(review): `new Buffer(string)` is deprecated in modern Node
        // (use Buffer.from); kept here for the old engines this plugin targets.
        file.contents = new Buffer(output);
      } catch (err) {
        this.emit('error', new PluginError(PLUGIN_NAME, file.path + '\n' + err));
      }
    }
    // make sure the file goes through the next gulp plugin
    this.push(file);
    // tell the stream engine that we are done with this file
    return cb();
  });
  // Return the file stream
  return stream;
}
// Export the plugin main function
module.exports = Plugin;
| athena-github/athena-github.github.io | xiangmuu--weizhan/node_modules/.3.0.8@gulp-csscomb/index.js | JavaScript | apache-2.0 | 2,726 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.data.solr.repository.cdi;
/**
* @see DATASOLR-187
* @author Mark Paluch
*/
class SamplePersonRepositoryImpl implements SamplePersonRepositoryCustom {

	/**
	 * Custom repository fragment method.
	 *
	 * @return always {@literal 1}.
	 */
	@Override
	public int returnOne() {
		return 1;
	}
}
| xindongzhang/spring-data-solr | src/test/java/org/springframework/data/solr/repository/cdi/SamplePersonRepositoryImpl.java | Java | apache-2.0 | 854 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.watcher.AckWatchRequest;
import org.elasticsearch.client.watcher.AckWatchResponse;
import org.elasticsearch.client.watcher.ActionStatus;
import org.elasticsearch.client.watcher.ActionStatus.AckStatus;
import org.elasticsearch.client.watcher.ActivateWatchRequest;
import org.elasticsearch.client.watcher.ActivateWatchResponse;
import org.elasticsearch.client.watcher.DeactivateWatchRequest;
import org.elasticsearch.client.watcher.DeactivateWatchResponse;
import org.elasticsearch.client.watcher.DeleteWatchRequest;
import org.elasticsearch.client.watcher.DeleteWatchResponse;
import org.elasticsearch.client.watcher.ExecuteWatchRequest;
import org.elasticsearch.client.watcher.ExecuteWatchResponse;
import org.elasticsearch.client.watcher.PutWatchRequest;
import org.elasticsearch.client.watcher.PutWatchResponse;
import org.elasticsearch.client.watcher.StartWatchServiceRequest;
import org.elasticsearch.client.watcher.StopWatchServiceRequest;
import org.elasticsearch.client.watcher.WatcherState;
import org.elasticsearch.client.watcher.WatcherStatsRequest;
import org.elasticsearch.client.watcher.WatcherStatsResponse;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ObjectPath;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;
import java.util.Map;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.not;
/**
 * Integration tests for the Watcher endpoints of the high-level REST client.
 * Each test talks to a running cluster through {@code highLevelClient()}.
 */
public class WatcherIT extends ESRestHighLevelClientTestCase {
    // Starting the watch service must be acknowledged and every node must then
    // report the STARTED state (and no manual stop recorded).
    public void testStartWatchService() throws Exception {
        AcknowledgedResponse response =
                highLevelClient().watcher().startWatchService(new StartWatchServiceRequest(), RequestOptions.DEFAULT);
        assertTrue(response.isAcknowledged());
        WatcherStatsResponse stats = highLevelClient().watcher().watcherStats(new WatcherStatsRequest(), RequestOptions.DEFAULT);
        assertFalse(stats.getWatcherMetaData().manuallyStopped());
        assertThat(stats.getNodes(), not(empty()));
        for(WatcherStatsResponse.Node node : stats.getNodes()) {
            assertEquals(WatcherState.STARTED, node.getWatcherState());
        }
    }
    // Stopping the watch service must be acknowledged and recorded as a manual
    // stop in the watcher stats.
    public void testStopWatchService() throws Exception {
        AcknowledgedResponse response =
                highLevelClient().watcher().stopWatchService(new StopWatchServiceRequest(), RequestOptions.DEFAULT);
        assertTrue(response.isAcknowledged());
        WatcherStatsResponse stats = highLevelClient().watcher().watcherStats(new WatcherStatsRequest(), RequestOptions.DEFAULT);
        assertTrue(stats.getWatcherMetaData().manuallyStopped());
    }
    // A fresh watch is reported as created with version 1.
    public void testPutWatch() throws Exception {
        String watchId = randomAlphaOfLength(10);
        PutWatchResponse putWatchResponse = createWatch(watchId);
        assertThat(putWatchResponse.isCreated(), is(true));
        assertThat(putWatchResponse.getId(), is(watchId));
        assertThat(putWatchResponse.getVersion(), is(1L));
    }
    // Minimal watch definition shared by the tests: a 10h schedule trigger,
    // no input, and a single logging action named "logme".
    private static final String WATCH_JSON = "{ \n" +
        " \"trigger\": { \"schedule\": { \"interval\": \"10h\" } },\n" +
        " \"input\": { \"none\": {} },\n" +
        " \"actions\": { \"logme\": { \"logging\": { \"text\": \"{{ctx.payload}}\" } } }\n" +
        "}";
    // Helper: stores WATCH_JSON under the given id via the high-level client.
    private PutWatchResponse createWatch(String watchId) throws Exception {
        BytesReference bytesReference = new BytesArray(WATCH_JSON);
        PutWatchRequest putWatchRequest = new PutWatchRequest(watchId, bytesReference, XContentType.JSON);
        return highLevelClient().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
    }
    public void testDeactivateWatch() throws Exception {
        // Deactivate a watch that exists
        String watchId = randomAlphaOfLength(10);
        createWatch(watchId);
        DeactivateWatchResponse response = highLevelClient().watcher().deactivateWatch(
            new DeactivateWatchRequest(watchId), RequestOptions.DEFAULT);
        assertThat(response.getStatus().state().isActive(), is(false));
    }
    public void testDeactivateWatch404() throws Exception {
        // Deactivate a watch that does not exist
        String watchId = randomAlphaOfLength(10);
        ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
            () -> highLevelClient().watcher().deactivateWatch(new DeactivateWatchRequest(watchId), RequestOptions.DEFAULT));
        assertEquals(RestStatus.NOT_FOUND, exception.status());
    }
    public void testDeleteWatch() throws Exception {
        // delete watch that exists
        {
            String watchId = randomAlphaOfLength(10);
            createWatch(watchId);
            DeleteWatchResponse deleteWatchResponse = highLevelClient().watcher().deleteWatch(new DeleteWatchRequest(watchId),
                RequestOptions.DEFAULT);
            assertThat(deleteWatchResponse.getId(), is(watchId));
            // version 2: the put created version 1 and the delete bumps it — presumably; confirm against server semantics
            assertThat(deleteWatchResponse.getVersion(), is(2L));
            assertThat(deleteWatchResponse.isFound(), is(true));
        }
        // delete watch that does not exist
        {
            String watchId = randomAlphaOfLength(10);
            DeleteWatchResponse deleteWatchResponse = highLevelClient().watcher().deleteWatch(new DeleteWatchRequest(watchId),
                RequestOptions.DEFAULT);
            assertThat(deleteWatchResponse.getId(), is(watchId));
            assertThat(deleteWatchResponse.getVersion(), is(1L));
            assertThat(deleteWatchResponse.isFound(), is(false));
        }
    }
    // Ack state machine: before any execution the action awaits a successful
    // execution; after an execution it can be acked; acking a missing watch is 404.
    public void testAckWatch() throws Exception {
        String watchId = randomAlphaOfLength(10);
        String actionId = "logme";
        PutWatchResponse putWatchResponse = createWatch(watchId);
        assertThat(putWatchResponse.isCreated(), is(true));
        AckWatchResponse response = highLevelClient().watcher().ackWatch(
            new AckWatchRequest(watchId, actionId), RequestOptions.DEFAULT);
        ActionStatus actionStatus = response.getStatus().actionStatus(actionId);
        assertEquals(AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION, actionStatus.ackStatus().state());
        // TODO: use the high-level REST client here once it supports 'execute watch'.
        Request executeWatchRequest = new Request("POST", "_xpack/watcher/watch/" + watchId + "/_execute");
        executeWatchRequest.setJsonEntity("{ \"record_execution\": true }");
        Response executeResponse = client().performRequest(executeWatchRequest);
        assertEquals(RestStatus.OK.getStatus(), executeResponse.getStatusLine().getStatusCode());
        response = highLevelClient().watcher().ackWatch(
            new AckWatchRequest(watchId, actionId), RequestOptions.DEFAULT);
        actionStatus = response.getStatus().actionStatus(actionId);
        assertEquals(AckStatus.State.ACKED, actionStatus.ackStatus().state());
        ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
            () -> highLevelClient().watcher().ackWatch(
                new AckWatchRequest("nonexistent"), RequestOptions.DEFAULT));
        assertEquals(RestStatus.NOT_FOUND, exception.status());
    }
    // Activating twice is idempotent in state but refreshes the state timestamp.
    public void testActivateWatchThatExists() throws Exception {
        String watchId = randomAlphaOfLength(10);
        createWatch(watchId);
        ActivateWatchResponse activateWatchResponse1 = highLevelClient().watcher().activateWatch(new ActivateWatchRequest(watchId),
            RequestOptions.DEFAULT);
        assertThat(activateWatchResponse1.getStatus().state().isActive(), is(true));
        ActivateWatchResponse activateWatchResponse2 = highLevelClient().watcher().activateWatch(new ActivateWatchRequest(watchId),
            RequestOptions.DEFAULT);
        assertThat(activateWatchResponse2.getStatus().state().isActive(), is(true));
        assertThat(activateWatchResponse1.getStatus().state().getTimestamp(),
            lessThan(activateWatchResponse2.getStatus().state().getTimestamp()));
    }
    public void testActivateWatchThatDoesNotExist() throws Exception {
        String watchId = randomAlphaOfLength(10);
        // exception when activating a not existing watcher
        ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
            highLevelClient().watcher().activateWatch(new ActivateWatchRequest(watchId), RequestOptions.DEFAULT));
        assertEquals(RestStatus.NOT_FOUND, exception.status());
    }
    // Executing a stored watch by id records a manual trigger event.
    public void testExecuteWatchById() throws Exception {
        String watchId = randomAlphaOfLength(10);
        createWatch(watchId);
        ExecuteWatchResponse response = highLevelClient().watcher()
            .executeWatch(ExecuteWatchRequest.byId(watchId), RequestOptions.DEFAULT);
        assertThat(response.getRecordId(), containsString(watchId));
        Map<String, Object> source = response.getRecordAsMap();
        assertThat(ObjectPath.eval("trigger_event.type", source), is("manual"));
    }
    public void testExecuteWatchThatDoesNotExist() throws Exception {
        String watchId = randomAlphaOfLength(10);
        // exception when activating a not existing watcher
        ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
            highLevelClient().watcher().executeWatch(ExecuteWatchRequest.byId(watchId), RequestOptions.DEFAULT));
        assertEquals(RestStatus.NOT_FOUND, exception.status());
    }
    // Executing an inline (not stored) watch marks the record id as "_inlined_".
    public void testExecuteInlineWatch() throws Exception {
        ExecuteWatchResponse response = highLevelClient().watcher()
            .executeWatch(ExecuteWatchRequest.inline(WATCH_JSON), RequestOptions.DEFAULT);
        assertThat(response.getRecordId(), containsString("_inlined_"));
        Map<String, Object> source = response.getRecordAsMap();
        assertThat(ObjectPath.eval("trigger_event.type", source), is("manual"));
    }
    // The optional stats sections (current watches / queued watches) must be
    // present exactly when requested.
    public void testWatcherStatsMetrics() throws Exception {
        boolean includeCurrent = randomBoolean();
        boolean includeQueued = randomBoolean();
        WatcherStatsRequest request = new WatcherStatsRequest(includeCurrent, includeQueued);
        WatcherStatsResponse stats = highLevelClient().watcher().watcherStats(request, RequestOptions.DEFAULT);
        assertThat(stats.getNodes(), not(empty()));
        assertEquals(includeCurrent, stats.getNodes().get(0).getSnapshots() != null);
        assertEquals(includeQueued, stats.getNodes().get(0).getQueuedWatches() != null);
    }
}
| strapdata/elassandra | client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java | Java | apache-2.0 | 11,678 |
// Copyright (c) Microsoft Open Technologies, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
namespace Roslyn.Utilities
{
    internal partial class SpecializedCollections
    {
        private partial class Empty
        {
            /// <summary>
            /// Singleton empty <see cref="ISet{T}"/>. Read operations are served by
            /// the <c>Empty.Collection&lt;T&gt;</c> base class; the set-specific
            /// operations below are deliberately unimplemented.
            /// NOTE(review): NotSupportedException would be the conventional choice
            /// for an immutable collection — confirm callers before changing.
            /// </summary>
            internal class Set<T> : Collection<T>, ISet<T>
            {
                // Hides the base member (the `new` modifier implies one exists,
                // presumably Collection<T>.Instance) to expose an ISet<T>-typed view.
                public static readonly new ISet<T> Instance = new Set<T>();

                // Protected: the only instance is the singleton above.
                protected Set()
                {
                }

                public new bool Add(T item)
                {
                    throw new NotImplementedException();
                }

                public void ExceptWith(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public void IntersectWith(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public bool IsProperSubsetOf(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public bool IsProperSupersetOf(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public bool IsSubsetOf(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public bool IsSupersetOf(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public bool Overlaps(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public bool SetEquals(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public void SymmetricExceptWith(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                public void UnionWith(IEnumerable<T> other)
                {
                    throw new NotImplementedException();
                }

                // Delegates to the generic enumerator: Instance is statically typed
                // ISet<T>, so this call binds to IEnumerable<T>.GetEnumerator (from
                // the base collection) rather than recursing into this method.
                public new System.Collections.IEnumerator GetEnumerator()
                {
                    return Set<T>.Instance.GetEnumerator();
                }
            }
        }
    }
} | binsys/roslyn_java | Src/Compilers/Core/Source/InternalUtilities/SpecializedCollections.Empty.Set.cs | C# | apache-2.0 | 2,500 |
package com.planet_ink.coffee_mud.Behaviors;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2004-2015 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * A behavior that makes its host mob join fights in defense of victims
 * matching the configured zapper mask (behavior parameters).
 */
public class CombatAssister extends StdBehavior
{
	@Override public String ID(){return "CombatAssister";}

	@Override
	public String accountForYourself()
	{
		// Describe the behavior, including the protected-target mask when one is set.
		if(getParms().length()>0)
			return "protecting of "+CMLib.masking().maskDesc(getParms(),true);
		else
			return "protecting of others";
	}

	@Override
	public void executeMsg(Environmental affecting, CMMsg msg)
	{
		super.executeMsg(affecting,msg);
		// Only react to messages aimed at a MOB.
		if((msg.target()==null)||(!(msg.target() instanceof MOB)))
			return;
		final MOB mob=msg.source();
		final MOB monster=(MOB)affecting;
		final MOB target=(MOB)msg.target();
		// Attack the aggressor when: the fight is between two OTHER parties,
		// the message is malicious, this mob is not already in combat, it can
		// see both attacker and victim, and the victim matches the configured
		// protection mask (presumably an empty mask matches everyone, per
		// maskCheck semantics — confirm).
		if((mob!=monster)
		&&(target!=monster)
		&&(mob!=target)
		&&(CMath.bset(msg.targetMajor(),CMMsg.MASK_MALICIOUS))
		&&(!monster.isInCombat())
		&&(CMLib.flags().canBeSeenBy(mob,monster))
		&&(CMLib.flags().canBeSeenBy(target,monster))
		&&(CMLib.masking().maskCheck(getParms(),target,false)))
			Aggressive.startFight(monster,mob,true,false,null);
	}
}
| MaxRau/CoffeeMud | com/planet_ink/coffee_mud/Behaviors/CombatAssister.java | Java | apache-2.0 | 2,475 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.processor;
import java.util.List;
import org.apache.camel.Navigate;
import org.apache.camel.Processor;
import org.apache.camel.Route;
import org.apache.camel.model.LoadBalanceDefinition;
import org.apache.camel.model.ProcessorDefinition;
import org.apache.camel.model.RouteDefinition;
import org.apache.camel.model.SendDefinition;
import org.apache.camel.model.loadbalancer.RandomLoadBalancerDefinition;
import org.apache.camel.processor.channel.DefaultChannel;
import org.junit.Test;
/**
* A crude unit test to navigate the route and build a Java DSL from the route
* definition
*/
public class RandomLoadBalanceJavaDSLBuilderTest extends RandomLoadBalanceTest {

    @Test
    public void testNavigateRouteAsJavaDSLWithNavigate() throws Exception {
        // this one navigate using the runtime route using the
        // Navigate<Processor>
        StringBuilder sb = new StringBuilder();
        Route route = context.getRoutes().get(0);
        // the start of the route
        sb.append("from(\"" + route.getEndpoint().getEndpointUri() + "\")");
        // navigate the route and add Java DSL to the sb
        Navigate<Processor> nav = route.navigate();
        navigateRoute(nav, sb);
        // output the Java DSL
        assertEquals("from(\"direct://start\").loadBalance().random().to(\"mock://x\").to(\"mock://y\").to(\"mock://z\")", sb.toString());
    }

    @Test
    public void testNavigateRouteAsJavaDSL() throws Exception {
        // this one navigate using the route definition
        StringBuilder sb = new StringBuilder();
        RouteDefinition route = context.getRouteDefinitions().get(0);
        // the start of the route
        sb.append("from(\"" + route.getInput().getUri() + "\")");
        // navigate the route and add Java DSL to the sb
        navigateDefinition(route, sb);
        // output the Java DSL
        assertEquals("from(\"direct://start\").loadBalance().random().to(\"mock://x\").to(\"mock://y\").to(\"mock://z\")", sb.toString());
    }

    // Walks the runtime processor graph, unwrapping the leading Pipeline and
    // rendering the wrapped definition of the first channel it finds.
    private void navigateRoute(Navigate<Processor> nav, StringBuilder sb) {
        if (nav instanceof Pipeline) {
            // unchecked cast: the first child of a Pipeline is assumed to be navigable
            nav = (Navigate<Processor>) nav.next().get(0);
        }
        if (!nav.hasNext()) {
            return;
        }
        if (nav instanceof DefaultChannel) {
            DefaultChannel channel = (DefaultChannel)nav;
            ProcessorDefinition<?> def = (ProcessorDefinition<?>)channel.getProcessorDefinition();
            navigateDefinition(def, sb);
        }
    }

    // Recursively renders a definition tree as Java DSL text; only the
    // load-balance (random) and send definitions used by this test are handled.
    private void navigateDefinition(ProcessorDefinition<?> def, StringBuilder sb) {
        // must do this ugly cast to avoid compiler error on HP-UX
        ProcessorDefinition<?> defn = (ProcessorDefinition<?>)def;
        if (defn instanceof LoadBalanceDefinition) {
            sb.append(".loadBalance()");
            LoadBalanceDefinition lbd = (LoadBalanceDefinition)defn;
            if (lbd.getLoadBalancerType() instanceof RandomLoadBalancerDefinition) {
                sb.append(".random()");
            }
        }
        if (defn instanceof SendDefinition) {
            SendDefinition<?> send = (SendDefinition<?>)defn;
            sb.append(".to(\"" + send.getUri() + "\")");
        }
        List<ProcessorDefinition<?>> children = defn.getOutputs();
        if (children == null || children.isEmpty()) {
            return;
        }
        for (ProcessorDefinition<?> child : children) {
            navigateDefinition(child, sb);
        }
    }
}
| DariusX/camel | core/camel-core/src/test/java/org/apache/camel/processor/RandomLoadBalanceJavaDSLBuilderTest.java | Java | apache-2.0 | 4,322 |
//// [emitSuperCallBeforeEmitParameterPropertyDeclaration1.ts]
class A {
blub = 6;
}
class B extends A {
constructor(public x: number) {
"use strict";
'someStringForEgngInject';
super()
}
}
//// [emitSuperCallBeforeEmitParameterPropertyDeclaration1.js]
var __extends = (this && this.__extends) || function (d, b) {
for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p];
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
var A = (function () {
function A() {
this.blub = 6;
}
return A;
}());
var B = (function (_super) {
__extends(B, _super);
function B(x) {
"use strict";
'someStringForEgngInject';
var _this = _super.call(this) || this;
_this.x = x;
return _this;
}
return B;
}(A));
| jeremyepling/TypeScript | tests/baselines/reference/emitSuperCallBeforeEmitParameterPropertyDeclaration1.js | JavaScript | apache-2.0 | 917 |
/*!
* Copyright 2014 Apereo Foundation (AF) Licensed under the
* Educational Community License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS"
* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
var passport = require('passport');
var ConfigAPI = require('oae-config');
var OAE = require('oae-util/lib/oae');
var AuthenticationConfig = ConfigAPI.config('oae-authentication');
var AuthenticationConstants = require('oae-authentication/lib/constants').AuthenticationConstants;
var AuthenticationUtil = require('oae-authentication/lib/util');
/**
* @REST postAuthGoogle
*
* Log in using Google authentication
*
* @Server tenant
* @Method POST
* @Path /auth/google
* @Return {void}
* @HttpResponse 302 The user will be redirected to Google where they can log in
* @HttpResponse 400 The authentication strategy is disabled for this tenant
*/
OAE.tenantRouter.on('post', '/api/auth/google', function(req, res, next) {
    // Get the ID under which we registered this strategy for this tenant
    var strategyId = AuthenticationUtil.getStrategyId(req.tenant, AuthenticationConstants.providers.GOOGLE);
    var options = {
        // To avoid authenticating with the wrong Google account, we give the user the opportunity to select or add
        // the correct account during the OAuth authentication cycle
        'prompt': 'select_account'
    };
    // If there's only one allowed domain, add that to options as the hosted domain
    // @see https://developers.google.com/identity/protocols/OpenIDConnect#authenticationuriparameters
    // NOTE(review): String#split always returns an array, so `domains` is always
    // truthy; and an empty config value splits to [''], which would set hd to ''
    // — confirm the 'domains' config value is guaranteed non-empty here.
    var domains = AuthenticationConfig.getValue(req.tenant.alias, AuthenticationConstants.providers.GOOGLE, 'domains').split(',');
    if (domains && domains.length === 1) {
        options['hd'] = domains[0];
    }
    // Perform the initial authentication step
    AuthenticationUtil.handleExternalSetup(strategyId, options, req, res, next);
});
/**
* @REST getAuthGoogleCallback
*
* Callback URL after the user has logged in using Google authentication
*
* @Api private
* @Server tenant
 * @Method GET
* @Path /auth/google/callback
* @Return {void}
*/
// OAuth callback: Google redirects the browser here after authentication.
OAE.tenantRouter.on('get', '/api/auth/google/callback', function(req, res, next) {
    // Get the ID under which we registered this strategy for this tenant
    var strategyId = AuthenticationUtil.getStrategyId(req.tenant, AuthenticationConstants.providers.GOOGLE);

    // Complete the external authentication cycle and log the user in
    AuthenticationUtil.handleExternalCallback(strategyId, req, res, next);
});
| nicolaasmatthijs/Hilary | node_modules/oae-authentication/lib/strategies/google/rest.js | JavaScript | apache-2.0 | 2,999 |
package integration
import (
"testing"
kapi "k8s.io/kubernetes/pkg/api"
kapierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"github.com/openshift/origin/pkg/client"
policy "github.com/openshift/origin/pkg/cmd/admin/policy"
configapi "github.com/openshift/origin/pkg/cmd/server/api"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
deployapi "github.com/openshift/origin/pkg/deploy/api"
pluginapi "github.com/openshift/origin/pkg/scheduler/admission/podnodeconstraints/api"
testutil "github.com/openshift/origin/test/util"
testserver "github.com/openshift/origin/test/util/server"
)
// TestPodNodeConstraintsAdmissionPluginSetNodeNameClusterAdmin verifies that a
// cluster admin can create pod-template-bearing objects with an explicit node
// name while the PodNodeConstraints plugin is enabled (expectError=false).
func TestPodNodeConstraintsAdmissionPluginSetNodeNameClusterAdmin(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	oclient, kclientset := setupClusterAdminPodNodeConstraintsTest(t, &pluginapi.PodNodeConstraintsConfig{})
	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node name, cluster admin", kclientset, oclient, "nodename.example.com", nil, false)
}
// TestPodNodeConstraintsAdmissionPluginSetNodeNameNonAdmin verifies that a
// regular user is forbidden from setting an explicit node name
// (expectError=true).
func TestPodNodeConstraintsAdmissionPluginSetNodeNameNonAdmin(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	config := &pluginapi.PodNodeConstraintsConfig{}
	oclient, kclientset := setupUserPodNodeConstraintsTest(t, config, "derples")
	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node name, regular user", kclientset, oclient, "nodename.example.com", nil, true)
}
// TestPodNodeConstraintsAdmissionPluginSetNodeSelectorClusterAdmin verifies
// that a cluster admin may use a node selector whose label ("hostname") is on
// the plugin's blacklist (expectError=false).
func TestPodNodeConstraintsAdmissionPluginSetNodeSelectorClusterAdmin(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	config := &pluginapi.PodNodeConstraintsConfig{
		NodeSelectorLabelBlacklist: []string{"hostname"},
	}
	oclient, kclientset := setupClusterAdminPodNodeConstraintsTest(t, config)
	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node selector, cluster admin", kclientset, oclient, "", map[string]string{"hostname": "foo"}, false)
}
// TestPodNodeConstraintsAdmissionPluginSetNodeSelectorNonAdmin verifies that a
// regular user is forbidden from using a blacklisted node-selector label
// (expectError=true).
func TestPodNodeConstraintsAdmissionPluginSetNodeSelectorNonAdmin(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	config := &pluginapi.PodNodeConstraintsConfig{
		NodeSelectorLabelBlacklist: []string{"hostname"},
	}
	oclient, kclientset := setupUserPodNodeConstraintsTest(t, config, "derples")
	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node selector, regular user", kclientset, oclient, "", map[string]string{"hostname": "foo"}, true)
}
// setupClusterAdminPodNodeConstraintsTest starts a configured master with the
// PodNodeConstraints admission plugin enabled in both the Origin and the
// Kubernetes admission chains, creates the test namespace and waits for its
// pod-creation service accounts, then returns cluster-admin Origin and
// Kubernetes clients.
func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	// Register the plugin configuration with both admission chains so the
	// constraints apply to Origin and Kubernetes resources alike.
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	// Distinct messages for the two client lookups so a failure log
	// identifies which client could not be created.
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting kube client: %v", err)
	}
	openShiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting openshift client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return openShiftClient, kubeClientset
}
// setupUserPodNodeConstraintsTest starts a configured master with the
// PodNodeConstraints admission plugin enabled, creates the test namespace,
// grants the named user the admin role on it, and returns Origin and
// Kubernetes clients authenticated as that (non-cluster-admin) user.
func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig, user string) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	// Register the plugin configuration with both admission chains.
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Clients that act as the regular user under test.
	userClient, userkubeClientset, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, user)
	if err != nil {
		t.Fatalf("error getting user/kube client: %v", err)
	}
	// Cluster-admin kube client, used only for namespace setup below.
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting kube client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClientset, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Grant the user the namespace-admin role so creation attempts fail on the
	// admission plugin, not on authorization.
	addUser := &policy.RoleModificationOptions{
		RoleNamespace:       ns.Name,
		RoleName:            bootstrappolicy.AdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(clusterAdminClient),
		Users:               []string{user},
	}
	if err := addUser.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return userClient, userkubeClientset
}
// testPodNodeConstraintsPodSpec builds a minimal always-restart pod spec with
// a single test container and the given node name and node selector.
func testPodNodeConstraintsPodSpec(nodeName string, nodeSelector map[string]string) kapi.PodSpec {
	return kapi.PodSpec{
		RestartPolicy: kapi.RestartPolicyAlways,
		NodeName:      nodeName,
		NodeSelector:  nodeSelector,
		Containers: []kapi.Container{
			{
				Name:  "container",
				Image: "test/image",
			},
		},
	}
}
// testPodNodeConstraintsPod builds a standalone pod named "testpod" wrapping
// the shared test pod spec.
func testPodNodeConstraintsPod(nodeName string, nodeSelector map[string]string) *kapi.Pod {
	return &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{Name: "testpod"},
		Spec:       testPodNodeConstraintsPodSpec(nodeName, nodeSelector),
	}
}
// testPodNodeConstraintsReplicationController builds a one-replica RC whose
// template embeds the shared test pod spec.
func testPodNodeConstraintsReplicationController(nodeName string, nodeSelector map[string]string) *kapi.ReplicationController {
	return &kapi.ReplicationController{
		ObjectMeta: kapi.ObjectMeta{Name: "testrc"},
		Spec: kapi.ReplicationControllerSpec{
			Replicas: 1,
			Selector: map[string]string{"foo": "bar"},
			Template: &kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
				Spec:       testPodNodeConstraintsPodSpec(nodeName, nodeSelector),
			},
		},
	}
}
// testPodNodeConstraintsDeployment builds a one-replica Deployment whose
// template embeds the shared test pod spec.
func testPodNodeConstraintsDeployment(nodeName string, nodeSelector map[string]string) *extensions.Deployment {
	return &extensions.Deployment{
		ObjectMeta: kapi.ObjectMeta{Name: "testdeployment"},
		Spec: extensions.DeploymentSpec{
			Replicas: 1,
			Template: kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
				Spec:       testPodNodeConstraintsPodSpec(nodeName, nodeSelector),
			},
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"foo": "bar"},
			},
		},
	}
}
// testPodNodeConstraintsReplicaSet builds a one-replica ReplicaSet whose
// template embeds the shared test pod spec.
func testPodNodeConstraintsReplicaSet(nodeName string, nodeSelector map[string]string) *extensions.ReplicaSet {
	return &extensions.ReplicaSet{
		ObjectMeta: kapi.ObjectMeta{Name: "testrs"},
		Spec: extensions.ReplicaSetSpec{
			Replicas: 1,
			Template: kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
				Spec:       testPodNodeConstraintsPodSpec(nodeName, nodeSelector),
			},
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"foo": "bar"},
			},
		},
	}
}
// testPodNodeConstraintsJob builds a Job whose template embeds the shared
// test pod spec with the restart policy switched to Never (Jobs may not use
// RestartPolicyAlways).
func testPodNodeConstraintsJob(nodeName string, nodeSelector map[string]string) *batch.Job {
	spec := testPodNodeConstraintsPodSpec(nodeName, nodeSelector)
	spec.RestartPolicy = kapi.RestartPolicyNever
	// Matching selector is now generated automatically, so none is set here.
	return &batch.Job{
		ObjectMeta: kapi.ObjectMeta{Name: "testjob"},
		Spec: batch.JobSpec{
			Template: kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
				Spec:       spec,
			},
		},
	}
}
// testPodNodeConstraintsDeploymentConfig builds a one-replica
// DeploymentConfig whose template embeds the shared test pod spec.
func testPodNodeConstraintsDeploymentConfig(nodeName string, nodeSelector map[string]string) *deployapi.DeploymentConfig {
	return &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{Name: "testdc"},
		Spec: deployapi.DeploymentConfigSpec{
			Replicas: 1,
			Selector: map[string]string{"foo": "bar"},
			Template: &kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
				Spec:       testPodNodeConstraintsPodSpec(nodeName, nodeSelector),
			},
		},
	}
}
// testPodNodeConstraintsObjectCreationWithPodTemplate attempts to create different object types that contain pod templates
// using the passed in nodeName and nodeSelector. It will use the expectError flag to determine if an error should be returned or not
func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name string, kclientset kclientset.Interface, client client.Interface, nodeName string, nodeSelector map[string]string, expectError bool) {
	// checkForbiddenErr asserts that err is a forbidden error exactly when
	// expectError is true, and that err is nil otherwise.
	checkForbiddenErr := func(objType string, err error) {
		if err == nil && expectError {
			t.Errorf("%s (%s): expected forbidden error but did not receive one", name, objType)
			return
		}
		if err != nil && !expectError {
			t.Errorf("%s (%s): got error but did not expect one: %v", name, objType, err)
			return
		}
		if err != nil && expectError && !kapierrors.IsForbidden(err) {
			t.Errorf("%s (%s): did not get an expected forbidden error: %v", name, objType, err)
			return
		}
	}

	// Pod
	pod := testPodNodeConstraintsPod(nodeName, nodeSelector)
	_, err := kclientset.Core().Pods(testutil.Namespace()).Create(pod)
	checkForbiddenErr("pod", err)

	// ReplicationController
	rc := testPodNodeConstraintsReplicationController(nodeName, nodeSelector)
	_, err = kclientset.Core().ReplicationControllers(testutil.Namespace()).Create(rc)
	checkForbiddenErr("rc", err)

	// TODO: Enable when the deployments endpoint is supported in Origin
	// Deployment
	// d := testPodNodeConstraintsDeployment(nodeName, nodeSelector)
	// _, err = kclientset.Extensions().Deployments(testutil.Namespace()).Create(d)
	// checkForbiddenErr("deployment", err)

	// ReplicaSet
	rs := testPodNodeConstraintsReplicaSet(nodeName, nodeSelector)
	_, err = kclientset.Extensions().ReplicaSets(testutil.Namespace()).Create(rs)
	checkForbiddenErr("replicaset", err)

	// Job
	job := testPodNodeConstraintsJob(nodeName, nodeSelector)
	_, err = kclientset.Batch().Jobs(testutil.Namespace()).Create(job)
	checkForbiddenErr("job", err)

	// DeploymentConfig
	dc := testPodNodeConstraintsDeploymentConfig(nodeName, nodeSelector)
	_, err = client.DeploymentConfigs(testutil.Namespace()).Create(dc)
	checkForbiddenErr("dc", err)
}
| chmouel/origin | test/integration/pod_node_constraints_test.go | GO | apache-2.0 | 10,828 |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
* Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.storagegateway.model.transform;
import java.util.Map;
import java.util.Map.Entry;
import java.math.*;
import java.nio.ByteBuffer;
import com.amazonaws.services.storagegateway.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * TapeArchive JSON Unmarshaller
 *
 * Streams tokens from the Jackson-backed context and populates a
 * {@link TapeArchive} until the matching end of the JSON object is reached.
 */
public class TapeArchiveJsonUnmarshaller implements
        Unmarshaller<TapeArchive, JsonUnmarshallerContext> {

    /**
     * Unmarshalls a {@link TapeArchive} from the current position of the
     * given context, returning {@code null} when the value is a JSON null.
     */
    public TapeArchive unmarshall(JsonUnmarshallerContext context)
            throws Exception {
        TapeArchive tapeArchive = new TapeArchive();

        // Remember the depth/parent at which this object started so the loop
        // below can tell when the matching END_OBJECT has been consumed.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL)
            return null;

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("TapeARN", targetDepth)) {
                    context.nextToken();
                    tapeArchive.setTapeARN(context
                            .getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("TapeBarcode", targetDepth)) {
                    context.nextToken();
                    tapeArchive.setTapeBarcode(context.getUnmarshaller(
                            String.class).unmarshall(context));
                }
                if (context.testExpression("TapeSizeInBytes", targetDepth)) {
                    context.nextToken();
                    tapeArchive.setTapeSizeInBytes(context.getUnmarshaller(
                            Long.class).unmarshall(context));
                }
                if (context.testExpression("CompletionTime", targetDepth)) {
                    context.nextToken();
                    tapeArchive.setCompletionTime(context.getUnmarshaller(
                            java.util.Date.class).unmarshall(context));
                }
                if (context.testExpression("RetrievedTo", targetDepth)) {
                    context.nextToken();
                    tapeArchive.setRetrievedTo(context.getUnmarshaller(
                            String.class).unmarshall(context));
                }
                if (context.testExpression("TapeStatus", targetDepth)) {
                    context.nextToken();
                    tapeArchive.setTapeStatus(context.getUnmarshaller(
                            String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once we have closed back down to the depth at which we
                // started, within the same parent element.
                if (context.getLastParsedParentElement() == null
                        || context.getLastParsedParentElement().equals(
                                currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return tapeArchive;
    }

    // Lazily-created shared instance; the unmarshaller is stateless.
    private static TapeArchiveJsonUnmarshaller instance;

    /** Returns the shared singleton instance. */
    public static TapeArchiveJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new TapeArchiveJsonUnmarshaller();
        return instance;
    }
}
| flofreud/aws-sdk-java | aws-java-sdk-storagegateway/src/main/java/com/amazonaws/services/storagegateway/model/transform/TapeArchiveJsonUnmarshaller.java | Java | apache-2.0 | 4,151 |
<?php
global $addslashes;

// Escape all values for safe output inside HTML attributes/text.
$rep_name = AT_print($this->rep_name, 'input.text');
$rep_title = AT_print($this->rep_title, 'input.text');
$rep_phone = AT_print($this->rep_phone, 'input.text');
$rep_email = AT_print($this->rep_email, 'input.text');
$rep_address = AT_print($this->rep_address, 'input.text');
?>
<?php /* Use !empty() so a missing `id` query parameter does not raise an
         undefined-index notice; a non-empty id means we are editing an
         existing representation rather than adding a new one. */ ?>
<div class="headingbox"><h3><?php if(!empty($_GET['id'])){echo _AT('edit_representation');}else{echo _AT('add_new_representation');}?></h3></div>
<div class="contentbox">
<form method="post" action="<?php echo url_rewrite(AT_SOCIAL_BASENAME.'edit_profile.php'); ?>">
<dl id="public-profile">
	<dt><label for="rep_name"><?php echo _AT('name'); ?></label></dt>
	<dd><input type="text" id="rep_name" name="rep_name" value="<?php echo $rep_name; ?>" /></dd>

	<dt><label for="rep_title"><?php echo _AT('title'); ?></label></dt>
	<dd><input type="text" id="rep_title" name="rep_title" value="<?php echo $rep_title; ?>" /></dd>

	<dt><label for="rep_phone"><?php echo _AT('phone'); ?></label></dt>
	<dd><input type="text" id="rep_phone" name="rep_phone" value="<?php echo $rep_phone; ?>" /></dd>

	<dt><label for="rep_email"><?php echo _AT('email'); ?></label></dt>
	<dd><input type="text" id="rep_email" name="rep_email" value="<?php echo $rep_email; ?>" /></dd>

	<dt><label for="rep_address"><?php echo _AT('street_address'); ?></label></dt>
	<dd><textarea name="rep_address" id="rep_address" cols="40" rows="5"><?php echo $rep_address; ?></textarea></dd>
</dl>
<input type="hidden" name="id" value="<?php echo $this->id; ?>" />
<?php if(!empty($_GET['id'])){ ?>
	<input type="hidden" name="edit" value="representation" />
<?php }else { ?>
	<input type="hidden" name="add" value="representation" />
<?php } ?>
<input type="submit" name="submit" class="button" value="<?php echo _AT('save'); ?>" />
<input type="submit" name="cancel" class="button" value="<?php echo _AT('cancel'); ?>" />
</form>
</div>
/*
* Copyright 2011 <a href="mailto:lincolnbaxter@gmail.com">Lincoln Baxter, III</a>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ocpsoft.rewrite.transform.markup;
import java.util.Arrays;
import java.util.List;
import org.jruby.embed.ScriptingContainer;
import org.ocpsoft.rewrite.transform.Transformer;
import org.ocpsoft.rewrite.transform.markup.impl.JRubyTransformer;
/**
 * A {@link Transformer} that translates SASS files into CSS.
 *
 * @author Christian Kaltepoth
 */
public class Sass extends JRubyTransformer<Sass>
{

    // Ruby scriptlet run by the embedded JRuby container: compiles the bound
    // "input" variable from SCSS syntax to CSS, with Sass's cache disabled.
    private static final String SCRIPT = "require 'sass'\n" +
            "engine = Sass::Engine.new(input, :syntax => :scss, :cache => false)\n" +
            "engine.render\n";

    /**
     * Create a {@link Transformer} that compiles SASS files into CSS.
     */
    public static Sass compiler()
    {
        return new Sass();
    }

    // Instances are obtained via compiler().
    protected Sass()
    {}

    /** Load path for the bundled Sass Ruby library. */
    @Override
    public List<String> getLoadPaths()
    {
        return Arrays.asList("ruby/sass/lib");
    }

    /** Runs the compilation scriptlet and returns the rendered CSS. */
    @Override
    public Object runScript(ScriptingContainer container)
    {
        return container.runScriptlet(SCRIPT);
    }

    @Override
    public Sass self()
    {
        return this;
    }

    // No container preparation is required for Sass.
    @Override
    protected void prepareContainer(ScriptingContainer container)
    {}

    @Override
    protected Class<Sass> getTransformerType()
    {
        return Sass.class;
    }
}
| ocpsoft/rewrite | transform-markup/src/main/java/org/ocpsoft/rewrite/transform/markup/Sass.java | Java | apache-2.0 | 1,903 |
/*
* Copyright 2002-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sf.json.filters;
import net.sf.json.util.PropertyFilter;
/**
 * A {@link PropertyFilter} whose {@link #apply} method always returns
 * {@code false}, regardless of the source object, property name or value.
 *
 * @author Andres Almiray <aalmiray@users.sourceforge.net>
 */
public class FalsePropertyFilter implements PropertyFilter {
   public boolean apply( Object source, String name, Object value ) {
      return false;
   }
}
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.query.groupby.orderby;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import com.metamx.common.guava.Sequence;
import com.metamx.common.guava.Sequences;
import io.druid.data.input.MapBasedRow;
import io.druid.data.input.Row;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.LongSumAggregatorFactory;
import io.druid.query.aggregation.PostAggregator;
import io.druid.query.aggregation.post.ArithmeticPostAggregator;
import io.druid.query.aggregation.post.ConstantPostAggregator;
import io.druid.query.dimension.DefaultDimensionSpec;
import io.druid.query.dimension.DimensionSpec;
import io.druid.query.ordering.StringComparators;
import io.druid.segment.TestHelper;
import org.joda.time.DateTime;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Tests JSON (de)serialization and the ordering/limiting behavior of
 * {@link DefaultLimitSpec}.
 */
public class DefaultLimitSpecTest
{
  private final List<Row> testRowsList;
  private final Sequence<Row> testRowsSequence;

  public DefaultLimitSpecTest()
  {
    // Three rows sharing one timestamp, with a double column (k1) and two
    // long columns (k2, k3); all tests below operate on this fixture.
    testRowsList = ImmutableList.of(
        createRow("2011-04-01", "k1", 10.0, "k2", 1L, "k3", 2L),
        createRow("2011-04-01", "k1", 20.0, "k2", 3L, "k3", 1L),
        createRow("2011-04-01", "k1", 9.0, "k2", 2L, "k3", 3L)
    );
    testRowsSequence = Sequences.simple(testRowsList);
  }

  /**
   * Round-trips several JSON forms (defaults, explicit direction and
   * dimension order, abbreviated direction, bare column names) through the
   * object mapper and checks the resulting spec.
   */
  @Test
  public void testSerde() throws Exception
  {
    ObjectMapper mapper = TestHelper.getObjectMapper();

    //defaults
    String json = "{\"type\": \"default\"}";

    DefaultLimitSpec spec = mapper.readValue(
        mapper.writeValueAsString(mapper.readValue(json, DefaultLimitSpec.class)),
        DefaultLimitSpec.class
    );

    Assert.assertEquals(
        new DefaultLimitSpec(null, null),
        spec
    );

    //non-defaults
    json = "{\n"
           + "  \"type\":\"default\",\n"
           + "  \"columns\":[{\"dimension\":\"d\",\"direction\":\"DESCENDING\", \"dimensionOrder\":\"numeric\"}],\n"
           + "  \"limit\":10\n"
           + "}";

    spec = mapper.readValue(
        mapper.writeValueAsString(mapper.readValue(json, DefaultLimitSpec.class)),
        DefaultLimitSpec.class
    );
    Assert.assertEquals(
        new DefaultLimitSpec(ImmutableList.of(new OrderByColumnSpec("d", OrderByColumnSpec.Direction.DESCENDING,
                                                                    StringComparators.NUMERIC)), 10),
        spec
    );

    // abbreviated direction ("DES") should still parse as DESCENDING
    json = "{\n"
           + "  \"type\":\"default\",\n"
           + "  \"columns\":[{\"dimension\":\"d\",\"direction\":\"DES\", \"dimensionOrder\":\"numeric\"}],\n"
           + "  \"limit\":10\n"
           + "}";

    spec = mapper.readValue(
        mapper.writeValueAsString(mapper.readValue(json, DefaultLimitSpec.class)),
        DefaultLimitSpec.class
    );
    Assert.assertEquals(
        new DefaultLimitSpec(ImmutableList.of(new OrderByColumnSpec("d", OrderByColumnSpec.Direction.DESCENDING,
                                                                    StringComparators.NUMERIC)), 10),
        spec
    );

    // column object with only a dimension: direction/order take defaults
    json = "{\n"
           + "  \"type\":\"default\",\n"
           + "  \"columns\":[{\"dimension\":\"d\"}],\n"
           + "  \"limit\":10\n"
           + "}";

    spec = mapper.readValue(
        mapper.writeValueAsString(mapper.readValue(json, DefaultLimitSpec.class)),
        DefaultLimitSpec.class
    );
    Assert.assertEquals(
        new DefaultLimitSpec(ImmutableList.of(new OrderByColumnSpec("d", OrderByColumnSpec.Direction.ASCENDING,
                                                                    StringComparators.LEXICOGRAPHIC)), 10),
        spec
    );

    // bare string column name form
    json = "{\n"
           + "  \"type\":\"default\",\n"
           + "  \"columns\":[\"d\"],\n"
           + "  \"limit\":10\n"
           + "}";

    spec = mapper.readValue(
        mapper.writeValueAsString(mapper.readValue(json, DefaultLimitSpec.class)),
        DefaultLimitSpec.class
    );
    Assert.assertEquals(
        new DefaultLimitSpec(ImmutableList.of(new OrderByColumnSpec("d", OrderByColumnSpec.Direction.ASCENDING,
                                                                    StringComparators.LEXICOGRAPHIC)), 10),
        spec
    );
  }

  /** With no ordering columns, the spec simply truncates to the limit. */
  @Test
  public void testBuildSimple()
  {
    DefaultLimitSpec limitSpec = new DefaultLimitSpec(
        ImmutableList.<OrderByColumnSpec>of(),
        2
    );

    Function<Sequence<Row>, Sequence<Row>> limitFn = limitSpec.build(
        ImmutableList.<DimensionSpec>of(),
        ImmutableList.<AggregatorFactory>of(),
        ImmutableList.<PostAggregator>of()
    );

    Assert.assertEquals(
        ImmutableList.of(testRowsList.get(0), testRowsList.get(1)),
        Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>())
    );
  }

  /** Sorting on a dimension column, descending, then truncating. */
  @Test
  public void testSortDimensionDescending()
  {
    DefaultLimitSpec limitSpec = new DefaultLimitSpec(
        ImmutableList.of(new OrderByColumnSpec("k1", OrderByColumnSpec.Direction.DESCENDING)),
        2
    );

    Function<Sequence<Row>, Sequence<Row>> limitFn = limitSpec.build(
        ImmutableList.<DimensionSpec>of(new DefaultDimensionSpec("k1", "k1")),
        ImmutableList.<AggregatorFactory>of(),
        ImmutableList.<PostAggregator>of()
    );

    // Note: This test encodes the fact that limitSpec sorts numbers like strings; we might want to change this
    // in the future.
    Assert.assertEquals(
        ImmutableList.of(testRowsList.get(2), testRowsList.get(1)),
        Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>())
    );
  }

  /**
   * When a column name matches a dimension, an aggregator, or a
   * post-aggregator, the matching definition determines the ordering used.
   */
  @Test
  public void testBuildWithExplicitOrder()
  {
    DefaultLimitSpec limitSpec = new DefaultLimitSpec(
        ImmutableList.of(
            new OrderByColumnSpec("k1", OrderByColumnSpec.Direction.ASCENDING)
        ),
        2
    );

    Function<Sequence<Row>, Sequence<Row>> limitFn = limitSpec.build(
        ImmutableList.<DimensionSpec>of(
            new DefaultDimensionSpec("k1", "k1")
        ),
        ImmutableList.<AggregatorFactory>of(
            new LongSumAggregatorFactory("k2", "k2")
        ),
        ImmutableList.<PostAggregator>of(
            new ConstantPostAggregator("k3", 1L)
        )
    );
    Assert.assertEquals(
        ImmutableList.of(testRowsList.get(0), testRowsList.get(1)),
        Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>())
    );

    // if there is an aggregator with same name then that is used to build ordering
    limitFn = limitSpec.build(
        ImmutableList.<DimensionSpec>of(
            new DefaultDimensionSpec("k1", "k1")
        ),
        ImmutableList.<AggregatorFactory>of(
            new LongSumAggregatorFactory("k1", "k1")
        ),
        ImmutableList.<PostAggregator>of(
            new ConstantPostAggregator("k3", 1L)
        )
    );
    Assert.assertEquals(
        ImmutableList.of(testRowsList.get(2), testRowsList.get(0)),
        Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>())
    );

    // if there is a post-aggregator with same name then that is used to build ordering
    limitFn = limitSpec.build(
        ImmutableList.<DimensionSpec>of(
            new DefaultDimensionSpec("k1", "k1")
        ),
        ImmutableList.<AggregatorFactory>of(
            new LongSumAggregatorFactory("k2", "k2")
        ),
        ImmutableList.<PostAggregator>of(
            new ArithmeticPostAggregator(
                "k1",
                "+",
                ImmutableList.<PostAggregator>of(
                    new ConstantPostAggregator("x", 1),
                    new ConstantPostAggregator("y", 1))
            )
        )
    );
    Assert.assertEquals(
        ImmutableList.of(testRowsList.get(2), testRowsList.get(0)),
        Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>())
    );
  }

  /**
   * Builds a MapBasedRow at the given timestamp from alternating
   * key/value varargs.
   */
  private Row createRow(String timestamp, Object... vals)
  {
    Preconditions.checkArgument(vals.length % 2 == 0);

    Map<String, Object> theVals = Maps.newHashMap();
    for (int i = 0; i < vals.length; i += 2) {
      theVals.put(vals[i].toString(), vals[i + 1]);
    }

    DateTime ts = new DateTime(timestamp);
    return new MapBasedRow(ts, theVals);
  }
}
| Saligia-eva/mobvista_druid | processing/src/test/java/io/druid/query/groupby/orderby/DefaultLimitSpecTest.java | Java | apache-2.0 | 9,169 |
declare namespace compareVersions {
  /**
   * Allowed arithmetic operators
   */
  type CompareOperator = '>' | '>=' | '=' | '<' | '<=';
}

// The module is callable (the sort-style comparator) and also exposes a
// `compare` helper that applies one of the operators above.
declare const compareVersions: {
  /**
   * Compare [semver](https://semver.org/) version strings to find greater, equal or lesser.
   * This library supports the full semver specification, including comparing versions with different number of digits like `1.0.0`, `1.0`, `1`, and pre-release versions like `1.0.0-alpha`.
   * @param firstVersion - First version to compare
   * @param secondVersion - Second version to compare
   * @returns Numeric value compatible with the [Array.sort(fn) interface](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#Parameters).
   */
  (firstVersion: string, secondVersion: string): 1 | 0 | -1;

  /**
   * Compare [semver](https://semver.org/) version strings using the specified operator.
   *
   * @param firstVersion First version to compare
   * @param secondVersion Second version to compare
   * @param operator Allowed arithmetic operator to use
   * @returns `true` if the comparison between the firstVersion and the secondVersion satisfies the operator, `false` otherwise.
   *
   * @example
   * ```
   * compareVersions.compare('10.1.8', '10.0.4', '>'); // return true
   * compareVersions.compare('10.0.1', '10.0.1', '='); // return true
   * compareVersions.compare('10.1.1', '10.2.2', '<'); // return true
   * compareVersions.compare('10.1.1', '10.2.2', '<='); // return true
   * compareVersions.compare('10.1.1', '10.2.2', '>='); // return false
   * ```
   */
  compare(
    firstVersion: string,
    secondVersion: string,
    operator: compareVersions.CompareOperator
  ): boolean;
};

// CommonJS-style export (consumed via `import compareVersions = require(...)`).
export = compareVersions;
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dfp.axis.v201505.orderservice;
import com.google.api.ads.common.lib.auth.OfflineCredentials;
import com.google.api.ads.common.lib.auth.OfflineCredentials.Api;
import com.google.api.ads.dfp.axis.factory.DfpServices;
import com.google.api.ads.dfp.axis.utils.v201505.StatementBuilder;
import com.google.api.ads.dfp.axis.v201505.Order;
import com.google.api.ads.dfp.axis.v201505.OrderPage;
import com.google.api.ads.dfp.axis.v201505.OrderServiceInterface;
import com.google.api.ads.dfp.lib.client.DfpSession;
import com.google.api.client.auth.oauth2.Credential;
/**
 * This example gets all orders. To create orders, run CreateOrders.java.
 *
 * Credentials and properties in {@code fromFile()} are pulled from the
 * "ads.properties" file. See README for more info.
 *
 * Tags: OrderService.getOrdersByStatement
 *
 * @author Adam Rogal
 */
public class GetAllOrders {

  /**
   * Pages through all orders for the session's network, printing each
   * order's ID and name.
   */
  public static void runExample(DfpServices dfpServices, DfpSession session) throws Exception {
    // Get the OrderService.
    OrderServiceInterface orderService =
        dfpServices.get(session, OrderServiceInterface.class);

    // Create a statement to select all orders.
    StatementBuilder statementBuilder = new StatementBuilder()
        .orderBy("id ASC")
        .limit(StatementBuilder.SUGGESTED_PAGE_LIMIT);

    // Default for total result set size.
    int totalResultSetSize = 0;

    do {
      // Get orders by statement.
      OrderPage page =
          orderService.getOrdersByStatement(statementBuilder.toStatement());

      if (page.getResults() != null) {
        totalResultSetSize = page.getTotalResultSetSize();
        int i = page.getStartIndex();
        for (Order order : page.getResults()) {
          System.out.printf(
              "%d) Order with ID \"%d\" and name \"%s\" was found.\n", i++,
              order.getId(), order.getName());
        }
      }

      // Advance the statement offset to fetch the next page.
      statementBuilder.increaseOffsetBy(StatementBuilder.SUGGESTED_PAGE_LIMIT);
    } while (statementBuilder.getOffset() < totalResultSetSize);

    System.out.printf("Number of results found: %d\n", totalResultSetSize);
  }

  /**
   * Builds an OAuth2 credential and DFP session from ads.properties, then
   * runs the example.
   */
  public static void main(String[] args) throws Exception {
    // Generate a refreshable OAuth2 credential.
    Credential oAuth2Credential = new OfflineCredentials.Builder()
        .forApi(Api.DFP)
        .fromFile()
        .build()
        .generateCredential();

    // Construct a DfpSession.
    DfpSession session = new DfpSession.Builder()
        .fromFile()
        .withOAuth2Credential(oAuth2Credential)
        .build();

    DfpServices dfpServices = new DfpServices();

    runExample(dfpServices, session);
  }
}
| stoksey69/googleads-java-lib | examples/dfp_axis/src/main/java/dfp/axis/v201505/orderservice/GetAllOrders.java | Java | apache-2.0 | 3,244 |
# Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file contains helper functions for generating cURL examples
# based on Zulip's OpenAPI definitions, as well as test setup and
# fetching of appropriate parameter values to use when running the
# cURL examples as part of the tools/test-api test suite.
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
do_add_linkifier,
do_add_reaction,
do_add_realm_playground,
do_create_user,
update_user_presence,
)
from zerver.lib.events import do_events_register
from zerver.lib.initial_password import initial_password
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.users import get_api_key
from zerver.models import Client, Message, UserGroup, UserPresence, get_realm, get_user
# Maps "<endpoint>:<method>" keys to the function that produces example
# parameter values for that endpoint's cURL example.
GENERATOR_FUNCTIONS: Dict[str, Callable[[], Dict[str, object]]] = {}
# Names of all generator functions registered via the decorator below.
REGISTERED_GENERATOR_FUNCTIONS: Set[str] = set()
# Names of generator functions actually invoked while generating examples.
CALLED_GENERATOR_FUNCTIONS: Set[str] = set()
# This is a List rather than just a string in order to make it easier
# to write to it from another module.
AUTHENTICATION_LINE: List[str] = [""]
# Shared test-case instance; the generators below use its helper methods
# to create fixture data (users, streams, messages, ...).
helpers = ZulipTestCase()
def openapi_param_value_generator(
    endpoints: List[str],
) -> Callable[[Callable[[], Dict[str, object]]], Callable[[], Dict[str, object]]]:
    """This decorator is used to register OpenAPI param value generator functions
    with endpoints. Example usage:

    @openapi_param_value_generator(["/messages/render:post"])
    def ...
    """

    def wrapper(generator_func: Callable[[], Dict[str, object]]) -> Callable[[], Dict[str, object]]:
        @wraps(generator_func)
        def _record_calls_wrapper() -> Dict[str, object]:
            # Record the call so assert_all_helper_functions_called can
            # detect generators that are registered but never exercised.
            CALLED_GENERATOR_FUNCTIONS.add(generator_func.__name__)
            return generator_func()

        REGISTERED_GENERATOR_FUNCTIONS.add(generator_func.__name__)
        # The same generator may serve several endpoints.
        for endpoint in endpoints:
            GENERATOR_FUNCTIONS[endpoint] = _record_calls_wrapper

        return _record_calls_wrapper

    return wrapper
def assert_all_helper_functions_called() -> None:
    """Throws an exception if any registered helpers were not called by tests"""
    if REGISTERED_GENERATOR_FUNCTIONS != CALLED_GENERATOR_FUNCTIONS:
        # Report exactly which registered generators were never invoked.
        uncalled_functions = str(REGISTERED_GENERATOR_FUNCTIONS - CALLED_GENERATOR_FUNCTIONS)
        raise Exception(f"Registered curl API generators were not called: {uncalled_functions}")
def patch_openapi_example_values(
    entry: str,
    params: List[Dict[str, Any]],
    request_body: Optional[Dict[str, Any]] = None,
) -> Tuple[List[Dict[str, object]], Optional[Dict[str, object]]]:
    """Overwrite the static example values in an endpoint's OpenAPI
    parameter/request-body definitions with realm-specific values produced
    by the generator registered for that endpoint (if any).

    Mutates ``params``/``request_body`` in place and also returns them.
    """
    if entry not in GENERATOR_FUNCTIONS:
        return params, request_body
    func = GENERATOR_FUNCTIONS[entry]
    realm_example_values: Dict[str, object] = func()

    for param in params:
        param_name = param["name"]
        if param_name in realm_example_values:
            # Parameters with a "content" key store their example inside
            # the JSON media-type object.
            if "content" in param:
                param["content"]["application/json"]["example"] = realm_example_values[param_name]
            else:
                param["example"] = realm_example_values[param_name]

    if request_body is not None:
        properties = request_body["content"]["multipart/form-data"]["schema"]["properties"]
        # Renamed loop variable from "property" to "prop" to avoid
        # shadowing the built-in property().
        for key, prop in properties.items():
            if key in realm_example_values:
                prop["example"] = realm_example_values[key]
    return params, request_body
@openapi_param_value_generator(["/fetch_api_key:post"])
def fetch_api_key() -> Dict[str, object]:
    """Provide Iago's login credentials for the /fetch_api_key example."""
    iago_email = helpers.example_email("iago")
    return {
        "username": iago_email,
        "password": initial_password(iago_email),
    }
@openapi_param_value_generator(
    [
        "/messages/{message_id}:get",
        "/messages/{message_id}/history:get",
        "/messages/{message_id}:patch",
        "/messages/{message_id}:delete",
    ]
)
def iago_message_id() -> Dict[str, object]:
    """Send a stream message as Iago and expose its id to the examples."""
    new_message_id = helpers.send_stream_message(helpers.example_user("iago"), "Denmark")
    return {"message_id": new_message_id}
@openapi_param_value_generator(["/messages/{message_id}/reactions:delete"])
def add_emoji_to_message() -> Dict[str, object]:
    """Attach an octopus reaction to message 43 so the delete-reaction
    example has a reaction to remove; the endpoint parameter values
    themselves come from the static examples in zulip.yaml, hence the
    empty return dict."""
    user_profile = helpers.example_user("iago")

    # from OpenAPI format data in zulip.yaml
    message_id = 43
    emoji_name = "octopus"
    emoji_code = "1f419"
    reaction_type = "unicode_emoji"

    message = Message.objects.select_related().get(id=message_id)
    do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)
    return {}
@openapi_param_value_generator(["/messages/flags:post"])
def update_flags_message_ids() -> Dict[str, object]:
    """Create three stream messages whose flags the example can update."""
    stream_name = "Venice"
    helpers.subscribe(helpers.example_user("iago"), stream_name)
    return {
        "messages": [
            helpers.send_stream_message(helpers.example_user("iago"), stream_name)
            for _ in range(3)
        ],
    }
@openapi_param_value_generator(["/mark_stream_as_read:post", "/users/me/{stream_id}/topics:get"])
def get_venice_stream_id() -> Dict[str, object]:
    """Look up the id of the Venice stream for the examples."""
    return dict(stream_id=helpers.get_stream_id("Venice"))
@openapi_param_value_generator(["/streams/{stream_id}:patch"])
def update_stream() -> Dict[str, object]:
    """Create a throwaway stream for the stream-update example to modify."""
    temp_stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 1")
    return {"stream_id": temp_stream.id}
@openapi_param_value_generator(["/streams/{stream_id}:delete"])
def create_temp_stream_and_get_id() -> Dict[str, object]:
    """Create a throwaway stream for the stream-delete example to remove."""
    temp_stream = helpers.subscribe(helpers.example_user("iago"), "temp_stream 2")
    return {"stream_id": temp_stream.id}
@openapi_param_value_generator(["/mark_topic_as_read:post"])
def get_denmark_stream_id_and_topic() -> Dict[str, object]:
    """Seed the Denmark stream with a topic and return both identifiers."""
    stream = "Denmark"
    topic = "Tivoli Gardens"

    helpers.subscribe(helpers.example_user("iago"), stream)
    helpers.send_stream_message(helpers.example_user("hamlet"), stream, topic_name=topic)

    return {
        "stream_id": helpers.get_stream_id(stream),
        "topic_name": topic,
    }
@openapi_param_value_generator(["/users/me/subscriptions/properties:post"])
def update_subscription_data() -> Dict[str, object]:
    """Subscribe Iago to two streams and build property updates for them."""
    iago = helpers.example_user("iago")
    for stream in ("Verona", "social"):
        helpers.subscribe(iago, stream)
    return {
        "subscription_data": [
            {"stream_id": helpers.get_stream_id("Verona"), "property": "pin_to_top", "value": True},
            {"stream_id": helpers.get_stream_id("social"), "property": "color", "value": "#f00f00"},
        ],
    }
@openapi_param_value_generator(["/users/me/subscriptions:delete"])
def delete_subscription_data() -> Dict[str, object]:
    """Subscribe Iago and Zoe to Verona and social so the unsubscribe
    example has subscriptions to remove."""
    for user in (helpers.example_user("iago"), helpers.example_user("ZOE")):
        for stream in ("Verona", "social"):
            helpers.subscribe(user, stream)
    return {}
@openapi_param_value_generator(["/events:get"])
def get_events() -> Dict[str, object]:
    """Register an event queue for Iago and generate a message event so
    the GET /events example has something to fetch."""
    iago = helpers.example_user("iago")
    helpers.subscribe(iago, "Verona")
    client = Client.objects.create(name="curl-test-client-1")
    registration = do_events_register(iago, client, event_types=["message", "realm_emoji"])
    # Sending a message after registration produces an event in the queue.
    helpers.send_stream_message(helpers.example_user("hamlet"), "Verona")
    return {
        "queue_id": registration["queue_id"],
        "last_event_id": registration["last_event_id"],
    }
@openapi_param_value_generator(["/events:delete"])
def delete_event_queue() -> Dict[str, object]:
    """Register a fresh event queue so the delete-queue example can remove it."""
    iago = helpers.example_user("iago")
    client = Client.objects.create(name="curl-test-client-2")
    registration = do_events_register(iago, client, event_types=["message"])
    return {
        "queue_id": registration["queue_id"],
        "last_event_id": registration["last_event_id"],
    }
@openapi_param_value_generator(["/users/{user_id_or_email}/presence:get"])
def get_user_presence() -> Dict[str, object]:
    """Record an active presence entry for Iago so the presence example
    has data to return; endpoint params come from static examples."""
    client = Client.objects.create(name="curl-test-client-3")
    update_user_presence(
        helpers.example_user("iago"), client, timezone_now(), UserPresence.ACTIVE, False
    )
    return {}
@openapi_param_value_generator(["/users:post"])
def create_user() -> Dict[str, object]:
    """Supply a fresh, unregistered email for the create-user example."""
    return dict(email=helpers.nonreg_email("test"))
@openapi_param_value_generator(["/user_groups/create:post"])
def create_user_group_data() -> Dict[str, object]:
    """Supply member ids (Hamlet and Othello) for the create-group example."""
    member_ids = [helpers.example_user(name).id for name in ("hamlet", "othello")]
    return {"members": member_ids}
@openapi_param_value_generator(
    ["/user_groups/{user_group_id}:patch", "/user_groups/{user_group_id}:delete"]
)
def get_temp_user_group_id() -> Dict[str, object]:
    """Ensure a user group named "temp" exists and return its id."""
    group, _ = UserGroup.objects.get_or_create(name="temp", realm=get_realm("zulip"))
    return {"user_group_id": group.id}
@openapi_param_value_generator(["/realm/filters/{filter_id}:delete"])
def remove_realm_filters() -> Dict[str, object]:
    """Create a linkifier so the delete-filter example has one to remove."""
    new_filter_id = do_add_linkifier(
        get_realm("zulip"), "#(?P<id>[0-9]{2,8})", "https://github.com/zulip/zulip/pull/%(id)s"
    )
    return {"filter_id": new_filter_id}
@openapi_param_value_generator(["/realm/emoji/{emoji_name}:post", "/user_uploads:post"])
def upload_custom_emoji() -> Dict[str, object]:
    """Point the upload examples at a small image fixture in the tree."""
    return dict(filename="zerver/tests/images/animated_img.gif")
@openapi_param_value_generator(["/realm/playgrounds:post"])
def add_realm_playground() -> Dict[str, object]:
    """Supply the fields needed to create a code playground."""
    return dict(
        name="Python2 playground",
        pygments_language="Python2",
        url_prefix="https://python2.example.com",
    )
@openapi_param_value_generator(["/realm/playgrounds/{playground_id}:delete"])
def remove_realm_playground() -> Dict[str, object]:
    """Create a playground so the delete example has one to remove."""
    new_playground_id = do_add_realm_playground(
        get_realm("zulip"),
        name="Python playground",
        pygments_language="Python",
        url_prefix="https://python.example.com",
    )
    return {"playground_id": new_playground_id}
@openapi_param_value_generator(["/users/{user_id}:delete"])
def deactivate_user() -> Dict[str, object]:
    """Create a throwaway user for the deactivate-user example to delete."""
    new_user = do_create_user(
        email="testuser@zulip.com",
        password=None,
        full_name="test_user",
        realm=get_realm("zulip"),
        acting_user=None,
    )
    return {"user_id": new_user.id}
@openapi_param_value_generator(["/users/me:delete"])
def deactivate_own_user() -> Dict[str, object]:
    """Create a disposable user and switch the cURL example's credentials
    to it, so running DELETE /users/me deactivates this test user rather
    than the primary example account."""
    test_user_email = "delete-test@zulip.com"
    # role=200 is passed straight through to do_create_user; presumably an
    # owner/admin-level role constant -- TODO confirm against UserProfile roles.
    deactivate_test_user = do_create_user(
        test_user_email,
        "secret",
        get_realm("zulip"),
        "Mr. Delete",
        role=200,
        acting_user=None,
    )
    realm = get_realm("zulip")
    test_user = get_user(test_user_email, realm)
    test_user_api_key = get_api_key(test_user)
    # change authentication line to allow test_client to delete itself.
    AUTHENTICATION_LINE[0] = f"{deactivate_test_user.email}:{test_user_api_key}"
    return {}
| punchagan/zulip | zerver/openapi/curl_param_value_generators.py | Python | apache-2.0 | 11,278 |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
goruntime "runtime"
"sort"
"strconv"
"testing"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
"k8s.io/kubernetes/pkg/kubelet/pleg"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/rand"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/term"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume"
_ "k8s.io/kubernetes/pkg/volume/host_path"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
func init() {
	// Make panics in kubelet goroutines crash the test process instead of
	// being swallowed, so bugs surface immediately.
	utilruntime.ReallyCrash = true
}
const (
	testKubeletHostname = "127.0.0.1"

	// Resource reservation configured on the test kubelet.
	testReservationCPU    = "200m"
	testReservationMemory = "100M"

	// Upper bound on the number of tags generated per synthetic test image.
	maxImageTagsForTest = 3

	// TODO(harry) any global place for these two?
	// Reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
	minImgSize int64 = 23 * 1024 * 1024
	maxImgSize int64 = 1000 * 1024 * 1024
)

// TestKubelet bundles a Kubelet under test together with the fakes and
// mocks wired into it, so tests can both drive the kubelet and
// program/inspect its dependencies.
type TestKubelet struct {
	kubelet          *Kubelet
	fakeRuntime      *containertest.FakeRuntime
	fakeCadvisor     *cadvisortest.Mock
	fakeKubeClient   *fake.Clientset
	fakeMirrorClient *podtest.FakeMirrorClient
	fakeClock        *util.FakeClock
	mounter          mount.Interface
	volumePlugin     *volumetest.FakeVolumePlugin
}
// newTestKubelet returns a test kubelet whose fake runtime reports a
// fixed list of two images.
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
	images := []kubecontainer.Image{
		{
			ID:       "abc",
			RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
			Size:     123,
		},
		{
			ID:       "efg",
			RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
			Size:     456,
		},
	}
	return newTestKubeletWithImageList(t, images, controllerAttachDetachEnabled)
}
// generateTestingImageList returns a randomly generated image list of the
// given length together with the corresponding expected api.ContainerImage
// list: sorted by size and truncated to maxImagesInNodeStatus entries.
func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) {
	// imageList is randomly generated image list
	var imageList []kubecontainer.Image
	for ; count > 0; count-- {
		imageItem := kubecontainer.Image{
			ID:       string(util.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		}
		imageList = append(imageList, imageItem)
	}

	// expectedImageList is generated by imageList according to size and maxImagesInNodeStatus
	// 1. sort the imageList by size
	sort.Sort(byImageSize(imageList))
	// 2. convert sorted imageList to api.ContainerImage list
	var expectedImageList []api.ContainerImage
	for _, kubeImage := range imageList {
		apiImage := api.ContainerImage{
			Names:     kubeImage.RepoTags,
			SizeBytes: kubeImage.Size,
		}
		expectedImageList = append(expectedImageList, apiImage)
	}
	// 3. only returns the top maxImagesInNodeStatus images in expectedImageList
	// NOTE(review): this slice will panic if count < maxImagesInNodeStatus;
	// callers appear expected to pass count >= that limit -- confirm.
	return imageList, expectedImageList[0:maxImagesInNodeStatus]
}
// generateImageTags returns between 1 and maxImageTagsForTest synthetic
// image tags, ordered from the highest version number down.
func generateImageTags() []string {
	n := rand.IntnRange(1, maxImageTagsForTest+1)
	tags := make([]string, 0, n)
	for v := n; v > 0; v-- {
		tags = append(tags, "gcr.io/google_containers:v"+strconv.Itoa(v))
	}
	return tags
}
// newTestKubeletWithImageList constructs a Kubelet wired entirely to
// fakes/mocks/stubs, suitable for unit tests, and returns it wrapped in a
// TestKubelet alongside those fakes. The fake runtime reports imageList;
// controllerAttachDetachEnabled selects whether the volume manager expects
// the attach/detach controller to manage volume attachment.
func newTestKubeletWithImageList(
	t *testing.T,
	imageList []kubecontainer.Image,
	controllerAttachDetachEnabled bool) *TestKubelet {
	fakeRuntime := &containertest.FakeRuntime{}
	fakeRuntime.RuntimeType = "test"
	fakeRuntime.VersionInfo = "1.5.0"
	fakeRuntime.ImageList = imageList
	fakeRecorder := &record.FakeRecorder{}
	fakeKubeClient := &fake.Clientset{}
	kubelet := &Kubelet{}
	kubelet.recorder = fakeRecorder
	kubelet.kubeClient = fakeKubeClient
	kubelet.os = &containertest.FakeOS{}
	kubelet.hostname = testKubeletHostname
	kubelet.nodeName = testKubeletHostname
	kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
	kubelet.runtimeState.setNetworkState(nil)
	kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR)
	// Give each test kubelet its own temporary root directory.
	if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	} else {
		kubelet.rootDirectory = tempDir
	}
	if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
		t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
	}
	// Report all pod sources as ready so housekeeping is not blocked.
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
	kubelet.masterServiceNamespace = api.NamespaceDefault
	kubelet.serviceLister = testServiceLister{}
	kubelet.nodeLister = testNodeLister{}
	kubelet.nodeInfo = testNodeInfo{}
	kubelet.recorder = fakeRecorder
	if err := kubelet.setupDataDirs(); err != nil {
		t.Fatalf("can't initialize kubelet data dirs: %v", err)
	}
	kubelet.daemonEndpoints = &api.NodeDaemonEndpoints{}
	mockCadvisor := &cadvisortest.Mock{}
	kubelet.cadvisor = mockCadvisor
	fakeMirrorClient := podtest.NewFakeMirrorClient()
	kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient)
	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)
	kubelet.containerRefManager = kubecontainer.NewRefManager()
	diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
	if err != nil {
		t.Fatalf("can't initialize disk space manager: %v", err)
	}
	kubelet.diskSpaceManager = diskSpaceManager
	kubelet.containerRuntime = fakeRuntime
	kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
	kubelet.reasonCache = NewReasonCache()
	kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
	// Route pod syncs through a fake worker that invokes syncPod directly.
	kubelet.podWorkers = &fakePodWorkers{
		syncPodFn: kubelet.syncPod,
		cache:     kubelet.podCache,
		t:         t,
	}
	kubelet.probeManager = probetest.FakeManager{}
	kubelet.livenessManager = proberesults.NewManager()
	kubelet.containerManager = cm.NewStubContainerManager()
	fakeNodeRef := &api.ObjectReference{
		Kind:      "Node",
		Name:      testKubeletHostname,
		UID:       types.UID(testKubeletHostname),
		Namespace: "",
	}
	fakeImageGCPolicy := ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
	}
	// NOTE(review): the error returned by newImageManager is not checked here.
	kubelet.imageManager, err = newImageManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
	// A fake clock lets tests control backoff and work-queue timing.
	fakeClock := util.NewFakeClock(time.Now())
	kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
	kubelet.backOff.Clock = fakeClock
	kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
	kubelet.resyncInterval = 10 * time.Second
	kubelet.reservation = kubetypes.Reservation{
		Kubernetes: api.ResourceList{
			api.ResourceCPU:    resource.MustParse(testReservationCPU),
			api.ResourceMemory: resource.MustParse(testReservationMemory),
		},
	}
	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
	// Relist period does not affect the tests.
	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, util.RealClock{})
	kubelet.clock = fakeClock
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
	volumeStatsAggPeriod := time.Second * 10
	kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
	nodeRef := &api.ObjectReference{
		Kind:      "Node",
		Name:      kubelet.nodeName,
		UID:       types.UID(kubelet.nodeName),
		Namespace: "",
	}
	// setup eviction manager
	evictionManager, evictionAdmitHandler, err := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers), fakeRecorder, nodeRef, kubelet.clock)
	if err != nil {
		t.Fatalf("failed to initialize eviction manager: %v", err)
	}
	kubelet.evictionManager = evictionManager
	kubelet.AddPodAdmitHandler(evictionAdmitHandler)
	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	kubelet.volumePluginMgr, err =
		NewInitializedVolumePluginMgr(kubelet, []volume.VolumePlugin{plug})
	if err != nil {
		t.Fatalf("failed to initialize VolumePluginMgr: %v", err)
	}
	kubelet.mounter = &mount.FakeMounter{}
	kubelet.volumeManager, err = kubeletvolume.NewVolumeManager(
		controllerAttachDetachEnabled,
		kubelet.hostname,
		kubelet.podManager,
		fakeKubeClient,
		kubelet.volumePluginMgr,
		fakeRuntime,
		kubelet.mounter)
	if err != nil {
		t.Fatalf("failed to initialize volume manager: %v", err)
	}
	// enable active deadline handler
	activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
	if err != nil {
		t.Fatalf("can't initialize active deadline handler: %v", err)
	}
	kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
	kubelet.AddPodSyncHandler(activeDeadlineHandler)
	return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
}
// newTestPods returns count host-network pods named pod0..pod{count-1},
// each with a unique UID.
func newTestPods(count int) []*api.Pod {
	pods := make([]*api.Pod, count)
	for i := 0; i < count; i++ {
		pods[i] = &api.Pod{
			Spec: api.PodSpec{
				SecurityContext: &api.PodSecurityContext{
					HostNetwork: true,
				},
			},
			ObjectMeta: api.ObjectMeta{
				// types.UID(10000 + i) would convert the integer to a single
				// Unicode code point rather than its decimal representation
				// (go vet's stringintconv check flags this); format it instead.
				UID:  types.UID(fmt.Sprintf("%d", 10000+i)),
				Name: fmt.Sprintf("pod%d", i),
			},
		}
	}
	return pods
}
// emptyPodUIDs is a nil map used where a sync-pod-type map is required
// but no pods are involved.
var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
// TestSyncLoopTimeUpdate verifies that each syncLoopIteration records a
// fresh loop-entry timestamp.
func TestSyncLoopTimeUpdate(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	kubelet := testKubelet.kubelet

	// Before any iteration runs, the recorded loop time must be zero.
	loopTime1 := kubelet.LatestLoopEntryTime()
	if !loopTime1.IsZero() {
		t.Errorf("Unexpected sync loop time: %s, expected 0", loopTime1)
	}

	// Start sync ticker.
	syncCh := make(chan time.Time, 1)
	housekeepingCh := make(chan time.Time, 1)
	plegCh := make(chan *pleg.PodLifecycleEvent)
	syncCh <- time.Now()
	kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
	loopTime2 := kubelet.LatestLoopEntryTime()
	if loopTime2.IsZero() {
		t.Errorf("Unexpected sync loop time: 0, expected non-zero value.")
	}

	// A second iteration must advance the timestamp past the initial zero.
	syncCh <- time.Now()
	kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
	loopTime3 := kubelet.LatestLoopEntryTime()
	if !loopTime3.After(loopTime1) {
		t.Errorf("Sync Loop Time was not updated correctly. Second update timestamp should be greater than first update timestamp")
	}
}
// TestSyncLoopAbort verifies that syncLoopIteration returns false (and
// syncLoop exits) once the pod-update channel has been closed.
func TestSyncLoopAbort(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	kubelet := testKubelet.kubelet
	kubelet.runtimeState.setRuntimeSync(time.Now())
	// The syncLoop waits on time.After(resyncInterval), set it really big so that we don't race for
	// the channel close
	kubelet.resyncInterval = time.Second * 30
	ch := make(chan kubetypes.PodUpdate)
	close(ch)

	// sanity check (also prevent this test from hanging in the next step)
	ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
	if ok {
		t.Fatalf("expected syncLoopIteration to return !ok since update chan was closed")
	}

	// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
	kubelet.syncLoop(ch, kubelet)
}
// TestSyncPodsStartPod verifies that syncing a newly added pod starts it
// in the (fake) container runtime.
func TestSyncPodsStartPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	pods := []*api.Pod{
		podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
			Containers: []api.Container{
				{Name: "bar"},
			},
		}),
	}
	kubelet.podManager.SetPods(pods)
	kubelet.HandlePodSyncs(pods)
	// The fake runtime records which pods were started.
	fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}
// TestSyncPodsDeletesWhenSourcesAreReady verifies that pod cleanup only
// kills unknown runtime pods once all pod config sources report ready.
func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
	ready := false

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	fakeRuntime := testKubelet.fakeRuntime
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	// Source readiness is controlled by the captured "ready" flag.
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })

	// The runtime reports a pod that the pod manager does not know about.
	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      "foo",
			Namespace: "new",
			Containers: []*kubecontainer.Container{
				{Name: "bar"},
			},
		}},
	}
	kubelet.HandlePodCleanups()
	// Sources are not ready yet. Don't remove any pods.
	fakeRuntime.AssertKilledPods([]string{})

	ready = true
	kubelet.HandlePodCleanups()

	// Sources are ready. Remove unwanted pods.
	fakeRuntime.AssertKilledPods([]string{"12345678"})
}
// TestVolumeAttachAndMountControllerDisabled verifies that, with the
// attach/detach controller disabled, the kubelet's volume manager itself
// attaches, waits for, mounts, and sets up a pod's volume (each exactly once).
func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet

	pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
		Volumes: []api.Volume{
			{
				Name: "vol1",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(stopCh)
	defer func() {
		close(stopCh)
	}()

	kubelet.podManager.SetPods([]*api.Pod{pod})
	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}

	// The pod's single volume should now be reported as mounted.
	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	if len(expectedPodVolumes) != len(podVolumes) {
		t.Errorf("Unexpected volumes. Expected %#v got %#v.  Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
	}
	for _, name := range expectedPodVolumes {
		if _, ok := podVolumes[name]; !ok {
			t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
		}
	}
	// With the controller disabled, the kubelet performs the attach itself.
	if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
		t.Errorf("Expected plugin NewAttacher to be called at least once")
	}
	err = volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
}
// TestVolumeUnmountAndDetachControllerDisabled verifies the full volume
// lifecycle with the attach/detach controller disabled: the kubelet
// attaches and mounts a pod's volume, then unmounts and detaches it once
// the pod is removed.
func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet

	pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
		Volumes: []api.Volume{
			{
				Name: "vol1",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(stopCh)
	defer func() {
		close(stopCh)
	}()

	// Add pod
	kubelet.podManager.SetPods([]*api.Pod{pod})

	// Verify volumes attached
	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	if len(expectedPodVolumes) != len(podVolumes) {
		t.Errorf("Unexpected volumes. Expected %#v got %#v.  Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
	}
	for _, name := range expectedPodVolumes {
		if _, ok := podVolumes[name]; !ok {
			t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
		}
	}
	if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
		t.Errorf("Expected plugin NewAttacher to be called at least once")
	}
	err = volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}

	// Remove pod
	kubelet.podManager.SetPods([]*api.Pod{})

	err = waitForVolumeUnmount(kubelet.volumeManager, pod)
	if err != nil {
		t.Error(err)
	}

	// Verify volumes unmounted
	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	if len(podVolumes) != 0 {
		t.Errorf("Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
	}

	err = volumetest.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}

	// Verify volumes detached and no longer reported as in use
	err = waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)
	if err != nil {
		t.Error(err)
	}
	if testKubelet.volumePlugin.GetNewDetacherCallCount() < 1 {
		t.Errorf("Expected plugin NewDetacher to be called at least once")
	}
	err = volumetest.VerifyDetachCallCount(
		1 /* expectedDetachCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
}
// TestVolumeAttachAndMountControllerEnabled verifies that, with the
// attach/detach controller enabled, the kubelet waits for the controller
// to report the volume attached (via node status) and performs zero
// attach calls itself, while still mounting and setting up the volume.
func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
	testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	// Fake node object already reports fake/vol1 as attached, simulating
	// the attach/detach controller having done the attach.
	kubeClient.AddReactor("get", "nodes",
		func(action core.Action) (bool, runtime.Object, error) {
			return true, &api.Node{
				ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
				Status: api.NodeStatus{
					VolumesAttached: []api.AttachedVolume{
						{
							Name:       "fake/vol1",
							DevicePath: "fake/path",
						},
					}},
				Spec: api.NodeSpec{ExternalID: testKubeletHostname},
			}, nil
		})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})

	pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
		Volumes: []api.Volume{
			{
				Name: "vol1",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(stopCh)
	defer func() {
		close(stopCh)
	}()

	kubelet.podManager.SetPods([]*api.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		api.UniqueVolumeName("fake/vol1"),
		stopCh,
		kubelet.volumeManager)

	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	if len(expectedPodVolumes) != len(podVolumes) {
		t.Errorf("Unexpected volumes. Expected %#v got %#v.  Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
	}
	for _, name := range expectedPodVolumes {
		if _, ok := podVolumes[name]; !ok {
			t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
		}
	}
	if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
		t.Errorf("Expected plugin NewAttacher to be called at least once")
	}
	err = volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	// The controller owns attach, so the plugin must see zero attach calls.
	err = volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
	err = volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
	if err != nil {
		t.Error(err)
	}
}
// TestVolumeUnmountAndDetachControllerEnabled verifies the full volume
// lifecycle when the attach/detach controller is enabled: the pod's volume
// is mounted while the pod exists, then unmounted (and eventually reported
// as detached / no longer in use) after the pod is removed.
func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
    testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    kubeClient := testKubelet.fakeKubeClient
    // The fake node reports the volume as already attached by the
    // controller, with a fake device path the kubelet will mount.
    kubeClient.AddReactor("get", "nodes",
        func(action core.Action) (bool, runtime.Object, error) {
            return true, &api.Node{
                ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
                Status: api.NodeStatus{
                    VolumesAttached: []api.AttachedVolume{
                        {
                            Name:       "fake/vol1",
                            DevicePath: "fake/path",
                        },
                    }},
                Spec: api.NodeSpec{ExternalID: testKubeletHostname},
            }, nil
        })
    // Any other API call is unexpected in this test and fails loudly.
    kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, fmt.Errorf("no reaction implemented for %s", action)
    })
    pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
        Volumes: []api.Volume{
            {
                Name: "vol1",
                VolumeSource: api.VolumeSource{
                    GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                        PDName: "fake-device",
                    },
                },
            },
        },
    })
    stopCh := make(chan struct{})
    go kubelet.volumeManager.Run(stopCh)
    defer func() {
        close(stopCh)
    }()
    // Add pod
    kubelet.podManager.SetPods([]*api.Pod{pod})
    // Fake node status update
    go simulateVolumeInUseUpdate(
        api.UniqueVolumeName("fake/vol1"),
        stopCh,
        kubelet.volumeManager)
    // Verify volumes attached
    err := kubelet.volumeManager.WaitForAttachAndMount(pod)
    if err != nil {
        t.Errorf("Expected success: %v", err)
    }
    podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
        volumehelper.GetUniquePodName(pod))
    expectedPodVolumes := []string{"vol1"}
    if len(expectedPodVolumes) != len(podVolumes) {
        t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
    }
    for _, name := range expectedPodVolumes {
        if _, ok := podVolumes[name]; !ok {
            t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
        }
    }
    if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
        t.Errorf("Expected plugin NewAttacher to be called at least once")
    }
    // With the controller doing attach/detach, the kubelet only waits for
    // attachment and mounts the device — it must issue zero Attach calls.
    err = volumetest.VerifyWaitForAttachCallCount(
        1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
    if err != nil {
        t.Error(err)
    }
    err = volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)
    if err != nil {
        t.Error(err)
    }
    err = volumetest.VerifyMountDeviceCallCount(
        1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
    if err != nil {
        t.Error(err)
    }
    err = volumetest.VerifySetUpCallCount(
        1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
    if err != nil {
        t.Error(err)
    }
    // Remove pod
    kubelet.podManager.SetPods([]*api.Pod{})
    err = waitForVolumeUnmount(kubelet.volumeManager, pod)
    if err != nil {
        t.Error(err)
    }
    // Verify volumes unmounted
    podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
        volumehelper.GetUniquePodName(pod))
    if len(podVolumes) != 0 {
        t.Errorf("Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
    }
    err = volumetest.VerifyTearDownCallCount(
        1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)
    if err != nil {
        t.Error(err)
    }
    // Verify volumes detached and no longer reported as in use
    err = waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)
    if err != nil {
        t.Error(err)
    }
    // Detach itself is the controller's job: the plugin may construct a
    // detacher but must never be asked to Detach by the kubelet.
    if testKubelet.volumePlugin.GetNewDetacherCallCount() < 1 {
        t.Errorf("Expected plugin NewDetacher to be called at least once")
    }
    err = volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin)
    if err != nil {
        t.Error(err)
    }
}
// TestPodVolumesExist checks that once the volume manager has attached and
// mounted each pod's volumes, podVolumesExist reports true for every pod UID.
func TestPodVolumesExist(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    // Three pods, each with one distinct GCE PD volume.
    pods := []*api.Pod{
        {
            ObjectMeta: api.ObjectMeta{
                Name: "pod1",
                UID:  "pod1uid",
            },
            Spec: api.PodSpec{
                Volumes: []api.Volume{
                    {
                        Name: "vol1",
                        VolumeSource: api.VolumeSource{
                            GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                                PDName: "fake-device1",
                            },
                        },
                    },
                },
            },
        },
        {
            ObjectMeta: api.ObjectMeta{
                Name: "pod2",
                UID:  "pod2uid",
            },
            Spec: api.PodSpec{
                Volumes: []api.Volume{
                    {
                        Name: "vol2",
                        VolumeSource: api.VolumeSource{
                            GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                                PDName: "fake-device2",
                            },
                        },
                    },
                },
            },
        },
        {
            ObjectMeta: api.ObjectMeta{
                Name: "pod3",
                UID:  "pod3uid",
            },
            Spec: api.PodSpec{
                Volumes: []api.Volume{
                    {
                        Name: "vol3",
                        VolumeSource: api.VolumeSource{
                            GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                                PDName: "fake-device3",
                            },
                        },
                    },
                },
            },
        },
    }
    stopCh := make(chan struct{})
    go kubelet.volumeManager.Run(stopCh)
    defer func() {
        close(stopCh)
    }()
    kubelet.podManager.SetPods(pods)
    // Block until every pod's volumes are attached and mounted.
    for _, pod := range pods {
        err := kubelet.volumeManager.WaitForAttachAndMount(pod)
        if err != nil {
            t.Errorf("Expected success: %v", err)
        }
    }
    for _, pod := range pods {
        podVolumesExist := kubelet.podVolumesExist(pod.UID)
        if !podVolumesExist {
            t.Errorf(
                "Expected to find volumes for pod %q, but podVolumesExist returned false",
                pod.UID)
        }
    }
}
// stubVolume is a minimal volume implementation used by mount tests: it
// reports a fixed path and performs no work on SetUp/SetUpAt.
type stubVolume struct {
    path string
    volume.MetricsNil
}
// GetPath reports the fixed path this stub was configured with.
func (v *stubVolume) GetPath() string {
    return v.path
}
// GetAttributes returns zero-value attributes; the stub has none to report.
func (v *stubVolume) GetAttributes() volume.Attributes {
    var attrs volume.Attributes
    return attrs
}
// SetUp is a no-op; the stub performs no mount work and always succeeds.
func (v *stubVolume) SetUp(fsGroup *int64) error {
    return nil
}
// SetUpAt is a no-op; the stub ignores the target dir and always succeeds.
func (v *stubVolume) SetUpAt(dir string, fsGroup *int64) error {
    return nil
}
// TestMakeVolumeMounts verifies that makeMounts translates a container's
// VolumeMounts plus the pod's mounted-volume map into the expected runtime
// mounts, preserving host paths and read-only flags.
func TestMakeVolumeMounts(t *testing.T) {
    container := api.Container{
        VolumeMounts: []api.VolumeMount{
            {
                MountPath: "/etc/hosts",
                Name:      "disk",
                ReadOnly:  false,
            },
            {
                MountPath: "/mnt/path3",
                Name:      "disk",
                ReadOnly:  true,
            },
            {
                MountPath: "/mnt/path4",
                Name:      "disk4",
                ReadOnly:  false,
            },
            {
                MountPath: "/mnt/path5",
                Name:      "disk5",
                ReadOnly:  false,
            },
        },
    }
    // Host paths backing each named volume referenced above.
    podVolumes := kubecontainer.VolumeMap{
        "disk":  kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}},
        "disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/host"}},
        "disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}},
    }
    pod := api.Pod{
        Spec: api.PodSpec{
            SecurityContext: &api.PodSecurityContext{
                HostNetwork: true,
            },
        },
    }
    mounts, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes)
    // NOTE(review): positional struct initialization — the fields appear to
    // be (name, container path, host path, read-only, ...); confirm against
    // the kubecontainer.Mount definition before reordering these literals.
    expectedMounts := []kubecontainer.Mount{
        {
            "disk",
            "/etc/hosts",
            "/mnt/disk",
            false,
            false,
        },
        {
            "disk",
            "/mnt/path3",
            "/mnt/disk",
            true,
            false,
        },
        {
            "disk4",
            "/mnt/path4",
            "/mnt/host",
            false,
            false,
        },
        {
            "disk5",
            "/mnt/path5",
            "/var/lib/kubelet/podID/volumes/empty/disk5",
            false,
            false,
        },
    }
    if !reflect.DeepEqual(mounts, expectedMounts) {
        t.Errorf("Unexpected mounts: Expected %#v got %#v. Container was: %#v", expectedMounts, mounts, container)
    }
}
// fakeContainerCommandRunner records the arguments of the most recent
// ExecInContainer / PortForward invocation so tests can assert on them.
// ExecInContainer returns the preset error E; PortForward always succeeds.
type fakeContainerCommandRunner struct {
    Cmd    []string
    ID     kubecontainer.ContainerID
    PodID  types.UID
    E      error
    Stdin  io.Reader
    Stdout io.WriteCloser
    Stderr io.WriteCloser
    TTY    bool
    Port   uint16
    Stream io.ReadWriteCloser
}
// ExecInContainer records every argument it is handed so the test can
// assert on them, then returns the preconfigured error E.
func (r *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan term.Size) error {
    r.ID, r.Cmd = id, cmd
    r.Stdin = in
    r.Stdout, r.Stderr = out, err
    r.TTY = tty
    return r.E
}
// PortForward captures the forwarded pod ID, port, and stream for later
// assertions and always reports success.
func (r *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
    r.PodID, r.Port, r.Stream = pod.ID, port, stream
    return nil
}
// TestRunInContainerNoSuchPod ensures RunInContainer returns an error and
// no output when the target pod is not present in the runtime's pod list.
func TestRunInContainerNoSuchPod(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}

    const (
        podName       = "podFoo"
        podNamespace  = "nsFoo"
        containerName = "containerFoo"
    )
    fullName := kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}})
    output, err := kubelet.RunInContainer(fullName, "", containerName, []string{"ls"})
    if output != nil {
        t.Errorf("unexpected non-nil command: %v", output)
    }
    if err == nil {
        t.Error("unexpected non-error")
    }
}
// TestRunInContainer verifies that RunInContainer resolves the named
// container in the pod and forwards the command to the runner unchanged.
func TestRunInContainer(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    fakeRuntime := testKubelet.fakeRuntime
    fakeCommandRunner := fakeContainerCommandRunner{}
    kubelet.runner = &fakeCommandRunner
    containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
    fakeRuntime.PodList = []*containertest.FakePod{
        {Pod: &kubecontainer.Pod{
            ID:        "12345678",
            Name:      "podFoo",
            Namespace: "nsFoo",
            Containers: []*kubecontainer.Container{
                {Name: "containerFoo",
                    ID: containerID,
                },
            },
        }},
    }
    cmd := []string{"ls"}
    _, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
    // Check the error first: if the call failed, the recorded ID/Cmd
    // assertions below would only produce confusing secondary failures.
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if fakeCommandRunner.ID != containerID {
        t.Errorf("unexpected Name: %s", fakeCommandRunner.ID)
    }
    if !reflect.DeepEqual(fakeCommandRunner.Cmd, cmd) {
        t.Errorf("unexpected command: %s", fakeCommandRunner.Cmd)
    }
}
// TestDNSConfigurationParams exercises DNS option generation for pods with
// DNSClusterFirst vs DNSDefault policies, first without and then with a
// host resolver config file set on the kubelet.
func TestDNSConfigurationParams(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    clusterNS := "203.0.113.1"
    kubelet.clusterDomain = "kubernetes.io"
    kubelet.clusterDNS = net.ParseIP(clusterNS)
    // pods[0] uses cluster-first DNS; pods[1] uses the node's default DNS.
    pods := newTestPods(2)
    pods[0].Spec.DNSPolicy = api.DNSClusterFirst
    pods[1].Spec.DNSPolicy = api.DNSDefault
    options := make([]*kubecontainer.RunContainerOptions, 2)
    for i, pod := range pods {
        var err error
        options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "")
        if err != nil {
            t.Fatalf("failed to generate container options: %v", err)
        }
    }
    // ClusterFirst pod should get the cluster DNS server and search path.
    if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS)
    }
    if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
    }
    // DNSDefault pod falls back to localhost resolver when no resolver
    // config is set on the kubelet.
    if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" {
        t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS)
    }
    if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." {
        t.Errorf("expected search \".\", got %+v", options[1].DNSSearch)
    }
    // With a resolver config set, cluster search domains must be prepended
    // ahead of the host's search domains for the ClusterFirst pod.
    kubelet.resolverConfig = "/etc/resolv.conf"
    for i, pod := range pods {
        var err error
        options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "")
        if err != nil {
            t.Fatalf("failed to generate container options: %v", err)
        }
    }
    t.Logf("nameservers %+v", options[1].DNS)
    if len(options[0].DNS) != 1 {
        t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS)
    } else if options[0].DNS[0] != clusterNS {
        t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
    }
    if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 {
        t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
    } else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
        t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
    }
}
// testServiceLister serves a fixed slice of services to the kubelet's
// service-environment-variable generation code.
type testServiceLister struct {
    services []api.Service
}
// List returns the lister's fixed services wrapped in a ServiceList; it
// never fails.
func (ls testServiceLister) List() (api.ServiceList, error) {
    list := api.ServiceList{}
    list.Items = ls.services
    return list, nil
}
// testNodeLister serves a fixed slice of nodes via List.
type testNodeLister struct {
    nodes []api.Node
}
// testNodeInfo resolves a node by name from a fixed slice via GetNodeInfo.
type testNodeInfo struct {
    nodes []api.Node
}
// GetNodeInfo returns a pointer to a copy of the node whose Name matches
// id, or an error if no such node is registered with this fake.
func (ls testNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
    for _, node := range ls.nodes {
        if node.Name == id {
            // &node points at the per-iteration copy, so callers cannot
            // mutate the lister's backing slice through the result.
            return &node, nil
        }
    }
    // Error strings start lowercase per Go convention (staticcheck ST1005).
    return nil, fmt.Errorf("node with name: %s does not exist", id)
}
// List returns the lister's fixed nodes wrapped in a NodeList; it never
// fails.
func (ls testNodeLister) List() (api.NodeList, error) {
    list := api.NodeList{}
    list.Items = ls.nodes
    return list, nil
}
// envs adapts a slice of kubecontainer.EnvVar to sort.Interface so test
// expectations can be compared order-insensitively (sorted by Name).
type envs []kubecontainer.EnvVar

// Len reports the number of environment variables.
func (e envs) Len() int {
    return len(e)
}

// Swap exchanges the elements at indices i and j.
func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Less orders variables lexicographically by Name.
func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
// buildService constructs a minimal api.Service with one port for the
// environment-variable generation tests.
func buildService(name, namespace, clusterIP, protocol string, port int) api.Service {
    singlePort := api.ServicePort{
        Protocol: api.Protocol(protocol),
        Port:     int32(port),
    }
    svc := api.Service{
        ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
    }
    svc.Spec = api.ServiceSpec{
        Ports:     []api.ServicePort{singlePort},
        ClusterIP: clusterIP,
    }
    return svc
}
// TestMakeEnvironmentVariables is a table-driven test of the kubelet's
// environment-variable generation: service-link variables (docker-link
// style), the master service variables, downward-API field refs, and
// $(VAR) expansion semantics (including out-of-order and undefined refs,
// which are left unexpanded).
func TestMakeEnvironmentVariables(t *testing.T) {
    // Fixture services across several namespaces; headless ("None") and
    // IP-less entries must not generate variables.
    services := []api.Service{
        buildService("kubernetes", api.NamespaceDefault, "1.2.3.1", "TCP", 8081),
        buildService("test", "test1", "1.2.3.3", "TCP", 8083),
        buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084),
        buildService("test", "test2", "1.2.3.5", "TCP", 8085),
        buildService("test", "test2", "None", "TCP", 8085),
        buildService("test", "test2", "", "TCP", 8085),
        buildService("kubernetes", "kubernetes", "1.2.3.6", "TCP", 8086),
        buildService("not-special", "kubernetes", "1.2.3.8", "TCP", 8088),
        buildService("not-special", "kubernetes", "None", "TCP", 8088),
        buildService("not-special", "kubernetes", "", "TCP", 8088),
    }
    testCases := []struct {
        name            string                 // the name of the test case
        ns              string                 // the namespace to generate environment for
        container       *api.Container         // the container to use
        masterServiceNs string                 // the namespace to read master service info from
        nilLister       bool                   // whether the lister should be nil
        expectedEnvs    []kubecontainer.EnvVar // a set of expected environment vars
    }{
        {
            name: "api server = Y, kubelet = Y",
            ns:   "test1",
            container: &api.Container{
                Env: []api.EnvVar{
                    {Name: "FOO", Value: "BAR"},
                    {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
                    {Name: "TEST_SERVICE_PORT", Value: "8083"},
                    {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
                    {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
                    {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
                    {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
                    {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
                },
            },
            masterServiceNs: api.NamespaceDefault,
            nilLister:       false,
            expectedEnvs: []kubecontainer.EnvVar{
                {Name: "FOO", Value: "BAR"},
                {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
                {Name: "TEST_SERVICE_PORT", Value: "8083"},
                {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
                {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
                {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
                {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
                {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
                {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
                {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
                {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
                {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
                {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
                {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
                {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
            },
        },
        {
            name: "api server = Y, kubelet = N",
            ns:   "test1",
            container: &api.Container{
                Env: []api.EnvVar{
                    {Name: "FOO", Value: "BAR"},
                    {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
                    {Name: "TEST_SERVICE_PORT", Value: "8083"},
                    {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
                    {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
                    {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
                    {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
                    {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
                },
            },
            masterServiceNs: api.NamespaceDefault,
            nilLister:       true,
            expectedEnvs: []kubecontainer.EnvVar{
                {Name: "FOO", Value: "BAR"},
                {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
                {Name: "TEST_SERVICE_PORT", Value: "8083"},
                {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
                {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
                {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
                {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
                {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
            },
        },
        {
            name: "api server = N; kubelet = Y",
            ns:   "test1",
            container: &api.Container{
                Env: []api.EnvVar{
                    {Name: "FOO", Value: "BAZ"},
                },
            },
            masterServiceNs: api.NamespaceDefault,
            nilLister:       false,
            expectedEnvs: []kubecontainer.EnvVar{
                {Name: "FOO", Value: "BAZ"},
                {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
                {Name: "TEST_SERVICE_PORT", Value: "8083"},
                {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
                {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
                {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
                {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
                {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
                {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
                {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
                {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
                {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
                {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
                {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
                {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
            },
        },
        {
            name: "master service in pod ns",
            ns:   "test2",
            container: &api.Container{
                Env: []api.EnvVar{
                    {Name: "FOO", Value: "ZAP"},
                },
            },
            masterServiceNs: "kubernetes",
            nilLister:       false,
            expectedEnvs: []kubecontainer.EnvVar{
                {Name: "FOO", Value: "ZAP"},
                {Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
                {Name: "TEST_SERVICE_PORT", Value: "8085"},
                {Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
                {Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
                {Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
                {Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
                {Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
                {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
                {Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
                {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
                {Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
                {Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
                {Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
                {Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
            },
        },
        {
            name:            "pod in master service ns",
            ns:              "kubernetes",
            container:       &api.Container{},
            masterServiceNs: "kubernetes",
            nilLister:       false,
            expectedEnvs: []kubecontainer.EnvVar{
                {Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
                {Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
                {Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
                {Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
                {Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
                {Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
                {Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
                {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"},
                {Name: "KUBERNETES_SERVICE_PORT", Value: "8086"},
                {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"},
                {Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"},
                {Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"},
                {Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"},
                {Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"},
            },
        },
        {
            name: "downward api pod",
            ns:   "downward-api",
            container: &api.Container{
                Env: []api.EnvVar{
                    {
                        Name: "POD_NAME",
                        ValueFrom: &api.EnvVarSource{
                            FieldRef: &api.ObjectFieldSelector{
                                APIVersion: testapi.Default.GroupVersion().String(),
                                FieldPath:  "metadata.name",
                            },
                        },
                    },
                    {
                        Name: "POD_NAMESPACE",
                        ValueFrom: &api.EnvVarSource{
                            FieldRef: &api.ObjectFieldSelector{
                                APIVersion: testapi.Default.GroupVersion().String(),
                                FieldPath:  "metadata.namespace",
                            },
                        },
                    },
                    {
                        Name: "POD_IP",
                        ValueFrom: &api.EnvVarSource{
                            FieldRef: &api.ObjectFieldSelector{
                                APIVersion: testapi.Default.GroupVersion().String(),
                                FieldPath:  "status.podIP",
                            },
                        },
                    },
                },
            },
            masterServiceNs: "nothing",
            nilLister:       true,
            expectedEnvs: []kubecontainer.EnvVar{
                {Name: "POD_NAME", Value: "dapi-test-pod-name"},
                {Name: "POD_NAMESPACE", Value: "downward-api"},
                {Name: "POD_IP", Value: "1.2.3.4"},
            },
        },
        {
            name: "env expansion",
            ns:   "test1",
            container: &api.Container{
                Env: []api.EnvVar{
                    {
                        Name:  "TEST_LITERAL",
                        Value: "test-test-test",
                    },
                    {
                        Name: "POD_NAME",
                        ValueFrom: &api.EnvVarSource{
                            FieldRef: &api.ObjectFieldSelector{
                                APIVersion: testapi.Default.GroupVersion().String(),
                                FieldPath:  "metadata.name",
                            },
                        },
                    },
                    {
                        Name:  "OUT_OF_ORDER_TEST",
                        Value: "$(OUT_OF_ORDER_TARGET)",
                    },
                    {
                        Name:  "OUT_OF_ORDER_TARGET",
                        Value: "FOO",
                    },
                    {
                        Name: "EMPTY_VAR",
                    },
                    {
                        Name:  "EMPTY_TEST",
                        Value: "foo-$(EMPTY_VAR)",
                    },
                    {
                        Name:  "POD_NAME_TEST2",
                        Value: "test2-$(POD_NAME)",
                    },
                    {
                        Name:  "POD_NAME_TEST3",
                        Value: "$(POD_NAME_TEST2)-3",
                    },
                    {
                        Name:  "LITERAL_TEST",
                        Value: "literal-$(TEST_LITERAL)",
                    },
                    {
                        Name:  "SERVICE_VAR_TEST",
                        Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
                    },
                    {
                        Name:  "TEST_UNDEFINED",
                        Value: "$(UNDEFINED_VAR)",
                    },
                },
            },
            masterServiceNs: "nothing",
            nilLister:       false,
            expectedEnvs: []kubecontainer.EnvVar{
                {
                    Name:  "TEST_LITERAL",
                    Value: "test-test-test",
                },
                {
                    Name:  "POD_NAME",
                    Value: "dapi-test-pod-name",
                },
                {
                    Name:  "POD_NAME_TEST2",
                    Value: "test2-dapi-test-pod-name",
                },
                {
                    Name:  "POD_NAME_TEST3",
                    Value: "test2-dapi-test-pod-name-3",
                },
                {
                    Name:  "LITERAL_TEST",
                    Value: "literal-test-test-test",
                },
                {
                    Name:  "TEST_SERVICE_HOST",
                    Value: "1.2.3.3",
                },
                {
                    Name:  "TEST_SERVICE_PORT",
                    Value: "8083",
                },
                {
                    Name:  "TEST_PORT",
                    Value: "tcp://1.2.3.3:8083",
                },
                {
                    Name:  "TEST_PORT_8083_TCP",
                    Value: "tcp://1.2.3.3:8083",
                },
                {
                    Name:  "TEST_PORT_8083_TCP_PROTO",
                    Value: "tcp",
                },
                {
                    Name:  "TEST_PORT_8083_TCP_PORT",
                    Value: "8083",
                },
                {
                    Name:  "TEST_PORT_8083_TCP_ADDR",
                    Value: "1.2.3.3",
                },
                {
                    Name:  "SERVICE_VAR_TEST",
                    Value: "1.2.3.3:8083",
                },
                {
                    // Forward references are NOT expanded.
                    Name:  "OUT_OF_ORDER_TEST",
                    Value: "$(OUT_OF_ORDER_TARGET)",
                },
                {
                    Name:  "OUT_OF_ORDER_TARGET",
                    Value: "FOO",
                },
                {
                    // References to undefined vars are left verbatim.
                    Name:  "TEST_UNDEFINED",
                    Value: "$(UNDEFINED_VAR)",
                },
                {
                    Name: "EMPTY_VAR",
                },
                {
                    Name:  "EMPTY_TEST",
                    Value: "foo-",
                },
            },
        },
    }
    for i, tc := range testCases {
        testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
        kl := testKubelet.kubelet
        kl.masterServiceNamespace = tc.masterServiceNs
        if tc.nilLister {
            kl.serviceLister = nil
        } else {
            kl.serviceLister = testServiceLister{services}
        }
        testPod := &api.Pod{
            ObjectMeta: api.ObjectMeta{
                Namespace: tc.ns,
                Name:      "dapi-test-pod-name",
            },
        }
        podIP := "1.2.3.4"
        result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP)
        if err != nil {
            t.Errorf("[%v] Unexpected error: %v", tc.name, err)
        }
        // Compare order-insensitively: sort both sides by variable name.
        sort.Sort(envs(result))
        sort.Sort(envs(tc.expectedEnvs))
        if !reflect.DeepEqual(result, tc.expectedEnvs) {
            t.Errorf("%d: [%v] Unexpected env entries; expected {%v}, got {%v}", i, tc.name, tc.expectedEnvs, result)
        }
    }
}
// waitingState returns a ContainerStatus for cName in the Waiting state.
func waitingState(cName string) api.ContainerStatus {
    status := api.ContainerStatus{Name: cName}
    status.State.Waiting = &api.ContainerStateWaiting{}
    return status
}
// waitingStateWithLastTermination returns a Waiting ContainerStatus whose
// previous run terminated with exit code 0 (i.e. a restarting container).
func waitingStateWithLastTermination(cName string) api.ContainerStatus {
    status := api.ContainerStatus{Name: cName}
    status.State.Waiting = &api.ContainerStateWaiting{}
    status.LastTerminationState.Terminated = &api.ContainerStateTerminated{ExitCode: 0}
    return status
}
// runningState returns a ContainerStatus for cName in the Running state.
func runningState(cName string) api.ContainerStatus {
    status := api.ContainerStatus{Name: cName}
    status.State.Running = &api.ContainerStateRunning{}
    return status
}
// stoppedState returns a Terminated ContainerStatus with no exit code set
// (zero value).
func stoppedState(cName string) api.ContainerStatus {
    status := api.ContainerStatus{Name: cName}
    status.State.Terminated = &api.ContainerStateTerminated{}
    return status
}
// succeededState returns a Terminated ContainerStatus with exit code 0.
func succeededState(cName string) api.ContainerStatus {
    status := api.ContainerStatus{Name: cName}
    status.State.Terminated = &api.ContainerStateTerminated{ExitCode: 0}
    return status
}
// failedState returns a Terminated ContainerStatus with a non-zero exit
// code (-1), representing a failed container.
func failedState(cName string) api.ContainerStatus {
    status := api.ContainerStatus{Name: cName}
    status.State.Terminated = &api.ContainerStateTerminated{ExitCode: -1}
    return status
}
// TestPodPhaseWithRestartAlways is a table-driven test of GetPhase for a
// two-container pod with RestartPolicyAlways: terminated containers will
// restart, so the pod stays Running rather than moving to a terminal phase.
func TestPodPhaseWithRestartAlways(t *testing.T) {
    desiredState := api.PodSpec{
        NodeName: "machine",
        Containers: []api.Container{
            {Name: "containerA"},
            {Name: "containerB"},
        },
        RestartPolicy: api.RestartPolicyAlways,
    }
    tests := []struct {
        pod    *api.Pod
        status api.PodPhase
        test   string
    }{
        {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        runningState("containerB"),
                    },
                },
            },
            api.PodRunning,
            "all running",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        stoppedState("containerA"),
                        stoppedState("containerB"),
                    },
                },
            },
            api.PodRunning,
            "all stopped with restart always",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        stoppedState("containerB"),
                    },
                },
            },
            api.PodRunning,
            "mixed state #1 with restart always",
        },
        {
            // One container has no status at all -> still Pending.
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                    },
                },
            },
            api.PodPending,
            "mixed state #2 with restart always",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        waitingState("containerB"),
                    },
                },
            },
            api.PodPending,
            "mixed state #3 with restart always",
        },
        {
            // Waiting but previously terminated (crash-looping) counts as
            // having started, so the pod is Running.
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        waitingStateWithLastTermination("containerB"),
                    },
                },
            },
            api.PodRunning,
            "backoff crashloop container with restart always",
        },
    }
    for _, test := range tests {
        if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
            t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
        }
    }
}
// TestPodPhaseWithRestartNever is a table-driven test of GetPhase for a
// two-container pod with RestartPolicyNever: terminated containers do not
// restart, so the pod reaches the terminal Succeeded/Failed phases.
func TestPodPhaseWithRestartNever(t *testing.T) {
    desiredState := api.PodSpec{
        NodeName: "machine",
        Containers: []api.Container{
            {Name: "containerA"},
            {Name: "containerB"},
        },
        RestartPolicy: api.RestartPolicyNever,
    }
    tests := []struct {
        pod    *api.Pod
        status api.PodPhase
        test   string
    }{
        {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        runningState("containerB"),
                    },
                },
            },
            api.PodRunning,
            "all running with restart never",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        succeededState("containerA"),
                        succeededState("containerB"),
                    },
                },
            },
            api.PodSucceeded,
            "all succeeded with restart never",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        failedState("containerA"),
                        failedState("containerB"),
                    },
                },
            },
            api.PodFailed,
            "all failed with restart never",
        },
        {
            // One container still running keeps the pod in Running even
            // though the other finished.
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        succeededState("containerB"),
                    },
                },
            },
            api.PodRunning,
            "mixed state #1 with restart never",
        },
        {
            // One container has no status at all -> still Pending.
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                    },
                },
            },
            api.PodPending,
            "mixed state #2 with restart never",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        waitingState("containerB"),
                    },
                },
            },
            api.PodPending,
            "mixed state #3 with restart never",
        },
    }
    for _, test := range tests {
        if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
            t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
        }
    }
}
// TestPodPhaseWithRestartOnFailure is a table-driven test of GetPhase for a
// two-container pod with RestartPolicyOnFailure: failed containers restart
// (pod stays Running), while an all-successful pod reaches Succeeded.
func TestPodPhaseWithRestartOnFailure(t *testing.T) {
    desiredState := api.PodSpec{
        NodeName: "machine",
        Containers: []api.Container{
            {Name: "containerA"},
            {Name: "containerB"},
        },
        RestartPolicy: api.RestartPolicyOnFailure,
    }
    tests := []struct {
        pod    *api.Pod
        status api.PodPhase
        test   string
    }{
        {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        runningState("containerB"),
                    },
                },
            },
            api.PodRunning,
            "all running with restart onfailure",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        succeededState("containerA"),
                        succeededState("containerB"),
                    },
                },
            },
            api.PodSucceeded,
            "all succeeded with restart onfailure",
        },
        {
            // Failed containers will be restarted, so the pod is Running.
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        failedState("containerA"),
                        failedState("containerB"),
                    },
                },
            },
            api.PodRunning,
            // Fixed copy-paste in the case label: this is the OnFailure
            // test, not the Never test.
            "all failed with restart onfailure",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        succeededState("containerB"),
                    },
                },
            },
            api.PodRunning,
            "mixed state #1 with restart onfailure",
        },
        {
            // One container has no status at all -> still Pending.
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                    },
                },
            },
            api.PodPending,
            "mixed state #2 with restart onfailure",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        waitingState("containerB"),
                    },
                },
            },
            api.PodPending,
            "mixed state #3 with restart onfailure",
        },
        {
            &api.Pod{
                Spec: desiredState,
                Status: api.PodStatus{
                    ContainerStatuses: []api.ContainerStatus{
                        runningState("containerA"),
                        waitingStateWithLastTermination("containerB"),
                    },
                },
            },
            api.PodRunning,
            "backoff crashloop container with restart onfailure",
        },
    }
    for _, test := range tests {
        if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
            t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
        }
    }
}
// TestExecInContainerNoSuchPod ensures ExecInContainer fails without ever
// invoking the runner when the target pod is not in the runtime's pod list.
func TestExecInContainerNoSuchPod(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
    recorder := fakeContainerCommandRunner{}
    kubelet.runner = &recorder

    const (
        podName      = "podFoo"
        podNamespace = "nsFoo"
        containerID  = "containerFoo"
    )
    fullName := kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}})
    err := kubelet.ExecInContainer(fullName, "", containerID, []string{"ls"}, nil, nil, nil, false, nil)
    if err == nil {
        t.Fatal("unexpected non-error")
    }
    if !recorder.ID.IsEmpty() {
        t.Fatal("unexpected invocation of runner.ExecInContainer")
    }
}
// TestExecInContainerNoSuchContainer ensures ExecInContainer fails without
// invoking the runner when the pod exists but the named container does not.
func TestExecInContainerNoSuchContainer(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    recorder := fakeContainerCommandRunner{}
    kubelet.runner = &recorder

    const (
        podName      = "podFoo"
        podNamespace = "nsFoo"
        containerID  = "containerFoo"
    )
    // The pod is known to the runtime, but only contains container "bar".
    testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
        {Pod: &kubecontainer.Pod{
            ID:        "12345678",
            Name:      podName,
            Namespace: podNamespace,
            Containers: []*kubecontainer.Container{
                {
                    Name: "bar",
                    ID:   kubecontainer.ContainerID{Type: "test", ID: "barID"},
                },
            },
        }},
    }
    fullName := kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
        UID:       "12345678",
        Name:      podName,
        Namespace: podNamespace,
    }})
    err := kubelet.ExecInContainer(fullName, "", containerID, []string{"ls"}, nil, nil, nil, false, nil)
    if err == nil {
        t.Fatal("unexpected non-error")
    }
    if !recorder.ID.IsEmpty() {
        t.Fatal("unexpected invocation of runner.ExecInContainer")
    }
}
// fakeReadWriteCloser is a no-op io.ReadWriteCloser used as a stand-in for
// exec and port-forward streams in tests.
type fakeReadWriteCloser struct{}

// Write discards data and reports zero bytes written.
func (*fakeReadWriteCloser) Write(data []byte) (int, error) { return 0, nil }

// Read reports zero bytes read.
func (*fakeReadWriteCloser) Read(data []byte) (int, error) { return 0, nil }

// Close is a no-op.
func (*fakeReadWriteCloser) Close() error { return nil }
// TestExecInContainer verifies that ExecInContainer resolves the target
// container by name and forwards the command, streams, and TTY flag to the
// runner unchanged.
func TestExecInContainer(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    fakeRuntime := testKubelet.fakeRuntime
    fakeCommandRunner := fakeContainerCommandRunner{}
    kubelet.runner = &fakeCommandRunner
    podName := "podFoo"
    podNamespace := "nsFoo"
    containerID := "containerFoo"
    command := []string{"ls"}
    stdin := &bytes.Buffer{}
    stdout := &fakeReadWriteCloser{}
    stderr := &fakeReadWriteCloser{}
    tty := true
    fakeRuntime.PodList = []*containertest.FakePod{
        {Pod: &kubecontainer.Pod{
            ID:        "12345678",
            Name:      podName,
            Namespace: podNamespace,
            Containers: []*kubecontainer.Container{
                {Name: containerID,
                    ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
                },
            },
        }},
    }
    err := kubelet.ExecInContainer(
        kubecontainer.GetPodFullName(podWithUidNameNs("12345678", podName, podNamespace)),
        "",
        containerID,
        // Was a duplicate []string{"ls"} literal; pass the same variable
        // that is asserted against below.
        command,
        stdin,
        stdout,
        stderr,
        tty,
        nil,
    )
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    if e, a := containerID, fakeCommandRunner.ID.ID; e != a {
        t.Fatalf("container name: expected %q, got %q", e, a)
    }
    if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) {
        t.Fatalf("command: expected '%v', got '%v'", e, a)
    }
    if e, a := stdin, fakeCommandRunner.Stdin; e != a {
        t.Fatalf("stdin: expected %#v, got %#v", e, a)
    }
    if e, a := stdout, fakeCommandRunner.Stdout; e != a {
        t.Fatalf("stdout: expected %#v, got %#v", e, a)
    }
    if e, a := stderr, fakeCommandRunner.Stderr; e != a {
        t.Fatalf("stderr: expected %#v, got %#v", e, a)
    }
    if e, a := tty, fakeCommandRunner.TTY; e != a {
        t.Fatalf("tty: expected %t, got %t", e, a)
    }
}
// TestPortForwardNoSuchPod verifies that PortForward fails when the runtime
// reports no matching pod, and that the command runner is never invoked.
func TestPortForwardNoSuchPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	// Empty runtime pod list: the target pod cannot be found.
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}

	runner := fakeContainerCommandRunner{}
	kubelet.runner = &runner

	podName := "podFoo"
	podNamespace := "nsFoo"
	var port uint16 = 5000

	podFullName := kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}})
	if err := kubelet.PortForward(podFullName, "", port, nil); err == nil {
		t.Fatal("unexpected non-error")
	}
	// The runner must not have been reached for a missing pod.
	if !runner.ID.IsEmpty() {
		t.Fatal("unexpected invocation of runner.PortForward")
	}
}
// TestPortForward verifies that PortForward resolves the pod from the runtime
// pod list and hands the pod ID, port, and stream through to the runner.
func TestPortForward(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet

	podName := "podFoo"
	podNamespace := "nsFoo"
	podID := types.UID("12345678")
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        podID,
			Name:      podName,
			Namespace: podNamespace,
			Containers: []*kubecontainer.Container{
				{
					Name: "foo",
					ID:   kubecontainer.ContainerID{Type: "test", ID: "containerFoo"},
				},
			},
		}},
	}

	runner := fakeContainerCommandRunner{}
	kubelet.runner = &runner

	var port uint16 = 5000
	stream := &fakeReadWriteCloser{}
	podFullName := kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
		UID:       "12345678",
		Name:      podName,
		Namespace: podNamespace,
	}})
	if err := kubelet.PortForward(podFullName, "", port, stream); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	// Every argument must have reached the fake runner untouched.
	if e, a := podID, runner.PodID; e != a {
		t.Fatalf("container id: expected %q, got %q", e, a)
	}
	if e, a := port, runner.Port; e != a {
		t.Fatalf("port: expected %v, got %v", e, a)
	}
	if e, a := stream, runner.Stream; e != a {
		t.Fatalf("stream: expected %v, got %v", e, a)
	}
}
// Tests that host port conflicts are detected correctly.
func TestGetHostPortConflicts(t *testing.T) {
	pods := []*api.Pod{
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}},
	}
	// All host ports are distinct: no conflict expected.
	if hasHostPortConflicts(pods) {
		t.Errorf("expected no conflicts, Got conflicts")
	}

	// Appending a pod that reuses host port 81 must be reported as a conflict.
	conflicting := &api.Pod{
		Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
	}
	pods = append(pods, conflicting)
	if !hasHostPortConflicts(pods) {
		t.Errorf("expected conflict, Got no conflicts")
	}
}
// Tests that we handle port conflicts correctly by setting the failed status in status map.
func TestHandlePortConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	// Stub out cadvisor so admission-time resource lookups succeed.
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	// Node allows up to 110 pods, so only the port conflict can cause a rejection.
	kl.nodeLister = testNodeLister{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	// Both pods request the same host port (80) on the same node.
	spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the Pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
	pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]
	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
	}
	if status.Phase != api.PodFailed {
		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
	}
	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
	}
	if status.Phase != api.PodPending {
		t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
	}
}
// Tests that we handle host name conflicts correctly by setting the failed status in status map.
func TestHandleHostNameConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	// Stub out cadvisor so admission-time resource lookups succeed.
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kl.nodeLister = testNodeLister{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	// default NodeName in test is 127.0.0.1
	// The first pod is bound to a different node name and must be rejected.
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "notfittingpod", "foo", api.PodSpec{NodeName: "127.0.0.2"}),
		podWithUidNameNsSpec("987654321", "fittingpod", "foo", api.PodSpec{NodeName: "127.0.0.1"}),
	}
	notfittingPod := pods[0]
	fittingPod := pods[1]
	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
	}
	if status.Phase != api.PodFailed {
		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
	}
	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
	}
	if status.Phase != api.PodPending {
		t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
	}
}
// Tests that we handle not matching labels selector correctly by setting the failed status in status map.
func TestHandleNodeSelector(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	// Single node labeled key=B; a pod selecting key=A cannot be scheduled on it.
	nodes := []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}
	kl.nodeInfo = testNodeInfo{nodes: nodes}
	// Stub out cadvisor so admission-time resource lookups succeed.
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "podA", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
		podWithUidNameNsSpec("987654321", "podB", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
	}
	// The first pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]
	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
	}
	if status.Phase != api.PodFailed {
		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
	}
	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
	}
	if status.Phase != api.PodPending {
		t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
	}
}
// Tests that we handle exceeded resources correctly by setting the failed status in status map.
func TestHandleMemExceeded(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	// Node with only 100 (binary) units of allocatable memory.
	nodes := []api.Node{
		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
			Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: api.ResourceList{
				api.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
				api.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
				api.ResourcePods:   *resource.NewQuantity(40, resource.DecimalSI),
			}}},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}
	kl.nodeInfo = testNodeInfo{nodes: nodes}
	// Stub out cadvisor so admission-time resource lookups succeed.
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	// Each pod requests 90 memory, so the two together exceed the node's 100.
	spec := api.PodSpec{NodeName: kl.nodeName,
		Containers: []api.Container{{Resources: api.ResourceRequirements{
			Requests: api.ResourceList{
				"memory": resource.MustParse("90"),
			},
		}}}}
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the Pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
	pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]
	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
	}
	if status.Phase != api.PodFailed {
		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
	}
	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
	}
	if status.Phase != api.PodPending {
		t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
	}
}
// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
		DockerVersion:      "1.5.0",
	}, nil)

	kl := testKubelet.kubelet
	pods := []*api.Pod{
		{ObjectMeta: api.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
		{ObjectMeta: api.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
	}
	podToTest := pods[1]

	// Run once to populate the status map.
	kl.HandlePodAdditions(pods)
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
		t.Fatalf("expected to have status cached for pod2")
	}

	// Sync with an empty pod list so the stale status entry is purged.
	kl.podManager.SetPods([]*api.Pod{})
	kl.HandlePodCleanups()
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
		t.Fatalf("expected to not have status cached for pod2")
	}
}
// TestValidateContainerLogStatus checks which container states permit log
// retrieval: running or terminated states succeed, waiting states (including
// image-pull failures) do not. It also exercises the "previous" flag, which
// requires a last termination state, and an unknown container name.
func TestValidateContainerLogStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	containerName := "x"
	testCases := []struct {
		statuses []api.ContainerStatus
		success  bool
	}{
		{
			// Running with a recorded previous termination: logs readable.
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Running: &api.ContainerStateRunning{},
					},
					LastTerminationState: api.ContainerState{
						Terminated: &api.ContainerStateTerminated{},
					},
				},
			},
			success: true,
		},
		{
			// Running with no previous termination: current logs readable.
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Running: &api.ContainerStateRunning{},
					},
				},
			},
			success: true,
		},
		{
			// Terminated container: logs readable.
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Terminated: &api.ContainerStateTerminated{},
					},
				},
			},
			success: true,
		},
		{
			// Still waiting: nothing to read yet.
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Waiting: &api.ContainerStateWaiting{},
					},
				},
			},
			success: false,
		},
		{
			// Waiting due to an image pull error: no logs.
			statuses: []api.ContainerStatus{
				{
					Name:  containerName,
					State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}},
				},
			},
			success: false,
		},
		{
			// Waiting in image pull back-off: no logs.
			statuses: []api.ContainerStatus{
				{
					Name:  containerName,
					State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
				},
			},
			success: false,
		},
	}
	for i, tc := range testCases {
		_, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
			ContainerStatuses: tc.statuses,
		}, containerName, false)
		if tc.success {
			if err != nil {
				t.Errorf("[case %d]: unexpected failure - %v", i, err)
			}
		} else if err == nil {
			t.Errorf("[case %d]: unexpected success", i)
		}
	}
	// Unknown container names are always rejected.
	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
		ContainerStatuses: testCases[0].statuses,
	}, "blah", false); err == nil {
		t.Errorf("expected error with invalid container name")
	}
	// Case 0 has a LastTerminationState, so previous=true is valid.
	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
		ContainerStatuses: testCases[0].statuses,
	}, containerName, true); err != nil {
		t.Errorf("unexpected error for previous terminated container - %v", err)
	}
	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
		ContainerStatuses: testCases[0].statuses,
	}, containerName, false); err != nil {
		t.Errorf("unexpected error for most recent container - %v", err)
	}
	// Case 1 has no LastTerminationState, so previous=true must fail.
	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
		ContainerStatuses: testCases[1].statuses,
	}, containerName, true); err == nil {
		t.Errorf("expected error for previous terminated container")
	}
	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
		ContainerStatuses: testCases[1].statuses,
	}, containerName, false); err != nil {
		t.Errorf("unexpected error for most recent container - %v", err)
	}
}
// updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along
// with the mock FsInfo values added to Cadvisor should make the kubelet report that it has
// sufficient disk space or it is out of disk, depending on the capacity, availability and
// threshold values.
func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, rootCap, dockerCap, rootAvail, dockerAvail uint64, rootThreshold, dockerThreshold int) error {
	// Wire each parameter to its matching filesystem. The previous version
	// crossed the root/docker values and thresholds; that went unnoticed only
	// because callers pass symmetric values and out-of-disk is triggered when
	// either filesystem drops below its threshold.
	dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: dockerCap * mb, Available: dockerAvail * mb}
	rootFsInfo := cadvisorapiv2.FsInfo{Capacity: rootCap * mb, Available: rootAvail * mb}
	mockCadvisor.On("ImagesFsInfo").Return(dockerimagesFsInfo, nil)
	mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
	dsp := DiskSpacePolicy{DockerFreeDiskMB: dockerThreshold, RootFreeDiskMB: rootThreshold}
	diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
	if err != nil {
		return err
	}
	kubelet.diskSpaceManager = diskSpaceManager
	return nil
}
// TestUpdateNewNodeStatus verifies the first status update a kubelet posts
// for a freshly registered node: conditions, machine info, capacity and
// allocatable resources, addresses, and an image list capped at
// maxImagesInNodeStatus.
func TestUpdateNewNodeStatus(t *testing.T) {
	// generate one more than maxImagesInNodeStatus in inputImageList
	inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
	testKubelet := newTestKubeletWithImageList(
		t, inputImageList, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	// Seed the fake API server with an existing (empty-status) node object.
	kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
	}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9, // 10G
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	// Make kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}
	// The node object the kubelet is expected to post. Heartbeat/transition
	// timestamps are zeroed in the loop below before comparison.
	expectedNode := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
		Spec:       api.NodeSpec{},
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{
					Type:               api.NodeOutOfDisk,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeMemoryPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeReady,
					Status:             api.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
			},
			NodeInfo: api.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(10E9, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(9900E6, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []api.NodeAddress{
				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
			},
			Images: expectedImageList,
		},
	}
	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Exactly two client actions are expected; the second must be the node
	// status update.
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Fatalf("unexpected actions: %v", actions)
	}
	if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
		t.Fatalf("unexpected actions: %v", actions)
	}
	updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}
	for i, cond := range updatedNode.Status.Conditions {
		if cond.LastHeartbeatTime.IsZero() {
			t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
		}
		if cond.LastTransitionTime.IsZero() {
			t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
		}
		// Zero the timestamps so the DeepEqual below is time-independent.
		updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
	}
	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}
	// The posted image list must have been truncated to maxImagesInNodeStatus.
	if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
		t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
	} else {
		if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
			t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
		}
	}
}
// TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency checks that a fresh
// node reports NodeOutOfDisk=false when disk space is sufficient, even with an
// outOfDiskTransitionFrequency configured.
func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	// Seed the fake API server with an existing (empty-status) node object.
	kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
	}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	// Make Kubelet report that it has sufficient disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}
	kubelet.outOfDiskTransitionFrequency = 10 * time.Second
	// Timestamps are zeroed in the loop below before comparison.
	expectedNodeOutOfDiskCondition := api.NodeCondition{
		Type:               api.NodeOutOfDisk,
		Status:             api.ConditionFalse,
		Reason:             "KubeletHasSufficientDisk",
		Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
		LastHeartbeatTime:  unversioned.Time{},
		LastTransitionTime: unversioned.Time{},
	}
	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Exactly two client actions are expected; the second must be the node
	// status update.
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Fatalf("unexpected actions: %v", actions)
	}
	if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
		t.Fatalf("unexpected actions: %v", actions)
	}
	updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}
	var oodCondition api.NodeCondition
	for i, cond := range updatedNode.Status.Conditions {
		if cond.LastHeartbeatTime.IsZero() {
			t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
		}
		if cond.LastTransitionTime.IsZero() {
			t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
		}
		// Zero the timestamps so the DeepEqual below is time-independent.
		updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
		if cond.Type == api.NodeOutOfDisk {
			oodCondition = updatedNode.Status.Conditions[i]
		}
	}
	if !reflect.DeepEqual(expectedNodeOutOfDiskCondition, oodCondition) {
		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNodeOutOfDiskCondition, oodCondition))
	}
}
// TestUpdateExistingNodeStatus verifies a status update for a node with
// pre-existing conditions: heartbeat times are refreshed while transition
// times of unchanged conditions are preserved (the node stays out of disk
// here), and capacity/allocatable are refreshed from cadvisor.
func TestUpdateExistingNodeStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	// Seed the fake API server with a node whose conditions were last set in 2012.
	kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
			Spec:       api.NodeSpec{},
			Status: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:               api.NodeOutOfDisk,
						Status:             api.ConditionTrue,
						Reason:             "KubeletOutOfDisk",
						Message:            "out of disk space",
						LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               api.NodeMemoryPressure,
						Status:             api.ConditionFalse,
						Reason:             "KubeletHasSufficientMemory",
						Message:            fmt.Sprintf("kubelet has sufficient memory available"),
						LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					{
						Type:               api.NodeReady,
						Status:             api.ConditionTrue,
						Reason:             "KubeletReady",
						Message:            fmt.Sprintf("kubelet is posting ready status"),
						LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
				},
				Capacity: api.ResourceList{
					api.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
					api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
					api.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
				},
				Allocatable: api.ResourceList{
					api.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
					api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
					api.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
				},
			},
		},
	}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	mockCadvisor.On("Start").Return(nil)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	// Make kubelet report that it is out of disk space.
	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
		t.Fatalf("can't update disk space manager: %v", err)
	}
	// The node the kubelet should post: still out of disk, machine info and
	// resources refreshed from cadvisor. Timestamps are zeroed below.
	expectedNode := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
		Spec:       api.NodeSpec{},
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{
					Type:               api.NodeOutOfDisk,
					Status:             api.ConditionTrue,
					Reason:             "KubeletOutOfDisk",
					Message:            "out of disk space",
					LastHeartbeatTime:  unversioned.Time{}, // placeholder
					LastTransitionTime: unversioned.Time{}, // placeholder
				},
				{
					Type:               api.NodeMemoryPressure,
					Status:             api.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  unversioned.Time{},
					LastTransitionTime: unversioned.Time{},
				},
				{
					Type:               api.NodeReady,
					Status:             api.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  unversioned.Time{}, // placeholder
					LastTransitionTime: unversioned.Time{}, // placeholder
				},
			},
			NodeInfo: api.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           "3.16.0-0.bpo.4-amd64",
				OSImage:                 "Debian GNU/Linux 7 (wheezy)",
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(20E9, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
				api.ResourceMemory:    *resource.NewQuantity(19900E6, resource.BinarySI),
				api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
				api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
			},
			Addresses: []api.NodeAddress{
				{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
				{Type: api.NodeInternalIP, Address: "127.0.0.1"},
			},
			// images will be sorted from max to min in node status.
			Images: []api.ContainerImage{
				{
					Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
					SizeBytes: 456,
				},
				{
					Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
					SizeBytes: 123,
				},
			},
		},
	}
	kubelet.updateRuntimeUp()
	if err := kubelet.updateNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Exactly two client actions; the second carries the updated node.
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Errorf("unexpected actions: %v", actions)
	}
	updateAction, ok := actions[1].(core.UpdateAction)
	if !ok {
		t.Errorf("unexpected action type.  expected UpdateAction, got %#v", actions[1])
	}
	updatedNode, ok := updateAction.GetObject().(*api.Node)
	if !ok {
		t.Errorf("unexpected object type")
	}
	for i, cond := range updatedNode.Status.Conditions {
		// Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same.
		if old := unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
			t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, unversioned.Now(), old)
		}
		if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
			t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
		}
		// Zero the timestamps so the DeepEqual against expectedNode is time-independent.
		updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
	}
	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
		t.Errorf("unexpected node condition order. NodeReady should be last.")
	}
	if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
		t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
	}
}
// TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency drives the
// NodeOutOfDisk condition through alternating disk states on a fake clock and
// checks that a true->false transition is suppressed until
// outOfDiskTransitionFrequency (5s here) has elapsed.
func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
	// Seed the fake API server with a node that is currently out of disk.
	kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
			Spec:       api.NodeSpec{},
			Status: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:               api.NodeReady,
						Status:             api.ConditionTrue,
						Reason:             "KubeletReady",
						Message:            fmt.Sprintf("kubelet is posting ready status"),
						LastHeartbeatTime:  unversioned.NewTime(clock.Now()),
						LastTransitionTime: unversioned.NewTime(clock.Now()),
					},
					{
						Type:               api.NodeOutOfDisk,
						Status:             api.ConditionTrue,
						Reason:             "KubeletOutOfDisk",
						Message:            "out of disk space",
						LastHeartbeatTime:  unversioned.NewTime(clock.Now()),
						LastTransitionTime: unversioned.NewTime(clock.Now()),
					},
				},
			},
		},
	}}).ReactionChain
	mockCadvisor := testKubelet.fakeCadvisor
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	mockCadvisor.On("Start").Return(nil)
	mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
		DockerVersion:      "1.5.0",
	}
	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
	// A transition out of the OOD state may only be reported 5s after the
	// previous transition.
	kubelet.outOfDiskTransitionFrequency = 5 * time.Second
	// Expected condition while the node remains out of disk.
	ood := api.NodeCondition{
		Type:               api.NodeOutOfDisk,
		Status:             api.ConditionTrue,
		Reason:             "KubeletOutOfDisk",
		Message:            "out of disk space",
		LastHeartbeatTime:  unversioned.NewTime(clock.Now()), // placeholder
		LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder
	}
	// Expected condition once the node is allowed to report sufficient disk.
	noOod := api.NodeCondition{
		Type:               api.NodeOutOfDisk,
		Status:             api.ConditionFalse,
		Reason:             "KubeletHasSufficientDisk",
		Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
		LastHeartbeatTime:  unversioned.NewTime(clock.Now()), // placeholder
		LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder
	}
	// Each case advances the fake clock by 1s before the status update.
	testCases := []struct {
		rootFsAvail   uint64
		dockerFsAvail uint64
		expected      api.NodeCondition
	}{
		{
			// t=1s: disk is sufficient, but the 5s transition window has not
			// elapsed, so the condition stays OOD.
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// t=2s: root fs below the 100MB threshold - genuinely out of disk.
			rootFsAvail:   50,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// t=3s: sufficient again, but still within the transition window.
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      ood,
		},
		{
			// t=4s: docker fs below threshold - out of disk again.
			rootFsAvail:   200,
			dockerFsAvail: 50,
			expected:      ood,
		},
		{
			// t=5s: sufficient, and enough time has elapsed for the condition
			// to finally flip to false.
			rootFsAvail:   200,
			dockerFsAvail: 200,
			expected:      noOod,
		},
	}
	kubelet.updateRuntimeUp()
	for tcIdx, tc := range testCases {
		// Step by a second
		clock.Step(1 * time.Second)
		// Setup expected times.
		tc.expected.LastHeartbeatTime = unversioned.NewTime(clock.Now())
		// In the last case, there should be a status transition for NodeOutOfDisk
		if tcIdx == len(testCases)-1 {
			tc.expected.LastTransitionTime = unversioned.NewTime(clock.Now())
		}
		// Make kubelet report that it has sufficient disk space
		if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil {
			t.Fatalf("can't update disk space manager: %v", err)
		}
		if err := kubelet.updateNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		// Each iteration should produce exactly two actions, the second being
		// the node update carrying the new conditions.
		actions := kubeClient.Actions()
		if len(actions) != 2 {
			t.Errorf("%d. unexpected actions: %v", tcIdx, actions)
		}
		updateAction, ok := actions[1].(core.UpdateAction)
		if !ok {
			t.Errorf("%d. unexpected action type. expected UpdateAction, got %#v", tcIdx, actions[1])
		}
		updatedNode, ok := updateAction.GetObject().(*api.Node)
		if !ok {
			t.Errorf("%d. unexpected object type", tcIdx)
		}
		// Reset recorded actions so the next iteration sees a clean slate.
		kubeClient.ClearActions()
		var oodCondition api.NodeCondition
		for i, cond := range updatedNode.Status.Conditions {
			if cond.Type == api.NodeOutOfDisk {
				oodCondition = updatedNode.Status.Conditions[i]
			}
		}
		if !reflect.DeepEqual(tc.expected, oodCondition) {
			t.Errorf("%d.\nwant \n%v\n, got \n%v", tcIdx, tc.expected, oodCondition)
		}
	}
}
// TestUpdateNodeStatusWithRuntimeStateError verifies that the NodeReady
// condition reported by updateNodeStatus tracks the container runtime
// state: NotReady while the runtime health check is stale or failing,
// Ready again once an up-to-date check succeeds.
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    clock := testKubelet.fakeClock
    kubeClient := testKubelet.fakeKubeClient
    // Seed the fake apiserver with a bare node for this kubelet to update.
    kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
        {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
    }}).ReactionChain
    mockCadvisor := testKubelet.fakeCadvisor
    mockCadvisor.On("Start").Return(nil)
    machineInfo := &cadvisorapi.MachineInfo{
        MachineID:      "123",
        SystemUUID:     "abc",
        BootID:         "1b3",
        NumCores:       2,
        MemoryCapacity: 10E9,
    }
    mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
    versionInfo := &cadvisorapi.VersionInfo{
        KernelVersion:      "3.16.0-0.bpo.4-amd64",
        ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
    }
    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
    // Make kubelet report that it has sufficient disk space.
    if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
        t.Fatalf("can't update disk space manager: %v", err)
    }
    // expectedNode describes every part of the status update except the
    // NodeReady condition (the trailing placeholder entry), which
    // checkNodeStatus fills in per scenario. Timestamps are zeroed before
    // comparison.
    expectedNode := &api.Node{
        ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
        Spec:       api.NodeSpec{},
        Status: api.NodeStatus{
            Conditions: []api.NodeCondition{
                {
                    Type:               api.NodeOutOfDisk,
                    Status:             api.ConditionFalse,
                    Reason:             "KubeletHasSufficientDisk",
                    Message:            "kubelet has sufficient disk space available",
                    LastHeartbeatTime:  unversioned.Time{},
                    LastTransitionTime: unversioned.Time{},
                },
                {
                    Type:               api.NodeMemoryPressure,
                    Status:             api.ConditionFalse,
                    Reason:             "KubeletHasSufficientMemory",
                    Message:            fmt.Sprintf("kubelet has sufficient memory available"),
                    LastHeartbeatTime:  unversioned.Time{},
                    LastTransitionTime: unversioned.Time{},
                },
                {}, //placeholder
            },
            NodeInfo: api.NodeSystemInfo{
                MachineID:               "123",
                SystemUUID:              "abc",
                BootID:                  "1b3",
                KernelVersion:           "3.16.0-0.bpo.4-amd64",
                OSImage:                 "Debian GNU/Linux 7 (wheezy)",
                OperatingSystem:         goruntime.GOOS,
                Architecture:            goruntime.GOARCH,
                ContainerRuntimeVersion: "test://1.5.0",
                KubeletVersion:          version.Get().String(),
                KubeProxyVersion:        version.Get().String(),
            },
            Capacity: api.ResourceList{
                api.ResourceCPU:       *resource.NewMilliQuantity(2000, resource.DecimalSI),
                api.ResourceMemory:    *resource.NewQuantity(10E9, resource.BinarySI),
                api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
                api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
            },
            Allocatable: api.ResourceList{
                api.ResourceCPU:       *resource.NewMilliQuantity(1800, resource.DecimalSI),
                api.ResourceMemory:    *resource.NewQuantity(9900E6, resource.BinarySI),
                api.ResourcePods:      *resource.NewQuantity(0, resource.DecimalSI),
                api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
            },
            Addresses: []api.NodeAddress{
                {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
                {Type: api.NodeInternalIP, Address: "127.0.0.1"},
            },
            Images: []api.ContainerImage{
                {
                    Names:     []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
                    SizeBytes: 456,
                },
                {
                    Names:     []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
                    SizeBytes: 123,
                },
            },
        },
    }
    // checkNodeStatus runs one updateNodeStatus cycle and asserts that the
    // resulting status update equals expectedNode with the given NodeReady
    // condition as the last condition.
    checkNodeStatus := func(status api.ConditionStatus, reason, message string) {
        kubeClient.ClearActions()
        if err := kubelet.updateNodeStatus(); err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        actions := kubeClient.Actions()
        if len(actions) != 2 {
            t.Fatalf("unexpected actions: %v", actions)
        }
        if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
            t.Fatalf("unexpected actions: %v", actions)
        }
        updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
        if !ok {
            t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
        }
        // Every condition must carry non-zero timestamps; zero them out so
        // the semantic DeepEqual below ignores timing.
        for i, cond := range updatedNode.Status.Conditions {
            if cond.LastHeartbeatTime.IsZero() {
                t.Errorf("unexpected zero last probe timestamp")
            }
            if cond.LastTransitionTime.IsZero() {
                t.Errorf("unexpected zero last transition timestamp")
            }
            updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
            updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
        }
        // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
        lastIndex := len(updatedNode.Status.Conditions) - 1
        if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady {
            t.Errorf("unexpected node condition order. NodeReady should be last.")
        }
        expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{
            Type:               api.NodeReady,
            Status:             status,
            Reason:             reason,
            Message:            message,
            LastHeartbeatTime:  unversioned.Time{},
            LastTransitionTime: unversioned.Time{},
        }
        if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
            t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
        }
    }
    readyMessage := "kubelet is posting ready status"
    downMessage := "container runtime is down"
    // Should report kubelet not ready if the runtime check is out of date
    clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
    kubelet.updateRuntimeUp()
    checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
    // Should report kubelet ready if the runtime check is updated
    clock.SetTime(time.Now())
    kubelet.updateRuntimeUp()
    checkNodeStatus(api.ConditionTrue, "KubeletReady", readyMessage)
    // Should report kubelet not ready if the runtime check is out of date
    clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
    kubelet.updateRuntimeUp()
    checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
    // Should report kubelet not ready if the runtime check failed
    fakeRuntime := testKubelet.fakeRuntime
    // Inject error into fake runtime status check, node should be NotReady
    fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
    clock.SetTime(time.Now())
    kubelet.updateRuntimeUp()
    checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
}
// TestUpdateNodeStatusError verifies that updateNodeStatus returns an error
// when the apiserver has no node object matching the kubelet, and that the
// update is attempted nodeStatusUpdateRetry times (one recorded client
// action per attempt) before giving up.
func TestUpdateNodeStatusError(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    // No matching node for the kubelet
    testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{}}).ReactionChain
    if err := kubelet.updateNodeStatus(); err == nil {
        // err is necessarily nil in this branch, so report the failure
        // directly instead of formatting the nil error (the original
        // message printed "unexpected non error: <nil>").
        t.Errorf("expected an error from updateNodeStatus, got nil")
    }
    if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
        t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
    }
}
// TestCreateMirrorPod verifies that syncing a static pod (config source
// "file") creates exactly one corresponding mirror pod, for both the
// SyncPodCreate and SyncPodUpdate sync types.
func TestCreateMirrorPod(t *testing.T) {
    for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
        testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
        testKubelet.fakeCadvisor.On("Start").Return(nil)
        testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
        testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
        testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
        testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
        kl := testKubelet.kubelet
        manager := testKubelet.fakeMirrorClient
        pod := podWithUidNameNs("12345678", "bar", "foo")
        // Mark the pod as originating from a static source so the kubelet
        // must create a mirror pod for it.
        pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
        pods := []*api.Pod{pod}
        kl.podManager.SetPods(pods)
        err := kl.syncPod(syncPodOptions{
            pod:        pod,
            podStatus:  &kubecontainer.PodStatus{},
            updateType: updateType,
        })
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        podFullName := kubecontainer.GetPodFullName(pod)
        // A single assertion covers both existence and uniqueness of the
        // mirror pod (the original checked HasPod twice).
        if manager.NumOfPods() != 1 || !manager.HasPod(podFullName) {
            t.Errorf("expected one mirror pod %q, got %v", podFullName, manager.GetPods())
        }
    }
}
// TestDeleteOutdatedMirrorPod verifies that syncing a static pod whose
// existing mirror pod carries a stale spec deletes the stale mirror pod
// and creates a fresh one (exactly one create and one delete recorded).
func TestDeleteOutdatedMirrorPod(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kl := testKubelet.kubelet
    manager := testKubelet.fakeMirrorClient
    // The authoritative static pod, sourced from a file.
    pod := podWithUidNameNsSpec("12345678", "foo", "ns", api.PodSpec{
        Containers: []api.Container{
            {Name: "1234", Image: "foo"},
        },
    })
    pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
    // Mirror pod has an outdated spec (different image, different UID).
    mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", api.PodSpec{
        Containers: []api.Container{
            {Name: "1234", Image: "bar"},
        },
    })
    mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
    mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"
    pods := []*api.Pod{pod, mirrorPod}
    kl.podManager.SetPods(pods)
    err := kl.syncPod(syncPodOptions{
        pod:        pod,
        mirrorPod:  mirrorPod,
        podStatus:  &kubecontainer.PodStatus{},
        updateType: kubetypes.SyncPodUpdate,
    })
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    name := kubecontainer.GetPodFullName(pod)
    // Expect the stale mirror pod deleted and a replacement created.
    creates, deletes := manager.GetCounts(name)
    if creates != 1 || deletes != 1 {
        t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
    }
}
// TestDeleteOrphanedMirrorPods verifies that mirror pods whose static
// source pod no longer exists are deleted during pod cleanup, without any
// new mirror pods being created.
func TestDeleteOrphanedMirrorPods(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kl := testKubelet.kubelet
    manager := testKubelet.fakeMirrorClient
    // Two mirror pods (mirror annotation set) with no corresponding static
    // pods — both are orphans.
    orphanPods := []*api.Pod{
        {
            ObjectMeta: api.ObjectMeta{
                UID:       "12345678",
                Name:      "pod1",
                Namespace: "ns",
                Annotations: map[string]string{
                    kubetypes.ConfigSourceAnnotationKey: "api",
                    kubetypes.ConfigMirrorAnnotationKey: "mirror",
                },
            },
        },
        {
            ObjectMeta: api.ObjectMeta{
                UID:       "12345679",
                Name:      "pod2",
                Namespace: "ns",
                Annotations: map[string]string{
                    kubetypes.ConfigSourceAnnotationKey: "api",
                    kubetypes.ConfigMirrorAnnotationKey: "mirror",
                },
            },
        },
    }
    kl.podManager.SetPods(orphanPods)
    // Sync with an empty pod list to delete all mirror pods.
    kl.HandlePodCleanups()
    if manager.NumOfPods() != 0 {
        t.Errorf("expected zero mirror pods, got %v", manager.GetPods())
    }
    // Each orphan should have been deleted exactly once and never recreated.
    for _, pod := range orphanPods {
        name := kubecontainer.GetPodFullName(pod)
        creates, deletes := manager.GetCounts(name)
        if creates != 0 || deletes != 1 {
            t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
        }
    }
}
// TestGetContainerInfoForMirrorPods verifies that container stats can be
// retrieved using the mirror pod's UID: the kubelet must translate the
// mirror pod UID to the static pod it shadows before looking up the
// running container in the runtime.
func TestGetContainerInfoForMirrorPods(t *testing.T) {
    // pods contain one static and one mirror pod with the same name but
    // different UIDs.
    pods := []*api.Pod{
        {
            ObjectMeta: api.ObjectMeta{
                UID:       "1234",
                Name:      "qux",
                Namespace: "ns",
                Annotations: map[string]string{
                    kubetypes.ConfigSourceAnnotationKey: "file",
                },
            },
            Spec: api.PodSpec{
                Containers: []api.Container{
                    {Name: "foo"},
                },
            },
        },
        {
            ObjectMeta: api.ObjectMeta{
                UID:       "5678",
                Name:      "qux",
                Namespace: "ns",
                Annotations: map[string]string{
                    kubetypes.ConfigSourceAnnotationKey: "api",
                    kubetypes.ConfigMirrorAnnotationKey: "mirror",
                },
            },
            Spec: api.PodSpec{
                Containers: []api.Container{
                    {Name: "foo"},
                },
            },
        },
    }
    containerID := "ab2cdf"
    containerPath := fmt.Sprintf("/docker/%v", containerID)
    containerInfo := cadvisorapi.ContainerInfo{
        ContainerReference: cadvisorapi.ContainerReference{
            Name: containerPath,
        },
    }
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    fakeRuntime := testKubelet.fakeRuntime
    mockCadvisor := testKubelet.fakeCadvisor
    cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
    mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
    kubelet := testKubelet.kubelet
    // Only the static pod (UID "1234") is actually running.
    fakeRuntime.PodList = []*containertest.FakePod{
        {Pod: &kubecontainer.Pod{
            ID:        "1234",
            Name:      "qux",
            Namespace: "ns",
            Containers: []*kubecontainer.Container{
                {
                    Name: "foo",
                    ID:   kubecontainer.ContainerID{Type: "test", ID: containerID},
                },
            },
        }},
    }
    kubelet.podManager.SetPods(pods)
    // Use the mirror pod UID to retrieve the stats.
    stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if stats == nil {
        t.Fatalf("stats should not be nil")
    }
    mockCadvisor.AssertExpectations(t)
}
// TestHostNetworkAllowed verifies that a host-network pod syncs
// successfully when its config source ("file") is listed in the
// capabilities' HostNetworkSources allowlist.
func TestHostNetworkAllowed(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kubelet := testKubelet.kubelet
    // Permit host networking for pods sourced from the apiserver or files.
    capabilities.SetForTests(capabilities.Capabilities{
        PrivilegedSources: capabilities.PrivilegedSources{
            HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
        },
    })
    pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
        Containers: []api.Container{
            {Name: "foo"},
        },
        SecurityContext: &api.PodSecurityContext{
            HostNetwork: true,
        },
    })
    pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
    kubelet.podManager.SetPods([]*api.Pod{pod})
    err := kubelet.syncPod(syncPodOptions{
        pod:        pod,
        podStatus:  &kubecontainer.PodStatus{},
        updateType: kubetypes.SyncPodUpdate,
    })
    if err != nil {
        t.Errorf("expected pod infra creation to succeed: %v", err)
    }
}
// TestHostNetworkDisallowed verifies that syncing a host-network pod fails
// when the HostNetworkSources allowlist is empty.
func TestHostNetworkDisallowed(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kubelet := testKubelet.kubelet
    // No source is permitted to use host networking.
    capabilities.SetForTests(capabilities.Capabilities{
        PrivilegedSources: capabilities.PrivilegedSources{
            HostNetworkSources: []string{},
        },
    })
    pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
        Containers: []api.Container{
            {Name: "foo"},
        },
        SecurityContext: &api.PodSecurityContext{
            HostNetwork: true,
        },
    })
    pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
    // NOTE(review): unlike TestHostNetworkAllowed this never calls
    // podManager.SetPods — presumably the rejection happens before the pod
    // manager is consulted; confirm if modifying.
    err := kubelet.syncPod(syncPodOptions{
        pod:        pod,
        podStatus:  &kubecontainer.PodStatus{},
        updateType: kubetypes.SyncPodUpdate,
    })
    if err == nil {
        t.Errorf("expected pod infra creation to fail")
    }
}
// TestPrivilegeContainerAllowed verifies that a pod with a privileged
// container syncs successfully when AllowPrivileged is enabled.
func TestPrivilegeContainerAllowed(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kubelet := testKubelet.kubelet
    capabilities.SetForTests(capabilities.Capabilities{
        AllowPrivileged: true,
    })
    privileged := true
    pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
        Containers: []api.Container{
            {Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
        },
    })
    kubelet.podManager.SetPods([]*api.Pod{pod})
    err := kubelet.syncPod(syncPodOptions{
        pod:        pod,
        podStatus:  &kubecontainer.PodStatus{},
        updateType: kubetypes.SyncPodUpdate,
    })
    if err != nil {
        t.Errorf("expected pod infra creation to succeed: %v", err)
    }
}
// TestPrivilegeContainerDisallowed verifies that syncing a pod with a
// privileged container fails when AllowPrivileged is disabled.
func TestPrivilegeContainerDisallowed(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kubelet := testKubelet.kubelet
    capabilities.SetForTests(capabilities.Capabilities{
        AllowPrivileged: false,
    })
    privileged := true
    pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
        Containers: []api.Container{
            {Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
        },
    })
    // syncPod is expected to reject the pod before it ever runs.
    err := kubelet.syncPod(syncPodOptions{
        pod:        pod,
        podStatus:  &kubecontainer.PodStatus{},
        updateType: kubetypes.SyncPodUpdate,
    })
    if err == nil {
        t.Errorf("expected pod infra creation to fail")
    }
}
// TestFilterOutTerminatedPods verifies that filterOutTerminatedPods drops
// pods in a terminal phase (Failed, Succeeded) while keeping Running,
// Pending, and phase-less pods.
func TestFilterOutTerminatedPods(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    pods := newTestPods(5)
    // Assign phases to the first four pods; the fifth keeps the zero phase.
    phases := []api.PodPhase{api.PodFailed, api.PodSucceeded, api.PodRunning, api.PodPending}
    for i, phase := range phases {
        pods[i].Status.Phase = phase
    }
    // Only the non-terminal pods should survive filtering.
    expected := []*api.Pod{pods[2], pods[3], pods[4]}
    kubelet.podManager.SetPods(pods)
    actual := kubelet.filterOutTerminatedPods(pods)
    if !reflect.DeepEqual(expected, actual) {
        t.Errorf("expected %#v, got %#v", expected, actual)
    }
}
// TestRegisterExistingNodeWithApiserver verifies that node registration
// completes (rather than retrying forever) when the apiserver rejects the
// create with AlreadyExists and a subsequent get returns a matching
// existing node.
func TestRegisterExistingNodeWithApiserver(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    kubeClient := testKubelet.fakeKubeClient
    kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        // Return an error on create.
        return true, &api.Node{}, &apierrors.StatusError{
            ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonAlreadyExists},
        }
    })
    kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        // Return an existing (matching) node on get.
        return true, &api.Node{
            ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
            Spec:       api.NodeSpec{ExternalID: testKubeletHostname},
        }, nil
    })
    // Any other client call is a test failure.
    kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, fmt.Errorf("no reaction implemented for %s", action)
    })
    machineInfo := &cadvisorapi.MachineInfo{
        MachineID:      "123",
        SystemUUID:     "abc",
        BootID:         "1b3",
        NumCores:       2,
        MemoryCapacity: 1024,
    }
    mockCadvisor := testKubelet.fakeCadvisor
    mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
    versionInfo := &cadvisorapi.VersionInfo{
        KernelVersion:      "3.16.0-0.bpo.4-amd64",
        ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
        DockerVersion:      "1.5.0",
    }
    mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
    mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
        Usage:     400 * mb,
        Capacity:  1000 * mb,
        Available: 600 * mb,
    }, nil)
    mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
        Usage:    9 * mb,
        Capacity: 10 * mb,
    }, nil)
    // Registration runs in a goroutine; fail the test if it does not
    // finish within the standard timeout.
    done := make(chan struct{})
    go func() {
        kubelet.registerWithApiserver()
        done <- struct{}{}
    }()
    select {
    case <-time.After(wait.ForeverTestTimeout):
        t.Errorf("timed out waiting for registration")
    case <-done:
        return
    }
}
// TestMakePortMappings verifies that makePortMappings converts a
// container's port declarations into runtime port mappings, generating a
// name of the form "<container>-<proto>:<port>" for unnamed ports and
// dropping ports that would produce a duplicate mapping name.
func TestMakePortMappings(t *testing.T) {
    // port builds an api.ContainerPort literal.
    port := func(name string, protocol api.Protocol, containerPort, hostPort int32, ip string) api.ContainerPort {
        return api.ContainerPort{
            Name:          name,
            Protocol:      protocol,
            ContainerPort: containerPort,
            HostPort:      hostPort,
            HostIP:        ip,
        }
    }
    // portMapping builds the expected kubecontainer.PortMapping literal.
    portMapping := func(name string, protocol api.Protocol, containerPort, hostPort int, ip string) kubecontainer.PortMapping {
        return kubecontainer.PortMapping{
            Name:          name,
            Protocol:      protocol,
            ContainerPort: containerPort,
            HostPort:      hostPort,
            HostIP:        ip,
        }
    }
    tests := []struct {
        container            *api.Container
        expectedPortMappings []kubecontainer.PortMapping
    }{
        {
            &api.Container{
                Name: "fooContainer",
                Ports: []api.ContainerPort{
                    port("", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
                    port("", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
                    port("foo", api.ProtocolUDP, 555, 5555, ""),
                    // Duplicated, should be ignored.
                    port("foo", api.ProtocolUDP, 888, 8888, ""),
                    // Duplicated, should be ignored.
                    port("", api.ProtocolTCP, 80, 8888, ""),
                },
            },
            []kubecontainer.PortMapping{
                portMapping("fooContainer-TCP:80", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
                portMapping("fooContainer-TCP:443", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
                portMapping("fooContainer-foo", api.ProtocolUDP, 555, 5555, ""),
            },
        },
    }
    for i, tt := range tests {
        actual := makePortMappings(tt.container)
        if !reflect.DeepEqual(tt.expectedPortMappings, actual) {
            t.Errorf("%d: Expected: %#v, saw: %#v", i, tt.expectedPortMappings, actual)
        }
    }
}
// TestSyncPodsSetStatusToFailedForPodsThatRunTooLong verifies that a pod
// whose ActiveDeadlineSeconds has already elapsed (started one minute ago
// with a 30s deadline) is marked Failed by the pod workers during sync.
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    fakeRuntime := testKubelet.fakeRuntime
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    kubelet := testKubelet.kubelet
    now := unversioned.Now()
    // Pod started 1 minute ago but is only allowed to run for 30 seconds.
    startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
    exceededActiveDeadlineSeconds := int64(30)
    pods := []*api.Pod{
        {
            ObjectMeta: api.ObjectMeta{
                UID:       "12345678",
                Name:      "bar",
                Namespace: "new",
            },
            Spec: api.PodSpec{
                Containers: []api.Container{
                    {Name: "foo"},
                },
                ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
            },
            Status: api.PodStatus{
                StartTime: &startTime,
            },
        },
    }
    fakeRuntime.PodList = []*containertest.FakePod{
        {Pod: &kubecontainer.Pod{
            ID:        "12345678",
            Name:      "bar",
            Namespace: "new",
            Containers: []*kubecontainer.Container{
                {Name: "foo"},
            },
        }},
    }
    // Let the pod workers set the status to failed after this sync.
    kubelet.HandlePodUpdates(pods)
    status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
    if !found {
        t.Errorf("expected to find status for pod %q", pods[0].UID)
    }
    if status.Phase != api.PodFailed {
        t.Fatalf("expected pod status %q, got %q.", api.PodFailed, status.Phase)
    }
}
// TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed verifies that a
// pod whose ActiveDeadlineSeconds has NOT yet elapsed (started one minute
// ago with a 300s deadline) is not marked Failed during sync.
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    fakeRuntime := testKubelet.fakeRuntime
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kubelet := testKubelet.kubelet
    now := unversioned.Now()
    // Pod started 1 minute ago and may run for 300 seconds, so the
    // deadline has not been reached. (Renamed from the copy-pasted
    // "exceededActiveDeadlineSeconds", which was misleading here.)
    startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
    notYetExceededActiveDeadlineSeconds := int64(300)
    pods := []*api.Pod{
        {
            ObjectMeta: api.ObjectMeta{
                UID:       "12345678",
                Name:      "bar",
                Namespace: "new",
            },
            Spec: api.PodSpec{
                Containers: []api.Container{
                    {Name: "foo"},
                },
                ActiveDeadlineSeconds: &notYetExceededActiveDeadlineSeconds,
            },
            Status: api.PodStatus{
                StartTime: &startTime,
            },
        },
    }
    fakeRuntime.PodList = []*containertest.FakePod{
        {Pod: &kubecontainer.Pod{
            ID:        "12345678",
            Name:      "bar",
            Namespace: "new",
            Containers: []*kubecontainer.Container{
                {Name: "foo"},
            },
        }},
    }
    kubelet.podManager.SetPods(pods)
    kubelet.HandlePodUpdates(pods)
    status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
    if !found {
        t.Errorf("expected to find status for pod %q", pods[0].UID)
    }
    if status.Phase == api.PodFailed {
        t.Fatalf("expected pod status to not be %q", status.Phase)
    }
}
// podWithUidNameNs builds a minimal *api.Pod carrying only identity
// metadata (UID, name, namespace) plus an empty, non-nil annotation map so
// callers can add annotations without a nil check.
func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod {
    meta := api.ObjectMeta{
        UID:         uid,
        Name:        name,
        Namespace:   namespace,
        Annotations: make(map[string]string),
    }
    return &api.Pod{ObjectMeta: meta}
}
// podWithUidNameNsSpec builds a minimal *api.Pod with identity metadata
// (via podWithUidNameNs) and the supplied spec attached.
func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpec) *api.Pod {
    p := podWithUidNameNs(uid, name, namespace)
    p.Spec = spec
    return p
}
// TestDeletePodDirsForDeletedPods verifies that pod cleanup removes the
// on-disk pod directory of a pod that no longer exists while leaving the
// directories of still-present pods intact.
func TestDeletePodDirsForDeletedPods(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kl := testKubelet.kubelet
    pods := []*api.Pod{
        podWithUidNameNs("12345678", "pod1", "ns"),
        podWithUidNameNs("12345679", "pod2", "ns"),
    }
    kl.podManager.SetPods(pods)
    // Sync to create pod directories.
    kl.HandlePodSyncs(kl.podManager.GetPods())
    for i := range pods {
        if !dirExists(kl.getPodDir(pods[i].UID)) {
            t.Errorf("expected directory to exist for pod %d", i)
        }
    }
    // Pod 1 has been deleted and no longer exists.
    kl.podManager.SetPods([]*api.Pod{pods[0]})
    kl.HandlePodCleanups()
    if !dirExists(kl.getPodDir(pods[0].UID)) {
        t.Errorf("expected directory to exist for pod 0")
    }
    if dirExists(kl.getPodDir(pods[1].UID)) {
        t.Errorf("expected directory to be deleted for pod 1")
    }
}
// syncAndVerifyPodDir syncs the given pods through the kubelet (set, sync,
// cleanup) and then checks that the on-disk pod directory for each pod in
// podsToCheck exists (shouldExist == true) or has been removed
// (shouldExist == false).
func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) {
    kl := testKubelet.kubelet
    kl.podManager.SetPods(pods)
    kl.HandlePodSyncs(pods)
    kl.HandlePodCleanups()
    for i, pod := range podsToCheck {
        switch exists := dirExists(kl.getPodDir(pod.UID)); {
        case shouldExist && !exists:
            t.Errorf("expected directory to exist for pod %d", i)
        case !shouldExist && exists:
            t.Errorf("expected directory to be removed for pod %d", i)
        }
    }
}
// TestDoesNotDeletePodDirsForTerminatedPods verifies that pod cleanup
// keeps the on-disk directories of pods that are terminated (Failed or
// Succeeded) but still known to the pod manager.
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kl := testKubelet.kubelet
    pods := []*api.Pod{
        podWithUidNameNs("12345678", "pod1", "ns"),
        podWithUidNameNs("12345679", "pod2", "ns"),
        podWithUidNameNs("12345680", "pod3", "ns"),
    }
    syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
    // Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
    // deleted.
    kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed})
    kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded})
    syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
}
// TestDoesNotDeletePodDirsIfContainerIsRunning verifies that the pod
// directory of a pod removed from the apiserver is preserved while the
// pod is still running on the node, and only removed once the pod is gone
// from the runtime as well.
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("Start").Return(nil)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    runningPod := &kubecontainer.Pod{
        ID:        "12345678",
        Name:      "pod1",
        Namespace: "ns",
    }
    apiPod := podWithUidNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
    // Sync once to create pod directory; confirm that the pod directory has
    // already been created.
    pods := []*api.Pod{apiPod}
    syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
    // Pretend the pod is deleted from apiserver, but is still active on the node.
    // The pod directory should not be removed.
    pods = []*api.Pod{}
    testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{runningPod, ""}}
    syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
    // The pod is deleted and also not active on the node. The pod directory
    // should be removed.
    pods = []*api.Pod{}
    testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
    syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false)
}
// TestGetPodsToSync verifies that getPodsToSync returns the union of pods
// whose ActiveDeadlineSeconds has elapsed (pods 0 and 2, 30s deadline
// after a 1-minute clock step) and pods whose work-queue delay has expired
// (pod 3, enqueued with 30s delay); pod 1's 120s deadline and pod 4's
// 2-minute queue delay have not elapsed.
func TestGetPodsToSync(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    clock := testKubelet.fakeClock
    pods := newTestPods(5)
    exceededActiveDeadlineSeconds := int64(30)
    notYetActiveDeadlineSeconds := int64(120)
    startTime := unversioned.NewTime(clock.Now())
    pods[0].Status.StartTime = &startTime
    pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
    pods[1].Status.StartTime = &startTime
    pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
    pods[2].Status.StartTime = &startTime
    pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
    kubelet.podManager.SetPods(pods)
    kubelet.workQueue.Enqueue(pods[2].UID, 0)
    kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
    kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)
    // Advance the clock so pods 0/2 exceed their deadline and pod 3's
    // queue delay expires.
    clock.Step(1 * time.Minute)
    expectedPods := []*api.Pod{pods[0], pods[2], pods[3]}
    podsToSync := kubelet.getPodsToSync()
    // Order is not guaranteed, so compare as sets keyed by UID.
    if len(podsToSync) == len(expectedPods) {
        for _, expect := range expectedPods {
            var found bool
            for _, got := range podsToSync {
                if expect.UID == got.UID {
                    found = true
                    break
                }
            }
            if !found {
                t.Errorf("expected pod not found: %+v", expect)
            }
        }
    } else {
        t.Errorf("expected %d pods to sync, got %d", len(expectedPods), len(podsToSync))
    }
}
// TestGenerateAPIPodStatusWithSortedContainers verifies that
// generateAPIPodStatus returns container statuses in a deterministic
// sorted-by-name order regardless of the order of the incoming runtime
// statuses, across repeated invocations.
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
    testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
    testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
    kubelet := testKubelet.kubelet
    numContainers := 10
    expectedOrder := []string{}
    cStatuses := []*kubecontainer.ContainerStatus{}
    specContainerList := []api.Container{}
    for i := 0; i < numContainers; i++ {
        id := fmt.Sprintf("%v", i)
        containerName := fmt.Sprintf("%vcontainer", id)
        expectedOrder = append(expectedOrder, containerName)
        cStatus := &kubecontainer.ContainerStatus{
            ID:   kubecontainer.BuildContainerID("test", id),
            Name: containerName,
        }
        // Rearrange container statuses: even indices append, odd indices
        // prepend, so the input order is deliberately scrambled.
        if i%2 == 0 {
            cStatuses = append(cStatuses, cStatus)
        } else {
            cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
        }
        specContainerList = append(specContainerList, api.Container{Name: containerName})
    }
    pod := podWithUidNameNs("uid1", "foo", "test")
    pod.Spec = api.PodSpec{
        Containers: specContainerList,
    }
    status := &kubecontainer.PodStatus{
        ID:                pod.UID,
        Name:              pod.Name,
        Namespace:         pod.Namespace,
        ContainerStatuses: cStatuses,
    }
    // Repeat to confirm the ordering is stable across calls.
    for i := 0; i < 5; i++ {
        apiStatus := kubelet.generateAPIPodStatus(pod, status)
        for i, c := range apiStatus.ContainerStatuses {
            if expectedOrder[i] != c.Name {
                t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
            }
        }
    }
}
// verifyContainerStatuses checks every entry in statuses against the expected
// State and LastTerminationState maps (keyed by container name) and returns an
// error describing the first mismatch, or nil if everything matches.
func verifyContainerStatuses(statuses []api.ContainerStatus, state, lastTerminationState map[string]api.ContainerState) error {
	for _, status := range statuses {
		expectedState := state[status.Name]
		if !reflect.DeepEqual(status.State, expectedState) {
			return fmt.Errorf("unexpected state: %s", diff.ObjectDiff(expectedState, status.State))
		}
		expectedLast := lastTerminationState[status.Name]
		if !reflect.DeepEqual(status.LastTerminationState, expectedLast) {
			return fmt.Errorf("unexpected last termination state %s", diff.ObjectDiff(
				expectedLast, status.LastTerminationState))
		}
	}
	return nil
}
// Test generateAPIPodStatus with different reason cache and old api pod status.
// Each table entry seeds the reason cache, the old API statuses, and the
// runtime statuses, then checks the State/LastTerminationState produced for
// both regular containers and init containers.
func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
	// The following waiting reason and message are generated in convertStatusToAPIStatus()
	startWaitingReason := "ContainerCreating"
	initWaitingReason := "PodInitializing"
	testTimestamp := time.Unix(123456789, 987654321)
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	// Stub out the cadvisor calls made while generating the pod status.
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	pod := podWithUidNameNs("12345678", "foo", "new")
	pod.Spec = api.PodSpec{RestartPolicy: api.RestartPolicyOnFailure}
	podStatus := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	tests := []struct {
		containers    []api.Container
		statuses      []*kubecontainer.ContainerStatus
		reasons       map[string]error
		oldStatuses   []api.ContainerStatus
		expectedState map[string]api.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState            map[string]api.ContainerState
		expectedLastTerminationState map[string]api.ContainerState
	}{
		// For container with no historical record, State should be Waiting, LastTerminationState should be retrieved from
		// old status from apiserver.
		{
			containers: []api.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
			statuses:   []*kubecontainer.ContainerStatus{},
			reasons:    map[string]error{},
			oldStatuses: []api.ContainerStatus{{
				Name:                 "with-old-record",
				LastTerminationState: api.ContainerState{Terminated: &api.ContainerStateTerminated{}},
			}},
			expectedState: map[string]api.ContainerState{
				"without-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
				"with-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
			},
			expectedInitState: map[string]api.ContainerState{
				"without-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
				"with-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"with-old-record": {Terminated: &api.ContainerStateTerminated{}},
			},
		},
		// For running container, State should be Running, LastTerminationState should be retrieved from latest terminated status.
		{
			containers: []api.Container{{Name: "running"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name:      "running",
					State:     kubecontainer.ContainerStateRunning,
					StartedAt: testTimestamp,
				},
				{
					Name:     "running",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
			},
			reasons:     map[string]error{},
			oldStatuses: []api.ContainerStatus{},
			expectedState: map[string]api.ContainerState{
				"running": {Running: &api.ContainerStateRunning{
					StartedAt: unversioned.NewTime(testTimestamp),
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"running": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		// For terminated container:
		// * If there is no recent start error record, State should be Terminated, LastTerminationState should be retrieved from
		// second latest terminated status;
		// * If there is recent start error record, State should be Waiting, LastTerminationState should be retrieved from latest
		// terminated status;
		// * If ExitCode = 0, restart policy is RestartPolicyOnFailure, the container shouldn't be restarted. No matter there is
		// recent start error or not, State should be Terminated, LastTerminationState should be retrieved from second latest
		// terminated status.
		{
			containers: []api.Container{{Name: "without-reason"}, {Name: "with-reason"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name:     "without-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
				{
					Name:     "with-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 2,
				},
				{
					Name:     "without-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 3,
				},
				{
					Name:     "with-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 4,
				},
				{
					Name:     "succeed",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 0,
				},
				{
					Name:     "succeed",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 5,
				},
			},
			reasons:     map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
			oldStatuses: []api.ContainerStatus{},
			expectedState: map[string]api.ContainerState{
				"without-reason": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"without-reason": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    3,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    5,
					ContainerID: emptyContainerID,
				}},
			},
		},
	}
	// Run each case against the regular containers of the pod.
	for i, test := range tests {
		// Reset the reason cache and seed it with this case's start errors.
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.Containers = test.containers
		pod.Status.ContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		assert.NoError(t, verifyContainerStatuses(apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState), "case %d", i)
	}
	// Everything should be the same for init containers
	for i, test := range tests {
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.InitContainers = test.containers
		pod.Status.InitContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState := test.expectedState
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		assert.NoError(t, verifyContainerStatuses(apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState), "case %d", i)
	}
}
// Test generateAPIPodStatus with different restart policies.
// A pod with a "succeed" (exit 0) and a "failed" (exit non-zero) container is
// checked under Never/OnFailure/Always, for both regular and init containers.
func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	// Stub out the cadvisor calls made while generating the pod status.
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	pod := podWithUidNameNs("12345678", "foo", "new")
	containers := []api.Container{{Name: "succeed"}, {Name: "failed"}}
	// Each container has a latest terminated status plus an older one, so both
	// State and LastTerminationState can be derived from the runtime status.
	podStatus := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:     "succeed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 0,
			},
			{
				Name:     "failed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 1,
			},
			{
				Name:     "succeed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 2,
			},
			{
				Name:     "failed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 3,
			},
		},
	}
	// Seed the reason cache with start errors for both containers.
	kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
	kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
	for c, test := range []struct {
		restartPolicy                api.RestartPolicy
		expectedState                map[string]api.ContainerState
		expectedLastTerminationState map[string]api.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState map[string]api.ContainerState
		// Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
		expectedInitLastTerminationState map[string]api.ContainerState
	}{
		{
			restartPolicy: api.RestartPolicyNever,
			expectedState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    3,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: api.RestartPolicyOnFailure,
			expectedState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: api.RestartPolicyAlways,
			expectedState: map[string]api.ContainerState{
				"succeed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"failed":  {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
			// If the init container is terminated with exit code 0, it won't be restarted even when the
			// restart policy is RestartAlways.
			expectedInitState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedInitLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
	} {
		pod.Spec.RestartPolicy = test.restartPolicy
		// Test normal containers
		pod.Spec.Containers = containers
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
		assert.NoError(t, verifyContainerStatuses(apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState), "case %d", c)
		pod.Spec.Containers = nil
		// Test init containers
		pod.Spec.InitContainers = containers
		apiStatus = kubelet.generateAPIPodStatus(pod, podStatus)
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		if test.expectedInitLastTerminationState != nil {
			expectedLastTerminationState = test.expectedInitLastTerminationState
		}
		assert.NoError(t, verifyContainerStatuses(apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState), "case %d", c)
		pod.Spec.InitContainers = nil
	}
}
// testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
type testPodAdmitHandler struct {
	// list of pods to reject; Admit matches against these by UID.
	podsToReject []*api.Pod
}
// Admit rejects all pods in the podsToReject list with a matching UID.
func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
	for _, rejected := range a.podsToReject {
		if rejected.UID != attrs.Pod.UID {
			continue
		}
		return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
	}
	return lifecycle.PodAdmitResult{Admit: true}
}
// Test verifies that the kubelet invokes an admission handler during HandlePodAdditions.
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	// Provide a node with plenty of pod capacity so the admission decision is
	// made solely by the registered admit handler, not by resource limits.
	kl.nodeLister = testNodeLister{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	// Stub out the cadvisor calls made while handling the additions.
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "123456789",
				Name:      "podA",
				Namespace: "foo",
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "987654321",
				Name:      "podB",
				Namespace: "foo",
			},
		},
	}
	podToReject := pods[0]
	podToAdmit := pods[1]
	podsToReject := []*api.Pod{podToReject}
	kl.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})
	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// podToReject should be Failed
	status, found := kl.statusManager.GetPodStatus(podToReject.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", podToReject.UID)
	}
	if status.Phase != api.PodFailed {
		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
	}
	// podToAdmit should be Pending
	status, found = kl.statusManager.GetPodStatus(podToAdmit.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", podToAdmit.UID)
	}
	if status.Phase != api.PodPending {
		t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
	}
}
// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
type testPodSyncLoopHandler struct {
	// list of pods to sync; ShouldSync matches against these by UID.
	podsToSync []*api.Pod
}
// ShouldSync evaluates if the pod should be synced from the kubelet.
func (a *testPodSyncLoopHandler) ShouldSync(pod *api.Pod) bool {
	for _, candidate := range a.podsToSync {
		if candidate.UID != pod.UID {
			continue
		}
		return true
	}
	return false
}
// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler.
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	// Register a sync-loop handler that selects only the first pod.
	podsToSync := []*api.Pod{pods[0]}
	kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{podsToSync})
	kubelet.podManager.SetPods(pods)
	expectedPodsUID := []types.UID{pods[0].UID}
	podsToSync = kubelet.getPodsToSync()
	if len(podsToSync) == len(expectedPodsUID) {
		// Count how many expected UIDs were actually returned.
		var rightNum int
		for _, podUID := range expectedPodsUID {
			for _, podToSync := range podsToSync {
				if podToSync.UID == podUID {
					rightNum++
					break
				}
			}
		}
		if rightNum != len(expectedPodsUID) {
			// Just for report error
			podsToSyncUID := []types.UID{}
			for _, podToSync := range podsToSync {
				podsToSyncUID = append(podsToSyncUID, podToSync.UID)
			}
			t.Errorf("expected pods %v to sync, got %v", expectedPodsUID, podsToSyncUID)
		}
	} else {
		// Fix: the original message hard-coded "3" as the expected count even
		// though only len(expectedPodsUID) pods are expected to sync.
		t.Errorf("expected %d pods to sync, got %d", len(expectedPodsUID), len(podsToSync))
	}
}
// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
type testPodSyncHandler struct {
	// list of pods to evict; ShouldEvict matches against these by UID.
	podsToEvict []*api.Pod
	// the reason for the eviction
	reason string
	// the message for the eviction
	message string
}
// ShouldEvict evaluates if the pod should be evicted from the kubelet.
func (a *testPodSyncHandler) ShouldEvict(pod *api.Pod) lifecycle.ShouldEvictResponse {
	for _, candidate := range a.podsToEvict {
		if candidate.UID != pod.UID {
			continue
		}
		return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
	}
	return lifecycle.ShouldEvictResponse{Evict: false}
}
// TestGenerateAPIPodStatusInvokesPodSyncHandlers invokes the handlers and reports the proper status
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	pod := newTestPods(1)[0]
	// Register a sync handler that evicts this pod with a fixed reason/message.
	podsToEvict := []*api.Pod{pod}
	kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
	status := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	apiStatus := kubelet.generateAPIPodStatus(pod, status)
	// The generated status must reflect the eviction decision made by the handler.
	if apiStatus.Phase != api.PodFailed {
		t.Fatalf("Expected phase %v, but got %v", api.PodFailed, apiStatus.Phase)
	}
	if apiStatus.Reason != "Evicted" {
		t.Fatalf("Expected reason %v, but got %v", "Evicted", apiStatus.Reason)
	}
	if apiStatus.Message != "because" {
		t.Fatalf("Expected message %v, but got %v", "because", apiStatus.Message)
	}
}
// TestSyncPodKillPod verifies that a SyncPodKill update kills the pod and
// records the status produced by the provided PodStatusFunc in the status map.
func TestSyncPodKillPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "foo",
		},
	}
	pods := []*api.Pod{pod}
	kl.podManager.SetPods(pods)
	// Kill immediately: zero grace period.
	gracePeriodOverride := int64(0)
	err := kl.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodKill,
		killPodOptions: &KillPodOptions{
			PodStatusFunc: func(p *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus {
				return api.PodStatus{
					Phase:   api.PodFailed,
					Reason:  "reason",
					Message: "message",
				}
			},
			PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride,
		},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// Check pod status stored in the status map.
	status, found := kl.statusManager.GetPodStatus(pod.UID)
	if !found {
		t.Fatalf("status of pod %q is not found in the status map", pod.UID)
	}
	if status.Phase != api.PodFailed {
		t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
	}
}
// waitForVolumeUnmount polls (with exponential backoff) until the volume
// manager reports no mounted volumes for the given pod, returning an error
// listing the still-mounted volumes if the backoff is exhausted.
func waitForVolumeUnmount(
	volumeManager kubeletvolume.VolumeManager,
	pod *api.Pod) error {
	var podVolumes kubecontainer.VolumeMap
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify all of the pod's volumes have been unmounted.
			podVolumes = volumeManager.GetMountedVolumesForPod(
				volumehelper.GetUniquePodName(pod))
			return len(podVolumes) == 0, nil
		},
	)
	if err != nil {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return fmt.Errorf(
			"expected volumes to be unmounted, but some volumes are still mounted: %#v", podVolumes)
	}
	return nil
}
// waitForVolumeDetach polls (with exponential backoff) until the volume
// manager no longer reports the given volume as attached, returning an error
// naming the volume if the backoff is exhausted.
func waitForVolumeDetach(
	volumeName api.UniqueVolumeName,
	volumeManager kubeletvolume.VolumeManager) error {
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify the volume is no longer reported as attached.
			return !volumeManager.VolumeIsAttached(volumeName), nil
		},
	)
	if err != nil {
		// Fix: the original declared an attachedVolumes slice that was never
		// populated and always printed as "[]", which made the error useless.
		// Report the volume that failed to detach instead.
		return fmt.Errorf(
			"expected volume %q to be detached, but it is still attached", volumeName)
	}
	return nil
}
// retryWithExponentialBackOff repeatedly invokes fn until it returns true,
// returns an error, or the backoff is exhausted (6 steps, factor 3, no
// jitter, starting at initialDuration).
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration,
		Factor:   3,
		Jitter:   0,
		Steps:    6,
	}
	return wait.ExponentialBackoff(backoff, fn)
}
// simulateVolumeInUseUpdate marks the given volume as "reported in use" on the
// volume manager every 100ms until stopCh is closed. Tests run this in a
// goroutine to stand in for the node status updater.
func simulateVolumeInUseUpdate(
	volumeName api.UniqueVolumeName,
	stopCh <-chan struct{},
	volumeManager kubeletvolume.VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse(
				[]api.UniqueVolumeName{volumeName})
		case <-stopCh:
			// Caller signalled shutdown.
			return
		}
	}
}
| fgrzadkowski/kubernetes | pkg/kubelet/kubelet_test.go | GO | apache-2.0 | 151,958 |
#ifndef BOOST_MPL_AUX_PREPROCESSOR_EXT_PARAMS_HPP_INCLUDED
#define BOOST_MPL_AUX_PREPROCESSOR_EXT_PARAMS_HPP_INCLUDED

// Copyright Aleksey Gurtovoy 2000-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.

// $Id: ext_params.hpp,v 1.3 2009/02/22 01:02:38 wdong-pku Exp $
// $Date: 2009/02/22 01:02:38 $
// $Revision: 1.3 $

#include <boost/mpl/aux_/config/preprocessor.hpp>

// BOOST_MPL_PP_EXT_PARAMS(i,j,p) expands to the comma-separated sub-list
// p##i, p##i+1, .., p##j-1, for example:
// BOOST_MPL_PP_EXT_PARAMS(2,2,T): <nothing>
// BOOST_MPL_PP_EXT_PARAMS(2,3,T): T2
// BOOST_MPL_PP_EXT_PARAMS(2,4,T): T2, T3
// BOOST_MPL_PP_EXT_PARAMS(2,n,T): T2, T3, .., Tn-1

#if !defined(BOOST_MPL_CFG_NO_OWN_PP_PRIMITIVES)

// Fast path: MPL's own hand-rolled preprocessor primitives (limited to
// at most 9 parameters).
#   include <boost/mpl/aux_/preprocessor/filter_params.hpp>
#   include <boost/mpl/aux_/preprocessor/sub.hpp>

#   define BOOST_MPL_PP_EXT_PARAMS(i,j,p) \
    BOOST_MPL_PP_EXT_PARAMS_DELAY_1(i,BOOST_MPL_PP_SUB(j,i),p) \
    /**/

// The two DELAY levels force an extra macro-expansion pass so that the
// i and n arguments are fully evaluated before the ## concatenation below.
#   define BOOST_MPL_PP_EXT_PARAMS_DELAY_1(i,n,p) \
    BOOST_MPL_PP_EXT_PARAMS_DELAY_2(i,n,p) \
    /**/

#   define BOOST_MPL_PP_EXT_PARAMS_DELAY_2(i,n,p) \
    BOOST_MPL_PP_EXT_PARAMS_##i(n,p) \
    /**/

// BOOST_MPL_PP_EXT_PARAMS_k rotates the 9-element parameter list so it
// starts at p##k, then keeps the first i entries via FILTER_PARAMS_i.
#   define BOOST_MPL_PP_EXT_PARAMS_1(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##1,p##2,p##3,p##4,p##5,p##6,p##7,p##8,p##9)
#   define BOOST_MPL_PP_EXT_PARAMS_2(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##2,p##3,p##4,p##5,p##6,p##7,p##8,p##9,p1)
#   define BOOST_MPL_PP_EXT_PARAMS_3(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##3,p##4,p##5,p##6,p##7,p##8,p##9,p1,p2)
#   define BOOST_MPL_PP_EXT_PARAMS_4(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##4,p##5,p##6,p##7,p##8,p##9,p1,p2,p3)
#   define BOOST_MPL_PP_EXT_PARAMS_5(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##5,p##6,p##7,p##8,p##9,p1,p2,p3,p4)
#   define BOOST_MPL_PP_EXT_PARAMS_6(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##6,p##7,p##8,p##9,p1,p2,p3,p4,p5)
#   define BOOST_MPL_PP_EXT_PARAMS_7(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##7,p##8,p##9,p1,p2,p3,p4,p5,p6)
#   define BOOST_MPL_PP_EXT_PARAMS_8(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##8,p##9,p1,p2,p3,p4,p5,p6,p7)
#   define BOOST_MPL_PP_EXT_PARAMS_9(i,p) BOOST_MPL_PP_FILTER_PARAMS_##i(p##9,p1,p2,p3,p4,p5,p6,p7,p8)

#else

// Portable fallback: build the list with Boost.Preprocessor repetition.
#   include <boost/preprocessor/arithmetic/add.hpp>
#   include <boost/preprocessor/arithmetic/sub.hpp>
#   include <boost/preprocessor/comma_if.hpp>
#   include <boost/preprocessor/repeat.hpp>
#   include <boost/preprocessor/tuple/elem.hpp>
#   include <boost/preprocessor/cat.hpp>

// Emits one element p##(base+i) of the list; op is the (base, prefix) tuple.
#   define BOOST_MPL_PP_AUX_EXT_PARAM_FUNC(unused, i, op) \
    BOOST_PP_COMMA_IF(i) \
    BOOST_PP_CAT( \
          BOOST_PP_TUPLE_ELEM(2,1,op) \
        , BOOST_PP_ADD_D(1, i, BOOST_PP_TUPLE_ELEM(2,0,op)) \
        ) \
    /**/

#   define BOOST_MPL_PP_EXT_PARAMS(i, j, param) \
    BOOST_PP_REPEAT( \
          BOOST_PP_SUB_D(1,j,i) \
        , BOOST_MPL_PP_AUX_EXT_PARAM_FUNC \
        , (i,param) \
        ) \
    /**/

#endif

#endif // BOOST_MPL_AUX_PREPROCESSOR_EXT_PARAMS_HPP_INCLUDED
| BinSigma/BinClone | lshkit/trunk/3rd-party/boost/boost/mpl/aux_/preprocessor/ext_params.hpp | C++ | apache-2.0 | 3,002 |
package io.dropwizard.testing.junit5;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.dropwizard.Application;
import io.dropwizard.Configuration;
import io.dropwizard.cli.Command;
import io.dropwizard.cli.ServerCommand;
import io.dropwizard.configuration.ConfigurationSourceProvider;
import io.dropwizard.jersey.jackson.JacksonFeature;
import io.dropwizard.lifecycle.Managed;
import io.dropwizard.setup.Environment;
import io.dropwizard.testing.ConfigOverride;
import io.dropwizard.testing.DropwizardTestSupport;
import org.glassfish.jersey.client.ClientProperties;
import org.glassfish.jersey.client.HttpUrlConnectorProvider;
import org.glassfish.jersey.client.JerseyClientBuilder;
import javax.annotation.Nullable;
import javax.ws.rs.client.Client;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import org.junit.jupiter.api.extension.AfterAllCallback;
import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.ExtensionContext;
//@formatter:off
/**
* An extension for starting and stopping your application at the start and end of a test class.
* <p>
* By default, the {@link Application} will be constructed using reflection to invoke the nullary
* constructor. If your application does not provide a public nullary constructor, you will need to
* override the {@link #newApplication()} method to provide your application instance(s).
* </p>
*
* @param <C> the configuration type
*/
//@formatter:on
public class DropwizardAppExtension<C extends Configuration> implements DropwizardExtension,
BeforeAllCallback, AfterAllCallback {
private static final int DEFAULT_CONNECT_TIMEOUT_MS = 1000;
private static final int DEFAULT_READ_TIMEOUT_MS = 5000;
private final DropwizardTestSupport<C> testSupport;
private final AtomicInteger recursiveCallCount = new AtomicInteger(0);
@Nullable
private Client client;
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass) {
this(applicationClass, (String) null);
}
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
ConfigOverride... configOverrides) {
this(applicationClass, configPath, (String) null, configOverrides);
}
/**
* @since 2.0
*/
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
ConfigurationSourceProvider configSourceProvider,
ConfigOverride... configOverrides) {
this(applicationClass, configPath, configSourceProvider, null, configOverrides);
}
/**
* @deprecated Use {@link #DropwizardAppExtension(Class, String, String, ConfigOverride...)} instead.
*/
@Deprecated
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
Optional<String> customPropertyPrefix,
ConfigOverride... configOverrides) {
this(applicationClass, configPath, customPropertyPrefix.orElse(null), configOverrides);
}
/**
* @since 2.0
*/
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
@Nullable String customPropertyPrefix,
ConfigOverride... configOverrides) {
this(applicationClass, configPath, customPropertyPrefix, ServerCommand::new, configOverrides);
}
/**
* @since 2.0
*/
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
ConfigurationSourceProvider configSourceProvider,
@Nullable String customPropertyPrefix,
ConfigOverride... configOverrides) {
this(applicationClass, configPath, configSourceProvider, customPropertyPrefix, ServerCommand::new, configOverrides);
}
/**
* @deprecated Use {@link #DropwizardAppExtension(Class, String, String, Function, ConfigOverride...)} instead.
*/
@Deprecated
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
Optional<String> customPropertyPrefix,
Function<Application<C>, Command> commandInstantiator,
ConfigOverride... configOverrides) {
this(applicationClass, configPath, customPropertyPrefix.orElse(null), commandInstantiator, configOverrides);
}
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
@Nullable String customPropertyPrefix,
Function<Application<C>, Command> commandInstantiator,
ConfigOverride... configOverrides) {
this(new DropwizardTestSupport<>(applicationClass, configPath, customPropertyPrefix, commandInstantiator, configOverrides));
}
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
@Nullable String configPath,
ConfigurationSourceProvider configSourceProvider,
@Nullable String customPropertyPrefix,
Function<Application<C>, Command> commandInstantiator,
ConfigOverride... configOverrides) {
this(new DropwizardTestSupport<>(applicationClass, configPath, configSourceProvider, customPropertyPrefix, commandInstantiator, configOverrides));
}
/**
* Alternate constructor that allows specifying exact Configuration object to
* use, instead of reading a resource and binding it as Configuration object.
*
* @since 0.9
*/
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
C configuration) {
this(new DropwizardTestSupport<>(applicationClass, configuration));
}
/**
* Alternate constructor that allows specifying the command the Dropwizard application is started with.
*
* @since 1.1.0
*/
public DropwizardAppExtension(Class<? extends Application<C>> applicationClass,
C configuration, Function<Application<C>, Command> commandInstantiator) {
this(new DropwizardTestSupport<>(applicationClass, configuration, commandInstantiator));
}
public DropwizardAppExtension(DropwizardTestSupport<C> testSupport) {
this.testSupport = testSupport;
}
public DropwizardAppExtension<C> addListener(final ServiceListener<C> listener) {
this.testSupport.addListener(new DropwizardTestSupport.ServiceListener<C>() {
@Override
public void onRun(C configuration, Environment environment, DropwizardTestSupport<C> rule) throws Exception {
listener.onRun(configuration, environment, DropwizardAppExtension.this);
}
@Override
public void onStop(DropwizardTestSupport<C> rule) throws Exception {
listener.onStop(DropwizardAppExtension.this);
}
});
return this;
}
public DropwizardAppExtension<C> manage(final Managed managed) {
return addListener(new ServiceListener<C>() {
@Override
public void onRun(C configuration, Environment environment, DropwizardAppExtension<C> rule) throws Exception {
environment.lifecycle().manage(managed);
}
});
}
@Override
public void beforeAll(ExtensionContext extensionContext) throws Exception {
this.before();
}
@Override
public void afterAll(ExtensionContext extensionContext) {
this.after();
}
@Override
public void before() throws Exception {
if (recursiveCallCount.getAndIncrement() == 0) {
testSupport.before();
}
}
@Override
public void after() {
if (recursiveCallCount.decrementAndGet() == 0) {
testSupport.after();
synchronized (this) {
if (client != null) {
client.close();
client = null;
}
}
}
}
public C getConfiguration() {
return testSupport.getConfiguration();
}
public int getLocalPort() {
return testSupport.getLocalPort();
}
public int getPort(int connectorIndex) {
return testSupport.getPort(connectorIndex);
}
public int getAdminPort() {
return testSupport.getAdminPort();
}
public Application<C> newApplication() {
return testSupport.newApplication();
}
@SuppressWarnings({"TypeParameterUnusedInFormals"})
public <A extends Application<C>> A getApplication() {
return testSupport.getApplication();
}
public Environment getEnvironment() {
return testSupport.getEnvironment();
}
public ObjectMapper getObjectMapper() {
return testSupport.getObjectMapper();
}
/**
 * Lifecycle callbacks around the application under test. Subclass and override
 * only the hooks you need; both defaults are no-ops.
 */
public abstract static class ServiceListener<T extends Configuration> {
/** Invoked after the application has started. */
public void onRun(T configuration, Environment environment, DropwizardAppExtension<T> rule) throws Exception {
// Default NOP
}
/** Invoked after the application has stopped. */
public void onStop(DropwizardAppExtension<T> rule) throws Exception {
// Default NOP
}
}
/** @return the underlying {@link DropwizardTestSupport} this extension wraps. */
public DropwizardTestSupport<C> getTestSupport() {
return testSupport;
}
/**
 * Returns an HTTP Jersey {@link Client} for performing HTTP requests against the
 * tested Dropwizard server. The instance is created lazily, reused across tests,
 * and closed automatically along with the server. Customize it by overriding
 * {@link #clientBuilder()}.
 *
 * @return the shared {@link Client} managed by the extension.
 */
public Client client() {
    synchronized (this) {
        Client existing = client;
        if (existing == null) {
            existing = clientBuilder().build();
            client = existing;
        }
        return existing;
    }
}
/**
 * Builds the Jersey client used by {@link #client()}: JSON support wired to the
 * application's ObjectMapper, default connect/read timeouts, and the
 * HttpUrlConnector method workaround enabled.
 */
protected JerseyClientBuilder clientBuilder() {
    final JerseyClientBuilder builder = new JerseyClientBuilder();
    builder.register(new JacksonFeature(getObjectMapper()));
    builder.property(ClientProperties.CONNECT_TIMEOUT, DEFAULT_CONNECT_TIMEOUT_MS);
    builder.property(ClientProperties.READ_TIMEOUT, DEFAULT_READ_TIMEOUT_MS);
    builder.property(HttpUrlConnectorProvider.SET_METHOD_WORKAROUND, true);
    return builder;
}
}
| dropwizard/dropwizard | dropwizard-testing/src/main/java/io/dropwizard/testing/junit5/DropwizardAppExtension.java | Java | apache-2.0 | 11,112 |
'use strict';

// Test-runner setup: stdlib modules, the async control-flow helper, and
// cli-color styles for the pass/fail output.
const fs = require('fs');
const path = require('path');
const async = require('async');
const spawn = require('child_process').spawn;
const clc = require('cli-color');

// Summary styles (all*) vs per-test styles; `passed` is the identity so
// individual "ok" lines stay uncolored.
const allFailed = clc.red.bold;
const allPassed = clc.green;
const failed = clc.yellow;
const passed = (t) => t;

// Every file in this directory named `test-*` is a runnable test script.
const files = fs.readdirSync(__dirname).filter(function (filename) {
return filename.slice(0, 5) === 'test-';
});
// Run each test file sequentially, then print an aggregate pass/fail summary.
async.mapSeries(files, runTest, function (err, results) {
  if (err) throw err;

  let failureCount = 0;
  for (const ok of results) {
    if (!ok) failureCount += 1;
  }

  if (failureCount > 0) {
    console.log(allFailed('failed') + ` - ${failureCount} tests failed`);
  } else {
    console.log(allPassed('passed') + ` - ${results.length} tests passed`);
  }
});
// Runs a single test file in a child node process and reports success as a
// boolean via the `done(err, ok)` callback (never an error, so one failing
// test does not abort the series).
function runTest(filename, done) {
  // BUG FIX: the progress line used `$(unknown)` which is not template-literal
  // syntax, so the test's filename was never printed. Use `${filename}`.
  process.stdout.write(` - running ${filename} ...`);

  const p = spawn(process.execPath, [path.resolve(__dirname, filename)], {
    stdio: ['ignore', 1, 2],
    env: {
      'NODE_ASYNC_HOOK_NO_WARNING': '1'
    }
  });

  p.once('close', function (statusCode) {
    // Exit code 0 means the test script completed without failing assertions.
    const ok = (statusCode === 0);
    if (ok) {
      console.log(' ' + passed('ok'));
    } else {
      console.log(' - ' + failed('failed'));
    }
    done(null, ok);
  });
}
| cloudfoundry/nodejs-buildpack | fixtures/with_appdynamics/node_modules/async-hook-jl/test/runner.js | JavaScript | apache-2.0 | 1,239 |
#
# Author:: Adam Jacob (<adam@chef.io>)
# Author:: William Albenzi (<walbenzi@gmail.com>)
# Copyright:: Copyright 2009-2016, Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "chef/knife"
class Chef
  class Knife
    # knife command: append (or insert) entries into a role's
    # environment-specific run list.
    class RoleEnvRunListAdd < Knife

      deps do
        require "chef/role"
        require "chef/json_compat"
      end

      banner "knife role env_run_list add [ROLE] [ENVIRONMENT] [ENTRY [ENTRY]] (options)"

      option :after,
        short: "-a ITEM",
        long: "--after ITEM",
        description: "Place the ENTRY in the run list after ITEM."

      # Adds +entries+ to +role+'s run list for +environment+.
      #
      # When +after+ is given, the entries are inserted immediately after each
      # occurrence of that item; otherwise they are appended at the end. The
      # two branches previously duplicated the whole list-rebuild logic; this
      # version builds the list once.
      def add_to_env_run_list(role, environment, entries, after = nil)
        nlist = []
        # Make sure an (empty) env run list exists before reading it back.
        unless role.env_run_lists.key?(environment)
          role.env_run_lists_add(environment => nlist)
        end
        role.run_list_for(environment).each do |entry|
          nlist << entry
          # Insert the new entries right after every matching anchor item.
          entries.each { |e| nlist << e } if after && entry == after
        end
        # Without an anchor, the new entries go at the end of the list.
        entries.each { |e| nlist << e } unless after
        role.env_run_lists_add(environment => nlist)
      end

      # CLI entry point: loads the role, normalizes the entry arguments
      # (comma-separated and/or space-separated), updates the env run list,
      # and saves/prints the role.
      def run
        role = Chef::Role.load(@name_args[0])
        role.name(@name_args[0])
        environment = @name_args[1]

        if @name_args.size > 2
          # Check for nested lists and create a single plain one
          entries = @name_args[2..-1].map do |entry|
            entry.split(",").map { |e| e.strip }
          end.flatten
        else
          # Convert to array and remove the extra spaces.
          # NOTE(review): with fewer than three arguments @name_args[2] is nil
          # and this raises NoMethodError — confirm whether argument validation
          # should happen earlier instead.
          entries = @name_args[2].split(",").map { |e| e.strip }
        end

        add_to_env_run_list(role, environment, entries, config[:after])
        role.save
        config[:env_run_list] = true
        output(format_for_display(role))
      end
    end
  end
end
| martinisoft/chef | lib/chef/knife/role_env_run_list_add.rb | Ruby | apache-2.0 | 2,693 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.extension.manifest;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElementWrapper;
import java.util.List;
/**
 * Extension-manifest element describing how a component stores state: a
 * free-form description plus the list of scopes the state is kept in.
 * Note: JAXB field access is in effect, so field names map directly to XML
 * element names — do not rename fields.
 */
@ApiModel
@XmlAccessorType(XmlAccessType.FIELD)
public class Stateful {
// Free-form explanation of what state is stored.
private String description;
// Serialized as <scopes><scope>...</scope></scopes>.
@XmlElementWrapper
@XmlElement(name = "scope")
private List<Scope> scopes;
@ApiModelProperty(value = "The description for how the extension stores state")
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@ApiModelProperty(value = "The scopes used to store state")
public List<Scope> getScopes() {
return scopes;
}
public void setScopes(List<Scope> scopes) {
this.scopes = scopes;
}
}
| MikeThomsen/nifi | nifi-manifest/nifi-extension-manifest-model/src/main/java/org/apache/nifi/extension/manifest/Stateful.java | Java | apache-2.0 | 1,823 |
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fifo
import (
"fmt"
"os"
"sync"
"syscall"
"github.com/pkg/errors"
)
const oPathFlags = 0o10000000
// handle wraps an O_PATH file descriptor for a fifo, remembering the
// device/inode pair observed at open time so the /proc path can later be
// re-validated against it.
type handle struct {
	f         *os.File
	fd        uintptr
	dev       uint64 // device of the fifo when the handle was created
	ino       uint64 // inode of the fifo when the handle was created
	closeOnce sync.Once
	name      string // original path the handle was opened from
}
// getHandle opens fn with O_PATH and records its device/inode identity so the
// fifo can be re-verified via /proc/self/fd later. The caller owns the
// returned handle and must Close it.
func getHandle(fn string) (*handle, error) {
	file, err := os.OpenFile(fn, oPathFlags, 0)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open %v with oPathFlags", fn)
	}

	fd := file.Fd()
	var st syscall.Stat_t
	if err := syscall.Fstat(int(fd), &st); err != nil {
		file.Close()
		return nil, errors.Wrapf(err, "failed to stat handle %v", fd)
	}

	hdl := &handle{
		f:    file,
		name: fn,
		dev:  st.Dev,
		ino:  st.Ino,
		fd:   fd,
	}

	// check /proc just in case
	if _, err := os.Stat(hdl.procPath()); err != nil {
		file.Close()
		return nil, errors.Wrapf(err, "couldn't stat %v", hdl.procPath())
	}

	return hdl, nil
}
// procPath returns the /proc/self/fd entry that refers to the held descriptor.
func (h *handle) procPath() string {
	return fmt.Sprintf("/proc/self/fd/%d", h.fd)
}
// Name returns the path the handle was originally opened with.
func (h *handle) Name() string {
	return h.name
}
// Path re-stats the /proc path for the descriptor and verifies it still refers
// to the same file (same device and inode as at open time), guarding against
// the fifo having been replaced. Returns the /proc path on success.
func (h *handle) Path() (string, error) {
	var stat syscall.Stat_t
	if err := syscall.Stat(h.procPath(), &stat); err != nil {
		return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
	}
	if stat.Dev != h.dev || stat.Ino != h.ino {
		return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
	}
	return h.procPath(), nil
}
// Close releases the underlying descriptor. Safe to call multiple times; only
// the first call actually closes the file. Always returns nil.
func (h *handle) Close() error {
	h.closeOnce.Do(func() {
		h.f.Close()
	})
	return nil
}
| nalind/cri-o | utils/fifo/handle.go | GO | apache-2.0 | 2,092 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jackrabbit.ocm.nodemanagement.impl;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import javax.jcr.PropertyType;
import javax.jcr.RepositoryException;
import javax.jcr.Session;
import javax.jcr.Value;
import javax.jcr.ValueFactory;
import javax.jcr.nodetype.NodeDefinitionTemplate;
import javax.jcr.nodetype.NodeTypeTemplate;
import javax.jcr.nodetype.PropertyDefinitionTemplate;
import javax.jcr.version.OnParentVersionAction;
import org.apache.jackrabbit.ocm.mapper.model.BeanDescriptor;
import org.apache.jackrabbit.ocm.mapper.model.ChildNodeDefDescriptor;
import org.apache.jackrabbit.ocm.mapper.model.ClassDescriptor;
import org.apache.jackrabbit.ocm.mapper.model.CollectionDescriptor;
import org.apache.jackrabbit.ocm.mapper.model.FieldDescriptor;
import org.apache.jackrabbit.ocm.mapper.model.MappingDescriptor;
import org.apache.jackrabbit.ocm.mapper.model.PropertyDefDescriptor;
import org.apache.jackrabbit.ocm.nodemanagement.NodeTypeManager;
import org.apache.jackrabbit.ocm.nodemanagement.exception.NamespaceCreationException;
import org.apache.jackrabbit.ocm.nodemanagement.exception.NodeTypeCreationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class NodeTypeManagerImpl implements NodeTypeManager {
private static Logger log = LoggerFactory.getLogger(NodeTypeManagerImpl.class);
@Override
public void createNamespace(final Session session, final String namespace, final String namespaceUri) throws NamespaceCreationException {
if (session != null)
{
try
{
session.getWorkspace().getNamespaceRegistry().registerNamespace(namespace, namespaceUri);
log.info("Namespace created: " +
"{" + namespaceUri + "}" + namespace);
}
catch (Exception e)
{
throw new NamespaceCreationException(e);
}
}
}
@Override
public void createNodeTypes(final Session session, final MappingDescriptor mappingDescriptor) throws NodeTypeCreationException {
if (mappingDescriptor != null && mappingDescriptor.getClassDescriptorsByClassName().size() > 0)
{
final Collection classDescriptorObjects = mappingDescriptor.getClassDescriptorsByClassName().values();
final ClassDescriptor[] classDescriptors = (ClassDescriptor[])classDescriptorObjects.toArray(new ClassDescriptor[classDescriptorObjects.size()]);
createNodeTypes(session, classDescriptors);
}
else
{
throw new NodeTypeCreationException("The MappingDescriptor can't be null or empty.");
}
}
@Override
public void createNodeTypes(final Session session, final ClassDescriptor[] classDescriptors) throws NodeTypeCreationException {
if (classDescriptors != null && classDescriptors.length > 0)
{
log.info("Trying to create " + classDescriptors.length +
" JCR node types.");
for (int i = 0; i < classDescriptors.length; i++)
{
createSingleNodeType(session, classDescriptors[i]);
}
}
else
{
throw new NodeTypeCreationException("The ClassDescriptor can't be null or empty.");
}
}
@Override
public void createSingleNodeType(final Session session, final ClassDescriptor classDescriptor) throws NodeTypeCreationException {
try {
if (classDescriptor.getJcrType() != null &&
(classDescriptor.getJcrType().startsWith("nt:")
|| classDescriptor.getJcrType().startsWith("mix:"))) {
throw new NodeTypeCreationException("Namespace nt and mix are reserved namespaces. Please specify your own.");
}
if (checkSuperTypes(session.getWorkspace().getNodeTypeManager(),
classDescriptor.getJcrSuperTypes())) {
javax.jcr.nodetype.NodeTypeManager ntm = session.getWorkspace().getNodeTypeManager();
final NodeTypeTemplate ntt = ntm.createNodeTypeTemplate();
if (classDescriptor.getJcrType() == null) {
ntt.setName(classDescriptor.getClassName());
} else {
ntt.setName(classDescriptor.getJcrType());
}
ntt.setAbstract(classDescriptor.isAbstract());
if (classDescriptor.getJcrSuperTypes() != null && classDescriptor.getJcrSuperTypes().length() > 0) {
String[] superTypesArray = classDescriptor.getJcrSuperTypes().split(",");
//TODO combine the mixins here as well as supertypes
// Add classDescriptor.getJcrMixinTypes() to superTypesArray
for (String s : classDescriptor.getJcrMixinTypes()) {
System.out.println(s);
}
ntt.setDeclaredSuperTypeNames(superTypesArray);
}
// should we also support mixins to be created?
ntt.setMixin(false);
ntt.setQueryable(true);
ntt.setOrderableChildNodes(true);
final List nodeDefinitionTemplates = ntt.getNodeDefinitionTemplates();
final List propertyDefinitionTemplates = ntt.getPropertyDefinitionTemplates();
if (classDescriptor.getFieldDescriptors() != null) {
Iterator fieldIterator = classDescriptor.getFieldDescriptors().iterator();
while (fieldIterator.hasNext()) {
FieldDescriptor field = (FieldDescriptor) fieldIterator.next();
if (!field.isPath()) {
final PropertyDefinitionTemplate pdt = getPropertyDefinition(ntm, session.getValueFactory(), field);
// add the just created pdt to the nodetypetemplate
propertyDefinitionTemplates.add(pdt);
}
}
if (classDescriptor.getBeanDescriptors() != null) {
Iterator beanIterator = classDescriptor.getBeanDescriptors().iterator();
while (beanIterator.hasNext()) {
BeanDescriptor field = (BeanDescriptor) beanIterator.next();
if (this.isPropertyType(field.getJcrType())) {
final PropertyDefinitionTemplate pdt = getPropertyDefinition(ntm, session.getValueFactory(), field);
// add the just created pdt to the nodetypetemplate
propertyDefinitionTemplates.add(pdt);
} else {
final NodeDefinitionTemplate ndt = getNodeDefinition(ntm, session.getValueFactory(), field);
// add the just created pdt to the nodetypetemplate
nodeDefinitionTemplates.add(ndt);
}
}
}
if (classDescriptor.getCollectionDescriptors() != null) {
Iterator collectionIterator = classDescriptor.getCollectionDescriptors().iterator();
while (collectionIterator.hasNext()) {
CollectionDescriptor field = (CollectionDescriptor) collectionIterator.next();
if (this.isPropertyType(field.getJcrType())) {
final PropertyDefinitionTemplate pdt = getPropertyDefinition(ntm, session.getValueFactory(), field);
// add the just created pdt to the nodetypetemplate
propertyDefinitionTemplates.add(pdt);
} else {
final NodeDefinitionTemplate ndt = getNodeDefinition(ntm, session.getValueFactory(), field);
// add the just created pdt to the nodetypetemplate
nodeDefinitionTemplates.add(ndt);
}
}
}
ntm.registerNodeType(ntt, false);
log.info("Registered JCR node type '" + ntt.getName() +
"' for class '" + classDescriptor.getClassName() + "'");
}
} else {
throw new NodeTypeCreationException("JCR supertypes could not be resolved.");
}
} catch (Exception e) {
log.error("Could not create node types from class descriptor.", e);
throw new NodeTypeCreationException(e);
}
}
private NodeDefinitionTemplate getNodeDefinition(final javax.jcr.nodetype.NodeTypeManager ntm,
final ValueFactory valueFactory,
final ChildNodeDefDescriptor field) throws RepositoryException {
final NodeDefinitionTemplate ndt = ntm.createNodeDefinitionTemplate();
if (field.getJcrName() != null) {
ndt.setName(field.getJcrName());
} else {
ndt.setName("*");
}
int onParentVersion = OnParentVersionAction.IGNORE;
if (field.getJcrOnParentVersion() != null
&& field.getJcrOnParentVersion().length() > 0) {
onParentVersion = OnParentVersionAction.valueFromName(field.getJcrOnParentVersion());
}
ndt.setOnParentVersion(onParentVersion);
ndt.setSameNameSiblings(field.isJcrSameNameSiblings());
ndt.setAutoCreated(field.isJcrAutoCreated());
ndt.setDefaultPrimaryTypeName(field.getDefaultPrimaryType());
ndt.setMandatory(field.isJcrMandatory());
ndt.setProtected(field.isJcrProtected());
ndt.setRequiredPrimaryTypeNames(getJcrSuperTypes(field.getJcrType()));
return ndt;
}
private PropertyDefinitionTemplate getPropertyDefinition(final javax.jcr.nodetype.NodeTypeManager ntm,
final ValueFactory valueFactory,
final PropertyDefDescriptor field) throws RepositoryException {
final PropertyDefinitionTemplate pdt = ntm.createPropertyDefinitionTemplate();
if (field.getJcrName() != null) {
pdt.setName(field.getJcrName());
} else {
pdt.setName(field.getFieldName());
}
if (field.getJcrType() != null) {
try {
pdt.setRequiredType(PropertyType.valueFromName(field.getJcrType()));
} catch (IllegalArgumentException e) {
log.warn("Invalid property type '{}' for '{}'. Set default to String type", field.getJcrType(), field.getJcrName());
pdt.setRequiredType(PropertyType.STRING);
}
} else {
log.info("No property type set for {}. Setting 'String' type.", field.getJcrName());
pdt.setRequiredType(PropertyType.STRING);
}
int onParentVersion = OnParentVersionAction.IGNORE;
if (field.getJcrOnParentVersion() != null &&
field.getJcrOnParentVersion().length() > 0) {
onParentVersion = OnParentVersionAction.valueFromName(field.getJcrOnParentVersion());
}
pdt.setOnParentVersion(onParentVersion);
pdt.setAutoCreated(field.isJcrAutoCreated());
pdt.setAvailableQueryOperators(new String[0]);
pdt.setFullTextSearchable(true);
pdt.setMandatory(field.isJcrMandatory());
pdt.setMultiple(field.isJcrMultiple());
pdt.setOnParentVersion(onParentVersion);
pdt.setProtected(field.isJcrProtected());
pdt.setQueryOrderable(true);
if (field instanceof FieldDescriptor) {
FieldDescriptor f = (FieldDescriptor) field;
if (f.getJcrDefaultValue() != null) {
if (pdt.getRequiredType() == PropertyType.STRING) {
Value[] vals = {valueFactory.createValue(f.getJcrDefaultValue())};
pdt.setDefaultValues(vals);
} else {
log.warn("Can only set default value for String properties. Skip for field '{}'", field.getJcrName());
}
}
pdt.setValueConstraints(f.getJcrValueConstraints());
}
return pdt;
}
/** Checks if all JCR super types for a given node type exist.
*
* @param ntMgr NodeTypeManager
* @param superTypes Comma separated String with JCR node types
* @return returns <code>false</code> if one of the supertypes does not exist, otherwise returns <code>true</code>
*/
private boolean checkSuperTypes(javax.jcr.nodetype.NodeTypeManager ntMgr,
String superTypes)
{
boolean exists = true;
if (superTypes != null && superTypes.length() > 0)
{
String[] superTypesArray = superTypes.split(",");
log.debug("JCR super types found: " + superTypesArray.length);
for (int i = 0; i < superTypesArray.length; i++)
{
try
{
ntMgr.getNodeType(superTypesArray[i]);
}
catch (Exception e)
{
log.error("JCR super type '" + superTypesArray[i] + "' does not exist!");
exists = false;
break;
}
}
}
return exists;
}
public String[] getJcrSuperTypes(String superTypes)
{
return superTypes.split(",");
}
private boolean isPropertyType(String type)
{
return (type.equals(PropertyType.TYPENAME_BINARY) ||
type.equals(PropertyType.TYPENAME_BOOLEAN) ||
type.equals(PropertyType.TYPENAME_DATE) ||
type.equals(PropertyType.TYPENAME_DOUBLE) ||
type.equals(PropertyType.TYPENAME_LONG) ||
type.equals(PropertyType.TYPENAME_NAME) ||
type.equals(PropertyType.TYPENAME_PATH) ||
type.equals(PropertyType.TYPENAME_REFERENCE) ||
type.equals(PropertyType.TYPENAME_WEAKREFERENCE) ||
type.equals(PropertyType.TYPENAME_DECIMAL) ||
type.equals(PropertyType.TYPENAME_URI) ||
type.equals(PropertyType.TYPENAME_STRING));
}
}
| apache/jackrabbit-ocm | src/main/java/org/apache/jackrabbit/ocm/nodemanagement/impl/NodeTypeManagerImpl.java | Java | apache-2.0 | 15,473 |
//===----------------------------------------------------------------------===//
//
// Peloton
//
// tpcc_order_status.cpp
//
// Identification: src/main/tpcc/tpcc_order_status.cpp
//
// Copyright (c) 2015-16, Carnegie Mellon University Database Group
//
//===----------------------------------------------------------------------===//
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <chrono>
#include <iostream>
#include <ctime>
#include <cassert>
#include <thread>
#include <algorithm>
#include <random>
#include <cstddef>
#include <limits>
#include "benchmark/tpcc/tpcc_workload.h"
#include "benchmark/tpcc/tpcc_configuration.h"
#include "benchmark/tpcc/tpcc_loader.h"
#include "catalog/manager.h"
#include "catalog/schema.h"
#include "type/types.h"
#include "type/value.h"
#include "type/value_factory.h"
#include "common/logger.h"
#include "common/timer.h"
#include "common/generator.h"
#include "concurrency/transaction.h"
#include "concurrency/transaction_manager_factory.h"
#include "executor/executor_context.h"
#include "executor/abstract_executor.h"
#include "executor/logical_tile.h"
#include "executor/logical_tile_factory.h"
#include "executor/materialization_executor.h"
#include "executor/update_executor.h"
#include "executor/index_scan_executor.h"
#include "executor/insert_executor.h"
#include "executor/order_by_executor.h"
#include "executor/limit_executor.h"
#include "expression/abstract_expression.h"
#include "expression/constant_value_expression.h"
#include "expression/tuple_value_expression.h"
#include "expression/comparison_expression.h"
#include "expression/expression_util.h"
#include "common/container_tuple.h"
#include "index/index_factory.h"
#include "logging/log_manager.h"
#include "planner/abstract_plan.h"
#include "planner/materialization_plan.h"
#include "planner/insert_plan.h"
#include "planner/update_plan.h"
#include "planner/index_scan_plan.h"
#include "planner/order_by_plan.h"
#include "planner/limit_plan.h"
#include "storage/data_table.h"
#include "storage/table_factory.h"
namespace peloton {
namespace benchmark {
namespace tpcc {
// Executes one TPC-C ORDER_STATUS transaction: look up a customer (by id; the
// by-last-name path is currently disabled), fetch the customer's most recent
// order, and read that order's lines. Returns true on commit, false on abort.
bool RunOrderStatus(const size_t &thread_id){
  /*
     "ORDER_STATUS": {
     "getCustomerByCustomerId": "SELECT C_ID, C_FIRST, C_MIDDLE, C_LAST, C_BALANCE FROM CUSTOMER WHERE C_W_ID = ? AND C_D_ID = ? AND C_ID = ?", # w_id, d_id, c_id
     "getCustomersByLastName": "SELECT C_ID, C_FIRST, C_MIDDLE, C_LAST, C_BALANCE FROM CUSTOMER WHERE C_W_ID = ? AND C_D_ID = ? AND C_LAST = ? ORDER BY C_FIRST", # w_id, d_id, c_last
     "getLastOrder": "SELECT O_ID, O_CARRIER_ID, O_ENTRY_D FROM ORDERS WHERE O_W_ID = ? AND O_D_ID = ? AND O_C_ID = ? ORDER BY O_ID DESC LIMIT 1", # w_id, d_id, c_id
     "getOrderLines": "SELECT OL_SUPPLY_W_ID, OL_I_ID, OL_QUANTITY, OL_AMOUNT, OL_DELIVERY_D FROM ORDER_LINE WHERE OL_W_ID = ? AND OL_D_ID = ? AND OL_O_ID = ?", # w_id, d_id, o_id
     }
   */

  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();

  auto txn = txn_manager.BeginTransaction();

  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn));

  // Generate w_id, d_id, c_id, c_last
  //int w_id = GetRandomInteger(0, state.warehouse_count - 1);
  int w_id = GenerateWarehouseId(thread_id);
  int d_id = GetRandomInteger(0, state.districts_per_warehouse - 1);

  int c_id = -1;
  std::string c_last;

  // if (GetRandomInteger(1, 100) <= 60) {
  //   c_last = GetRandomLastName(state.customers_per_district);
  // } else {
  c_id = GetNURand(1023, 0, state.customers_per_district - 1);
  // }

  // Run queries
  if (c_id != -1) {
    LOG_TRACE("getCustomerByCustomerId: SELECT C_ID, C_FIRST, C_MIDDLE, C_LAST, C_BALANCE FROM CUSTOMER WHERE C_W_ID = ? AND C_D_ID = ? AND C_ID = ? # w_id, d_id, c_id");

    // Construct index scan executor
    std::vector<oid_t> customer_column_ids =
        {COL_IDX_C_ID, COL_IDX_C_FIRST, COL_IDX_C_MIDDLE,
         COL_IDX_C_LAST, COL_IDX_C_BALANCE};
    std::vector<oid_t> customer_key_column_ids = {COL_IDX_C_W_ID, COL_IDX_C_D_ID, COL_IDX_C_ID};

    std::vector<ExpressionType> customer_expr_types;
    std::vector<type::Value > customer_key_values;
    std::vector<expression::AbstractExpression *> runtime_keys;

    customer_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
    customer_key_values.push_back(type::ValueFactory::GetIntegerValue(w_id).Copy());
    customer_expr_types.push_back(
        ExpressionType::COMPARE_EQUAL);
    customer_key_values.push_back(type::ValueFactory::GetIntegerValue(d_id).Copy());
    customer_expr_types.push_back(
        ExpressionType::COMPARE_EQUAL);
    customer_key_values.push_back(type::ValueFactory::GetIntegerValue(c_id).Copy());

    auto customer_pkey_index = customer_table->GetIndexWithOid(customer_table_pkey_index_oid);

    planner::IndexScanPlan::IndexScanDesc customer_index_scan_desc(customer_pkey_index, customer_key_column_ids, customer_expr_types,
                                                                   customer_key_values, runtime_keys);

    auto predicate = nullptr;

    planner::IndexScanPlan customer_index_scan_node(customer_table, predicate,
                                                    customer_column_ids, customer_index_scan_desc);

    executor::IndexScanExecutor customer_index_scan_executor(&customer_index_scan_node, context.get());

    auto result = ExecuteRead(&customer_index_scan_executor);
    if (txn->GetResult() != ResultType::SUCCESS) {
      txn_manager.AbortTransaction(txn);
      return false;
    }

    // A primary-key lookup must yield exactly one non-empty row.
    if (result.size() == 0) {
      LOG_ERROR("wrong result size : %lu", result.size());
      PL_ASSERT(false);
    }
    if (result[0].size() == 0) {
      LOG_ERROR("wrong result[0] size : %lu", result[0].size());
      PL_ASSERT(false);
    }
  } else {
    // FIX: this is a query trace like the others, not an error condition.
    LOG_TRACE("getCustomersByLastName: SELECT C_ID, C_FIRST, C_MIDDLE, C_LAST, C_BALANCE FROM CUSTOMER WHERE C_W_ID = ? AND C_D_ID = ? AND C_LAST = ? ORDER BY C_FIRST, # w_id, d_id, c_last");

    // Construct index scan executor
    std::vector<oid_t> customer_column_ids =
        {COL_IDX_C_ID, COL_IDX_C_FIRST, COL_IDX_C_MIDDLE,
         COL_IDX_C_LAST, COL_IDX_C_BALANCE};
    std::vector<oid_t> customer_key_column_ids = {COL_IDX_C_W_ID, COL_IDX_C_D_ID, COL_IDX_C_LAST};

    std::vector<ExpressionType> customer_expr_types;
    std::vector<type::Value > customer_key_values;
    std::vector<expression::AbstractExpression *> runtime_keys;

    customer_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
    customer_key_values.push_back(type::ValueFactory::GetIntegerValue(w_id).Copy());
    customer_expr_types.push_back(
        ExpressionType::COMPARE_EQUAL);
    customer_key_values.push_back(type::ValueFactory::GetIntegerValue(d_id).Copy());
    customer_expr_types.push_back(
        ExpressionType::COMPARE_EQUAL);
    customer_key_values.push_back(type::ValueFactory::GetVarcharValue(c_last).Copy());

    auto customer_skey_index = customer_table->GetIndexWithOid(customer_table_skey_index_oid);

    planner::IndexScanPlan::IndexScanDesc customer_index_scan_desc(customer_skey_index, customer_key_column_ids, customer_expr_types,
                                                                   customer_key_values, runtime_keys);

    auto predicate = nullptr;

    planner::IndexScanPlan customer_index_scan_node(customer_table, predicate,
                                                    customer_column_ids, customer_index_scan_desc);

    executor::IndexScanExecutor customer_index_scan_executor(&customer_index_scan_node, context.get());

    // Construct order by executor (sort by C_FIRST ascending)
    std::vector<oid_t> sort_keys = {1};
    std::vector<bool> descend_flags = {false};
    std::vector<oid_t> output_columns = {0,1,2,3,4};

    planner::OrderByPlan customer_order_by_node(sort_keys, descend_flags, output_columns);

    executor::OrderByExecutor customer_order_by_executor(&customer_order_by_node, context.get());
    customer_order_by_executor.AddChild(&customer_index_scan_executor);

    auto result = ExecuteRead(&customer_order_by_executor);
    if (txn->GetResult() != ResultType::SUCCESS) {
      txn_manager.AbortTransaction(txn);
      return false;
    }

    PL_ASSERT(result.size() > 0);
    // Get the middle one (per the TPC-C spec for by-last-name lookups)
    size_t name_count = result.size();
    auto &customer = result[name_count/2];
    PL_ASSERT(customer.size() > 0);
    c_id = type::ValuePeeker::PeekInteger(customer[0]);
  }

  if (c_id < 0) {
    LOG_ERROR("wrong c_id");
    PL_ASSERT(false);
  }

  LOG_TRACE("getLastOrder: SELECT O_ID, O_CARRIER_ID, O_ENTRY_D FROM ORDERS WHERE O_W_ID = ? AND O_D_ID = ? AND O_C_ID = ? ORDER BY O_ID DESC LIMIT 1, # w_id, d_id, c_id");

  // Construct index scan executor
  std::vector<oid_t> orders_column_ids = {COL_IDX_O_ID
      , COL_IDX_O_CARRIER_ID, COL_IDX_O_ENTRY_D};
  std::vector<oid_t> orders_key_column_ids = {COL_IDX_O_W_ID, COL_IDX_O_D_ID, COL_IDX_O_C_ID};

  std::vector<ExpressionType> orders_expr_types;
  std::vector<type::Value > orders_key_values;
  std::vector<expression::AbstractExpression *> runtime_keys;

  orders_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
  orders_key_values.push_back(type::ValueFactory::GetIntegerValue(w_id).Copy());
  orders_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
  orders_key_values.push_back(type::ValueFactory::GetIntegerValue(d_id).Copy());
  orders_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
  orders_key_values.push_back(type::ValueFactory::GetIntegerValue(c_id).Copy());

  // Get the index
  auto orders_skey_index = orders_table->GetIndexWithOid(orders_table_skey_index_oid);
  planner::IndexScanPlan::IndexScanDesc orders_index_scan_desc(
      orders_skey_index, orders_key_column_ids, orders_expr_types,
      orders_key_values, runtime_keys);

  auto predicate = nullptr;

  planner::IndexScanPlan orders_index_scan_node(orders_table,
                                                predicate, orders_column_ids, orders_index_scan_desc);

  executor::IndexScanExecutor orders_index_scan_executor(
      &orders_index_scan_node, context.get());

  // Construct order by executor (newest order first)
  std::vector<oid_t> sort_keys = {0};
  std::vector<bool> descend_flags = {true};
  std::vector<oid_t> output_columns = {0,1,2};

  planner::OrderByPlan orders_order_by_node(sort_keys, descend_flags, output_columns);

  executor::OrderByExecutor orders_order_by_executor(&orders_order_by_node, context.get());
  orders_order_by_executor.AddChild(&orders_index_scan_executor);

  // Construct limit executor
  size_t limit = 1;
  size_t offset = 0;
  planner::LimitPlan limit_node(limit, offset);
  executor::LimitExecutor limit_executor(&limit_node, context.get());
  limit_executor.AddChild(&orders_order_by_executor);

  // BUG FIX: execute through the limit executor so the LIMIT 1 actually
  // applies; previously the order-by executor was read directly and the
  // limit_executor was constructed but never run.
  auto orders = ExecuteRead(&limit_executor);
  if (txn->GetResult() != ResultType::SUCCESS) {
    txn_manager.AbortTransaction(txn);
    return false;
  }

  if (orders.size() != 0) {
    LOG_TRACE("getOrderLines: SELECT OL_SUPPLY_W_ID, OL_I_ID, OL_QUANTITY, OL_AMOUNT, OL_DELIVERY_D FROM ORDER_LINE WHERE OL_W_ID = ? AND OL_D_ID = ? AND OL_O_ID = ?, # w_id, d_id, o_id");

    // Construct index scan executor
    std::vector<oid_t> order_line_column_ids = {COL_IDX_OL_SUPPLY_W_ID, COL_IDX_OL_I_ID, COL_IDX_OL_QUANTITY, COL_IDX_OL_AMOUNT, COL_IDX_OL_DELIVERY_D};
    std::vector<oid_t> order_line_key_column_ids = {COL_IDX_OL_W_ID, COL_IDX_OL_D_ID, COL_IDX_OL_O_ID};

    std::vector<ExpressionType> order_line_expr_types;
    std::vector<type::Value > order_line_key_values;

    order_line_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
    order_line_key_values.push_back(type::ValueFactory::GetIntegerValue(w_id).Copy());
    order_line_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
    order_line_key_values.push_back(type::ValueFactory::GetIntegerValue(d_id).Copy());
    order_line_expr_types.push_back(ExpressionType::COMPARE_EQUAL);
    order_line_key_values.push_back(orders[0][0]);

    auto order_line_skey_index = order_line_table->GetIndexWithOid(order_line_table_skey_index_oid);
    planner::IndexScanPlan::IndexScanDesc order_line_index_scan_desc(
        order_line_skey_index, order_line_key_column_ids, order_line_expr_types,
        order_line_key_values, runtime_keys);

    predicate = nullptr;

    planner::IndexScanPlan order_line_index_scan_node(order_line_table,
                                                      predicate, order_line_column_ids, order_line_index_scan_desc);

    executor::IndexScanExecutor order_line_index_scan_executor(&order_line_index_scan_node, context.get());

    ExecuteRead(&order_line_index_scan_executor);
    if (txn->GetResult() != ResultType::SUCCESS) {
      txn_manager.AbortTransaction(txn);
      return false;
    }
  }

  PL_ASSERT(txn->GetResult() == ResultType::SUCCESS);

  auto result = txn_manager.CommitTransaction(txn);

  if (result == ResultType::SUCCESS) {
    return true;
  } else {
    return false;
  }
}
}
}
}
| wangziqi2016/peloton | src/main/tpcc/tpcc_order_status.cpp | C++ | apache-2.0 | 12,658 |
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from geecheck_tests import common
class TestVersion(unittest.TestCase):
@unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')
def testFusionVersion(self):
"""Check if Fusion release is the latest available."""
latest_version = common.GetLatestVersion()
fusion_version = common.GetFusionVersion()
error_msg = ('Running Fusion version %s. Upgrade to version %s.' %
(fusion_version, latest_version))
self.assertEqual(fusion_version, latest_version, msg=error_msg)
print ('Currently running the latest version of Fusion (%s).' %
fusion_version)
@unittest.skipUnless(common.IsGeeServerInstalled(),
'GEE Server is not installed')
def testGeeServerVersion(self):
"""Check if GEE Server release is the latest available."""
latest_version = common.GetLatestVersion()
gee_server_version = common.GetGeeServerVersion()
error_msg = ('Running GEE Server version %s. Upgrade to (%s).' %
(gee_server_version, latest_version))
self.assertEqual(gee_server_version, latest_version, msg=error_msg)
print ('Currently running the latest version of GEE Server (%s).' %
gee_server_version)
@unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')
@unittest.skipUnless(common.IsGeeServerInstalled(),
'GEE Server is not installed')
def testFusionVersionsMatch(self):
"""Check Fusion and server versions are aligned."""
fusion_version = common.GetFusionVersion()
gee_server_version = common.GetGeeServerVersion()
error_msg = ('Fusion and GEE Server versions DO NOT match. '
'Currently running Fusion v. %s and GEE Server v. %s.' %
(fusion_version, gee_server_version))
self.assertEqual(fusion_version, gee_server_version, msg=error_msg)
print 'Fusion and GEE Server versions match. Current version is %s.' % (
fusion_version)
if __name__ == '__main__':
unittest.main()
| tst-ahernandez/earthenterprise | earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/fusion_version_test.py | Python | apache-2.0 | 2,649 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jclouds.vcloud;
import static org.jclouds.reflect.Reflection2.method;
import static org.testng.Assert.assertEquals;
import java.io.IOException;
import org.jclouds.http.HttpRequest;
import org.jclouds.http.functions.ParseSax;
import org.jclouds.providers.AnonymousProviderMetadata;
import org.jclouds.providers.ProviderMetadata;
import org.jclouds.rest.internal.BaseRestAnnotationProcessingTest;
import org.jclouds.rest.internal.GeneratedHttpRequest;
import org.jclouds.vcloud.xml.SupportedVersionsHandler;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.reflect.Invokable;
/**
* Tests behavior of {@code VCloudVersionsApi}
*/
// NOTE:without testName, this will not call @Before* and fail w/NPE during surefire
@Test(groups = "unit", testName = "VCloudVersionsApiTest")
public class VCloudVersionsApiTest extends BaseRestAnnotationProcessingTest<VCloudVersionsApi> {
public void testVersions() throws SecurityException, NoSuchMethodException, IOException {
Invokable<?, ?> method = method(VCloudVersionsApi.class, "getSupportedVersions");
GeneratedHttpRequest request = processor.createRequest(method, ImmutableList.of());
assertEquals(request.getRequestLine(), "GET http://localhost:8080/versions HTTP/1.1");
assertNonPayloadHeadersEqual(request, "");
assertPayloadEquals(request, null, null, false);
assertResponseParserClassEquals(method, request, ParseSax.class);
assertSaxResponseParserClassEquals(method, SupportedVersionsHandler.class);
assertFallbackClassEquals(method, null);
checkFilters(request);
}
@Override
protected void checkFilters(HttpRequest request) {
assertEquals(request.getFilters().size(), 1);
}
@Override
protected ProviderMetadata createProviderMetadata() {
return AnonymousProviderMetadata.forApiOnEndpoint(VCloudVersionsApi.class,
"http://localhost:8080");
}
}
| asankasanjaya/stratos | dependencies/jclouds/apis/vcloud/1.8.1-stratos/src/test/java/org/jclouds/vcloud/VCloudVersionsApiTest.java | Java | apache-2.0 | 2,778 |
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.intellij.lang.regexp;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.util.ClassExtension;
import com.intellij.psi.PsiComment;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import org.intellij.lang.regexp.psi.*;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.TestOnly;
/**
* @author yole
*/
public final class RegExpLanguageHosts extends ClassExtension<RegExpLanguageHost> {
private static final RegExpLanguageHosts INSTANCE = new RegExpLanguageHosts();
private final DefaultRegExpPropertiesProvider myDefaultProvider;
private static RegExpLanguageHost myHost;
public static RegExpLanguageHosts getInstance() {
return INSTANCE;
}
private RegExpLanguageHosts() {
super("com.intellij.regExpLanguageHost");
myDefaultProvider = DefaultRegExpPropertiesProvider.getInstance();
}
@TestOnly
public static void setRegExpHost(@Nullable RegExpLanguageHost host) {
myHost = host;
}
@Contract("null -> null")
@Nullable
private static RegExpLanguageHost findRegExpHost(@Nullable final PsiElement element) {
if (element == null) {
return null;
}
if (ApplicationManager.getApplication().isUnitTestMode() && myHost != null) {
return myHost;
}
final PsiFile file = element.getContainingFile();
final PsiElement context = file.getContext();
if (context instanceof RegExpLanguageHost) {
return (RegExpLanguageHost)context;
}
if (context != null) {
return INSTANCE.forClass(context.getClass());
}
return null;
}
public boolean isRedundantEscape(@NotNull final RegExpChar ch, @NotNull final String text) {
if (text.length() <= 1) {
return false;
}
final RegExpLanguageHost host = findRegExpHost(ch);
if (host != null) {
final char c = text.charAt(1);
return !host.characterNeedsEscaping(c);
}
else {
return !("\\]".equals(text) || "\\}".equals(text));
}
}
public boolean supportsInlineOptionFlag(char flag, PsiElement context) {
final RegExpLanguageHost host = findRegExpHost(context);
return host == null || host.supportsInlineOptionFlag(flag, context);
}
public boolean supportsExtendedHexCharacter(@Nullable RegExpChar regExpChar) {
final RegExpLanguageHost host = findRegExpHost(regExpChar);
try {
return host != null && host.supportsExtendedHexCharacter(regExpChar);
} catch (AbstractMethodError e) {
// supportsExtendedHexCharacter not present
return false;
}
}
public boolean supportsLiteralBackspace(@Nullable RegExpChar regExpChar) {
final RegExpLanguageHost host = findRegExpHost(regExpChar);
return host != null && host.supportsLiteralBackspace(regExpChar);
}
public boolean supportsNamedGroupSyntax(@Nullable final RegExpGroup group) {
final RegExpLanguageHost host = findRegExpHost(group);
return host != null && host.supportsNamedGroupSyntax(group);
}
public boolean supportsNamedGroupRefSyntax(@Nullable final RegExpNamedGroupRef ref) {
final RegExpLanguageHost host = findRegExpHost(ref);
try {
return host != null && host.supportsNamedGroupRefSyntax(ref);
} catch (AbstractMethodError e) {
// supportsNamedGroupRefSyntax() not present
return false;
}
}
public boolean isValidGroupName(String name, @Nullable final PsiElement context) {
final RegExpLanguageHost host = findRegExpHost(context);
return host != null && host.isValidGroupName(name, context);
}
public boolean supportsPerl5EmbeddedComments(@Nullable final PsiComment comment) {
final RegExpLanguageHost host = findRegExpHost(comment);
return host != null && host.supportsPerl5EmbeddedComments();
}
public boolean supportsPythonConditionalRefs(@Nullable final RegExpPyCondRef condRef) {
final RegExpLanguageHost host = findRegExpHost(condRef);
return host != null && host.supportsPythonConditionalRefs();
}
public boolean supportsPossessiveQuantifiers(@Nullable final RegExpQuantifier quantifier) {
final RegExpLanguageHost host = findRegExpHost(quantifier);
return host == null || host.supportsPossessiveQuantifiers();
}
public boolean supportsBoundary(@Nullable final RegExpBoundary boundary) {
final RegExpLanguageHost host = findRegExpHost(boundary);
return host == null || host.supportsBoundary(boundary);
}
public boolean supportsSimpleClass(@Nullable final RegExpSimpleClass simpleClass) {
final RegExpLanguageHost host = findRegExpHost(simpleClass);
return host == null || host.supportsSimpleClass(simpleClass);
}
public boolean isValidCategory(@NotNull final PsiElement element, @NotNull String category) {
final RegExpLanguageHost host = findRegExpHost(element);
return host != null ? host.isValidCategory(category) : myDefaultProvider.isValidCategory(category);
}
public boolean supportsNamedCharacters(@NotNull final RegExpNamedCharacter namedCharacter) {
final RegExpLanguageHost host = findRegExpHost(namedCharacter);
return host != null && host.supportsNamedCharacters(namedCharacter);
}
public boolean isValidNamedCharacter(@NotNull final RegExpNamedCharacter namedCharacter) {
final RegExpLanguageHost host = findRegExpHost(namedCharacter);
return host != null && host.isValidNamedCharacter(namedCharacter);
}
@NotNull
public String[][] getAllKnownProperties(@NotNull final PsiElement element) {
final RegExpLanguageHost host = findRegExpHost(element);
return host != null ? host.getAllKnownProperties() : myDefaultProvider.getAllKnownProperties();
}
@Nullable
String getPropertyDescription(@NotNull final PsiElement element, @Nullable final String name) {
final RegExpLanguageHost host = findRegExpHost(element);
return host != null ? host.getPropertyDescription(name) : myDefaultProvider.getPropertyDescription(name);
}
@NotNull
String[][] getKnownCharacterClasses(@NotNull final PsiElement element) {
final RegExpLanguageHost host = findRegExpHost(element);
return host != null ? host.getKnownCharacterClasses() : myDefaultProvider.getKnownCharacterClasses();
}
String[][] getPosixCharacterClasses(@NotNull final PsiElement element) {
return myDefaultProvider.getPosixCharacterClasses();
}
}
| michaelgallacher/intellij-community | RegExpSupport/src/org/intellij/lang/regexp/RegExpLanguageHosts.java | Java | apache-2.0 | 7,023 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.index.sasi.disk;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import org.apache.cassandra.db.BufferDecoratedKey;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.index.sasi.disk.TokenTreeBuilder.EntryType;
import org.apache.cassandra.index.sasi.utils.CombinedValue;
import org.apache.cassandra.index.sasi.utils.MappedBuffer;
import org.apache.cassandra.index.sasi.utils.RangeIterator;
import org.apache.cassandra.db.marshal.LongType;
import org.apache.cassandra.io.compress.BufferType;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.utils.MurmurHash;
import org.apache.cassandra.utils.Pair;
import org.apache.cassandra.io.util.RandomAccessReader;
import org.apache.cassandra.io.util.SequentialWriter;
import junit.framework.Assert;
import org.junit.Test;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import com.carrotsearch.hppc.LongOpenHashSet;
import com.carrotsearch.hppc.LongSet;
import com.carrotsearch.hppc.cursors.LongCursor;
import com.google.common.base.Function;
public class TokenTreeTest
{
private static final Function<Long, DecoratedKey> KEY_CONVERTER = new KeyConverter();
static LongSet singleOffset = new LongOpenHashSet() {{ add(1); }};
static LongSet bigSingleOffset = new LongOpenHashSet() {{ add(((long) Integer.MAX_VALUE) + 10); }};
static LongSet shortPackableCollision = new LongOpenHashSet() {{ add(2L); add(3L); }}; // can pack two shorts
static LongSet intPackableCollision = new LongOpenHashSet() {{ add(6L); add(((long) Short.MAX_VALUE) + 1); }}; // can pack int & short
static LongSet multiCollision = new LongOpenHashSet() {{ add(3L); add(4L); add(5L); }}; // can't pack
static LongSet unpackableCollision = new LongOpenHashSet() {{ add(((long) Short.MAX_VALUE) + 1); add(((long) Short.MAX_VALUE) + 2); }}; // can't pack
final static SortedMap<Long, LongSet> simpleTokenMap = new TreeMap<Long, LongSet>()
{{
put(1L, bigSingleOffset); put(3L, shortPackableCollision); put(4L, intPackableCollision); put(6L, singleOffset);
put(9L, multiCollision); put(10L, unpackableCollision); put(12L, singleOffset); put(13L, singleOffset);
put(15L, singleOffset); put(16L, singleOffset); put(20L, singleOffset); put(22L, singleOffset);
put(25L, singleOffset); put(26L, singleOffset); put(27L, singleOffset); put(28L, singleOffset);
put(40L, singleOffset); put(50L, singleOffset); put(100L, singleOffset); put(101L, singleOffset);
put(102L, singleOffset); put(103L, singleOffset); put(108L, singleOffset); put(110L, singleOffset);
put(112L, singleOffset); put(115L, singleOffset); put(116L, singleOffset); put(120L, singleOffset);
put(121L, singleOffset); put(122L, singleOffset); put(123L, singleOffset); put(125L, singleOffset);
}};
final static SortedMap<Long, LongSet> bigTokensMap = new TreeMap<Long, LongSet>()
{{
for (long i = 0; i < 1000000; i++)
put(i, singleOffset);
}};
final static SortedMap<Long, LongSet> collidingTokensMap = new TreeMap<Long, LongSet>()
{{
put(1L, singleOffset); put(7L, singleOffset); put(8L, singleOffset);
}};
final static SortedMap<Long, LongSet> tokens = bigTokensMap;
@Test
public void buildAndIterate() throws Exception
{
final TokenTreeBuilder builder = new TokenTreeBuilder(tokens).finish();
final Iterator<Pair<Long, LongSet>> tokenIterator = builder.iterator();
final Iterator<Map.Entry<Long, LongSet>> listIterator = tokens.entrySet().iterator();
while (tokenIterator.hasNext() && listIterator.hasNext())
{
Pair<Long, LongSet> tokenNext = tokenIterator.next();
Map.Entry<Long, LongSet> listNext = listIterator.next();
Assert.assertEquals(listNext.getKey(), tokenNext.left);
Assert.assertEquals(listNext.getValue(), tokenNext.right);
}
Assert.assertFalse("token iterator not finished", tokenIterator.hasNext());
Assert.assertFalse("list iterator not finished", listIterator.hasNext());
}
@Test
public void buildWithMultipleMapsAndIterate() throws Exception
{
final SortedMap<Long, LongSet> merged = new TreeMap<>();
final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();
builder.add(collidingTokensMap);
merged.putAll(collidingTokensMap);
for (Map.Entry<Long, LongSet> entry : simpleTokenMap.entrySet())
{
if (merged.containsKey(entry.getKey()))
{
LongSet mergingOffsets = entry.getValue();
LongSet existingOffsets = merged.get(entry.getKey());
if (mergingOffsets.equals(existingOffsets))
continue;
Set<Long> mergeSet = new HashSet<>();
for (LongCursor merging : mergingOffsets)
mergeSet.add(merging.value);
for (LongCursor existing : existingOffsets)
mergeSet.add(existing.value);
LongSet mergedResults = new LongOpenHashSet();
for (Long result : mergeSet)
mergedResults.add(result);
merged.put(entry.getKey(), mergedResults);
}
else
{
merged.put(entry.getKey(), entry.getValue());
}
}
final Iterator<Pair<Long, LongSet>> tokenIterator = builder.iterator();
final Iterator<Map.Entry<Long, LongSet>> listIterator = merged.entrySet().iterator();
while (tokenIterator.hasNext() && listIterator.hasNext())
{
Pair<Long, LongSet> tokenNext = tokenIterator.next();
Map.Entry<Long, LongSet> listNext = listIterator.next();
Assert.assertEquals(listNext.getKey(), tokenNext.left);
Assert.assertEquals(listNext.getValue(), tokenNext.right);
}
Assert.assertFalse("token iterator not finished", tokenIterator.hasNext());
Assert.assertFalse("list iterator not finished", listIterator.hasNext());
}
@Test
public void testSerializedSize() throws Exception
{
final TokenTreeBuilder builder = new TokenTreeBuilder(tokens).finish();
final File treeFile = File.createTempFile("token-tree-size-test", "tt");
treeFile.deleteOnExit();
try (SequentialWriter writer = new SequentialWriter(treeFile, 4096, BufferType.ON_HEAP))
{
builder.write(writer);
writer.sync();
}
final RandomAccessReader reader = RandomAccessReader.open(treeFile);
Assert.assertEquals((int) reader.bytesRemaining(), builder.serializedSize());
}
@Test
public void buildSerializeAndIterate() throws Exception
{
final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();
final File treeFile = File.createTempFile("token-tree-iterate-test1", "tt");
treeFile.deleteOnExit();
try (SequentialWriter writer = new SequentialWriter(treeFile, 4096, BufferType.ON_HEAP))
{
builder.write(writer);
writer.sync();
}
final RandomAccessReader reader = RandomAccessReader.open(treeFile);
final TokenTree tokenTree = new TokenTree(new MappedBuffer(reader));
final Iterator<Token> tokenIterator = tokenTree.iterator(KEY_CONVERTER);
final Iterator<Map.Entry<Long, LongSet>> listIterator = simpleTokenMap.entrySet().iterator();
while (tokenIterator.hasNext() && listIterator.hasNext())
{
Token treeNext = tokenIterator.next();
Map.Entry<Long, LongSet> listNext = listIterator.next();
Assert.assertEquals(listNext.getKey(), treeNext.get());
Assert.assertEquals(convert(listNext.getValue()), convert(treeNext));
}
Assert.assertFalse("token iterator not finished", tokenIterator.hasNext());
Assert.assertFalse("list iterator not finished", listIterator.hasNext());
reader.close();
}
@Test
public void buildSerializeAndGet() throws Exception
{
final long tokMin = 0;
final long tokMax = 1000;
final TokenTree tokenTree = generateTree(tokMin, tokMax);
for (long i = 0; i <= tokMax; i++)
{
TokenTree.OnDiskToken result = tokenTree.get(i, KEY_CONVERTER);
Assert.assertNotNull("failed to find object for token " + i, result);
Set<Long> found = result.getOffsets();
Assert.assertEquals(1, found.size());
Assert.assertEquals(i, found.toArray()[0]);
}
Assert.assertNull("found missing object", tokenTree.get(tokMax + 10, KEY_CONVERTER));
}
@Test
public void buildSerializeIterateAndSkip() throws Exception
{
final TokenTreeBuilder builder = new TokenTreeBuilder(tokens).finish();
final File treeFile = File.createTempFile("token-tree-iterate-test2", "tt");
treeFile.deleteOnExit();
try (SequentialWriter writer = new SequentialWriter(treeFile, 4096, BufferType.ON_HEAP))
{
builder.write(writer);
writer.sync();
}
final RandomAccessReader reader = RandomAccessReader.open(treeFile);
final TokenTree tokenTree = new TokenTree(new MappedBuffer(reader));
final RangeIterator<Long, Token> treeIterator = tokenTree.iterator(KEY_CONVERTER);
final RangeIterator<Long, TokenWithOffsets> listIterator = new EntrySetSkippableIterator(tokens);
long lastToken = 0L;
while (treeIterator.hasNext() && lastToken < 12)
{
Token treeNext = treeIterator.next();
TokenWithOffsets listNext = listIterator.next();
Assert.assertEquals(listNext.token, (lastToken = treeNext.get()));
Assert.assertEquals(convert(listNext.offsets), convert(treeNext));
}
treeIterator.skipTo(100548L);
listIterator.skipTo(100548L);
while (treeIterator.hasNext() && listIterator.hasNext())
{
Token treeNext = treeIterator.next();
TokenWithOffsets listNext = listIterator.next();
Assert.assertEquals(listNext.token, (long) treeNext.get());
Assert.assertEquals(convert(listNext.offsets), convert(treeNext));
}
Assert.assertFalse("Tree iterator not completed", treeIterator.hasNext());
Assert.assertFalse("List iterator not completed", listIterator.hasNext());
reader.close();
}
@Test
public void skipPastEnd() throws Exception
{
final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();
final File treeFile = File.createTempFile("token-tree-skip-past-test", "tt");
treeFile.deleteOnExit();
try (SequentialWriter writer = new SequentialWriter(treeFile, 4096, BufferType.ON_HEAP))
{
builder.write(writer);
writer.sync();
}
final RandomAccessReader reader = RandomAccessReader.open(treeFile);
final RangeIterator<Long, Token> tokenTree = new TokenTree(new MappedBuffer(reader)).iterator(KEY_CONVERTER);
tokenTree.skipTo(simpleTokenMap.lastKey() + 10);
}
@Test
public void testTokenMerge() throws Exception
{
final long min = 0, max = 1000;
// two different trees with the same offsets
TokenTree treeA = generateTree(min, max);
TokenTree treeB = generateTree(min, max);
RangeIterator<Long, Token> a = treeA.iterator(new KeyConverter());
RangeIterator<Long, Token> b = treeB.iterator(new KeyConverter());
long count = min;
while (a.hasNext() && b.hasNext())
{
final Token tokenA = a.next();
final Token tokenB = b.next();
// merging of two OnDiskToken
tokenA.merge(tokenB);
// merging with RAM Token with different offset
tokenA.merge(new TokenWithOffsets(tokenA.get(), convert(count + 1)));
// and RAM token with the same offset
tokenA.merge(new TokenWithOffsets(tokenA.get(), convert(count)));
// should fail when trying to merge different tokens
try
{
tokenA.merge(new TokenWithOffsets(tokenA.get() + 1, convert(count)));
Assert.fail();
}
catch (IllegalArgumentException e)
{
// expected
}
final Set<Long> offsets = new TreeSet<>();
for (DecoratedKey key : tokenA)
offsets.add(LongType.instance.compose(key.getKey()));
Set<Long> expected = new TreeSet<>();
{
expected.add(count);
expected.add(count + 1);
}
Assert.assertEquals(expected, offsets);
count++;
}
Assert.assertEquals(max, count - 1);
}
@Test
public void testEntryTypeOrdinalLookup()
{
Assert.assertEquals(EntryType.SIMPLE, EntryType.of(EntryType.SIMPLE.ordinal()));
Assert.assertEquals(EntryType.PACKED, EntryType.of(EntryType.PACKED.ordinal()));
Assert.assertEquals(EntryType.FACTORED, EntryType.of(EntryType.FACTORED.ordinal()));
Assert.assertEquals(EntryType.OVERFLOW, EntryType.of(EntryType.OVERFLOW.ordinal()));
}
private static class EntrySetSkippableIterator extends RangeIterator<Long, TokenWithOffsets>
{
private final PeekingIterator<Map.Entry<Long, LongSet>> elements;
EntrySetSkippableIterator(SortedMap<Long, LongSet> elms)
{
super(elms.firstKey(), elms.lastKey(), elms.size());
elements = Iterators.peekingIterator(elms.entrySet().iterator());
}
@Override
public TokenWithOffsets computeNext()
{
if (!elements.hasNext())
return endOfData();
Map.Entry<Long, LongSet> next = elements.next();
return new TokenWithOffsets(next.getKey(), next.getValue());
}
@Override
protected void performSkipTo(Long nextToken)
{
while (elements.hasNext())
{
if (Long.compare(elements.peek().getKey(), nextToken) >= 0)
{
break;
}
elements.next();
}
}
@Override
public void close() throws IOException
{
// nothing to do here
}
}
public static class TokenWithOffsets extends Token
{
private final LongSet offsets;
public TokenWithOffsets(long token, final LongSet offsets)
{
super(token);
this.offsets = offsets;
}
@Override
public void merge(CombinedValue<Long> other)
{}
@Override
public int compareTo(CombinedValue<Long> o)
{
return Long.compare(token, o.get());
}
@Override
public boolean equals(Object other)
{
if (!(other instanceof TokenWithOffsets))
return false;
TokenWithOffsets o = (TokenWithOffsets) other;
return token == o.token && offsets.equals(o.offsets);
}
@Override
public int hashCode()
{
return new HashCodeBuilder().append(token).build();
}
@Override
public String toString()
{
return String.format("TokenValue(token: %d, offsets: %s)", token, offsets);
}
@Override
public Iterator<DecoratedKey> iterator()
{
List<DecoratedKey> keys = new ArrayList<>(offsets.size());
for (LongCursor offset : offsets)
keys.add(dk(offset.value));
return keys.iterator();
}
}
private static Set<DecoratedKey> convert(LongSet offsets)
{
Set<DecoratedKey> keys = new HashSet<>();
for (LongCursor offset : offsets)
keys.add(KEY_CONVERTER.apply(offset.value));
return keys;
}
private static Set<DecoratedKey> convert(Token results)
{
Set<DecoratedKey> keys = new HashSet<>();
for (DecoratedKey key : results)
keys.add(key);
return keys;
}
private static LongSet convert(long... values)
{
LongSet result = new LongOpenHashSet(values.length);
for (long v : values)
result.add(v);
return result;
}
private static class KeyConverter implements Function<Long, DecoratedKey>
{
@Override
public DecoratedKey apply(Long offset)
{
return dk(offset);
}
}
private static DecoratedKey dk(Long token)
{
ByteBuffer buf = ByteBuffer.allocate(8);
buf.putLong(token);
buf.flip();
Long hashed = MurmurHash.hash2_64(buf, buf.position(), buf.remaining(), 0);
return new BufferDecoratedKey(new Murmur3Partitioner.LongToken(hashed), buf);
}
private static TokenTree generateTree(final long minToken, final long maxToken) throws IOException
{
final SortedMap<Long, LongSet> toks = new TreeMap<Long, LongSet>()
{{
for (long i = minToken; i <= maxToken; i++)
{
LongSet offsetSet = new LongOpenHashSet();
offsetSet.add(i);
put(i, offsetSet);
}
}};
final TokenTreeBuilder builder = new TokenTreeBuilder(toks).finish();
final File treeFile = File.createTempFile("token-tree-get-test", "tt");
treeFile.deleteOnExit();
try (SequentialWriter writer = new SequentialWriter(treeFile, 4096, BufferType.ON_HEAP))
{
builder.write(writer);
writer.sync();
}
RandomAccessReader reader = null;
try
{
reader = RandomAccessReader.open(treeFile);
return new TokenTree(new MappedBuffer(reader));
}
finally
{
FileUtils.closeQuietly(reader);
}
}
}
| newrelic-forks/cassandra | test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java | Java | apache-2.0 | 19,354 |
//// [staticAsIdentifier.ts]
class C1 {
static static
[x: string]: string;
}
class C2 {
static static
m() {}
}
class C3 {
static static p: string;
}
class C4 {
static static foo() {}
}
//// [staticAsIdentifier.js]
var C1 = /** @class */ (function () {
function C1() {
}
return C1;
}());
var C2 = /** @class */ (function () {
function C2() {
}
C2.prototype.m = function () { };
return C2;
}());
var C3 = /** @class */ (function () {
function C3() {
}
return C3;
}());
var C4 = /** @class */ (function () {
function C4() {
}
C4.foo = function () { };
return C4;
}());
| kitsonk/TypeScript | tests/baselines/reference/staticAsIdentifier.js | JavaScript | apache-2.0 | 677 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.ComponentModel.Composition;
using System.Linq;
using Microsoft.CodeAnalysis.Editor.Commands;
using Microsoft.CodeAnalysis.Editor.Host;
using Microsoft.CodeAnalysis.Editor.Shared.Extensions;
using Microsoft.CodeAnalysis.Editor.Shared.Options;
using Microsoft.CodeAnalysis.ErrorReporting;
using Microsoft.CodeAnalysis.FindReferences;
using Microsoft.CodeAnalysis.Internal.Log;
using Microsoft.CodeAnalysis.Shared.TestHooks;
using Microsoft.CodeAnalysis.Text;
using Microsoft.VisualStudio.Text;
using Roslyn.Utilities;
namespace Microsoft.CodeAnalysis.Editor.Implementation.FindReferences
{
[ExportCommandHandler(PredefinedCommandHandlerNames.FindReferences, ContentTypeNames.RoslynContentType)]
internal class FindReferencesCommandHandler : ICommandHandler<FindReferencesCommandArgs>
{
private readonly IEnumerable<IDefinitionsAndReferencesPresenter> _synchronousPresenters;
private readonly IEnumerable<Lazy<IStreamingFindReferencesPresenter>> _streamingPresenters;
private readonly IWaitIndicator _waitIndicator;
private readonly IAsynchronousOperationListener _asyncListener;
[ImportingConstructor]
internal FindReferencesCommandHandler(
IWaitIndicator waitIndicator,
[ImportMany] IEnumerable<IDefinitionsAndReferencesPresenter> synchronousPresenters,
[ImportMany] IEnumerable<Lazy<IStreamingFindReferencesPresenter>> streamingPresenters,
[ImportMany] IEnumerable<Lazy<IAsynchronousOperationListener, FeatureMetadata>> asyncListeners)
{
Contract.ThrowIfNull(synchronousPresenters);
Contract.ThrowIfNull(streamingPresenters);
Contract.ThrowIfNull(asyncListeners);
_waitIndicator = waitIndicator;
_synchronousPresenters = synchronousPresenters;
_streamingPresenters = streamingPresenters;
_asyncListener = new AggregateAsynchronousOperationListener(
asyncListeners, FeatureAttribute.FindReferences);
}
public CommandState GetCommandState(FindReferencesCommandArgs args, Func<CommandState> nextHandler)
{
return nextHandler();
}
public void ExecuteCommand(FindReferencesCommandArgs args, Action nextHandler)
{
var caretPosition = args.TextView.GetCaretPoint(args.SubjectBuffer) ?? -1;
if (caretPosition >= 0)
{
var snapshot = args.SubjectBuffer.CurrentSnapshot;
var document = snapshot.GetOpenDocumentInCurrentContextWithChanges();
if (document != null)
{
if (TryExecuteCommand(caretPosition, document))
{
return;
}
}
}
nextHandler();
}
// Attempts to run find-references for the given caret position, preferring
// the streaming implementation when both the language service and a presenter
// support it.  Returns false when no service could handle the request, in
// which case the caller falls through to the next command handler.
private bool TryExecuteCommand(int caretPosition, Document document)
{
    var streamingService = document.Project.LanguageServices.GetService<IStreamingFindReferencesService>();
    var synchronousService = document.Project.LanguageServices.GetService<IFindReferencesService>();

    var streamingPresenter = GetStreamingPresenter();

    // See if we're running on a host that can provide streaming results.
    // We'll both need a FAR service that can stream results to us, and
    // a presenter that can accept streamed results.
    var streamingEnabled = document.Options.GetOption(FeatureOnOffOptions.StreamingFindReferences);
    if (streamingEnabled && streamingService != null && streamingPresenter != null)
    {
        // Fire-and-forget; StreamingFindReferences tracks its own completion
        // through _asyncListener.
        StreamingFindReferences(document, streamingService, streamingPresenter, caretPosition);
        return true;
    }

    // Otherwise, either the language doesn't support streaming results,
    // or the host has no way to present results in a streaming manner.
    // Fall back to the old non-streaming approach to finding and presenting
    // results.
    if (synchronousService != null)
    {
        FindReferences(document, synchronousService, caretPosition);
        return true;
    }

    return false;
}
// Returns the first available streaming presenter, or null when none is
// exported.  Realizing the Lazy value can throw if its composition failed;
// any such failure is treated as "no streaming presenter available".
private IStreamingFindReferencesPresenter GetStreamingPresenter()
{
    try
    {
        var lazyPresenter = _streamingPresenters.FirstOrDefault();
        if (lazyPresenter == null)
        {
            return null;
        }

        return lazyPresenter.Value;
    }
    catch
    {
        return null;
    }
}
// Kicks off a streaming find-references search.  Intentionally 'async void':
// this is a fire-and-forget entry point invoked from the command handler; the
// operation is tracked through _asyncListener and any fault is reported via
// FatalError in the catch filter below.
private async void StreamingFindReferences(
    Document document, IStreamingFindReferencesService service,
    IStreamingFindReferencesPresenter presenter, int caretPosition)
{
    try
    {
        using (var token = _asyncListener.BeginAsyncOperation(nameof(StreamingFindReferences)))
        {
            // Let the presenter know we're starting a search. It will give us back
            // the context object that the FAR service will push results into.
            var context = presenter.StartSearch();
            await service.FindReferencesAsync(document, caretPosition, context).ConfigureAwait(false);

            // Note: we don't need to put this in a finally. The only time we might not hit
            // this is if cancellation or another error gets thrown. In the former case,
            // that means that a new search has started. We don't care about telling the
            // context it has completed. In the latter case something wrong has happened
            // and we don't want to run any more code in this particular context.
            context.OnCompleted();
        }
    }
    catch (Exception e) when (FatalError.ReportUnlessCanceled(e))
    {
    }
}
// Legacy, non-streaming find-references path: runs the search under a
// cancellable wait dialog and lets the service present its own results.
internal void FindReferences(
    Document document, IFindReferencesService service, int caretPosition)
{
    _waitIndicator.Wait(
        title: EditorFeaturesResources.Find_References,
        message: EditorFeaturesResources.Finding_references,
        action: context =>
        {
            using (Logger.LogBlock(FunctionId.CommandHandler_FindAllReference, context.CancellationToken))
            {
                if (!service.TryFindReferences(document, caretPosition, context))
                {
                    // The service failed, so just present an empty list of references
                    // NOTE(review): the 'return' inside this loop means only the
                    // first synchronous presenter ever sees the empty result --
                    // confirm that is intended rather than notifying all of them.
                    foreach (var presenter in _synchronousPresenters)
                    {
                        presenter.DisplayResult(DefinitionsAndReferences.Empty);
                        return;
                    }
                }
            }
        }, allowCancel: true);
}
}
} | Shiney/roslyn | src/EditorFeatures/Core/Implementation/FindReferences/FindReferencesCommandHandler.cs | C# | apache-2.0 | 7,265 |
/*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
/**
* Core HTTP/2 configuration APIs.
*/
package org.apache.hc.core5.http2.config;
| apache/httpcore | httpcore5-h2/src/main/java/org/apache/hc/core5/http2/config/package-info.java | Java | apache-2.0 | 1,270 |
// Copyright 2015, EMC, Inc.
'use strict';
var di = require('di'),
    util = require('util');

module.exports = apcObmServiceFactory;
// Register the factory with the DI container under 'apc-obm-service' and
// declare its dependencies, injected positionally into the factory below.
di.annotate(apcObmServiceFactory, new di.Provide('apc-obm-service'));
di.annotate(apcObmServiceFactory,
    new di.Inject('Promise', 'OBM.base', '_')
);
/**
 * Factory for an OBM (out-of-band management) service that controls an APC
 * PDU outlet over SNMP by writing the PowerNet-MIB sPDUOutletCtl object for
 * the configured outlet ("port").
 */
function apcObmServiceFactory(Promise, BaseObmService, _) {
    function ApcObmService(options) {
        BaseObmService.call(this, options);
        // Config keys that must be present before any command can run.
        this.requiredKeys = ['community', 'host', 'port'];
        // Symbolic base OID controlling individual PDU outlets.
        this.mibCommand = 'PowerNet-MIB::sPDUOutletCtl';
    }
    util.inherits(ApcObmService, BaseObmService);

    // Integer values written below follow PowerNet-MIB outlet control:
    // 1 = on, 2 = off, 3 = immediate reboot.
    ApcObmService.prototype.reboot = function() {
        return this._runInternal([this.mibCommand + '.' + this.options.config.port, 'i', '3']);
    };

    ApcObmService.prototype.powerOn = function() {
        return this._runInternal([this.mibCommand + '.' + this.options.config.port, 'i', '1']);
    };

    ApcObmService.prototype.powerOff = function() {
        return this._runInternal([this.mibCommand + '.' + this.options.config.port, 'i', '2']);
    };

    // Reads the outlet state with snmpwalk and maps the textual result to a
    // boolean; rejects when the output mentions neither outletOn nor outletOff.
    ApcObmService.prototype.powerStatus = function() {
        return this._runInternal([this.mibCommand + '.' + this.options.config.port], 'snmpwalk')
        .then(function (result) {
            if (_.contains(result.stdout, 'outletOn')) {
                return Promise.resolve(true);
            }
            if (_.contains(result.stdout, 'outletOff')) {
                return Promise.resolve(false);
            }
            return Promise.reject(
                new Error('Unable to determine power state (' + result.stdout + ').')
            );
        });
    };

    // Builds and runs the SNMP command (default snmpset) with v2c community
    // auth against the configured host, appending the caller's OID arguments.
    ApcObmService.prototype._runInternal = function (command, file) {
        return this.run({
            command: file || 'snmpset',
            args: [
                '-v2c',
                '-c', this.options.config.community,
                this.options.config.host
            ].concat(command)
        });
    };

    ApcObmService.create = function(options) {
        return BaseObmService.create(ApcObmService, options);
    };

    return ApcObmService;
}
| AlaricChan/on-tasks | lib/services/apc-obm-service.js | JavaScript | apache-2.0 | 2,121 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.utils;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.LockSupport;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* This class implements time simulation support. When time simulation is enabled,
* methods on this class will use fixed time. When time simulation is disabled,
* methods will pass through to relevant java.lang.System/java.lang.Thread calls.
* Methods using units higher than nanoseconds will pass through to System.currentTimeMillis().
* Methods supporting nanoseconds will pass through to System.nanoTime().
*/
public final class Time {
    private static final Logger LOG = Logger.getLogger(Time.class.getName());
    // True while a SimulatedTime scope is open; every clock/sleep call below
    // then uses the simulated clock instead of real System time.
    private static final AtomicBoolean SIMULATING = new AtomicBoolean(false);
    // When > 0, each poll inside a simulated sleep auto-advances the clock by
    // this many nanos, so sleepers make progress without an external driver.
    private static final AtomicLong AUTO_ADVANCE_NANOS_ON_SLEEP = new AtomicLong(0);
    // Per-thread wake-up deadline (simulated nanos) for threads currently in a
    // simulated sleep.
    private static final Map<Thread, AtomicLong> THREAD_SLEEP_TIMES_NANOS = new ConcurrentHashMap<>();
    // Serializes simulation start/stop and simulated-clock advancement.
    private static final Object SLEEP_TIMES_LOCK = new Object();
    // The simulated clock, in nanoseconds; only meaningful while SIMULATING.
    private static final AtomicLong SIMULATED_CURR_TIME_NANOS = new AtomicLong(0);

    // Static utility class; not instantiable.
    private Time() {
    }

    /** Returns true when simulated time is currently enabled. */
    public static boolean isSimulating() {
        return SIMULATING.get();
    }

    /** Sleeps until the given time in milliseconds; no-op if already past. */
    public static void sleepUntil(long targetTimeMs) throws InterruptedException {
        if (SIMULATING.get()) {
            simulatedSleepUntilNanos(millisToNanos(targetTimeMs));
        } else {
            long sleepTimeMs = targetTimeMs - currentTimeMillis();
            if (sleepTimeMs > 0) {
                Thread.sleep(sleepTimeMs);
            }
        }
    }

    /** Sleeps until the given time in nanoseconds; no-op if already past. */
    public static void sleepUntilNanos(long targetTimeNanos) throws InterruptedException {
        if (SIMULATING.get()) {
            simulatedSleepUntilNanos(targetTimeNanos);
        } else {
            long sleepTimeNanos = targetTimeNanos - nanoTime();
            long sleepTimeMs = nanosToMillis(sleepTimeNanos);
            // Sub-millisecond remainder, passed separately to Thread.sleep(ms, nanos).
            int sleepTimeNanosSansMs = (int) (sleepTimeNanos % 1_000_000);
            if (sleepTimeNanos > 0) {
                Thread.sleep(sleepTimeMs, sleepTimeNanosSansMs);
            }
        }
    }

    // Polls the simulated clock until it reaches targetTimeNanos, registering
    // this thread's deadline so isThreadWaiting/advanceTimeNanos can observe
    // it.  Throws InterruptedException if simulation is disabled mid-sleep.
    private static void simulatedSleepUntilNanos(long targetTimeNanos) throws InterruptedException {
        try {
            synchronized (SLEEP_TIMES_LOCK) {
                if (!SIMULATING.get()) {
                    LOG.log(Level.FINER, Thread.currentThread()
                            + " is still sleeping after simulated time disabled.",
                        new RuntimeException("STACK TRACE"));
                    throw new InterruptedException();
                }
                THREAD_SLEEP_TIMES_NANOS.put(Thread.currentThread(), new AtomicLong(targetTimeNanos));
            }
            while (SIMULATED_CURR_TIME_NANOS.get() < targetTimeNanos) {
                synchronized (SLEEP_TIMES_LOCK) {
                    if (!SIMULATING.get()) {
                        LOG.log(Level.FINER, Thread.currentThread()
                                + " is still sleeping after simulated time disabled.",
                            new RuntimeException("STACK TRACE"));
                        throw new InterruptedException();
                    }
                    long autoAdvance = AUTO_ADVANCE_NANOS_ON_SLEEP.get();
                    if (autoAdvance > 0) {
                        advanceTimeNanos(autoAdvance);
                    }
                }
                // Real sleep between polls keeps the busy-wait cheap.
                Thread.sleep(10);
            }
        } finally {
            // Always deregister, whether woken normally or interrupted.
            THREAD_SLEEP_TIMES_NANOS.remove(Thread.currentThread());
        }
    }

    /** Sleeps for the given number of milliseconds; no-op for ms <= 0. */
    public static void sleep(long ms) throws InterruptedException {
        if (ms > 0) {
            if (SIMULATING.get()) {
                simulatedSleepUntilNanos(millisToNanos(currentTimeMillis() + ms));
            } else {
                Thread.sleep(ms);
            }
        }
    }

    /** Parks for the given number of nanoseconds; no-op for nanos <= 0. */
    public static void parkNanos(long nanos) throws InterruptedException {
        if (nanos > 0) {
            if (SIMULATING.get()) {
                simulatedSleepUntilNanos(nanoTime() + nanos);
            } else {
                LockSupport.parkNanos(nanos);
            }
        }
    }

    /** Sleeps for the given number of seconds; no-op for secs <= 0. */
    public static void sleepSecs(long secs) throws InterruptedException {
        if (secs > 0) {
            sleep(secs * 1000);
        }
    }

    /** Simulated or real System.nanoTime(), depending on mode. */
    public static long nanoTime() {
        if (SIMULATING.get()) {
            return SIMULATED_CURR_TIME_NANOS.get();
        } else {
            return System.nanoTime();
        }
    }

    /** Simulated or real System.currentTimeMillis(), depending on mode. */
    public static long currentTimeMillis() {
        if (SIMULATING.get()) {
            return nanosToMillis(SIMULATED_CURR_TIME_NANOS.get());
        } else {
            return System.currentTimeMillis();
        }
    }

    public static long nanosToMillis(long nanos) {
        return nanos / 1_000_000;
    }

    public static long millisToNanos(long millis) {
        return millis * 1_000_000;
    }

    public static long secsToMillis(int secs) {
        return 1000 * (long) secs;
    }

    public static long secsToMillisLong(double secs) {
        return (long) (1000 * secs);
    }

    /** Current time in whole seconds (simulated or real). */
    public static int currentTimeSecs() {
        return (int) (currentTimeMillis() / 1000);
    }

    /** Seconds elapsed since the given timestamp (in seconds). */
    public static int deltaSecs(int timeInSeconds) {
        return Time.currentTimeSecs() - timeInSeconds;
    }

    /** Milliseconds elapsed since the given timestamp (in milliseconds). */
    public static long deltaMs(long timeInMilliseconds) {
        return Time.currentTimeMillis() - timeInMilliseconds;
    }

    /** Advances the simulated clock by the given number of milliseconds. */
    public static void advanceTime(long ms) {
        advanceTimeNanos(millisToNanos(ms));
    }

    /**
     * Advances the simulated clock by the given non-negative number of
     * nanoseconds, dropping deadline entries of any sleepers whose wake-up
     * time has now been reached.  Only valid in simulation mode.
     */
    public static void advanceTimeNanos(long nanos) {
        if (!SIMULATING.get()) {
            throw new IllegalStateException("Cannot simulate time unless in simulation mode");
        }
        if (nanos < 0) {
            throw new IllegalArgumentException("advanceTime only accepts positive time as an argument");
        }
        synchronized (SLEEP_TIMES_LOCK) {
            long newTime = SIMULATED_CURR_TIME_NANOS.addAndGet(nanos);
            Iterator<AtomicLong> sleepTimesIter = THREAD_SLEEP_TIMES_NANOS.values().iterator();
            while (sleepTimesIter.hasNext()) {
                AtomicLong curr = sleepTimesIter.next();
                if (SIMULATED_CURR_TIME_NANOS.get() >= curr.get()) {
                    sleepTimesIter.remove();
                }
            }
            LOG.log(Level.FINER, "Advanced simulated time to " + newTime);
        }
    }

    /** Advances the simulated clock by the given number of seconds. */
    public static void advanceTimeSecs(long secs) {
        advanceTime(secs * 1_000);
    }

    /**
     * Returns true when the given thread is dead or is registered as sleeping
     * past the current simulated time.  Only valid in simulation mode.
     */
    public static boolean isThreadWaiting(Thread t) {
        if (!SIMULATING.get()) {
            throw new IllegalStateException("Must be in simulation mode");
        }
        AtomicLong time = THREAD_SLEEP_TIMES_NANOS.get(t);
        return !t.isAlive() || time != null && nanoTime() < time.longValue();
    }

    /**
     * Try-with-resources scope that enables simulated time at construction
     * (resetting the clock to zero) and disables it again on close().  An
     * optional advanceTimeMs makes each simulated-sleep poll auto-advance
     * the clock by that many milliseconds.
     */
    public static class SimulatedTime implements AutoCloseable {

        public SimulatedTime() {
            this(null);
        }

        public SimulatedTime(Number advanceTimeMs) {
            synchronized (Time.SLEEP_TIMES_LOCK) {
                Time.SIMULATING.set(true);
                Time.SIMULATED_CURR_TIME_NANOS.set(0);
                Time.THREAD_SLEEP_TIMES_NANOS.clear();
                if (advanceTimeMs != null) {
                    Time.AUTO_ADVANCE_NANOS_ON_SLEEP.set(millisToNanos(advanceTimeMs.longValue()));
                } else {
                    Time.AUTO_ADVANCE_NANOS_ON_SLEEP.set(0);
                }
                LOG.warning("AutoCloseable Simulated Time Starting...");
            }
        }

        @Override
        public void close() {
            synchronized (Time.SLEEP_TIMES_LOCK) {
                Time.SIMULATING.set(false);
                LOG.warning("AutoCloseable Simulated Time Ending...");
            }
        }
    }
}
| mycFelix/heron | storm-compatibility/src/java/org/apache/storm/utils/Time.java | Java | apache-2.0 | 7,892 |
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import flags
from nova import test
from nova.api.openstack import accounts
from nova.auth.manager import User
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
# Run tests with verbose logging to make failures easier to diagnose.
FLAGS.verbose = True
def fake_init(self):
    # Stub __init__ for accounts.Controller: swap in the fake auth manager.
    self.manager = fakes.FakeAuthManager()
def fake_admin_check(self, req):
    # Stub _check_admin: bypass the admin check so handlers can be exercised.
    return True
class AccountsTest(test.TestCase):
    """Tests for the admin-only /v1.0/accounts OpenStack API resource."""

    def setUp(self):
        """Stub auth/networking and seed two users, each managing a project."""
        super(AccountsTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        # Replace the controller's real init and admin check with test fakes.
        self.stubs.Set(accounts.Controller, '__init__',
                       fake_init)
        self.stubs.Set(accounts.Controller, '_check_admin',
                       fake_admin_check)
        fakes.FakeAuthManager.clear_fakes()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)

        # Accounts API requires the admin API; remember the old flag so
        # tearDown can restore it.
        self.allow_admin = FLAGS.allow_admin_api
        FLAGS.allow_admin_api = True
        fakemgr = fakes.FakeAuthManager()
        joeuser = User('id1', 'guy1', 'acc1', 'secret1', False)
        superuser = User('id2', 'guy2', 'acc2', 'secret2', True)
        fakemgr.add_user(joeuser)
        fakemgr.add_user(superuser)
        fakemgr.create_project('test1', joeuser)
        fakemgr.create_project('test2', superuser)

    def tearDown(self):
        self.stubs.UnsetAll()
        FLAGS.allow_admin_api = self.allow_admin
        super(AccountsTest, self).tearDown()

    def test_get_account(self):
        """GET on an existing account returns its id, name and manager."""
        req = webob.Request.blank('/v1.0/accounts/test1')
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(res_dict['account']['id'], 'test1')
        self.assertEqual(res_dict['account']['name'], 'test1')
        self.assertEqual(res_dict['account']['manager'], 'id1')
        self.assertEqual(res.status_int, 200)

    def test_account_delete(self):
        """DELETE removes the project from the fake auth manager."""
        req = webob.Request.blank('/v1.0/accounts/test1')
        req.method = 'DELETE'
        res = req.get_response(fakes.wsgi_app())
        self.assertTrue('test1' not in fakes.FakeAuthManager.projects)
        self.assertEqual(res.status_int, 200)

    def test_account_create(self):
        """PUT to a new account name creates a third project."""
        body = dict(account=dict(description='test account',
                                 manager='id1'))
        req = webob.Request.blank('/v1.0/accounts/newacct')
        req.headers["Content-Type"] = "application/json"
        req.method = 'PUT'
        req.body = json.dumps(body)

        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(res.status_int, 200)
        self.assertEqual(res_dict['account']['id'], 'newacct')
        self.assertEqual(res_dict['account']['name'], 'newacct')
        self.assertEqual(res_dict['account']['description'], 'test account')
        self.assertEqual(res_dict['account']['manager'], 'id1')
        self.assertTrue('newacct' in
                        fakes.FakeAuthManager.projects)
        self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 3)

    def test_account_update(self):
        """PUT to an existing account updates description and manager."""
        body = dict(account=dict(description='test account',
                                 manager='id2'))
        req = webob.Request.blank('/v1.0/accounts/test1')
        req.headers["Content-Type"] = "application/json"
        req.method = 'PUT'
        req.body = json.dumps(body)

        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(res.status_int, 200)
        self.assertEqual(res_dict['account']['id'], 'test1')
        self.assertEqual(res_dict['account']['name'], 'test1')
        self.assertEqual(res_dict['account']['description'], 'test account')
        self.assertEqual(res_dict['account']['manager'], 'id2')
        self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 2)
| superstack/nova | nova/tests/api/openstack/test_accounts.py | Python | apache-2.0 | 4,537 |
/*
* Copyright 2019 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.server.service;
import com.thoughtworks.go.CurrentGoCDVersion;
import com.thoughtworks.go.domain.GoVersion;
import com.thoughtworks.go.domain.VersionInfo;
import com.thoughtworks.go.server.dao.VersionInfoDao;
import org.junit.Before;
import org.junit.Test;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.*;
public class ServerVersionInfoBuilderTest {
    private VersionInfoDao versionInfoDao;
    private ServerVersionInfoBuilder builder;

    @Before
    public void setUp() {
        versionInfoDao = mock(VersionInfoDao.class);
        // Spied so individual tests can stub getInstalledVersion().
        builder = spy(new ServerVersionInfoBuilder(versionInfoDao));
    }

    @Test
    public void shouldGetVersionInfoForGOServerIfExists(){
        VersionInfo goVersionInfo = new VersionInfo("go_server", new GoVersion("1.2.3-1"));
        when(versionInfoDao.findByComponentName("go_server")).thenReturn(goVersionInfo);

        VersionInfo versionInfo = builder.getServerVersionInfo();

        assertThat(versionInfo.getComponentName(), is(goVersionInfo.getComponentName()));
        assertThat(versionInfo.getInstalledVersion(), is(goVersionInfo.getInstalledVersion()));
    }

    @Test
    public void shouldCreateVersionInfoForGOServerIfDoesNotExist(){
        // No stored record: the builder should persist a fresh one using the
        // currently installed GoCD version.
        when(versionInfoDao.findByComponentName("go_server")).thenReturn(null);

        VersionInfo versionInfo = builder.getServerVersionInfo();

        verify(versionInfoDao).saveOrUpdate(isA(VersionInfo.class));
        assertThat(versionInfo.getComponentName(), is("go_server"));
        assertThat(versionInfo.getInstalledVersion().toString(), is(new GoVersion(CurrentGoCDVersion.getInstance().formatted()).toString()));
    }

    @Test
    public void shouldUpdateTheVersionInfoIfInstalledVersionHasChanged(){
        // Stored record is older than the running version: it gets upgraded.
        VersionInfo goVersionInfo = new VersionInfo("go_server", new GoVersion("1.2.3-1"));
        when(versionInfoDao.findByComponentName("go_server")).thenReturn(goVersionInfo);

        VersionInfo versionInfo = builder.getServerVersionInfo();

        verify(versionInfoDao).saveOrUpdate(isA(VersionInfo.class));
        assertThat(versionInfo.getComponentName(), is(goVersionInfo.getComponentName()));
        assertThat(versionInfo.getInstalledVersion().toString(), is(new GoVersion(CurrentGoCDVersion.getInstance().formatted()).toString()));
    }

    @Test
    public void shouldNotCreateAVersionInfoOnDevelopmentServer(){
        // "N/A" marks a development build; nothing should be persisted.
        when(versionInfoDao.findByComponentName("go_server")).thenReturn(null);
        when(builder.getInstalledVersion()).thenReturn("N/A");

        VersionInfo versionInfo = builder.getServerVersionInfo();

        verify(versionInfoDao, never()).saveOrUpdate(isA(VersionInfo.class));
        assertNull(versionInfo);
    }

    @Test
    public void shouldNotUpdateTheVersionInfoIfUnableToParseTheInstalledVersion(){
        // Unparseable installed version: keep the stored record untouched.
        VersionInfo goVersionInfo = new VersionInfo("go_server", new GoVersion("1.2.3-1"));
        when(versionInfoDao.findByComponentName("go_server")).thenReturn(goVersionInfo);
        when(builder.getInstalledVersion()).thenReturn("N/A");

        VersionInfo versionInfo = builder.getServerVersionInfo();

        verify(versionInfoDao, never()).saveOrUpdate(isA(VersionInfo.class));
        assertThat(versionInfo.getComponentName(), is(goVersionInfo.getComponentName()));
        assertThat(versionInfo.getInstalledVersion(), is(goVersionInfo.getInstalledVersion()));
    }
}
| kierarad/gocd | server/src/test-fast/java/com/thoughtworks/go/server/service/ServerVersionInfoBuilderTest.java | Java | apache-2.0 | 4,100 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/macie2/model/AdminStatus.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>
using namespace Aws::Utils;
namespace Aws
{
namespace Macie2
{
namespace Model
{
namespace AdminStatusMapper
{
// Precomputed hashes of the known enum names; incoming strings are matched
// by hash rather than by repeated string comparison.
static const int ENABLED_HASH = HashingUtils::HashString("ENABLED");
static const int DISABLING_IN_PROGRESS_HASH = HashingUtils::HashString("DISABLING_IN_PROGRESS");

/*
 * Maps a service-provided status string onto the AdminStatus enum.  Unknown
 * values are remembered in the global enum-overflow container (keyed by hash)
 * so GetNameForAdminStatus can round-trip them; without an overflow container
 * the result is AdminStatus::NOT_SET.
 */
AdminStatus GetAdminStatusForName(const Aws::String& name)
{
  int hashCode = HashingUtils::HashString(name.c_str());
  if (hashCode == ENABLED_HASH)
  {
    return AdminStatus::ENABLED;
  }
  else if (hashCode == DISABLING_IN_PROGRESS_HASH)
  {
    return AdminStatus::DISABLING_IN_PROGRESS;
  }
  EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
  if(overflowContainer)
  {
    overflowContainer->StoreOverflow(hashCode, name);
    // Unknown value: its hash doubles as an ad-hoc enum value for round-tripping.
    return static_cast<AdminStatus>(hashCode);
  }

  return AdminStatus::NOT_SET;
}
/*
 * Inverse of GetAdminStatusForName: converts an AdminStatus value back to its
 * wire string.  Values previously stashed in the overflow container are
 * looked up by their hash; anything else yields an empty string.
 */
Aws::String GetNameForAdminStatus(AdminStatus enumValue)
{
  switch(enumValue)
  {
  case AdminStatus::ENABLED:
    return "ENABLED";
  case AdminStatus::DISABLING_IN_PROGRESS:
    return "DISABLING_IN_PROGRESS";
  default:
    EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
    if(overflowContainer)
    {
      return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
    }
    return {};
  }
}
} // namespace AdminStatusMapper
} // namespace Model
} // namespace Macie2
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-macie2/source/model/AdminStatus.cpp | C++ | apache-2.0 | 2,005 |
// Generated by CoffeeScript 1.3.3
(function() {
  var InstagramMedia;

  /**
   * Media endpoints of the Instagram REST API.  All HTTP plumbing
   * (credential handling, querystring construction, request dispatch) is
   * delegated to the `parent` library facade.
   */
  InstagramMedia = (function() {

    function InstagramMedia(parent) {
      this.parent = parent;
    }

    /*
    Basic Media
    */

    InstagramMedia.prototype.popular = function(params) {
      var credentials;
      credentials = this.parent._credentials({});
      params['path'] = "/" + this.parent._api_version + "/media/popular?" + (this.parent._to_querystring(credentials));
      return this.parent._request(params);
    };

    InstagramMedia.prototype.info = function(params) {
      var credentials;
      credentials = this.parent._credentials({});
      params['path'] = "/" + this.parent._api_version + "/media/" + params['media_id'] + "?" + (this.parent._to_querystring(credentials));
      return this.parent._request(params);
    };

    InstagramMedia.prototype.search = function(params) {
      // Search parameters (lat/lng etc.) are merged with credentials into the querystring.
      params = this.parent._credentials(params);
      params['path'] = "/" + this.parent._api_version + "/media/search?" + (this.parent._to_querystring(params));
      return this.parent._request(params);
    };

    /*
    Likes
    */

    InstagramMedia.prototype.likes = function(params) {
      var credentials;
      credentials = this.parent._credentials({});
      params['path'] = "/" + this.parent._api_version + "/media/" + params['media_id'] + "/likes?" + (this.parent._to_querystring(credentials));
      return this.parent._request(params);
    };

    InstagramMedia.prototype.like = function(params) {
      // Liking requires a user context, hence the access_token credential mode.
      params['post_data'] = this.parent._credentials({}, 'access_token');
      params['method'] = 'POST';
      params['path'] = "/" + this.parent._api_version + "/media/" + params['media_id'] + "/likes";
      return this.parent._request(params);
    };

    InstagramMedia.prototype.unlike = function(params) {
      params = this.parent._credentials(params, 'access_token');
      params['method'] = 'DELETE';
      params['path'] = "/" + this.parent._api_version + "/media/" + params['media_id'] + "/likes?" + (this.parent._to_querystring(params));
      return this.parent._request(params);
    };

    /*
    Comments
    */

    InstagramMedia.prototype.comments = function(params) {
      var credentials;
      credentials = this.parent._credentials({});
      params['path'] = "/" + this.parent._api_version + "/media/" + params['media_id'] + "/comments?" + (this.parent._to_querystring(credentials));
      return this.parent._request(params);
    };

    InstagramMedia.prototype.comment = function(params) {
      params['post_data'] = this.parent._credentials({
        text: params['text']
      }, 'access_token');
      params['method'] = 'POST';
      params['path'] = "/" + this.parent._api_version + "/media/" + params['media_id'] + "/comments";
      return this.parent._request(params);
    };

    InstagramMedia.prototype.uncomment = function(params) {
      var credentials;
      credentials = this.parent._credentials({}, 'access_token');
      params['method'] = 'DELETE';
      params['path'] = "/" + this.parent._api_version + "/media/" + params['media_id'] + "/comments/" + params['comment_id'] + "?" + (this.parent._to_querystring(credentials));
      return this.parent._request(params);
    };

    /*
    Subscriptions
    */

    InstagramMedia.prototype.subscribe = function(params) {
      params['object'] = 'geography';
      return this.parent.subscriptions._subscribe(params);
    };

    InstagramMedia.prototype.unsubscribe = function(params) {
      return this.parent.subscriptions._unsubscribe(params);
    };

    InstagramMedia.prototype.unsubscribe_all = function(params) {
      // NOTE(review): this calls the same _unsubscribe as unsubscribe() above,
      // just with object='geography' set -- confirm a dedicated
      // _unsubscribe_all helper was not intended here.
      params['object'] = 'geography';
      return this.parent.subscriptions._unsubscribe(params);
    };

    return InstagramMedia;

  })();

  module.exports = InstagramMedia;

}).call(this);
| Rmalnoult/pearlstagram-nodejs | node_modules/instagram-node-lib/lib/class.instagram.media.js | JavaScript | apache-2.0 | 3,838 |