signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class Points { /** * Inverse transforms a point as specified , storing the result in the point provided . * @ return a reference to the result point , for chaining . */ public static Point inverseTransform ( double x , double y , double sx , double sy , double rotation , double tx , double ty , Point result ) { } }
x -= tx ; y -= ty ; // untranslate double sinnega = Math . sin ( - rotation ) , cosnega = Math . cos ( - rotation ) ; double nx = ( x * cosnega - y * sinnega ) ; // unrotate double ny = ( x * sinnega + y * cosnega ) ; return result . set ( nx / sx , ny / sy ) ; // unscale
public class BasicFreeVarCollector {
    /**
     * If tree refers to a class instance creation expression
     * add all free variables of the freshly created class.
     */
    public void visitNewClass (JCNewClass tree) {
        // The constructor symbol's owner is the class being instantiated.
        ClassSymbol c = (ClassSymbol) tree.constructor.owner;
        // Fold that class's free variables into the current collection.
        addFreeVars(c);
        // Continue the standard scan of the subtrees (arguments, class body, etc.).
        super.visitNewClass(tree);
    }
}
public class SftpFile {
    /**
     * Get the parent of the current file. This method determines the correct
     * path of the parent file; if no parent exists (i.e. the current file is
     * the root of the filesystem) then this method returns a null value.
     *
     * @return SftpFile the parent file, or null when this file is the root
     * @throws SshException
     * @throws SftpStatusException
     */
    public SftpFile getParent() throws SshException, SftpStatusException {
        if (absolutePath.lastIndexOf('/') == -1) {
            // This is simply a filename so the parent is the users default
            // directory
            String dir = sftp.getDefaultDirectory();
            return sftp.getFile(dir);
        }
        // Extract the filename from the absolute path and return the parent.
        // getAbsolutePath canonicalizes the stored path via the server.
        String path = sftp.getAbsolutePath(absolutePath);
        if (path.equals("/"))
            return null; // already at the filesystem root — no parent
        // If we have . or .. then strip the path and let getParent start over
        // again with the correct canonical path
        if (filename.equals(".") || filename.equals("..")) {
            return sftp.getFile(path).getParent();
        }
        int idx = path.lastIndexOf('/');
        String parent = path.substring(0, idx);
        // Check if we at the root if so we will have to add /
        if (parent.equals(""))
            parent = "/";
        return sftp.getFile(parent);
    }
}
public class OmemoMessageBuilder {
    /**
     * Move the auth tag from the end of the cipherText to the messageKey.
     *
     * @param messageKey source messageKey without authTag
     * @param cipherText source cipherText with authTag (last 16 bytes)
     * @param messageKeyWithAuthTag destination messageKey with authTag appended
     * @param cipherTextWithoutAuthTag destination cipherText without authTag
     */
    static void moveAuthTag (byte[] messageKey, byte[] cipherText,
                             byte[] messageKeyWithAuthTag, byte[] cipherTextWithoutAuthTag) {
        // Validate that the destination buffers are sized for a 16-byte auth tag.
        final int tagLength = 16;
        if (messageKeyWithAuthTag.length != messageKey.length + tagLength) {
            throw new IllegalArgumentException("Length of messageKeyWithAuthTag must be length of messageKey + "
                    + "length of AuthTag (16)");
        }
        if (cipherTextWithoutAuthTag.length != cipherText.length - tagLength) {
            throw new IllegalArgumentException("Length of cipherTextWithoutAuthTag must be length of cipherText "
                    + "- length of AuthTag (16)");
        }
        // Copy the key into the front of the combined buffer,
        // the tag-less ciphertext into its destination,
        // and the trailing auth tag onto the end of the key buffer.
        System.arraycopy(messageKey, 0, messageKeyWithAuthTag, 0, tagLength);
        System.arraycopy(cipherText, 0, cipherTextWithoutAuthTag, 0, cipherTextWithoutAuthTag.length);
        System.arraycopy(cipherText, cipherText.length - tagLength, messageKeyWithAuthTag, tagLength, tagLength);
    }
}
public class SessionImpl {
    /**
     * {@inheritDoc}
     */
    public void exportDocumentView (String absPath, ContentHandler contentHandler, boolean skipBinary,
            boolean noRecurse) throws InvalidSerializedDataException, PathNotFoundException, SAXException,
            RepositoryException {
        // Fail fast if this session has been logged out.
        checkLive();
        LocationFactory factory =
            new LocationFactory(((NamespaceRegistryImpl) repository.getNamespaceRegistry()));
        WorkspaceEntry wsConfig =
            (WorkspaceEntry) container.getComponentInstanceOfType(WorkspaceEntry.class);
        ValueFactoryImpl valueFactoryImpl = new ValueFactoryImpl(factory, wsConfig, cleanerHolder);
        try {
            // Build a document-view exporter honoring the skipBinary/noRecurse flags.
            BaseXmlExporter exporter =
                new ExportImportFactory().getExportVisitor(XmlMapping.DOCVIEW, contentHandler, skipBinary,
                    noRecurse, getTransientNodesManager(), repository.getNamespaceRegistry(), valueFactoryImpl);
            // Resolve the JCR path to internal item data; missing node → PathNotFoundException.
            JCRPath srcNodePath = getLocationFactory().parseAbsPath(absPath);
            ItemData srcItemData = dataManager.getItemData(srcNodePath.getInternalPath());
            if (srcItemData == null) {
                throw new PathNotFoundException("No node exists at " + absPath);
            }
            exporter.export((NodeData) srcItemData);
        } catch (XMLStreamException e) {
            // Wrap low-level XML stream failures in the SAXException declared by the JCR API.
            throw new SAXException(e);
        }
    }
}
public class DescribeMaintenanceWindowExecutionTaskInvocationsResult { /** * Information about the task invocation results per invocation . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setWindowExecutionTaskInvocationIdentities ( java . util . Collection ) } or * { @ link # withWindowExecutionTaskInvocationIdentities ( java . util . Collection ) } if you want to override the existing * values . * @ param windowExecutionTaskInvocationIdentities * Information about the task invocation results per invocation . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeMaintenanceWindowExecutionTaskInvocationsResult withWindowExecutionTaskInvocationIdentities ( MaintenanceWindowExecutionTaskInvocationIdentity ... windowExecutionTaskInvocationIdentities ) { } }
if ( this . windowExecutionTaskInvocationIdentities == null ) { setWindowExecutionTaskInvocationIdentities ( new com . amazonaws . internal . SdkInternalList < MaintenanceWindowExecutionTaskInvocationIdentity > ( windowExecutionTaskInvocationIdentities . length ) ) ; } for ( MaintenanceWindowExecutionTaskInvocationIdentity ele : windowExecutionTaskInvocationIdentities ) { this . windowExecutionTaskInvocationIdentities . add ( ele ) ; } return this ;
public class LongChromosome {
    /**
     * Create a new {@code LongChromosome} with the given genes.
     *
     * @param genes the genes of the chromosome.
     * @return a new chromosome with the given genes.
     * @throws NullPointerException if the given {@code genes} are {@code null}
     * @throws IllegalArgumentException if the length of the genes array is
     *         empty or the given {@code genes} doesn't have the same range.
     */
    public static LongChromosome of (final LongGene... genes) {
        // Rejects gene arrays whose elements do not all share the same range.
        checkGeneRange(Stream.of(genes).map(LongGene::range));
        // The chromosome length is fixed to exactly the given gene count.
        return new LongChromosome(ISeq.of(genes), IntRange.of(genes.length));
    }
}
public class Objects2 {
    /**
     * Returns {@code null} when either argument is {@code null} or the two
     * arguments are equal; otherwise returns the first argument.
     *
     * @param value  first object
     * @param value2 second object
     * @param <T>    object type
     * @return {@code value}, or {@code null} as described above
     */
    public static <T> T nullIf (final T value, final T value2) {
        // A single guard expression covers all three null-producing cases.
        return (value == null || value2 == null || value.equals(value2)) ? null : value;
    }
}
public class SpecNodeWithRelationships { /** * Add a relationship to the topic . * @ param topic The topic that is to be related to . * @ param type The type of the relationship . */ public void addRelationshipToTarget ( final SpecTopic topic , final RelationshipType type ) { } }
final TargetRelationship relationship = new TargetRelationship ( this , topic , type ) ; topicTargetRelationships . add ( relationship ) ; relationships . add ( relationship ) ;
public class AmazonEnvironmentAwareClientBuilder {
    /**
     * Build the client.
     *
     * @param <T> the type parameter
     * @param builder the builder
     * @param clientType the client type
     * @return the client instance
     */
    public <T> T build (final AwsClientBuilder builder, final Class<T> clientType) {
        val cfg = new ClientConfiguration();
        try {
            // Optional local bind address; resolution failures are logged but non-fatal.
            val localAddress = getSetting("localAddress");
            if (StringUtils.isNotBlank(localAddress)) {
                cfg.setLocalAddress(InetAddress.getByName(localAddress));
            }
        } catch (final Exception e) {
            LOGGER.error(e.getMessage(), e);
        }
        builder.withClientConfiguration(cfg);
        // Credentials resolved via the chaining provider (explicit keys take precedence).
        val key = getSetting("credentialAccessKey");
        val secret = getSetting("credentialSecretKey");
        val credentials = ChainingAWSCredentialsProvider.getInstance(key, secret);
        builder.withCredentials(credentials);
        // Fall back to the region the code is currently running in when none configured.
        var region = getSetting("region");
        val currentRegion = Regions.getCurrentRegion();
        if (currentRegion != null && StringUtils.isBlank(region)) {
            region = currentRegion.getName();
        }
        var regionOverride = getSetting("regionOverride");
        // NOTE(review): this branch REPLACES a non-blank regionOverride with the
        // current region, which looks inverted relative to the `region` fallback
        // above (isNotBlank vs isBlank) — confirm intended semantics upstream.
        if (currentRegion != null && StringUtils.isNotBlank(regionOverride)) {
            regionOverride = currentRegion.getName();
        }
        // Override wins over the plain region setting when both are present.
        val finalRegion = StringUtils.defaultIfBlank(regionOverride, region);
        if (StringUtils.isNotBlank(finalRegion)) {
            builder.withRegion(finalRegion);
        }
        // Custom endpoint (e.g. localstack) paired with the resolved region.
        val endpoint = getSetting("endpoint");
        if (StringUtils.isNotBlank(endpoint)) {
            builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, finalRegion));
        }
        val result = builder.build();
        return clientType.cast(result);
    }
}
public class PeerManager { /** * from interface PeerProvider */ public void invokeAction ( ClientObject caller , byte [ ] serializedAction ) { } }
NodeAction action = null ; try { ObjectInputStream oin = new ObjectInputStream ( new ByteArrayInputStream ( serializedAction ) ) ; action = ( NodeAction ) oin . readObject ( ) ; _injector . injectMembers ( action ) ; action . invoke ( ) ; } catch ( Exception e ) { log . warning ( "Failed to execute node action" , "from" , ( caller == null ) ? "self" : caller . who ( ) , "action" , action , "serializedSize" , serializedAction . length , e ) ; }
public class GridFTPClient {
    /**
     * Starts local server in striped active mode.
     * setStripedPassive() must be called before that.
     * This method takes no parameters. HostPortList of the remote
     * server, known from the last call of setStripedPassive(), is stored
     * internally and the local server will connect to this address.
     */
    public void setLocalStripedActive() throws ClientException, IOException {
        // Enforce the documented call-ordering contract.
        if (gSession.serverAddressList == null) {
            throw new ClientException(ClientException.CALL_PASSIVE_FIRST);
        }
        try {
            gLocalServer.setStripedActive(gSession.serverAddressList);
        } catch (UnknownHostException e) {
            // Translate the low-level resolution failure into the client API's error space.
            throw new ClientException(ClientException.UNKNOWN_HOST);
        }
    }
}
public class RingbufferContainer { /** * Initializes the ring buffer with references to other services , the * ringbuffer store and the config . This is because on a replication * operation the container is only partially constructed . The init method * finishes the configuration of the ring buffer container for further * usage . * @ param config the configuration of the ring buffer * @ param nodeEngine the NodeEngine */ public void init ( RingbufferConfig config , NodeEngine nodeEngine ) { } }
this . config = config ; this . serializationService = nodeEngine . getSerializationService ( ) ; initRingbufferStore ( nodeEngine . getConfigClassLoader ( ) ) ;
public class AsmUtils {
    /**
     * This method is used to read the whole stream into byte array. This allows patching.
     * It also works around a bug in ASM 6.1 (https://gitlab.ow2.org/asm/asm/issues/317816).
     */
    private static byte[] readStream (final InputStream in) throws IOException {
        final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        final byte[] chunk = new byte[4096];
        // Drain the stream in 4 KiB chunks until EOF.
        for (int n = in.read(chunk, 0, chunk.length); n != -1; n = in.read(chunk, 0, chunk.length)) {
            buffer.write(chunk, 0, n);
        }
        return buffer.toByteArray();
    }
}
public class UpdatableHeap {
    /**
     * Offer element at the given position.
     *
     * @param pos Position of the element in the backing queue, or NO_VALUE when
     *            the element is not yet present in the heap
     * @param e   Element
     */
    protected void offerAt (final int pos, O e) {
        if (pos == NO_VALUE) {
            // Element not in the heap yet: grow, record its index, and sift up.
            // resize when needed
            if (size + 1 > queue.length) {
                resize(size + 1);
            }
            index.put(e, size);
            size++;
            heapifyUp(size - 1, e);
            heapModified();
            return;
        }
        assert (pos >= 0) : "Unexpected negative position.";
        assert (queue[pos].equals(e));
        // Did the value improve? (>= 0 means not strictly better — nothing to do;
        // this heap only supports decrease-key style updates.)
        if (comparator.compare(e, queue[pos]) >= 0) {
            return;
        }
        // The key improved, so the element can only move toward the root.
        heapifyUp(pos, e);
        heapModified();
        return;
    }
}
public class IonReaderTextRawTokensX {
    /**
     * NOT for use outside of string/symbol/clob!
     * Absorbs backslash-NL pairs, returning
     * {@link #CHAR_SEQ_ESCAPED_NEWLINE_SEQUENCE_1} etc.
     */
    protected final int read_string_char (ProhibitedCharacters prohibitedCharacters) throws IOException {
        int c = _stream.read();
        // Reject characters that are illegal in the current quoted context.
        if (prohibitedCharacters.includes(c)) {
            error("invalid character [" + printCodePointAsString(c) + "]");
        }
        // the c == '\\' clause will cause us to eat ALL slash-newlines
        if (c == '\r' || c == '\n' || c == '\\') {
            // Delegate newline/escape normalization (and line counting) to line_count.
            c = line_count(c);
        }
        return c;
    }
}
public class AbstractExtendedSet { /** * { @ inheritDoc } */ @ Override public ExtendedSet < T > symmetricDifference ( Collection < ? extends T > other ) { } }
ExtendedSet < T > res = union ( other ) ; res . removeAll ( intersection ( other ) ) ; return res ;
public class FileUtil { /** * 返回主文件名 * @ param file 文件 * @ return 主文件名 */ public static String mainName ( File file ) { } }
if ( file . isDirectory ( ) ) { return file . getName ( ) ; } return mainName ( file . getName ( ) ) ;
public class ForkJoinPool {
    /**
     * Returns an estimate of the total number of tasks stolen from
     * one thread's work queue by another. The reported value
     * underestimates the actual total number of steals when the pool
     * is not quiescent. This value may be useful for monitoring and
     * tuning fork/join programs: in general, steal counts should be
     * high enough to keep threads busy, but low enough to avoid
     * overhead and contention across threads.
     *
     * @return the number of steals
     */
    public long getStealCount() {
        // Base count accumulated from terminated workers.
        long count = stealCount.get();
        WorkQueue[] ws;
        WorkQueue w;
        if ((ws = workQueues) != null) {
            // Only odd indices are scanned — in this pool's layout those slots
            // hold worker-owned queues, the ones that record steals.
            // NOTE: reads are unsynchronized, hence the "estimate" contract above.
            for (int i = 1; i < ws.length; i += 2) {
                if ((w = ws[i]) != null)
                    count += w.totalSteals;
            }
        }
        return count;
    }
}
public class AbstractCommandLineRunner {
    /**
     * Prints all the input contents, starting with a comment that specifies the input file name
     * (using root-relative paths) before each file.
     *
     * @param inputs the compiler inputs to bundle, in order
     * @param out    destination for the bundled output
     * @throws IOException if appending to {@code out} fails
     */
    @VisibleForTesting
    @GwtIncompatible("Unnecessary")
    void printBundleTo (Iterable<CompilerInput> inputs, Appendable out) throws IOException {
        // Prebuild ASTs before they're needed in getLoadFlags, for performance and because
        // StackOverflowErrors can be hit if not prebuilt.
        if (compiler.getOptions().numParallelThreads > 1) {
            new PrebuildAst(compiler, compiler.getOptions().numParallelThreads).prebuild(inputs);
        }
        if (!compiler.getOptions().preventLibraryInjection) {
            // ES6 modules will need a runtime in a bundle. Skip appending this runtime if there are no
            // ES6 modules to cut down on size.
            for (CompilerInput input : inputs) {
                if ("es6".equals(input.getLoadFlags().get("module"))) {
                    appendRuntimeTo(out);
                    break;
                }
            }
        }
        for (CompilerInput input : inputs) {
            String name = input.getName();
            String code = input.getSourceFile().getCode();
            // Ignore empty fill files created by the compiler to facilitate cross-module code motion.
            // Note that non-empty fill files (ones whose code has actually been moved into) are still
            // emitted. In particular, this ensures that if there are no (real) inputs the bundle will be
            // empty.
            if (Compiler.isFillFileName(name) && code.isEmpty()) {
                continue;
            }
            // Prefer the root-relative path for the header comment when one is known.
            String rootRelativePath = rootRelativePathsMap.get(name);
            String displayName = rootRelativePath != null ? rootRelativePath : input.getName();
            out.append("//");
            out.append(displayName);
            out.append("\n");
            prepForBundleAndAppendTo(out, input, code);
            out.append("\n");
        }
    }
}
public class XpathUtils {
    /**
     * Same as {@link #asByteBuffer(String, Node)} but allows an xpath to be
     * passed in explicitly for reuse.
     */
    public static ByteBuffer asByteBuffer (String expression, Node node, XPath xpath)
            throws XPathExpressionException {
        // Evaluate the expression to the Base64 text content at that path.
        String base64EncodedString = evaluateAsString(expression, node, xpath);
        if (isEmptyString(base64EncodedString))
            return null;
        // NOTE(review): the node-emptiness check happens AFTER evaluation; an
        // empty node yields null even when the evaluated string is non-empty —
        // presumably intentional guard against stale results, but verify.
        if (!isEmpty(node)) {
            byte[] decodedBytes = Base64.decode(base64EncodedString);
            return ByteBuffer.wrap(decodedBytes);
        }
        return null;
    }
}
public class MessageDialogBuilder { /** * Assigns a set of extra window hints that you want the built dialog to have * @ param extraWindowHints Window hints to assign to the window in addition to the ones the builder will put * @ return Itself */ public MessageDialogBuilder setExtraWindowHints ( Collection < Window . Hint > extraWindowHints ) { } }
this . extraWindowHints . clear ( ) ; this . extraWindowHints . addAll ( extraWindowHints ) ; return this ;
public class AnnotationValueBuilder {
    /**
     * Sets the value member to the given enum objects.
     *
     * @param enumObjs The enum[]
     * @return This builder
     */
    public AnnotationValueBuilder<T> values (@Nullable Enum<?>... enumObjs) {
        // Delegates to the generic member setter under the conventional "value" key.
        return member(AnnotationMetadata.VALUE_MEMBER, enumObjs);
    }
}
public class SubProcessKernel { /** * Pass the Path of the binary to the SubProcess in Command position 0 */ private ProcessBuilder appendExecutablePath ( ProcessBuilder builder ) { } }
String executable = builder . command ( ) . get ( 0 ) ; if ( executable == null ) { throw new IllegalArgumentException ( "No executable provided to the Process Builder... we will do... nothing... " ) ; } builder . command ( ) . set ( 0 , FileUtils . getFileResourceId ( configuration . getWorkerPath ( ) , executable ) . toString ( ) ) ; return builder ;
public class Validators {
    /**
     * Creates and returns a validator, which allows to validate texts to ensure, that they
     * represent valid IPv6 addresses. Empty texts are also accepted.
     *
     * @param context
     *            The context, which should be used to retrieve the error message, as an instance of
     *            the class {@link Context}. The context may not be null
     * @return The validator, which has been created, as an instance of the type {@link Validator}
     */
    public static Validator<CharSequence> iPv6Address (@NonNull final Context context) {
        // Uses the library's default error-message string resource.
        return new IPv6AddressValidator(context, R.string.default_error_message);
    }
}
public class ContactsApi { /** * Get contacts Return contacts of a character - - - This route is cached for * up to 300 seconds SSO Scope : esi - characters . read _ contacts . v1 * @ param characterId * An EVE character ID ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param page * Which page of results to return ( optional , default to 1) * @ param token * Access token to use if unable to set a header ( optional ) * @ return List & lt ; ContactsResponse & gt ; * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public List < ContactsResponse > getCharactersCharacterIdContacts ( Integer characterId , String datasource , String ifNoneMatch , Integer page , String token ) throws ApiException { } }
ApiResponse < List < ContactsResponse > > resp = getCharactersCharacterIdContactsWithHttpInfo ( characterId , datasource , ifNoneMatch , page , token ) ; return resp . getData ( ) ;
public class ReflectionMethodHelper {
    /**
     * Get all set/get methods from a Class. With methods from all super classes.
     *
     * @param pvClass Analyse Class.
     * @return All methods found.
     */
    public static Method[] getAllMethodsByClass (Class<?> pvClass) {
        // The intern variant walks the class hierarchy, accumulating into the
        // supplied set (deduplicates overridden methods).
        return getAllMethodsByClassIntern(pvClass, new HashSet<Method>()).toArray(new Method[0]);
    }
}
public class IndexerCacheStore {
    /**
     * Get the mode handler, lazily creating it on first use.
     *
     * Uses double-checked locking around the lazy initialization.
     * NOTE(review): the field declaration is not visible here — confirm
     * {@code modeHandler} is declared {@code volatile}, otherwise this DCL
     * pattern is not safe under the Java memory model.
     */
    public IndexerIoModeHandler getModeHandler() {
        if (modeHandler == null) {
            // The handler's mode depends on cluster state, so the cache must be up.
            if (ctx.getCache().getStatus() != ComponentStatus.RUNNING) {
                throw new IllegalStateException("The cache should be started first");
            }
            synchronized (this) {
                if (modeHandler == null) {
                    // Coordinator (or non-clustered cache, RpcManager == null) gets
                    // READ_WRITE; all other nodes are READ_ONLY.
                    this.modeHandler =
                        new IndexerIoModeHandler(cacheManager.isCoordinator()
                            || ctx.getCache().getAdvancedCache().getRpcManager() == null
                                ? IndexerIoMode.READ_WRITE
                                : IndexerIoMode.READ_ONLY);
                }
            }
        }
        return modeHandler;
    }
}
public class SigninFormPanel { /** * Factory method for creating the new { @ link SigninPanel } that contains the TextField for the * email and password . This method is invoked in the constructor from the derived classes and * can be overridden so users can provide their own version of a Component that contains the * TextField for the email and password . * @ param id * the id * @ param model * the model * @ return the new { @ link SigninPanel } that contains the TextField for the email and password . */ protected Component newSigninPanel ( final String id , final IModel < T > model ) { } }
final Component component = new SigninPanel < > ( id , model ) ; return component ;
public class AnnotationProcessor {
    /**
     * Builds {@link TypeModel} objects from the {@code roots}. The
     * types built contain contract methods. Helper types are created
     * for interfaces.
     */
    @Requires({
        "roots != null",
        "diagnosticManager != null"
    })
    @Ensures({
        "result != null",
        "result.size() >= roots.size()",
        "result.size() <= 2 * roots.size()"
    })
    protected List<TypeModel> createTypes (Set<TypeElement> roots, DiagnosticManager diagnosticManager) {
        boolean errors = false;
        /*
         * Extract all type names that will be part of this compilation
         * task.
         */
        final HashSet<String> knownTypeNames = new HashSet<String>();
        for (TypeElement r : roots) {
            // Scans each root recursively so nested types are recorded too.
            ElementScanner6<Void, Void> visitor = new ElementScanner6<Void, Void>() {
                @Override
                public Void visitType (TypeElement e, Void p) {
                    knownTypeNames.add(e.getQualifiedName().toString());
                    return super.visitType(e, p);
                }
            };
            r.accept(visitor, null);
        }
        /*
         * Mark annotations inherited from classes compiled in the same
         * task as weak so we don't generate stubs for them later on. This
         * prevents name clashes due to erasure.
         */
        ArrayList<TypeModel> undecoratedTypes = new ArrayList<TypeModel>(roots.size());
        for (TypeElement r : roots) {
            TypeModel type = factory.createType(r, diagnosticManager);
            ElementScanner annotator = new ElementScanner() {
                @Override
                public void visitContractAnnotation (ContractAnnotationModel annotation) {
                    // Virtual annotations owned by a type in this same task are
                    // flagged weak — their stubs would clash after erasure.
                    if (annotation.isVirtual()
                        && knownTypeNames.contains(annotation.getOwner().getQualifiedName())) {
                        annotation.setWeakVirtual(true);
                    }
                }
            };
            type.accept(annotator);
            undecoratedTypes.add(type);
        }
        /*
         * Decorate the type models with contract methods and create
         * helper types.
         */
        ArrayList<TypeModel> types = new ArrayList<TypeModel>(undecoratedTypes.size());
        for (TypeModel type : undecoratedTypes) {
            ClassContractCreator creator = new ClassContractCreator(diagnosticManager);
            type.accept(creator);
            // Interfaces get a companion helper type (may be null for classes) —
            // this is what allows result.size() up to 2 * roots.size().
            TypeModel helper = creator.getHelperType();
            types.add(type);
            if (helper != null) {
                types.add(helper);
            }
        }
        return types;
    }
}
public class Bean {
    /**
     * Internal: Introspects the given type and creates the appropriate {@link PropertyDescriptor}.
     * <p>This logic is a completely rewritten version of {@link java.beans.Introspector},
     * providing fewer bounds on the matching types (less strict equivalence for the return
     * type of readMethod and the first parameter of writeMethod) and a supplementary support
     * for declared accessors. In addition to this, the new implementation was needed as the
     * package {@code java.beans} is absent from the Android's Java API implementation.
     *
     * @param methods the methods to parse
     * @return all the property descriptors found
     */
    private static Collection<PropertyDescriptor> getPropertyDescriptors (Method[] methods) {
        List<PropertyDescriptor> desciptorsHolder = new ArrayList<PropertyDescriptor>();
        // Collects writeMethod and readMethod candidates, one descriptor per accessor.
        for (Method method : methods) {
            // Static and compiler-generated methods are never bean accessors.
            if (isStatic(method) || method.isSynthetic()) {
                continue;
            }
            String name = method.getName();
            if (method.getParameterTypes().length == 0) {
                // Getter: getXxx() with a non-void return type.
                if (name.length() > 3 && name.startsWith("get") && method.getReturnType() != void.class) {
                    PropertyDescriptor info = new PropertyDescriptor();
                    info.name = uncapitalize(name.substring(3));
                    info.readMethod = method;
                    info.isGetter = true;
                    desciptorsHolder.add(info);
                }
                // Isser: isXxx() returning primitive boolean.
                else if (name.length() > 2 && name.startsWith("is") && method.getReturnType() == boolean.class) {
                    PropertyDescriptor info = new PropertyDescriptor();
                    info.name = uncapitalize(name.substring(2));
                    info.readMethod = method;
                    info.isIsser = true;
                    desciptorsHolder.add(info);
                }
            } else if (method.getParameterTypes().length == 1) {
                // Setter: setXxx(arg) returning void.
                if (name.length() > 3 && name.startsWith("set") && method.getReturnType() == void.class) {
                    PropertyDescriptor info = new PropertyDescriptor();
                    info.name = uncapitalize(name.substring(3));
                    info.writeMethod = method;
                    info.isSetter = true;
                    desciptorsHolder.add(info);
                }
            }
        }
        // Merges descriptors with the same name into a single entity.
        Map<String, PropertyDescriptor> descriptors = new HashMap<String, PropertyDescriptor>();
        for (PropertyDescriptor descriptor : desciptorsHolder) {
            PropertyDescriptor instance = descriptors.get(descriptor.name);
            if (instance == null) {
                descriptors.put(descriptor.name, descriptor);
                instance = descriptor;
            }
            if (descriptor.isIsser) {
                // An isser always wins as the readMethod.
                instance.readMethod = descriptor.readMethod;
            } else if (descriptor.isGetter) {
                // if both getter and isser methods are present as descriptors,
                // the isser is chosen as readMethod, and the getter discarded
                if (instance.readMethod == null) {
                    instance.readMethod = descriptor.readMethod;
                }
            } else if (descriptor.isSetter) {
                instance.writeMethod = descriptor.writeMethod;
            }
        }
        return descriptors.values();
    }
}
public class MouseLiberalAdapter { /** * mouseReleased , Final function . Handles mouse released events . This function also detects * liberal single clicks , and liberal double clicks . */ @ Override final public void mouseReleased ( MouseEvent e ) { } }
// Check to see if this mouse release completes a liberal single click . if ( isComponentPressedDown ) { // A liberal single click has occurred . mouseLiberalClick ( e ) ; // Check to see if we had two liberal single clicks within the double click time window . long now = System . currentTimeMillis ( ) ; long timeBetweenUnusedClicks = now - lastUnusedLiberalSingleClickTimeStamp ; if ( timeBetweenUnusedClicks <= slowestDoubleClickMilliseconds ) { // A liberal double click has occurred . mouseLiberalDoubleClick ( e ) ; // Mark the single click timestamp as " used " by this double click . lastUnusedLiberalSingleClickTimeStamp = 0 ; } else { // Save the single click timestamp as part of a possible future double click . lastUnusedLiberalSingleClickTimeStamp = System . currentTimeMillis ( ) ; } } // Record the mouse release . isComponentPressedDown = false ; // Call the mouse release event . mouseRelease ( e ) ;
public class ModelParameterServer { /** * This method stores provided entities for MPS internal use * @ param configuration * @ param transport * @ param isMasterNode */ public void configure ( @ NonNull VoidConfiguration configuration , @ NonNull Transport transport , @ NonNull UpdaterParametersProvider updaterProvider ) { } }
this . transport = transport ; this . masterMode = false ; this . configuration = configuration ; this . updaterParametersProvider = updaterProvider ;
public class TridiagonalHelper_DDRB {
    /**
     * Multiples the appropriate submatrix of A by the specified reflector and stores
     * the result ('y') in V. <br>
     * <br>
     * y = A*u <br>
     *
     * @param blockLength
     * @param A Contains the 'A' matrix and 'u' vector.
     * @param V Where resulting 'y' row vectors are stored.
     * @param row row in matrix 'A' that 'u' vector and the row in 'V' that 'y' is stored in.
     */
    public static void multA_u (final int blockLength, final DSubmatrixD1 A, final DSubmatrixD1 V, int row) {
        int heightMatA = A.row1 - A.row0;
        // Only elements strictly below `row` are computed — the symmetric inner
        // product helper handles the symmetric structure of A.
        for (int i = row + 1; i < heightMatA; i++) {
            double val = innerProdRowSymm(blockLength, A, row, A, i, 1);
            V.set(row, i, val);
        }
    }
}
public class AmazonECRClient { /** * Retrieves the pre - signed Amazon S3 download URL corresponding to an image layer . You can only get URLs for image * layers that are referenced in an image . * < note > * This operation is used by the Amazon ECR proxy , and it is not intended for general use by customers for pulling * and pushing images . In most cases , you should use the < code > docker < / code > CLI to pull , tag , and push images . * < / note > * @ param getDownloadUrlForLayerRequest * @ return Result of the GetDownloadUrlForLayer operation returned by the service . * @ throws ServerException * These errors are usually caused by a server - side issue . * @ throws InvalidParameterException * The specified parameter is invalid . Review the available parameters for the API request . * @ throws LayersNotFoundException * The specified layers could not be found , or the specified layer is not valid for this repository . * @ throws LayerInaccessibleException * The specified layer is not available because it is not associated with an image . Unassociated image * layers may be cleaned up at any time . * @ throws RepositoryNotFoundException * The specified repository could not be found . Check the spelling of the specified repository and ensure * that you are performing operations on the correct registry . * @ sample AmazonECR . GetDownloadUrlForLayer * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / ecr - 2015-09-21 / GetDownloadUrlForLayer " target = " _ top " > AWS API * Documentation < / a > */ @ Override public GetDownloadUrlForLayerResult getDownloadUrlForLayer ( GetDownloadUrlForLayerRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetDownloadUrlForLayer ( request ) ;
public class ApiDescriptionBuilder { /** * Updates the operations to the api operation * @ param operations - operations for each of the http methods for that path * @ return this @ see springfox . documentation . builders . ApiDescriptionBuilder */ public ApiDescriptionBuilder operations ( List < Operation > operations ) { } }
if ( operations != null ) { this . operations = operations . stream ( ) . sorted ( operationOrdering ) . collect ( toList ( ) ) ; } return this ;
public class EchoClient {
    /**
     * Sends the given {@link String message} to the {@link EchoClient}.
     *
     * @param message {@link String} containing the message to send.
     * @return the {@link String response} returned by the {@link EchoServer}.
     * @see #newSocket(String, int)
     * @see #sendMessage(Socket, String)
     * @see #receiveResponse(Socket)
     */
    public String sendMessage (String message) {
        Socket socket = null;
        try {
            socket = newSocket(getHost(), getPort());
            // sendMessage returns the socket for chaining into receiveResponse.
            return receiveResponse(sendMessage(socket, message));
        } finally {
            // Socket is always closed, even if the send or receive throws.
            close(socket);
        }
    }
}
public class LengthValidator { /** * / * ( non - Javadoc ) * @ see nz . co . senanque . validationengine . annotations1 . FieldValidator # validate ( java . lang . Object ) */ public void validate ( Object o ) { } }
if ( o != null && o instanceof String ) { int l = ( ( String ) o ) . length ( ) ; if ( m_minLength != - 1 && l < m_minLength ) { String message = m_propertyMetadata . getMessageSourceAccessor ( ) . getMessage ( m_message , new Object [ ] { m_propertyMetadata . getLabelName ( ) , m_minLength , m_maxLength , String . valueOf ( o ) } ) ; throw new ValidationException ( message ) ; } if ( m_maxLength != - 1 && l > m_maxLength ) { String message = m_propertyMetadata . getMessageSourceAccessor ( ) . getMessage ( m_message , new Object [ ] { m_propertyMetadata . getLabelName ( ) , m_minLength , m_maxLength , String . valueOf ( o ) } ) ; throw new ValidationException ( message ) ; } }
public class RouteFetcher {

    /**
     * Build a route request given the passed {@link Location} and {@link RouteProgress}.
     * Uses {@link RouteOptions#coordinates()} and {@link RouteProgress#remainingWaypoints()}
     * to determine the amount of remaining waypoints there are along the given route.
     *
     * @param location current location of the device
     * @param routeProgress for remaining waypoints along the route
     * @return request reflecting the current progress, or null when the inputs are
     *         invalid or the remaining waypoints cannot be calculated
     */
    @Nullable
    public NavigationRoute.Builder buildRequestFrom(Location location, RouteProgress routeProgress) {
        Context context = contextWeakReference.get();
        // Bail out when the context has been garbage-collected or the inputs are unusable.
        if (invalid(context, location, routeProgress)) {
            return null;
        }
        Point origin = Point.fromLngLat(location.getLongitude(), location.getLatitude());
        // Only constrain the origin by bearing when the location actually reports one.
        Double bearing = location.hasBearing() ? Float.valueOf(location.getBearing()).doubleValue() : null;
        // Reuse the options of the route currently being navigated.
        RouteOptions options = routeProgress.directionsRoute().routeOptions();
        NavigationRoute.Builder builder = NavigationRoute.builder(context)
                .accessToken(accessToken)
                .origin(origin, bearing, BEARING_TOLERANCE)
                .routeOptions(options);
        List<Point> remainingWaypoints = routeUtils.calculateRemainingWaypoints(routeProgress);
        if (remainingWaypoints == null) {
            Timber.e("An error occurred fetching a new route");
            return null;
        }
        // Destination, intermediate waypoints, their names, and approaches are
        // appended to the builder in this order.
        addDestination(remainingWaypoints, builder);
        addWaypoints(remainingWaypoints, builder);
        addWaypointNames(routeProgress, builder);
        addApproaches(routeProgress, builder);
        return builder;
    }
}
public class DirectoryLookupService { /** * Get All ModelServiceInstance on the Directory Server . * @ return the ModelServiceInstance List . */ public List < ModelServiceInstance > getAllInstances ( ) { } }
List < ModelServiceInstance > result = Collections . emptyList ( ) ; try { result = getDirectoryServiceClient ( ) . getAllInstances ( ) ; } catch ( ServiceException se ) { LOGGER . error ( "Error when getAllInstances()" , se ) ; } return result ;
public class TypeDeclarationGenerator { /** * Overridden in TypePrivateDeclarationGenerator */ protected void printDeadClassConstant ( VariableDeclarationFragment fragment ) { } }
VariableElement var = fragment . getVariableElement ( ) ; Object value = var . getConstantValue ( ) ; assert value != null ; String declType = getDeclarationType ( var ) ; declType += ( declType . endsWith ( "*" ) ? "" : " " ) ; String name = nameTable . getVariableShortName ( var ) ; if ( ElementUtil . isPrimitiveConstant ( var ) ) { printf ( "#define %s_%s %s\n" , typeName , name , LiteralGenerator . generate ( value ) ) ; } else { println ( "FOUNDATION_EXPORT " + UnicodeUtils . format ( "%s%s_%s" , declType , typeName , name ) + ";" ) ; }
public class MoveAnalysis { @ Override public void visitArrayAccess ( Expr . ArrayAccess expr , Boolean consumed ) { } }
visitExpression ( expr . getFirstOperand ( ) , false ) ; visitExpression ( expr . getSecondOperand ( ) , false ) ; if ( ! consumed ) { expr . setMove ( ) ; }
public class AmazonSageMakerClient { /** * Returns information about a notebook instance . * @ param describeNotebookInstanceRequest * @ return Result of the DescribeNotebookInstance operation returned by the service . * @ sample AmazonSageMaker . DescribeNotebookInstance * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / sagemaker - 2017-07-24 / DescribeNotebookInstance " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeNotebookInstanceResult describeNotebookInstance ( DescribeNotebookInstanceRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeNotebookInstance ( request ) ;
public class StateMachine {

    /**
     * Initializes the state machine.
     *
     * @param executor The state machine executor.
     * @throws NullPointerException if {@code executor} is null
     */
    public void init(StateMachineExecutor executor) {
        this.executor = Assert.notNull(executor, "executor");
        // Cache the execution context and its clock / session registry.
        this.context = executor.context();
        this.clock = context.clock();
        this.sessions = context.sessions();
        // Subclasses implementing SessionListener are auto-registered for session events.
        if (this instanceof SessionListener) {
            executor.context().sessions().addListener((SessionListener) this);
        }
        // Let the subclass register its state machine operations last,
        // once all context fields are in place.
        configure(executor);
    }
}
public class BeansUtil { /** * Copies the property from the object to the other object . * @ param dst * the destination object to which the property value is set . * @ param src * the source object from which the property value is got . * @ param propertyName * the property name . * @ return * the number of properties copied ; zero or one . */ public static int copyProperty ( final Object dst , final Object src , final String propertyName ) { } }
int count = 0 ; if ( isPropertyGettable ( src , propertyName ) && isPropertySettable ( dst , propertyName ) ) { Object value = getProperty ( src , propertyName ) ; setProperty ( dst , propertyName , value ) ; count ++ ; } return count ;
public class Firmata { /** * Route a Message object built from data over the SerialPort communication line to a corresponding * MessageListener array designed to handle and interpret the object for processing in the client code . * @ param message Firmata Message to be routed to the registered listeners . */ private void routeMessage ( Message message ) { } }
Class messageClass = message . getClass ( ) ; // Dispatch message to all specific listeners for the message class type if ( messageListenerMap . containsKey ( messageClass ) ) { dispatchMessage ( messageListenerMap . get ( messageClass ) , message ) ; } // Dispatch message to all generic listeners if ( messageListenerMap . containsKey ( Message . class ) ) { dispatchMessage ( messageListenerMap . get ( Message . class ) , message ) ; }
public class BeanUtils { /** * Returns a Method object corresponding to a setter that sets an instance of componentClass from target . * @ param target class that the setter should exist on * @ param componentClass component to set * @ return Method object , or null of one does not exist */ public static Method setterMethod ( Class < ? > target , Class < ? > componentClass ) { } }
try { return target . getMethod ( setterName ( componentClass ) , componentClass ) ; } catch ( NoSuchMethodException e ) { // if ( log . isTraceEnabled ( ) ) log . trace ( " Unable to find method " + setterName ( componentClass ) + " in class " + target ) ; return null ; } catch ( NullPointerException e ) { return null ; }
public class JobExecutionsInner { /** * Lists a job ' s executions . * @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal . * @ param serverName The name of the server . * @ param jobAgentName The name of the job agent . * @ param jobName The name of the job to get . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < List < JobExecutionInner > > listByJobAsync ( final String resourceGroupName , final String serverName , final String jobAgentName , final String jobName , final ListOperationCallback < JobExecutionInner > serviceCallback ) { } }
return AzureServiceFuture . fromPageResponse ( listByJobSinglePageAsync ( resourceGroupName , serverName , jobAgentName , jobName ) , new Func1 < String , Observable < ServiceResponse < Page < JobExecutionInner > > > > ( ) { @ Override public Observable < ServiceResponse < Page < JobExecutionInner > > > call ( String nextPageLink ) { return listByJobNextSinglePageAsync ( nextPageLink ) ; } } , serviceCallback ) ;
public class OpenAPIModelFilterAdapter {

    /**
     * {@inheritDoc}
     *
     * Delegates to the wrapped visitor for its side effects, then returns the
     * flow unchanged — this adapter does not transform OAuth flows.
     */
    @Override
    public OAuthFlow visitOAuthFlow(Context context, OAuthFlow authFlow) {
        visitor.visitOAuthFlow(context, authFlow);
        return authFlow;
    }
}
public class FieldType { /** * Convert a field value to something suitable to be stored in the database . */ public Object convertJavaFieldToSqlArgValue ( Object fieldVal ) throws SQLException { } }
/* * Limitation here . Some people may want to override the null with their own value in the converter but we * currently don ' t allow that . Specifying a default value I guess is a better mechanism . */ if ( fieldVal == null ) { return null ; } else { return fieldConverter . javaToSqlArg ( this , fieldVal ) ; }
public class LogViewer { /** * Parses the instanceId into the requested main process instanceId and the subprocess instanceid . The main * process instanceId must be a long value as the main instance Id is a timestamp . * @ param instanceId - the instanceId requested by the user */ void setInstanceId ( String instanceId ) throws IllegalArgumentException { } }
if ( instanceId != null && ! "" . equals ( instanceId ) ) { subInstanceId = getSubProcessInstanceId ( instanceId ) ; try { long id = getProcessInstanceId ( instanceId ) ; mainInstanceId = id < 0 ? null : new Date ( id ) ; } catch ( NumberFormatException nfe ) { throw new IllegalArgumentException ( getLocalizedString ( "LVM_ERROR_INSTANCEID" ) , nfe ) ; } }
public class WebApp {

    /**
     * Performs the common first phase of web application initialization:
     * reads the servlet spec version from the parent web group, creates the
     * collaborator helper and namespace collaborator, resolves the context
     * path, sets up document roots, loads web app attributes, registers
     * listeners, and creates the session context, finally firing the
     * application-start event.
     *
     * @param config the web application configuration
     * @param moduleConfig the deployed module configuration
     * @throws Throwable if any initialization step fails
     */
    protected void commonInitializationStart(WebAppConfiguration config, DeployedModule moduleConfig) throws Throwable {
        // End 299205: collaborator added in extension processor receives no events.
        WebGroupConfiguration webGroupCfg = ((WebGroup) parent).getConfiguration();
        isServlet23 = webGroupCfg.isServlet2_3();
        // versionID encodes major.minor as a two-digit number, e.g. 30 -> 3.0.
        int versionID = webGroupCfg.getVersionID();
        effectiveMajorVersion = versionID / 10;
        effectiveMinorVersion = versionID % 10;
        // Must happen before createSessionContext, which calls startEnvSetup.
        collabHelper = createCollaboratorHelper(moduleConfig);
        webAppNameSpaceCollab = collabHelper.getWebAppNameSpaceCollaborator();
        // LIBERTY: make sure we initialize the connector collaborator before trying to use it.
        collabHelper.getWebAppConnectionCollaborator();
        if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) {
            logger.logp(Level.FINE, CLASS_NAME, "commonInitializationStart", "servlet spec version -->" + versionID + "effectiveMajorVersion->" + effectiveMajorVersion + "effectiveMinorVersion->" + effectiveMinorVersion);
        }
        contextPath = ((WebGroup) parent).getConfiguration().getContextRoot();
        if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) {
            logger.logp(Level.FINE, CLASS_NAME, "commonInitializationStart", "contextPath -->" + contextPath);
        }
        this.webExtensionProcessor = this.getWebExtensionProcessor();
        // Separate document roots for static files and JSPs.
        this.staticDocRoot = new DocumentRootUtils(this, this.config, DocumentRootUtils.STATIC_FILE);
        this.jspDocRoot = new DocumentRootUtils(this, this.config, DocumentRootUtils.JSP);
        // LIBERTY CMD placeholder until we get metadata:
        // WebModuleMetaData cmd = null;
        // webAppNameSpaceCollab.preInvoke(cmd);
        webAppNameSpaceCollab.preInvoke(config.getMetaData().getCollaboratorComponentMetaData());
        loadWebAppAttributes();
        // loadLifecycleListeners();
        // Clearing the listeners was removed from loadLifecycleListeners (due to
        // when it is called), so clear them here in case there was an error and
        // the app gets updated.
        clearLifecycleListeners();
        webAppNameSpaceCollab.postInvoke();
        registerGlobalWebAppListeners();
        txCollab = collabHelper.getWebAppTransactionCollaborator();
        createSessionContext(moduleConfig);
        eventSource.onApplicationStart(new ApplicationEvent(this, this, new com.ibm.ws.webcontainer.util.IteratorEnumerator(config.getServletNames())));
    }
}
public class DataTableCore {

    /**
     * Optional parameter defining which rows are selected when the datatable is
     * initially rendered. If this attribute is an integer, it's the row index.
     * If it's a string, it's a jQuery expression. If it's another object, it's
     * compared to the loop var. Automatically sets selection='true' and
     * selected-items='row'.
     * <p>
     * Usually this method is called internally by the JSF engine.
     *
     * @param _selectedRow the initial selection (integer index, jQuery
     *        expression string, or object compared to the loop var)
     */
    public void setSelectedRow(java.lang.Object _selectedRow) {
        // JSF state helper stores the value so it survives the component lifecycle.
        getStateHelper().put(PropertyKeys.selectedRow, _selectedRow);
    }
}
public class StoreRoutingPlan { /** * Determines the partition ID that replicates the key on the given node . * @ param nodeId of the node * @ param key to look up . * @ return partitionId if found , otherwise null . */ public Integer getNodesPartitionIdForKey ( int nodeId , final byte [ ] key ) { } }
// this is all the partitions the key replicates to . List < Integer > partitionIds = getReplicatingPartitionList ( key ) ; for ( Integer partitionId : partitionIds ) { // check which of the replicating partitions belongs to the node in // question if ( getNodeIdForPartitionId ( partitionId ) == nodeId ) { return partitionId ; } } return null ;
public class KunderaQueryUtils { /** * Gets the order by items . * @ param jpqlExpression * the jpql expression * @ return the order by items */ public static List < OrderByItem > getOrderByItems ( JPQLExpression jpqlExpression ) { } }
List < OrderByItem > orderList = new LinkedList < > ( ) ; if ( hasOrderBy ( jpqlExpression ) ) { Expression orderByItems = getOrderByClause ( jpqlExpression ) . getOrderByItems ( ) ; if ( orderByItems instanceof CollectionExpression ) { ListIterator < Expression > iterator = orderByItems . children ( ) . iterator ( ) ; while ( iterator . hasNext ( ) ) { OrderByItem orderByItem = ( OrderByItem ) iterator . next ( ) ; orderList . add ( orderByItem ) ; } } else { orderList . add ( ( OrderByItem ) orderByItems ) ; } } return orderList ;
public class DaytimeClock { /** * / * [ deutsch ] * < p > Versucht , den Original - Server - Zeitstempel zu lesen . < / p > * @ return unparsed server reply * @ throws IOException if connection fails * @ since 2.1 */ public String getRawTimestamp ( ) throws IOException { } }
final NetTimeConfiguration config = this . getNetTimeConfiguration ( ) ; String address = config . getTimeServerAddress ( ) ; int port = config . getTimeServerPort ( ) ; int timeout = config . getConnectionTimeout ( ) ; return getDaytimeReply ( address , port , timeout ) ;
public class ProcessUtils {

    /**
     * Kills the given {@link Process}: first with {@link Process#destroy()},
     * then, if it is still alive after a bounded wait, with
     * {@link Process#destroyForcibly()} and a second bounded wait.
     *
     * @param process {@link Process} to kill.
     * @return a boolean value indicating whether the given {@link Process} was successfully terminated.
     * @see java.lang.Process
     * @see java.lang.Process#destroy()
     * @see java.lang.Process#destroyForcibly()
     * @see #isAlive(Process)
     */
    @NullSafe
    public static boolean kill(Process process) {
        boolean alive = isAlive(process);
        if (alive) {
            // Polite termination attempt first.
            process.destroy();
            try {
                // waitFor returns true when the process exited within the timeout,
                // so alive becomes false on a successful exit.
                alive = !process.waitFor(KILL_WAIT_TIMEOUT, KILL_WAIT_TIME_UNIT);
            } catch (InterruptedException ignore) {
                // Preserve the caller's interrupt status; treat as "still alive".
                Thread.currentThread().interrupt();
            } finally {
                if (alive) {
                    // Escalate to a forcible kill and wait once more.
                    process.destroyForcibly();
                    try {
                        alive = !process.waitFor(KILL_WAIT_TIMEOUT, KILL_WAIT_TIME_UNIT);
                    } catch (InterruptedException ignore) {
                        Thread.currentThread().interrupt();
                    }
                }
            }
        }
        // Terminated when either the wait reported an exit or a final
        // liveness probe confirms the process is gone.
        return !(alive && isAlive(process));
    }
}
public class ClsCommand {

    /**
     * Clears the terminal screen.
     *
     * @see jp.co.future.uroborosql.client.command.ReplCommand#execute(org.jline.reader.LineReader, java.lang.String[], jp.co.future.uroborosql.config.SqlConfig, java.util.Properties)
     * @return always {@code true}, so the REPL keeps running
     */
    @Override
    public boolean execute(final LineReader reader, final String[] parts, final SqlConfig sqlConfig, final Properties props) {
        // Emit the terminfo clear-screen capability and flush so it takes effect immediately.
        reader.getTerminal().puts(Capability.clear_screen);
        reader.getTerminal().flush();
        return true;
    }
}
public class SchemaBuilder {

    /**
     * Shortcut for {@link #dropMaterializedView(CqlIdentifier)
     * dropMaterializedView(CqlIdentifier.fromCql(viewName))}.
     *
     * @param viewName the raw CQL name of the materialized view to drop
     * @return the DROP statement builder
     */
    @NonNull
    public static Drop dropMaterializedView(@NonNull String viewName) {
        // Parse the raw string into a CQL identifier and delegate.
        return dropMaterializedView(CqlIdentifier.fromCql(viewName));
    }
}
public class MetricsContainer {

    /**
     * Add a MetricsProvider to this container.
     *
     * @param name the name of the MetricsProvider.
     * @param provider the MetricsProvider instance.
     */
    public void addProvider(String name, MetricsProvider provider) {
        // Guard keeps the string concatenation off the hot path when INFO is disabled.
        if (log.isInfoEnabled()) {
            log.info("Adding Provider: " + provider.getClass().getName() + "=" + name);
        }
        // NOTE(review): registering under an existing name presumably replaces
        // the previous provider silently — confirm container semantics.
        container.put(name, provider);
    }
}
public class DataSourceTask {

    /**
     * Utility function that composes a string for logging purposes. The string
     * includes the given message and the index of the task in its task group
     * together with the number of tasks in the task group.
     *
     * @param message The main message for the log.
     * @param taskName The name of the task.
     * @return The string ready for logging.
     */
    private String getLogString(String message, String taskName) {
        // Delegates to the shared formatter, passing this task for its group/index info.
        return BatchTask.constructLogString(message, taskName, this);
    }
}
public class DE9IMRelation {

    /**
     * Get the DE-9IM relation(s) existing between two {@link GeometricShapeVariable}s.
     *
     * @param gv1 The first {@link GeometricShapeVariable} (the source of the directed edge).
     * @param gv2 The second {@link GeometricShapeVariable} (the destination of the directed edge).
     * @return The DE-9IM relation(s) existing between the two given {@link GeometricShapeVariable}s.
     */
    public static Type[] getRelations(GeometricShapeVariable gv1, GeometricShapeVariable gv2) {
        // Delegates to the three-argument overload with its flag defaulted to false.
        return getRelations(gv1, gv2, false);
    }
}
public class ZonedDateTime { /** * Returns a copy of this date - time with the specified amount added . * This returns a { @ code ZonedDateTime } , based on this one , with the specified amount added . * The amount is typically { @ link Period } or { @ link Duration } but may be * any other type implementing the { @ link TemporalAmount } interface . * The calculation is delegated to the amount object by calling * { @ link TemporalAmount # addTo ( Temporal ) } . The amount implementation is free * to implement the addition in any way it wishes , however it typically * calls back to { @ link # plus ( long , TemporalUnit ) } . Consult the documentation * of the amount implementation to determine if it can be successfully added . * This instance is immutable and unaffected by this method call . * @ param amountToAdd the amount to add , not null * @ return a { @ code ZonedDateTime } based on this date - time with the addition made , not null * @ throws DateTimeException if the addition cannot be made * @ throws ArithmeticException if numeric overflow occurs */ @ Override public ZonedDateTime plus ( TemporalAmount amountToAdd ) { } }
if ( amountToAdd instanceof Period ) { Period periodToAdd = ( Period ) amountToAdd ; return resolveLocal ( dateTime . plus ( periodToAdd ) ) ; } Objects . requireNonNull ( amountToAdd , "amountToAdd" ) ; return ( ZonedDateTime ) amountToAdd . addTo ( this ) ;
public class Latch {

    /**
     * Waits for N threads to enter the {@link #synchronize()} method, then returns.
     *
     * @return returns normally if N threads successfully synchronized.
     * @throws InterruptedException
     *         if any of the threads that were synchronizing get interrupted,
     *         or if the {@link #abort(Throwable)} is called.
     */
    public synchronized void synchronize() throws InterruptedException {
        // First barrier: block until all n threads have arrived.
        check(n);
        try {
            // Runs once the arrival criteria is met.
            onCriteriaMet();
        } catch (Error | RuntimeException e) {
            // Abort wakes the other waiters before the failure is rethrown
            // to this caller.
            abort(e);
            throw e;
        }
        // Second barrier at 2n — presumably lets all threads leave the latch
        // together before it is reused; TODO confirm check() semantics.
        check(n * 2);
    }
}
public class Configuration { /** * Returns the value associated with the given config option as a { @ code double } . * @ param configOption The configuration option * @ return the ( default ) value associated with the given config option */ @ PublicEvolving public double getDouble ( ConfigOption < Double > configOption ) { } }
Object o = getValueOrDefaultFromOption ( configOption ) ; return convertToDouble ( o , configOption . defaultValue ( ) ) ;
public class AlluxioFuseFileSystem {

    /**
     * Reads the contents of a directory.
     *
     * @param path The FS path of the directory
     * @param buff The FUSE buffer to fill
     * @param filter FUSE filter
     * @param offset Ignored in alluxio-fuse
     * @param fi FileInfo data structure kept by FUSE
     * @return 0 on success, a negative value on error
     */
    @Override
    public int readdir(String path, Pointer buff, FuseFillDir filter, @off_t long offset, FuseFileInfo fi) {
        // Translate the FUSE path into an Alluxio URI (cached).
        final AlluxioURI turi = mPathResolverCache.getUnchecked(path);
        LOG.trace("readdir({}) [Alluxio: {}]", path, turi);
        try {
            final List<URIStatus> ls = mFileSystem.listStatus(turi);
            // Standard . and .. entries come first.
            filter.apply(buff, ".", null, 0);
            filter.apply(buff, "..", null, 0);
            for (final URIStatus file : ls) {
                filter.apply(buff, file.getName(), null, 0);
            }
        } catch (FileDoesNotExistException | InvalidPathException e) {
            // Missing/invalid path maps to the POSIX ENOENT error code.
            LOG.debug("Failed to read directory {}, path does not exist or is invalid", path);
            return -ErrorCodes.ENOENT();
        } catch (Throwable t) {
            // Anything else is translated into the closest FUSE error code.
            LOG.error("Failed to read directory {}", path, t);
            return AlluxioFuseUtils.getErrorCode(t);
        }
        return 0;
    }
}
public class CmsObject {

    /**
     * Creates a new project.<p>
     *
     * @param name the name of the project to create
     * @param description the description for the new project
     * @param groupname the name of the project user group
     * @param managergroupname the name of the project manager group
     * @return the created project
     * @throws CmsException if something goes wrong
     */
    public CmsProject createProject(String name, String description, String groupname, String managergroupname) throws CmsException {
        // Delegates to the security manager with the normal project type;
        // the current request context supplies the user/permissions.
        return m_securityManager.createProject(m_context, name, description, groupname, managergroupname, CmsProject.PROJECT_TYPE_NORMAL);
    }
}
public class IpSet { /** * The array of IP addresses in the IP address set . An IP address set can have a maximum of two IP addresses . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setIpAddresses ( java . util . Collection ) } or { @ link # withIpAddresses ( java . util . Collection ) } if you want to * override the existing values . * @ param ipAddresses * The array of IP addresses in the IP address set . An IP address set can have a maximum of two IP addresses . * @ return Returns a reference to this object so that method calls can be chained together . */ public IpSet withIpAddresses ( String ... ipAddresses ) { } }
if ( this . ipAddresses == null ) { setIpAddresses ( new java . util . ArrayList < String > ( ipAddresses . length ) ) ; } for ( String ele : ipAddresses ) { this . ipAddresses . add ( ele ) ; } return this ;
public class FileHelper { /** * Performs a chmod ( which assumes this system is Linux / UNIX / Solaris / etc ) , replacing the permissions using octal * @ param f * @ param permissions * < strong > REMEMBER TO SPECIFY THIS VALUE IN OCTAL ( ie . with a leading zero ) < / strong > * @ return * @ throws IOException */ public static boolean chmod ( final String as , final File f , final int permissions ) { } }
if ( ! f . exists ( ) ) { log . error ( "[FileHelper] {chmod} Non-existant file: " + f . getPath ( ) ) ; return false ; } try { Execed call = Exec . utilityAs ( as , "chmod" , Integer . toOctalString ( permissions ) , f . getPath ( ) ) ; int returnCode = call . waitForExit ( ) ; return returnCode == 0 ; } catch ( Exception e ) { log . error ( "[FileHelper] {chmod} Failure: " + e . getMessage ( ) , e ) ; return false ; }
public class CryptoUtil {

    /**
     * Encrypts the given input bytes using a symmetric key (AES).
     * The AES key is stored protected by an asymmetric key pair (RSA).
     * The cipher's IV is Base64-encoded and persisted so the decrypt stage
     * can recover it.
     *
     * @param decryptedInput the input bytes to encrypt. There's no limit in size.
     * @return the encrypted output bytes
     * @throws CryptoException if the RSA Key pair was deemed invalid and got deleted. Operation can be retried.
     * @throws IncompatibleDeviceException in the event the device can't understand the cryptographic settings required
     */
    public byte[] encrypt(byte[] decryptedInput) throws CryptoException, IncompatibleDeviceException {
        try {
            // Wrap the stored AES key bytes and initialize the cipher for encryption.
            SecretKey key = new SecretKeySpec(getAESKey(), ALGORITHM_AES);
            Cipher cipher = Cipher.getInstance(AES_TRANSFORMATION);
            cipher.init(Cipher.ENCRYPT_MODE, key);
            byte[] encrypted = cipher.doFinal(decryptedInput);
            byte[] encodedIV = Base64.encode(cipher.getIV(), Base64.DEFAULT);
            // Save IV for the decrypt stage.
            storage.store(KEY_IV_ALIAS, new String(encodedIV));
            return encrypted;
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException e) {
            /*
             * These exceptions are safe to be ignored:
             * - NoSuchPaddingException:
             *   Thrown if NOPADDING is not available. Was introduced in API 1.
             * - NoSuchAlgorithmException:
             *   Thrown if the transformation is null, empty or invalid, or if no security provider
             *   implements it. Was introduced in API 1.
             * - InvalidKeyException:
             *   Thrown if the given key is inappropriate for initializing this cipher.
             * - InvalidAlgorithmParameterException:
             *   If the IV parameter is null.
             * - BadPaddingException:
             *   Thrown only on decrypt mode.
             * Read more in https://developer.android.com/reference/javax/crypto/Cipher
             */
            Log.e(TAG, "Error while encrypting the input.", e);
            throw new IncompatibleDeviceException(e);
        } catch (IllegalBlockSizeException | BadPaddingException e) {
            /*
             * - IllegalBlockSizeException:
             *   Thrown if no padding has been requested and the length is not multiple of block size.
             * - BadPaddingException:
             *   Thrown only on decrypt mode.
             */
            // NOTE(review): the message says "decrypted" although this is the
            // encrypt path — confirm whether the wording is intentional.
            throw new CryptoException("The AES decrypted input is invalid.", e);
        }
    }
}
public class MethodUtils { /** * < p > Invoke a named method whose parameter type matches the object type . < / p > * < p > The behaviour of this method is less deterministic * than { @ link # invokeExactMethod ( Object object , String methodName , Object [ ] args ) } . * It loops through all methods with names that match * and then executes the first it finds with compatible parameters . < / p > * < p > This method supports calls to methods taking primitive parameters * via passing in wrapping classes . So , for example , a < code > Boolean < / code > class * would match a < code > boolean < / code > primitive . < / p > * < p > This is a convenient wrapper for * { @ link # invokeMethod ( Object object , String methodName , Object [ ] args , Class [ ] parameterTypes ) } . * @ param object invoke method on this object * @ param methodName get method with this name * @ param args use these arguments - treat null as empty array ( passing null will * result in calling the parameterless method with name { @ code methodName } ) . * @ return The value returned by the invoked method * @ throws NoSuchMethodException if there is no such accessible method * @ throws InvocationTargetException wraps an exception thrown by the * method invoked * @ throws IllegalAccessException if the requested method is not accessible * via reflection */ public static Object invokeMethod ( Object object , String methodName , Object [ ] args ) throws NoSuchMethodException , IllegalAccessException , InvocationTargetException { } }
if ( args == null ) { args = EMPTY_OBJECT_ARRAY ; } int arguments = args . length ; Class < ? > [ ] parameterTypes = new Class [ arguments ] ; for ( int i = 0 ; i < arguments ; i ++ ) { parameterTypes [ i ] = args [ i ] . getClass ( ) ; } return invokeMethod ( object , methodName , args , parameterTypes ) ;
public class AttributePropertiesManager { /** * Get an attribute ' s properties from tango db * @ param attributeName * @ return The properties * @ throws DevFailed */ private Map < String , String > getAttributePropertiesFromDBSingle ( final String attributeName ) throws DevFailed { } }
xlogger . entry ( attributeName ) ; final Map < String , String > result = new CaseInsensitiveMap < String > ( ) ; final Map < String , String [ ] > prop = DatabaseFactory . getDatabase ( ) . getAttributeProperties ( deviceName , attributeName ) ; for ( final Entry < String , String [ ] > entry : prop . entrySet ( ) ) { final String name = entry . getKey ( ) ; final String [ ] value = entry . getValue ( ) ; if ( value . length > 0 ) { result . put ( name , value [ 0 ] ) ; } else { result . put ( name , "" ) ; } } xlogger . exit ( ) ; return result ;
public class JobDriver { /** * Build a new Task configuration for a given task ID . * @ param taskId Unique string ID of the task * @ return Immutable task configuration object , ready to be submitted to REEF . * @ throws RuntimeException that wraps BindException if unable to build the configuration . */ private Configuration getTaskConfiguration ( final String taskId ) { } }
try { return TaskConfiguration . CONF . set ( TaskConfiguration . IDENTIFIER , taskId ) . set ( TaskConfiguration . TASK , SleepTask . class ) . build ( ) ; } catch ( final BindException ex ) { LOG . log ( Level . SEVERE , "Failed to create Task Configuration: " + taskId , ex ) ; throw new RuntimeException ( ex ) ; }
public class GetPendingJobExecutionsResult { /** * A list of JobExecutionSummary objects with status QUEUED . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setQueuedJobs ( java . util . Collection ) } or { @ link # withQueuedJobs ( java . util . Collection ) } if you want to * override the existing values . * @ param queuedJobs * A list of JobExecutionSummary objects with status QUEUED . * @ return Returns a reference to this object so that method calls can be chained together . */ public GetPendingJobExecutionsResult withQueuedJobs ( JobExecutionSummary ... queuedJobs ) { } }
if ( this . queuedJobs == null ) { setQueuedJobs ( new java . util . ArrayList < JobExecutionSummary > ( queuedJobs . length ) ) ; } for ( JobExecutionSummary ele : queuedJobs ) { this . queuedJobs . add ( ele ) ; } return this ;
public class Neo4JClientFactory {

    /**
     * Create a Neo4J Embedded Graph DB instance that acts as the Neo4J connection
     * repository. If a Neo4j-specific client properties file is specified in
     * persistence.xml, the DB instance is initialized with those properties;
     * otherwise it is initialized with default properties.
     *
     * @return the {@code GraphDatabaseService} instance (embedded database)
     */
    @Override
    protected Object createPoolOrConnection() {
        if (log.isInfoEnabled())
            log.info("Initializing Neo4J database connection...");
        PersistenceUnitMetadata puMetadata = kunderaMetadata.getApplicationMetadata().getPersistenceUnitMetadata(getPersistenceUnit());
        Properties props = puMetadata.getProperties();
        // The datastore file path may come from external properties first,
        // then from the persistence unit's own properties.
        String datastoreFilePath = null;
        if (externalProperties != null) {
            datastoreFilePath = (String) externalProperties.get(PersistenceProperties.KUNDERA_DATASTORE_FILE_PATH);
        }
        if (StringUtils.isBlank(datastoreFilePath)) {
            datastoreFilePath = (String) props.get(PersistenceProperties.KUNDERA_DATASTORE_FILE_PATH);
        }
        if (StringUtils.isBlank(datastoreFilePath)) {
            // The path is mandatory for the embedded database.
            throw new PersistenceUnitConfigurationException("For Neo4J, it's mandatory to specify kundera.datastore.file.path property in persistence.xml");
        }
        Neo4JSchemaMetadata nsmd = Neo4JPropertyReader.nsmd;
        ClientProperties cp = nsmd != null ? nsmd.getClientProperties() : null;
        GraphDatabaseService graphDb = (GraphDatabaseService) getConnectionPoolOrConnection();
        // When client properties exist and no database is cached yet,
        // build the database with the configured connection properties.
        if (cp != null && graphDb == null) {
            DataStore dataStore = nsmd != null ? nsmd.getDataStore() : null;
            Properties properties = dataStore != null && dataStore.getConnection() != null ? dataStore.getConnection().getProperties() : null;
            if (properties != null) {
                Map<String, String> config = new HashMap<String, String>((Map) properties);
                GraphDatabaseBuilder builder = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(datastoreFilePath);
                builder.setConfig(config);
                graphDb = builder.newGraphDatabase();
                // registerShutdownHook(graphDb);
            }
        }
        // Fallback: default-configured embedded database.
        if (graphDb == null) {
            graphDb = new GraphDatabaseFactory().newEmbeddedDatabase(datastoreFilePath);
            // registerShutdownHook(graphDb);
        }
        return graphDb;
    }
}
public class AmazonRoute53Client { /** * Creates a configuration for DNS query logging . After you create a query logging configuration , Amazon Route 53 * begins to publish log data to an Amazon CloudWatch Logs log group . * DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone , * such as the following : * < ul > * < li > * Route 53 edge location that responded to the DNS query * < / li > * < li > * Domain or subdomain that was requested * < / li > * < li > * DNS record type , such as A or AAAA * < / li > * < li > * DNS response code , such as < code > NoError < / code > or < code > ServFail < / code > * < / li > * < / ul > * < dl > * < dt > Log Group and Resource Policy < / dt > * < dd > * Before you create a query logging configuration , perform the following operations . * < note > * If you create a query logging configuration using the Route 53 console , Route 53 performs these operations * automatically . * < / note > * < ol > * < li > * Create a CloudWatch Logs log group , and make note of the ARN , which you specify when you create a query logging * configuration . Note the following : * < ul > * < li > * You must create the log group in the us - east - 1 region . * < / li > * < li > * You must use the same AWS account to create the log group and the hosted zone that you want to configure query * logging for . * < / li > * < li > * When you create log groups for query logging , we recommend that you use a consistent prefix , for example : * < code > / aws / route53 / < i > hosted zone name < / i > < / code > * In the next step , you ' ll create a resource policy , which controls access to one or more log groups and the * associated AWS resources , such as Route 53 hosted zones . 
There ' s a limit on the number of resource policies that * you can create , so we recommend that you use a consistent prefix so you can use the same resource policy for all * the log groups that you create for query logging . * < / li > * < / ul > * < / li > * < li > * Create a CloudWatch Logs resource policy , and give it the permissions that Route 53 needs to create log streams * and to send query logs to log streams . For the value of < code > Resource < / code > , specify the ARN for the log group * that you created in the previous step . To use the same resource policy for all the CloudWatch Logs log groups * that you created for query logging configurations , replace the hosted zone name with < code > * < / code > , for example : * < code > arn : aws : logs : us - east - 1:123412341234 : log - group : / aws / route53 / * < / code > * < note > * You can ' t use the CloudWatch console to create or edit a resource policy . You must use the CloudWatch API , one of * the AWS SDKs , or the AWS CLI . * < / note > < / li > * < / ol > * < / dd > * < dt > Log Streams and Edge Locations < / dt > * < dd > * When Route 53 finishes creating the configuration for DNS query logging , it does the following : * < ul > * < li > * Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the * specified hosted zone . That log stream is used to log all queries that Route 53 responds to for that edge * location . * < / li > * < li > * Begins to send query logs to the applicable log stream . * < / li > * < / ul > * The name of each log stream is in the following format : * < code > < i > hosted zone ID < / i > / < i > edge location code < / i > < / code > * The edge location code is a three - letter code and an arbitrarily assigned number , for example , DFW3 . The * three - letter code typically corresponds with the International Air Transport Association airport code for an * airport near the edge location . 
( These abbreviations might change in the future . ) For a list of edge locations , * see " The Route 53 Global Network " on the < a href = " http : / / aws . amazon . com / route53 / details / " > Route 53 Product * Details < / a > page . * < / dd > * < dt > Queries That Are Logged < / dt > * < dd > * Query logs contain only the queries that DNS resolvers forward to Route 53 . If a DNS resolver has already cached * the response to a query ( such as the IP address for a load balancer for example . com ) , the resolver will continue * to return the cached response . It doesn ' t forward another query to Route 53 until the TTL for the corresponding * resource record set expires . Depending on how many DNS queries are submitted for a resource record set , and * depending on the TTL for that resource record set , query logs might contain information about only one query out * of every several thousand queries that are submitted to DNS . For more information about how DNS works , see < a * href = " https : / / docs . aws . amazon . com / Route53 / latest / DeveloperGuide / welcome - dns - service . html " > Routing Internet * Traffic to Your Website or Web Application < / a > in the < i > Amazon Route 53 Developer Guide < / i > . * < / dd > * < dt > Log File Format < / dt > * < dd > * For a list of the values in each query log and the format of each value , see < a * href = " https : / / docs . aws . amazon . com / Route53 / latest / DeveloperGuide / query - logs . html " > Logging DNS Queries < / a > in the * < i > Amazon Route 53 Developer Guide < / i > . * < / dd > * < dt > Pricing < / dt > * < dd > * For information about charges for query logs , see < a href = " http : / / aws . amazon . com / cloudwatch / pricing / " > Amazon * CloudWatch Pricing < / a > . * < / dd > * < dt > How to Stop Logging < / dt > * < dd > * If you want Route 53 to stop sending query logs to CloudWatch Logs , delete the query logging configuration . 
For * more information , see < a * href = " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ DeleteQueryLoggingConfig . html " * > DeleteQueryLoggingConfig < / a > . * < / dd > * < / dl > * @ param createQueryLoggingConfigRequest * @ return Result of the CreateQueryLoggingConfig operation returned by the service . * @ throws ConcurrentModificationException * Another user submitted a request to create , update , or delete the object at the same time that you did . * Retry the request . * @ throws NoSuchHostedZoneException * No hosted zone exists with the ID that you specified . * @ throws NoSuchCloudWatchLogsLogGroupException * There is no CloudWatch Logs log group with the specified ARN . * @ throws InvalidInputException * The input is not valid . * @ throws QueryLoggingConfigAlreadyExistsException * You can create only one query logging configuration for a hosted zone , and a query logging configuration * already exists for this hosted zone . * @ throws InsufficientCloudWatchLogsResourcePolicyException * Amazon Route 53 doesn ' t have the permissions required to create log streams and send query logs to log * streams . Possible causes include the following : < / p > * < ul > * < li > * There is no resource policy that specifies the log group ARN in the value for < code > Resource < / code > . * < / li > * < li > * The resource policy that includes the log group ARN in the value for < code > Resource < / code > doesn ' t have * the necessary permissions . * < / li > * < li > * The resource policy hasn ' t finished propagating yet . * < / li > * @ sample AmazonRoute53 . CreateQueryLoggingConfig * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / route53-2013-04-01 / CreateQueryLoggingConfig " * target = " _ top " > AWS API Documentation < / a > */ @ Override public CreateQueryLoggingConfigResult createQueryLoggingConfig ( CreateQueryLoggingConfigRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateQueryLoggingConfig ( request ) ;
public class TcpPacketReceiver {
    /**
     * Receive loop: reads packets from the transport until a stop is requested,
     * the remote peer closes the connection, or the transport fails.
     *
     * The maximum accepted packet size is capped at 1024 bytes while the
     * connection is not yet trusted (the listener's return value from
     * {@code packetReceived} promotes/demotes trust), which limits what an
     * unauthenticated peer can make us buffer.
     *
     * @see java.lang.Thread#run()
     */
    @Override
    public void run() {
        try {
            // Only register with the watchdog when ping-based liveness checking is enabled.
            if (pingInterval > 0)
                ActivityWatchdog.getInstance().register(this);
            while (!stopRequired) {
                // Untrusted connections get a small cap; trusted ones use the
                // configured limit (or unlimited when maxPacketSize == -1).
                int actualMaxPacketSize = Integer.MAX_VALUE;
                if (maxPacketSize != -1)
                    actualMaxPacketSize = trustedConnection ? maxPacketSize : 1024;
                AbstractPacket packet = receive(actualMaxPacketSize);
                if (packet == null) {
                    // null during shutdown is expected; otherwise the peer closed on us.
                    if (stopRequired)
                        break;
                    // Report connection closed
                    log.debug("#" + id + " connection closed by remote peer.");
                    transport.closeTransport(true);
                    break;
                }
                if (traceEnabled)
                    log.trace("#" + id + " Received " + packet);
                // Record activity for the watchdog's idle detection.
                lastActivity = System.currentTimeMillis();
                // The listener decides whether the connection is now trusted.
                if (listener != null)
                    trustedConnection = listener.packetReceived(packet);
            }
        } catch (Exception e) {
            // A failure after a stop request is just the transport being torn down.
            if (!stopRequired) {
                log.error("#" + id + " transport failed : " + e.toString());
                transport.closeTransport(true);
            }
        } catch (Throwable e) {
            // Last-resort catch so the receiver thread never dies silently.
            log.fatal("#" + id + " TCP packet receiver died", e);
        }
        log.debug("#" + id + " stopping.");
    }
}
public class Maps { /** * Removes entries from the specified { @ code map } by the the specified { @ code filter } . * @ param map * @ param filter * @ return { @ code true } if there are one or more than one entries removed from the specified map . * @ throws E */ public static < K , V , E extends Exception > boolean removeIf ( final Map < K , V > map , final Try . Predicate < ? super Map . Entry < K , V > , E > filter ) throws E { } }
List < K > keysToRemove = null ; for ( Map . Entry < K , V > entry : map . entrySet ( ) ) { if ( filter . test ( entry ) ) { if ( keysToRemove == null ) { keysToRemove = new ArrayList < > ( 7 ) ; } keysToRemove . add ( entry . getKey ( ) ) ; } } if ( N . notNullOrEmpty ( keysToRemove ) ) { for ( K key : keysToRemove ) { map . remove ( key ) ; } return true ; } return false ;
public class JsonRpcRestClient {
    /**
     * Performs a JSON-RPC call over HTTP POST via Spring's {@code RestTemplate}.
     *
     * Client-level headers are applied first, then per-call {@code extraHeaders}
     * (both are added, so a key present in both appears twice in the multimap).
     * HTTP error statuses are translated into {@link JsonRpcClientException}s.
     *
     * {@inheritDoc}
     */
    @Override
    public Object invoke(String methodName, Object argument, Type returnType, Map<String, String> extraHeaders) throws Throwable {
        // Build the JSON-RPC request envelope for this method invocation.
        final ObjectNode request = super.createRequest(methodName, argument);

        final MultiValueMap<String, String> httpHeaders = new LinkedMultiValueMap<>();
        for (Map.Entry<String, String> entry : this.headers.entrySet()) {
            httpHeaders.add(entry.getKey(), entry.getValue());
        }
        if (extraHeaders != null) {
            for (Map.Entry<String, String> entry : extraHeaders.entrySet()) {
                httpHeaders.add(entry.getKey(), entry.getValue());
            }
        }

        final HttpEntity<ObjectNode> requestHttpEntity = new HttpEntity<>(request, httpHeaders);
        JsonNode response;
        try {
            response = this.restTemplate.postForObject(serviceUrl.get().toExternalForm(), requestHttpEntity, ObjectNode.class);
        } catch (HttpStatusCodeException httpStatusCodeException) {
            logger.error("HTTP Error code={} status={}\nresponse={}",
                    httpStatusCodeException.getStatusCode().value(),
                    httpStatusCodeException.getStatusText(),
                    httpStatusCodeException.getResponseBodyAsString());
            // Map the HTTP status to a JSON-RPC error code; fall back to the raw
            // status code when no mapping exists.
            Integer jsonErrorCode = DefaultHttpStatusCodeProvider.INSTANCE.getJsonRpcCode(httpStatusCodeException.getStatusCode().value());
            if (jsonErrorCode == null) {
                jsonErrorCode = httpStatusCodeException.getStatusCode().value();
            }
            throw new JsonRpcClientException(jsonErrorCode, httpStatusCodeException.getStatusText(), null);
        } catch (HttpMessageConversionException httpMessageConversionException) {
            // Body could not be (de)serialized as JSON.
            logger.error("Can not convert (request/response)", httpMessageConversionException);
            throw new JsonRpcClientException(0, "Invalid JSON-RPC response", null);
        }
        // Unwrap the JSON-RPC response envelope into the declared return type.
        return this.readResponse(returnType, response);
    }
}
public class AddJsonPropertyToObject {
    /**
     * Inserts a new name/value property into a JSON object (or appends the value
     * to a JSON array), where the value must itself be valid JSON unless
     * {@code validateValue} is false, in which case an invalid value is stored
     * as a plain string.
     *
     * @param jsonObject the string representation of a JSON object or array,
     *        e.g. {"one":1,"two":2} or [1,2,3]
     * @param newPropertyName the name of the property to add (used only when the
     *        root is an object)
     * @param newPropertyValue the value for the new property; must be valid JSON
     *        unless validateValue is false
     * @param validateValue "true"/"false"; when unset, defaults to true
     * @return a map with keys: returnResult (the updated JSON, or an error
     *         message), returnCode (0 success / -1 failure) and exception
     */
    @Action(name = "Add JSON Property to Object", outputs = {
            @Output(OutputNames.RETURN_RESULT),
            @Output(OutputNames.RETURN_CODE),
            @Output(OutputNames.EXCEPTION) },
            responses = {
                    @Response(text = ResponseNames.SUCCESS, field = OutputNames.RETURN_CODE, value = ReturnCodes.SUCCESS, matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.RESOLVED),
                    @Response(text = ResponseNames.FAILURE, field = OutputNames.RETURN_CODE, value = ReturnCodes.FAILURE, matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.ERROR, isOnFail = true) })
    public Map<String, String> execute(
            @Param(value = Constants.InputNames.JSON_OBJECT, required = true) String jsonObject,
            @Param(value = Constants.InputNames.NEW_PROPERTY_NAME, required = true) String newPropertyName,
            @Param(value = Constants.InputNames.NEW_PROPERTY_VALUE, required = true) String newPropertyValue,
            @Param(value = Constants.InputNames.VALIDATE_VALUE) String validateValue) {
        Map<String, String> returnResult = new HashMap<>();
        if (jsonObject == null || jsonObject.trim().equals(OtherValues.EMPTY_STRING)) {
            return populateResult(returnResult, new Exception("Empty jsonObject provided!"));
        }
        // Single quotes are tolerated in the incoming JSON.
        ObjectMapper mapper = new ObjectMapper().configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
        final boolean validateValueBoolean = JsonUtils.parseBooleanWithDefault(validateValue, true);
        if (StringUtilities.isBlank(newPropertyValue)) {
            final String exceptionValue = "The value for the property " + newPropertyName + " it is not a valid JSON object!";
            return populateResult(returnResult, new Exception(exceptionValue));
        }
        if (newPropertyName == null) {
            return populateResult(returnResult, new Exception("Null newPropertyName provided!"));
        }
        JsonNode jsonRoot;
        try {
            jsonRoot = mapper.readTree(jsonObject);
        } catch (Exception exception) {
            final String exceptionValue = "Invalid jsonObject provided! " + exception.getMessage();
            return populateResult(returnResult, exceptionValue, exception);
        }
        ContainerNode jsonNodes = null;
        JsonNode jsonNodeValueWrapper;
        try {
            jsonNodeValueWrapper = mapper.readTree(newPropertyValue);
        } catch (IOException exception) {
            if (!validateValueBoolean) {
                // Validation disabled: fall back to treating the value as a plain string.
                jsonNodeValueWrapper = mapper.valueToTree(newPropertyValue);
            } else {
                final String exceptionValue = "The value for the property " + newPropertyName + " it is not a valid JSON object!";
                return populateResult(returnResult, exceptionValue, exception);
            }
        }
        // Objects get a named property; arrays get the value appended.
        if (jsonRoot instanceof ObjectNode) {
            jsonNodes = ((ObjectNode) jsonRoot).putPOJO(newPropertyName, jsonNodeValueWrapper);
        }
        if (jsonRoot instanceof ArrayNode) {
            jsonNodes = ((ArrayNode) jsonRoot).add(jsonNodeValueWrapper);
        }
        // Root was a scalar/other node type: nothing can be added to it.
        if (jsonNodes == null) {
            return populateResult(returnResult, new Exception("The value cannot be added!"));
        }
        return populateResult(returnResult, jsonNodes.toString(), null);
    }
}
public class RedisStorage {
    /**
     * Inform the <code>JobStore</code> that the scheduler has completed the
     * firing of the given <code>Trigger</code> (and the execution of its
     * associated <code>Job</code> completed, threw an exception, or was vetoed),
     * and that the <code>{@link org.quartz.JobDataMap}</code> in the given
     * <code>JobDetail</code> should be updated if the <code>Job</code> is stateful.
     *
     * @param trigger the trigger which was completed
     * @param jobDetail the job which was completed
     * @param triggerInstCode the status of the completed job
     * @param jedis a thread-safe Redis connection
     */
    @Override
    public void triggeredJobComplete(OperableTrigger trigger, JobDetail jobDetail,
            Trigger.CompletedExecutionInstruction triggerInstCode, Jedis jedis)
            throws JobPersistenceException, ClassNotFoundException {
        final String jobHashKey = redisSchema.jobHashKey(jobDetail.getKey());
        final String jobDataMapHashKey = redisSchema.jobDataMapHashKey(jobDetail.getKey());
        final String triggerHashKey = redisSchema.triggerHashKey(trigger.getKey());
        logger.debug(String.format("Job %s completed.", jobHashKey));
        if (jedis.exists(jobHashKey)) {
            // job was not deleted during execution
            Pipeline pipe;
            if (isPersistJobDataAfterExecution(jobDetail.getJobClass())) {
                // update the job data map: delete then re-write atomically via a pipeline
                JobDataMap jobDataMap = jobDetail.getJobDataMap();
                pipe = jedis.pipelined();
                pipe.del(jobDataMapHashKey);
                if (jobDataMap != null && !jobDataMap.isEmpty()) {
                    pipe.hmset(jobDataMapHashKey, getStringDataMap(jobDataMap));
                }
                pipe.syncAndReturnAll();
            }
            if (isJobConcurrentExecutionDisallowed(jobDetail.getJobClass())) {
                // unblock the job
                pipe = jedis.pipelined();
                pipe.srem(redisSchema.blockedJobsSet(), jobHashKey);
                pipe.del(redisSchema.jobBlockedKey(jobDetail.getKey()));
                pipe.syncAndReturnAll();
                // Move each of the job's triggers out of its BLOCKED /
                // PAUSED_BLOCKED state, preserving its score (fire time).
                final String jobTriggersSetKey = redisSchema.jobTriggersSetKey(jobDetail.getKey());
                for (String nonConcurrentTriggerHashKey : jedis.smembers(jobTriggersSetKey)) {
                    Double score = jedis.zscore(redisSchema.triggerStateKey(RedisTriggerState.BLOCKED), nonConcurrentTriggerHashKey);
                    if (score != null) {
                        setTriggerState(RedisTriggerState.WAITING, score, nonConcurrentTriggerHashKey, jedis);
                    } else {
                        score = jedis.zscore(redisSchema.triggerStateKey(RedisTriggerState.PAUSED_BLOCKED), nonConcurrentTriggerHashKey);
                        if (score != null) {
                            setTriggerState(RedisTriggerState.PAUSED, score, nonConcurrentTriggerHashKey, jedis);
                        }
                    }
                }
                signaler.signalSchedulingChange(0L);
            }
        } else {
            // unblock the job, even if it has been deleted
            jedis.srem(redisSchema.blockedJobsSet(), jobHashKey);
        }
        if (jedis.exists(triggerHashKey)) {
            // trigger was not deleted during job execution
            if (triggerInstCode == Trigger.CompletedExecutionInstruction.DELETE_TRIGGER) {
                if (trigger.getNextFireTime() == null) {
                    // double-check for possible reschedule within job execution,
                    // which would cancel the need to delete
                    if (isNullOrEmpty(jedis.hget(triggerHashKey, TRIGGER_NEXT_FIRE_TIME))) {
                        removeTrigger(trigger.getKey(), jedis);
                    }
                } else {
                    removeTrigger(trigger.getKey(), jedis);
                    signaler.signalSchedulingChange(0L);
                }
            } else if (triggerInstCode == Trigger.CompletedExecutionInstruction.SET_TRIGGER_COMPLETE) {
                setTriggerState(RedisTriggerState.COMPLETED, (double) System.currentTimeMillis(), triggerHashKey, jedis);
                signaler.signalSchedulingChange(0L);
            } else if (triggerInstCode == Trigger.CompletedExecutionInstruction.SET_TRIGGER_ERROR) {
                logger.debug(String.format("Trigger %s set to ERROR state.", triggerHashKey));
                // Score 0 when there is no next fire time.
                final double score = trigger.getNextFireTime() != null ? (double) trigger.getNextFireTime().getTime() : 0;
                setTriggerState(RedisTriggerState.ERROR, score, triggerHashKey, jedis);
                signaler.signalSchedulingChange(0L);
            } else if (triggerInstCode == Trigger.CompletedExecutionInstruction.SET_ALL_JOB_TRIGGERS_ERROR) {
                // Every trigger of this job goes to ERROR, keyed by its stored next fire time.
                final String jobTriggersSetKey = redisSchema.jobTriggersSetKey(jobDetail.getKey());
                for (String errorTriggerHashKey : jedis.smembers(jobTriggersSetKey)) {
                    final String nextFireTime = jedis.hget(errorTriggerHashKey, TRIGGER_NEXT_FIRE_TIME);
                    final double score = isNullOrEmpty(nextFireTime) ? 0 : Double.parseDouble(nextFireTime);
                    setTriggerState(RedisTriggerState.ERROR, score, errorTriggerHashKey, jedis);
                }
                signaler.signalSchedulingChange(0L);
            } else if (triggerInstCode == Trigger.CompletedExecutionInstruction.SET_ALL_JOB_TRIGGERS_COMPLETE) {
                final String jobTriggerSetKey = redisSchema.jobTriggersSetKey(jobDetail.getKey());
                for (String completedTriggerHashKey : jedis.smembers(jobTriggerSetKey)) {
                    setTriggerState(RedisTriggerState.COMPLETED, (double) System.currentTimeMillis(), completedTriggerHashKey, jedis);
                }
                signaler.signalSchedulingChange(0L);
            }
        }
    }
}
public class Context { /** * Set the value for the key in this context . */ public < T > void put ( Key < T > key , T data ) { } }
if ( data instanceof Factory < ? > ) throw new AssertionError ( "T extends Context.Factory" ) ; checkState ( ht ) ; Object old = ht . put ( key , data ) ; if ( old != null && ! ( old instanceof Factory < ? > ) && old != data && data != null ) throw new AssertionError ( "duplicate context value" ) ;
public class ThriftEnvelopeEvent { /** * Given an InputStream , extract the eventDateTime , granularity and thriftEnvelope to build * the ThriftEnvelopeEvent . * This method expects the stream to be open and won ' t close it for you . * @ param in InputStream to read * @ throws IOException generic I / O Exception */ private void deserializeFromStream ( final InputStream in ) throws IOException { } }
final byte [ ] dateTimeBytes = new byte [ 8 ] ; in . read ( dateTimeBytes , 0 , 8 ) ; eventDateTime = new DateTime ( ByteBuffer . wrap ( dateTimeBytes ) . getLong ( 0 ) ) ; final byte [ ] sizeGranularityInBytes = new byte [ 4 ] ; in . read ( sizeGranularityInBytes , 0 , 4 ) ; final byte [ ] granularityBytes = new byte [ ByteBuffer . wrap ( sizeGranularityInBytes ) . getInt ( 0 ) ] ; in . read ( granularityBytes , 0 , granularityBytes . length ) ; granularity = Granularity . valueOf ( new String ( granularityBytes , Charset . forName ( "UTF-8" ) ) ) ; thriftEnvelope = deserializer . deserialize ( null ) ;
public class WindowsInstallerLink {
    /**
     * Performs installation of Jenkins as a Windows service into the directory
     * given by the {@code dir} query parameter: copies the service wrapper files
     * and the war, then runs {@code jenkins.exe install} elevated.
     *
     * Requires ADMINISTER permission and the .NET Framework 2.0+.
     *
     * @param req the current request
     * @param rsp the response to render errors or the redirect into
     * @param _dir target installation directory (query parameter "dir")
     */
    @RequirePOST
    public void doDoInstall(StaplerRequest req, StaplerResponse rsp,
            @QueryParameter("dir") String _dir) throws IOException, ServletException {
        Jenkins.getInstance().checkPermission(Jenkins.ADMINISTER);
        if (installationDir != null) {
            // installation already complete
            sendError("Installation is already complete", req, rsp);
            return;
        }
        if (!DotNet.isInstalled(2, 0)) {
            sendError(".NET Framework 2.0 or later is required for this feature", req, rsp);
            return;
        }
        File dir = new File(_dir).getAbsoluteFile();
        dir.mkdirs();
        if (!dir.exists()) {
            sendError("Failed to create installation directory: " + dir, req, rsp);
            return;
        }
        try {
            // copy files over there
            copy(req, rsp, dir, getClass().getResource("/windows-service/jenkins.exe"), "jenkins.exe");
            copy(req, rsp, dir, getClass().getResource("/windows-service/jenkins.exe.config"), "jenkins.exe.config");
            copy(req, rsp, dir, getClass().getResource("/windows-service/jenkins.xml"), "jenkins.xml");
            // Skip the war copy when the running war is already at the destination.
            if (!hudsonWar.getCanonicalFile().equals(new File(dir, "jenkins.war").getCanonicalFile()))
                copy(req, rsp, dir, hudsonWar.toURI().toURL(), "jenkins.war");
            // install as a service, capturing the installer's output for error reporting
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            StreamTaskListener task = new StreamTaskListener(baos);
            task.getLogger().println("Installing a service");
            int r = runElevated(new File(dir, "jenkins.exe"), "install", task, dir);
            if (r != 0) {
                sendError(baos.toString(), req, rsp);
                return;
            }
            // installation was successful
            installationDir = dir;
            rsp.sendRedirect(".");
        } catch (AbortException e) {
            // this exception is used as a signal to terminate processing;
            // the error should have been already reported
        } catch (InterruptedException e) {
            throw new ServletException(e);
        }
    }
}
public class ServletContextUtils { /** * Returns the context path associated to the servlet context * @ param servletContext * the servlet context * @ return the context path associated to the servlet context */ public static String getContextPath ( ServletContext servletContext ) { } }
String contextPath = DEFAULT_CONTEXT_PATH ; // Get the context path if ( servletContext != null ) { contextPath = servletContext . getContextPath ( ) ; if ( StringUtils . isEmpty ( contextPath ) ) { contextPath = DEFAULT_CONTEXT_PATH ; } } return contextPath ;
public class EJSContainer { /** * F86406 */ public void introspect ( IntrospectionWriter writer , boolean fullBMD ) { } }
// Indicate the start of the dump , and include the toString ( ) // of EJSContainer , so this can easily be matched to a trace . writer . begin ( "EJSContainer Dump ---> " + this ) ; writer . println ( "ivName = " + ivName ) ; writer . println ( "ivEJBRuntime = " + ivEJBRuntime ) ; writer . println ( "ivEmbedded = " + ivEmbedded ) ; writer . println ( "ivEntityHelper = " + ivEntityHelper ) ; writer . println ( "ivSFSBFailoverEnabled = " + ivSFSBFailoverEnabled ) ; writer . println ( "ivUOWManager = " + ivUOWManager ) ; writer . println ( "pmiFactory = " + pmiFactory ) ; writer . println ( "uowCtrl = " + uowCtrl ) ; writer . println ( "userTransactionImpl = " + userTransactionImpl ) ; // Collaborators writer . begin ( "Collaborators" ) ; writer . println ( "securityCollaborator = " + ivSecurityCollaborator ) ; introspectCollab ( "beforeActivationCollaborators" , ivBeforeActivationCollaborators , writer ) ; introspectCollab ( "beforeActivationAfterCompletionCollaborators" , ivBeforeActivationAfterCompletionCollaborators , writer ) ; introspectCollab ( "afterActivationCollaborators" , ivAfterActivationCollaborators , writer ) ; writer . end ( ) ; // Dump metadata for all installed beans writer . begin ( "internalBeanMetaDataStore : " + internalBeanMetaDataStore . size ( ) + " installed beans" ) ; synchronized ( internalBeanMetaDataStore ) { if ( fullBMD ) { for ( Enumeration < BeanMetaData > en = internalBeanMetaDataStore . elements ( ) ; en . hasMoreElements ( ) ; ) { BeanMetaData bmd = en . nextElement ( ) ; bmd . introspect ( writer ) ; } } else { List < String > keyNames = new ArrayList < String > ( ) ; for ( J2EEName name : internalBeanMetaDataStore . keySet ( ) ) { keyNames . add ( name . toString ( ) ) ; } Collections . sort ( keyNames ) ; Set < J2EEName > keys = internalBeanMetaDataStore . keySet ( ) ; for ( String keyName : keyNames ) { for ( J2EEName key : keys ) { if ( keyName . equals ( key . toString ( ) ) ) { writer . 
println ( keyName + " : " + internalBeanMetaDataStore . get ( key ) ) ; break ; } } } } } // end synchronized writer . end ( ) ; activator . introspect ( writer ) ; wrapperManager . introspect ( writer ) ; writer . end ( ) ;
public class Feature { /** * Set attribute value of given type . * @ param name attribute name * @ param value attribute value */ public void setCurrencyAttribute ( String name , String value ) { } }
Attribute attribute = getAttributes ( ) . get ( name ) ; if ( ! ( attribute instanceof CurrencyAttribute ) ) { throw new IllegalStateException ( "Cannot set currency value on attribute with different type, " + attribute . getClass ( ) . getName ( ) + " setting value " + value ) ; } ( ( CurrencyAttribute ) attribute ) . setValue ( value ) ;
public class MonotonicTimestampGenerator { /** * Compute the next timestamp , given the current clock tick and the last timestamp returned . * < p > If timestamps have to drift ahead of the current clock tick to guarantee monotonicity , a * warning will be logged according to the rules defined in the configuration . */ protected long computeNext ( long last ) { } }
long currentTick = clock . currentTimeMicros ( ) ; if ( last >= currentTick ) { maybeLog ( currentTick , last ) ; return last + 1 ; } return currentTick ;
public class Filter { /** * Returns a combined filter instance that accepts records which are * accepted either by this filter or the one given . * @ param expression query filter expression to parse * @ return canonical Filter instance * @ throws IllegalArgumentException if filter is null */ public final Filter < S > or ( String expression ) { } }
return or ( new FilterParser < S > ( mType , expression ) . parseRoot ( ) ) ;
public class RadialMenu {
    /**
     * Activates the radial menu, rendering a menu around the prescribed bounds.
     * It is expected that the host component will subsequently call
     * {@link #render} if the menu invalidates the region of the component
     * occupied by the menu and requests it to repaint.
     *
     * @param host the host component within which the radial menu is displayed
     * @param bounds the bounds of the object that was clicked to activate the menu
     * @param argument a reference to an object that will be provided along with
     *        the command that is issued if the user selects a menu item (unless
     *        that item has an overriding argument, in which case the overriding
     *        argument will be used)
     */
    public void activate(Host host, Rectangle bounds, Object argument) {
        // The argument must be recorded before activation so that any command
        // issued by the two-argument overload sees it.
        setActivationArgument(argument);
        activate(host, bounds);
    }
}
public class ConfigOptionParser { /** * Prints a suggestion to stderr for the argument based on the levenshtein distance metric * @ param arg the argument which could not be assigned to a flag * @ param co the { @ link ConfigOption } List where every flag is stored */ private void printSuggestion ( String arg , List < ConfigOption > co ) { } }
List < ConfigOption > sortedList = new ArrayList < ConfigOption > ( co ) ; Collections . sort ( sortedList , new ConfigOptionLevenshteinDistance ( arg ) ) ; System . err . println ( "Parse error for argument \"" + arg + "\", did you mean " + sortedList . get ( 0 ) . getCommandLineOption ( ) . showFlagInfo ( ) + "? Ignoring for now." ) ;
public class AbstractConfigFile { /** * Stores xml - content to file . This doesn ' t change stored url / file . * @ param file * @ throws IOException */ public void storeAs ( File file ) throws IOException { } }
try ( FileOutputStream fos = new FileOutputStream ( file ) ; OutputStreamWriter osw = new OutputStreamWriter ( fos , UTF_8 ) ; BufferedWriter bw = new BufferedWriter ( osw ) ) { store ( bw ) ; }
public class UnicodeSet { /** * Retain the specified string in this set if it is present . * Upon return this set will be empty if it did not contain s , or * will only contain s if it did contain s . * @ param cs the string to be retained * @ return this object , for chaining */ public final UnicodeSet retain ( CharSequence cs ) { } }
int cp = getSingleCP ( cs ) ; if ( cp < 0 ) { String s = cs . toString ( ) ; boolean isIn = strings . contains ( s ) ; if ( isIn && size ( ) == 1 ) { return this ; } clear ( ) ; strings . add ( s ) ; pat = null ; } else { retain ( cp , cp ) ; } return this ;
public class TimeUnit {
    /**
     * Normalization method for special-form time expressions.
     * Recognizes the individual fields (year/month/day/hour/minute/second,
     * stored in _tp.tunit[0..5]) of fixed-form time expression units such as
     * "HH:mm:ss", "yyyy-M-d", "M/d/yyyy" and "yyyy.M.d", and applies 12-hour
     * adjustments for Chinese period words (noon/afternoon/evening) and pm/PM.
     */
    public void norm_setTotal() {
        String rule;
        Pattern pattern;
        Matcher match;
        String[] tmp_parser;
        String tmp_target;

        /*
         * All matching rules in this function were tightened to be stricter.
         * (modified by Cao Ling)
         */
        // HH:mm:ss — the lookbehind rejects weekday forms like 周2 / 星期2.
        rule = "(?<!(周|星期))([0-2]?[0-9]):[0-5]?[0-9]:[0-5]?[0-9]";
        pattern = Pattern.compile(rule);
        match = pattern.matcher(Time_Expression);
        if (match.find()) {
            tmp_parser = new String[3];
            tmp_target = match.group();
            tmp_parser = tmp_target.split(":");
            _tp.tunit[3] = Integer.parseInt(tmp_parser[0]);
            _tp.tunit[4] = Integer.parseInt(tmp_parser[1]);
            _tp.tunit[5] = Integer.parseInt(tmp_parser[2]);
        }
        /*
         * Added matching for colon-form times with the seconds omitted (HH:mm).
         * (added by Cao Ling)
         */
        else {
            rule = "(?<!(周|星期))([0-2]?[0-9]):[0-5]?[0-9]";
            pattern = Pattern.compile(rule);
            match = pattern.matcher(Time_Expression);
            if (match.find()) {
                tmp_parser = new String[2];
                tmp_target = match.group();
                tmp_parser = tmp_target.split(":");
                _tp.tunit[3] = Integer.parseInt(tmp_parser[0]);
                _tp.tunit[4] = Integer.parseInt(tmp_parser[1]);
            }
        }
        /*
         * Added correct hour computation for fixed-form expressions containing
         * the period words 中午/午间 (noon), 下午/午后 (afternoon),
         * 晚上/傍晚/晚间/晚 (evening), pm/PM; normalization rules as above.
         * (added by Cao Ling)
         */
        // Noon: hours 0..10 are shifted into the 12..22 range.
        rule = "(中午)|(午间)";
        pattern = Pattern.compile(rule);
        match = pattern.matcher(Time_Expression);
        if (match.find()) {
            if (_tp.tunit[3] >= 0 && _tp.tunit[3] <= 10) {
                _tp.tunit[3] += 12;
            }
        }
        // Afternoon / pm: hours 0..11 get +12.
        rule = "(下午)|(午后)|(pm)|(PM)";
        pattern = Pattern.compile(rule);
        match = pattern.matcher(Time_Expression);
        if (match.find()) {
            if (_tp.tunit[3] >= 0 && _tp.tunit[3] <= 11) {
                _tp.tunit[3] += 12;
            }
        }
        // Evening: hours 1..11 get +12; "12 at night" is normalized to 0 (midnight).
        rule = "晚";
        pattern = Pattern.compile(rule);
        match = pattern.matcher(Time_Expression);
        if (match.find()) {
            if (_tp.tunit[3] >= 1 && _tp.tunit[3] <= 11) {
                _tp.tunit[3] += 12;
            } else if (_tp.tunit[3] == 12) {
                _tp.tunit[3] = 0;
            }
        }
        // yyyy-M-d → tunit[0]=year, tunit[1]=month, tunit[2]=day.
        rule = "[0-9]?[0-9]?[0-9]{2}-((10)|(11)|(12)|([1-9]))-((?<!\\d))([0-3][0-9]|[1-9])";
        pattern = Pattern.compile(rule);
        match = pattern.matcher(Time_Expression);
        if (match.find()) {
            tmp_parser = new String[3];
            tmp_target = match.group();
            tmp_parser = tmp_target.split("-");
            _tp.tunit[0] = Integer.parseInt(tmp_parser[0]);
            _tp.tunit[1] = Integer.parseInt(tmp_parser[1]);
            _tp.tunit[2] = Integer.parseInt(tmp_parser[2]);
        }
        // M/d/yyyy (US order) → month, day, year.
        rule = "((10)|(11)|(12)|([1-9]))/((?<!\\d))([0-3][0-9]|[1-9])/[0-9]?[0-9]?[0-9]{2}";
        pattern = Pattern.compile(rule);
        match = pattern.matcher(Time_Expression);
        if (match.find()) {
            tmp_parser = new String[3];
            tmp_target = match.group();
            tmp_parser = tmp_target.split("/");
            _tp.tunit[1] = Integer.parseInt(tmp_parser[0]);
            _tp.tunit[2] = Integer.parseInt(tmp_parser[1]);
            _tp.tunit[0] = Integer.parseInt(tmp_parser[2]);
        }
        /*
         * Added correct recognition of the fixed form year.month.day (yyyy.M.d).
         * (added by Cao Ling)
         */
        rule = "[0-9]?[0-9]?[0-9]{2}\\.((10)|(11)|(12)|([1-9]))\\.((?<!\\d))([0-3][0-9]|[1-9])";
        pattern = Pattern.compile(rule);
        match = pattern.matcher(Time_Expression);
        if (match.find()) {
            tmp_parser = new String[3];
            tmp_target = match.group();
            tmp_parser = tmp_target.split("\\.");
            _tp.tunit[0] = Integer.parseInt(tmp_parser[0]);
            _tp.tunit[1] = Integer.parseInt(tmp_parser[1]);
            _tp.tunit[2] = Integer.parseInt(tmp_parser[2]);
        }
    }
}
public class ServerBuilder {
    /**
     * Binds the specified {@link Service} at the specified path pattern of the default
     * {@link VirtualHost}. e.g.
     * <ul>
     *   <li>{@code /login} (no path parameters)</li>
     *   <li>{@code /users/{userId}} (curly-brace style)</li>
     *   <li>{@code /list/:productType/by/:ordering} (colon style)</li>
     *   <li>{@code exact:/foo/bar} (exact match)</li>
     *   <li>{@code prefix:/files} (prefix match)</li>
     *   <li><code>glob:/~&#42;/downloads/**</code> (glob pattern)</li>
     *   <li>{@code regex:^/files/(?<filePath>.*)$} (regular expression)</li>
     * </ul>
     *
     * @return this builder, for method chaining
     * @throws IllegalArgumentException if the specified path pattern is invalid
     * @throws IllegalStateException if the default {@link VirtualHost} has been set via
     *                               {@link #defaultVirtualHost(VirtualHost)} already
     */
    public ServerBuilder service ( String pathPattern , Service < HttpRequest , HttpResponse > service ) {
        // Presumably verifies the default virtual host is still configurable and throws
        // the IllegalStateException documented above — confirm against its definition.
        defaultVirtualHostBuilderUpdated ( ) ;
        // Delegate pattern validation and registration to the default virtual host's builder.
        defaultVirtualHostBuilder . service ( pathPattern , service ) ;
        return this ; // fluent API
    }
}
public class NamedPattern { /** * Matches a string against this named pattern . * @ param string * the string to match * @ return a match object , or null if there was no match */ public NamedPatternMatch < E > match ( final String string ) { } }
final Matcher matcher = pattern . matcher ( string ) ; while ( matcher . find ( ) ) { final int start = matcher . start ( ) ; final int end = matcher . end ( ) ; if ( start == 0 && end == string . length ( ) ) { final Map < E , String > resultMap = new EnumMap < > ( groupEnum ) ; final Set < Entry < E , Integer > > entries = groupIndexes . entrySet ( ) ; for ( final Entry < E , Integer > entry : entries ) { final E group = entry . getKey ( ) ; final Integer groupIndex = entry . getValue ( ) ; final String result = matcher . group ( groupIndex ) ; resultMap . put ( group , result ) ; } return new NamedPatternMatch < > ( resultMap ) ; } } return null ;
public class Computer { /** * Calling path , * means protected by Queue . withLock * Computer . doConfigSubmit - > Computer . replaceBy - > Jenkins . setNodes * - > Computer . setNode * AbstractCIBase . updateComputerList - > Computer . inflictMortalWound * * AbstractCIBase . updateComputerList - > AbstractCIBase . updateComputer * - > Computer . setNode * AbstractCIBase . updateComputerList - > AbstractCIBase . killComputer - > Computer . kill * Computer . constructor - > Computer . setNode * Computer . kill is called after numExecutors set to zero ( Computer . inflictMortalWound ) so not need the Queue . lock * @ param number of executors */ @ GuardedBy ( "hudson.model.Queue.lock" ) private void setNumExecutors ( int n ) { } }
this . numExecutors = n ; final int diff = executors . size ( ) - n ; if ( diff > 0 ) { // we have too many executors // send signal to all idle executors to potentially kill them off // need the Queue maintenance lock held to prevent concurrent job assignment on the idle executors Queue . withLock ( new Runnable ( ) { @ Override public void run ( ) { for ( Executor e : executors ) if ( e . isIdle ( ) ) e . interrupt ( ) ; } } ) ; } if ( diff < 0 ) { // if the number is increased , add new ones addNewExecutorIfNecessary ( ) ; }
public class JsonRpcResponse { /** * Generates the JSON representation of this response . */ public JsonObject toJson ( ) { } }
JsonObject body = new JsonObject ( ) ; body . add ( JsonRpcProtocol . ID , id ( ) ) ; if ( isError ( ) ) { body . add ( JsonRpcProtocol . ERROR , error ( ) . toJson ( ) ) ; } else { body . add ( JsonRpcProtocol . RESULT , result ( ) ) ; } return body ;
public class CoronaJobTracker { /** * Some perparation job needed by remote job tracker for * failover */ public void prepareFailover ( ) { } }
if ( ! RemoteJTProxy . isJTRestartingEnabled ( conf ) ) { return ; } LOG . info ( "prepareFailover done" ) ; this . isPurgingJob = false ; if ( this . parentHeartbeat != null ) { // Because our failover mechanism based on remotJTProxy can ' t // reach remote job tracker , we stop the interTrackerServer to // trigger the failover this . interTrackerServer . stop ( ) ; }
public class SRTUpgradeOutputStream31 { /** * @ see javax . servlet . ServletOutputStream # println ( ) */ public void println ( ) throws IOException { } }
if ( this . _listener != null && ! checkIfCalledFromWLonError ( ) ) { _outHelper . write_NonBlocking ( CRLF , 0 , 2 ) ; } else { this . write ( CRLF , 0 , 2 ) ; }