signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ValueNumberFrame { /** * Kill all loads . This conservatively handles method calls where we don ' t * really know what fields might be assigned . */ public void killAllLoadsOf ( @ CheckForNull ValueNumber v ) { } }
if ( ! REDUNDANT_LOAD_ELIMINATION ) { return ; } FieldSummary fieldSummary = AnalysisContext . currentAnalysisContext ( ) . getFieldSummary ( ) ; HashSet < AvailableLoad > killMe = new HashSet < > ( ) ; for ( AvailableLoad availableLoad : getAvailableLoadMap ( ) . keySet ( ) ) { if ( availableLoad . getReference ( ) != v ) { continue ; } XField field = availableLoad . getField ( ) ; if ( ! field . isFinal ( ) && ( ! USE_WRITTEN_OUTSIDE_OF_CONSTRUCTOR || fieldSummary . isWrittenOutsideOfConstructor ( field ) ) ) { if ( RLE_DEBUG ) { System . out . println ( "Killing load of " + availableLoad + " in " + this ) ; } killMe . add ( availableLoad ) ; } } killAvailableLoads ( killMe ) ;
public class FunctionsInner { /** * Gets details about the specified function . * @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal . * @ param jobName The name of the streaming job . * @ param functionName The name of the function . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < FunctionInner > getAsync ( String resourceGroupName , String jobName , String functionName , final ServiceCallback < FunctionInner > serviceCallback ) { } }
return ServiceFuture . fromHeaderResponse ( getWithServiceResponseAsync ( resourceGroupName , jobName , functionName ) , serviceCallback ) ;
public class LastGrantedAuthoritiesProperty { /** * Persist the information with the new { @ link UserDetails } . */ public void update ( @ Nonnull Authentication auth ) throws IOException { } }
List < String > roles = new ArrayList < > ( ) ; for ( GrantedAuthority ga : auth . getAuthorities ( ) ) { roles . add ( ga . getAuthority ( ) ) ; } String [ ] a = roles . toArray ( new String [ 0 ] ) ; if ( ! Arrays . equals ( this . roles , a ) ) { this . roles = a ; this . timestamp = System . currentTimeMillis ( ) ; user . save ( ) ; }
public class EventDAOWrapper { /** * { @ inheritDoc } */ @ Override public < E extends Entity > E lookupEntity ( EntityType et , long l , Class < E > type ) { } }
return wrapper . lookupEntity ( et , l , type ) ;
public class TabbedPaneTabCloseButtonPainter { /** * Create the gradient for the " x " drop shadow . * @ param s the Shape for the gradient . * @ return the gradient paint . */ private Paint createGraphicDropShadowGradient ( Shape s ) { } }
Rectangle2D b = s . getBounds2D ( ) ; float midX = ( float ) b . getCenterX ( ) ; float y1 = ( float ) b . getMinY ( ) ; float y2 = ( float ) b . getMaxY ( ) ; return createGradient ( midX , y1 , midX , y2 , new float [ ] { 0f , 0.43f , 0.57f , 1f } , new Color [ ] { graphicDropShadow1 , graphicDropShadow2 , graphicDropShadow3 , graphicDropShadow4 } ) ;
public class AudioWife { /** * Starts playing audio file associated . Before playing the audio , visibility of appropriate UI * controls is made visible . Calling this method has no effect if the audio is already being * played . */ public void play ( ) { } }
// if play button itself is null , the whole purpose of AudioWife is // defeated . if ( mPlayButton == null ) { throw new IllegalStateException ( ERROR_PLAYVIEW_NULL ) ; } if ( mUri == null ) { throw new IllegalStateException ( "Uri cannot be null. Call init() before calling this method" ) ; } if ( mMediaPlayer == null ) { throw new IllegalStateException ( "Call init() before calling this method" ) ; } if ( mMediaPlayer . isPlaying ( ) ) { return ; } mProgressUpdateHandler . postDelayed ( mUpdateProgress , AUDIO_PROGRESS_UPDATE_TIME ) ; // enable visibility of all UI controls . setViewsVisibility ( ) ; mMediaPlayer . start ( ) ; setPausable ( ) ;
public class MigrateToExtensionSettings {
    /**
     * Returns the enabled feed items for a feed, paging through all results
     * PAGE_SIZE entries at a time.
     */
    private static List<FeedItem> getFeedItems(AdWordsServicesInterface adWordsServices, AdWordsSession session,
            Feed feed) throws RemoteException {
        FeedItemServiceInterface feedItemService = adWordsServices.get(session, FeedItemServiceInterface.class);
        String baseQuery = String.format(
                "SELECT FeedItemId, AttributeValues WHERE Status = 'ENABLED' AND FeedId = %d", feed.getId());
        List<FeedItem> feedItems = new ArrayList<>();
        int offset = 0;
        FeedItemPage page;
        do {
            // Append the paging clause to the base AWQL query.
            String pagedQuery = String.format(baseQuery + " LIMIT %d, %d", offset, PAGE_SIZE);
            page = feedItemService.query(pagedQuery);
            if (page.getEntries() != null) {
                feedItems.addAll(Arrays.asList(page.getEntries()));
            }
            offset += PAGE_SIZE;
        } while (offset < page.getTotalNumEntries());
        return feedItems;
    }
}
public class RamlControllerVisitor { /** * Visit the Wisdom Controller source model in order to populate the raml model . * @ param element The wisdom controller model ( we visit it ) . * @ param raml The raml model ( we construct it ) . */ @ Override public void visit ( ControllerModel element , Raml raml ) { } }
raml . setTitle ( element . getName ( ) ) ; if ( element . getDescription ( ) != null && ! element . getDescription ( ) . isEmpty ( ) ) { DocumentationItem doc = new DocumentationItem ( ) ; doc . setContent ( element . getDescription ( ) ) ; doc . setTitle ( "Description" ) ; raml . setDocumentation ( singletonList ( doc ) ) ; } if ( element . getVersion ( ) != null && ! element . getVersion ( ) . isEmpty ( ) ) { raml . setVersion ( element . getVersion ( ) ) ; } // noinspection unchecked navigateTheRoutes ( element . getRoutes ( ) , null , raml ) ;
public class MarketDataBenchmark { /** * Benchmarks to allow execution outside of JMH . */ public static void main ( final String [ ] args ) { } }
for ( int i = 0 ; i < 10 ; i ++ ) { perfTestEncode ( i ) ; perfTestDecode ( i ) ; }
public class DataUtilities { /** * Convert a two - dimensional array of Strings to a List of Lists , with * type - safe individual entries * @ param oldData * @ return New data */ public static List < List < DTCellValue52 > > makeDataLists ( Object [ ] [ ] oldData ) { } }
List < List < DTCellValue52 > > newData = new ArrayList < List < DTCellValue52 > > ( ) ; for ( int iRow = 0 ; iRow < oldData . length ; iRow ++ ) { Object [ ] oldRow = oldData [ iRow ] ; List < DTCellValue52 > newRow = makeDataRowList ( oldRow ) ; newData . add ( newRow ) ; } return newData ;
public class UniverseApi { /** * Get graphic information Get information on a graphic - - - This route * expires daily at 11:05 * @ param graphicId * graphic _ id integer ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ return GraphicResponse * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public GraphicResponse getUniverseGraphicsGraphicId ( Integer graphicId , String datasource , String ifNoneMatch ) throws ApiException { } }
ApiResponse < GraphicResponse > resp = getUniverseGraphicsGraphicIdWithHttpInfo ( graphicId , datasource , ifNoneMatch ) ; return resp . getData ( ) ;
public class StatszZPageHandler { /** * Returns null if such a TreeNode doesn ' t exist . */ @ GuardedBy ( "monitor" ) private /* @ Nullable */ TreeNode findNode ( /* @ Nullable */ String path ) { } }
if ( Strings . isNullOrEmpty ( path ) || "/" . equals ( path ) ) { // Go back to the root directory . return root ; } else { List < String > dirs = PATH_SPLITTER . splitToList ( path ) ; TreeNode node = root ; for ( int i = 0 ; i < dirs . size ( ) ; i ++ ) { String dir = dirs . get ( i ) ; if ( "" . equals ( dir ) && i == 0 ) { continue ; // Skip the first " " , the path of root node . } if ( ! node . children . containsKey ( dir ) ) { return null ; } else { node = node . children . get ( dir ) ; } } return node ; }
public class WaitPageInterceptor {
    /**
     * Returns the wait context based on the context id found in the request.
     *
     * @param executionContext execution context
     * @return the wait context, or null when the request carries no id or the
     *         id is not a valid hexadecimal number (fix: the original let
     *         NumberFormatException escape on a malformed/tampered parameter)
     */
    private Context getContext(ExecutionContext executionContext) {
        HttpServletRequest request = executionContext.getActionBeanContext().getRequest();
        String parameter = request.getParameter(ID_PARAMETER);
        if (parameter == null) {
            return null;
        }
        try {
            // Context ids are transported as hexadecimal strings.
            int id = Integer.parseInt(parameter, 16);
            return contexts.get(id);
        } catch (NumberFormatException e) {
            // Malformed request parameter: treat as an unknown context.
            return null;
        }
    }
}
public class HtmlSelectManyListbox { /** * < p > Set the value of the < code > enabledClass < / code > property . < / p > */ public void setEnabledClass ( java . lang . String enabledClass ) { } }
getStateHelper ( ) . put ( PropertyKeys . enabledClass , enabledClass ) ;
public class SipCall { /** * This method is equivalent to the basic sendReinvite ( ) method except that it allows the caller * to specify additional JAIN - SIP API message headers to add to or replace in the outbound * message . Use of this method requires knowledge of the JAIN - SIP API . * NOTE : The additionalHeaders parameter passed to this method must contain a ContentTypeHeader in * order for a body to be included in the message . * The extra parameters supported by this method are : * @ param additionalHeaders ArrayList of javax . sip . header . Header , each element a SIP header to add * to the outbound message . These headers are added to the message after a correct message * has been constructed . Note that if you try to add a header that there is only supposed * to be one of in a message , and it ' s already there and only one single value is allowed * for that header , then this header addition attempt will be ignored . Use the * ' replaceHeaders ' parameter instead if you want to replace the existing header with your * own . Use null for no additional message headers . * @ param replaceHeaders ArrayList of javax . sip . header . Header , each element a SIP header to add to * the outbound message , replacing existing header ( s ) of that type if present in the * message . These headers are applied to the message after a correct message has been * constructed . Use null for no replacement of message headers . * @ return A SipTransaction object if the message was successfully sent , null otherwise . You don ' t * need to anything with this returned object other than to pass it to methods that you * call subsequently for this operation , namely waitReinviteResponse ( ) and * sendReinviteOkAck ( ) . */ public SipTransaction sendReinvite ( String newContact , String displayName , ArrayList < Header > additionalHeaders , ArrayList < Header > replaceHeaders , String body ) { } }
initErrorInfo ( ) ; if ( dialog == null ) { setReturnCode ( SipSession . INVALID_OPERATION ) ; setErrorMessage ( ( String ) SipSession . statusCodeDescription . get ( new Integer ( returnCode ) ) + " - dialog hasn't been established, can't send RE-INVITE" ) ; return null ; } try { Request req = dialog . createRequest ( Request . INVITE ) ; parent . addAuthorizations ( callId . getCallId ( ) , req ) ; MaxForwardsHeader mf = parent . getHeaderFactory ( ) . createMaxForwardsHeader ( 70 ) ; req . setHeader ( mf ) ; if ( newContact != null ) { req . setHeader ( parent . updateContactInfo ( newContact , displayName ) ) ; } else { req . setHeader ( ( ContactHeader ) parent . getContactInfo ( ) . getContactHeader ( ) . clone ( ) ) ; } SipStack . dumpMessage ( "We have created this RE-INVITE" , req ) ; SipTransaction siptrans ; synchronized ( this ) // needed for asynchronous response - // processEvent ( ) , although we ' re not using that here now . // Change there would be needed because that uses attribute // ' transaction ' { siptrans = parent . sendRequestWithTransaction ( req , false , dialog , additionalHeaders , replaceHeaders , body ) ; } if ( siptrans != null ) { cseq = ( CSeqHeader ) req . getHeader ( CSeqHeader . NAME ) ; return siptrans ; } setReturnCode ( parent . getReturnCode ( ) ) ; setErrorMessage ( parent . getErrorMessage ( ) ) ; setException ( parent . getException ( ) ) ; return null ; } catch ( Exception ex ) { setReturnCode ( SipSession . EXCEPTION_ENCOUNTERED ) ; setException ( ex ) ; setErrorMessage ( "Exception: " + ex . getClass ( ) . getName ( ) + ": " + ex . getMessage ( ) ) ; return null ; }
public class KeyFile { /** * Parses a json keyfile . * @ param serialized Json content . * @ return A new KeyFile instance . */ public static KeyFile parse ( byte [ ] serialized ) { } }
try ( InputStream in = new ByteArrayInputStream ( serialized ) ; Reader reader = new InputStreamReader ( in , UTF_8 ) ; JsonReader jsonReader = GSON . newJsonReader ( reader ) ) { JsonObject jsonObj = new JsonParser ( ) . parse ( jsonReader ) . getAsJsonObject ( ) ; KeyFile result = GSON . fromJson ( jsonObj , GenericKeyFile . class ) ; result . jsonObj = jsonObj ; return result ; } catch ( IOException | JsonParseException e ) { throw new IllegalArgumentException ( "Unable to parse key file." , e ) ; }
public class FSNamesystem { /** * Updates DatanodeInfo for each LocatedBlock in locatedBlocks . */ LocatedBlocksWithMetaInfo updateDatanodeInfo ( LocatedBlocks locatedBlocks ) throws IOException { } }
if ( locatedBlocks . getLocatedBlocks ( ) . size ( ) == 0 ) return new LocatedBlocksWithMetaInfo ( locatedBlocks . getFileLength ( ) , locatedBlocks . getLocatedBlocks ( ) , false , DataTransferProtocol . DATA_TRANSFER_VERSION , getNamespaceId ( ) , this . nameNode . getClientProtocolMethodsFingerprint ( ) ) ; List < LocatedBlock > newBlocks = new ArrayList < LocatedBlock > ( ) ; readLock ( ) ; try { for ( LocatedBlock locBlock : locatedBlocks . getLocatedBlocks ( ) ) { Block block = locBlock . getBlock ( ) ; int numNodes = blocksMap . numNodes ( block ) ; int numCorruptNodes = countNodes ( block ) . corruptReplicas ( ) ; int numCorruptReplicas = corruptReplicas . numCorruptReplicas ( block ) ; if ( numCorruptNodes != numCorruptReplicas ) { LOG . warn ( "Inconsistent number of corrupt replicas for " + block + "blockMap has " + numCorruptNodes + " but corrupt replicas map has " + numCorruptReplicas ) ; } boolean blockCorrupt = numCorruptNodes == numNodes ; int numMachineSet = blockCorrupt ? numNodes : ( numNodes - numCorruptNodes ) ; DatanodeDescriptor [ ] machineSet = new DatanodeDescriptor [ numMachineSet ] ; if ( numMachineSet > 0 ) { numNodes = 0 ; for ( Iterator < DatanodeDescriptor > it = blocksMap . nodeIterator ( block ) ; it . hasNext ( ) ; ) { DatanodeDescriptor dn = it . next ( ) ; boolean replicaCorrupt = corruptReplicas . isReplicaCorrupt ( block , dn ) ; if ( blockCorrupt || ( ! blockCorrupt && ! replicaCorrupt ) ) machineSet [ numNodes ++ ] = dn ; } } // We need to make a copy of the block object before releasing the lock // to prevent the state of block is changed after that and before the // object is serialized to clients , to avoid potential inconsistency . // Further optimization is possible to avoid some object copy . Since it // is so far not a critical path . We leave a safe approach here . 
Block blockCopy = null ; if ( block != null ) { blockCopy = new Block ( block ) ; } LocatedBlock newBlock = new LocatedBlock ( blockCopy , machineSet , 0 , blockCorrupt ) ; newBlocks . add ( newBlock ) ; } } finally { readUnlock ( ) ; } return new LocatedBlocksWithMetaInfo ( locatedBlocks . getFileLength ( ) , newBlocks , false , DataTransferProtocol . DATA_TRANSFER_VERSION , getNamespaceId ( ) , this . nameNode . getClientProtocolMethodsFingerprint ( ) ) ;
public class JcQueryResult { /** * answer a list of literal maps containing result values for the given keys * @ param key a variable number of keys which are used to calculate result - values to fill the resulting maps * @ return a list of LiteralMap ( s ) */ public LiteralMapList resultMapListOf ( JcPrimitive ... key ) { } }
List < List < ? > > results = new ArrayList < List < ? > > ( ) ; LiteralMapList ret = new LiteralMapList ( ) ; int size = - 1 ; ResultHandler . includeNullValues . set ( Boolean . TRUE ) ; try { for ( JcPrimitive k : key ) { List < ? > r = this . resultOf ( k ) ; if ( size == - 1 ) size = r . size ( ) ; results . add ( r ) ; for ( int i = 0 ; i < r . size ( ) ; i ++ ) { LiteralMap map ; if ( i > ret . size ( ) - 1 ) { map = new LiteralMap ( ) ; ret . add ( map ) ; } else map = ret . get ( i ) ; map . put ( k , r . get ( i ) ) ; } } } finally { ResultHandler . includeNullValues . remove ( ) ; } return ret ;
public class ClassFile { /** * Return a constant string at index . * @ param index * @ return */ public final String getString ( int index ) { } }
Utf8 utf8 = ( Utf8 ) getConstantInfo ( index ) ; return utf8 . getString ( ) ;
public class Operations { /** * Returns the name of the operation . * @ param op the operation * @ return the name of the operation * @ throws IllegalArgumentException if the operation was not defined . */ public static String getOperationName ( final ModelNode op ) { } }
if ( op . hasDefined ( OP ) ) { return op . get ( OP ) . asString ( ) ; } throw ControllerClientLogger . ROOT_LOGGER . operationNameNotFound ( ) ;
public class RiffFile { /** * Close Riff File . * Length is written too . */ public int close ( ) { } }
int retcode = DDC_SUCCESS ; switch ( fmode ) { case RFM_WRITE : try { file . seek ( 0 ) ; try { writeHeader_internally ( riff_header ) ; file . close ( ) ; } catch ( IOException ioe ) { retcode = DDC_FILE_ERROR ; } } catch ( IOException ioe ) { retcode = DDC_FILE_ERROR ; } break ; case RFM_READ : try { file . close ( ) ; } catch ( IOException ioe ) { retcode = DDC_FILE_ERROR ; } break ; } file = null ; fmode = RFM_UNKNOWN ; return retcode ;
public class Graph { /** * Adds a new node to the graph , given the Node object . If a node already * exists with an id equal to the given node , a DuplicatedNodeException is thrown . * @ param n the new node to be added to the graph . */ public void addNode ( Node n ) { } }
try { structure . addNode ( n ) ; for ( NodeListener listener : nodeListeners ) listener . onInsert ( n ) ; } catch ( DuplicatedNodeException e ) { duplicatedNodesCounter ++ ; }
public class SuggestedFix { /** * { @ link Builder # swap ( Tree , Tree ) } */ public static SuggestedFix swap ( Tree node1 , Tree node2 ) { } }
return builder ( ) . swap ( node1 , node2 ) . build ( ) ;
public class AttributeService { /** * Copies the attributes of the given file to the given copy file . */ public void copyAttributes ( File file , File copy , AttributeCopyOption copyOption ) { } }
switch ( copyOption ) { case ALL : file . copyAttributes ( copy ) ; break ; case BASIC : file . copyBasicAttributes ( copy ) ; break ; default : // don ' t copy }
public class VFSUtils { /** * Read the manifest from given manifest VirtualFile . * @ param manifest the VF to read from * @ return JAR ' s manifest * @ throws IOException if problems while opening VF stream occur */ public static Manifest readManifest ( VirtualFile manifest ) throws IOException { } }
if ( manifest == null ) { throw MESSAGES . nullArgument ( "manifest file" ) ; } InputStream stream = new PaddedManifestStream ( manifest . openStream ( ) ) ; try { return new Manifest ( stream ) ; } finally { safeClose ( stream ) ; }
public class RowAction { /** * returns type of commit performed on timestamp . ACTION _ NONE if none . */ synchronized int getCommitType ( long timestamp ) { } }
RowActionBase action = this ; int type = ACTION_NONE ; do { if ( action . commitTimestamp == timestamp ) { type = action . type ; } action = action . next ; } while ( action != null ) ; return type ;
public class PaperPrint { /** * Print the page footer . * @ param g2d The graphics environment . * @ param footerText The text to print in the footer . */ protected void printFooter ( Graphics2D g2d , String footerText ) { } }
FontMetrics fm = g2d . getFontMetrics ( ) ; int stringWidth = fm . stringWidth ( footerText ) ; int textX = ( pageRect . width - stringWidth ) / 2 + pageRect . x ; int textY = pageRect . y + pageRect . height - BORDER ; g2d . drawString ( footerText , textX , textY ) ;
public class ProjectTaskPredecessor { /** * Set up the key areas . */ public void setupKeys ( ) { } }
KeyAreaInfo keyArea = null ; keyArea = new KeyAreaInfo ( this , Constants . UNIQUE , ID_KEY ) ; keyArea . addKeyField ( ID , Constants . ASCENDING ) ; keyArea = new KeyAreaInfo ( this , Constants . NOT_UNIQUE , PROJECT_TASK_ID_KEY ) ; keyArea . addKeyField ( PROJECT_TASK_ID , Constants . ASCENDING ) ; keyArea . addKeyField ( PROJECT_TASK_PREDECESSOR_ID , Constants . ASCENDING ) ; keyArea = new KeyAreaInfo ( this , Constants . NOT_UNIQUE , PROJECT_TASK_PREDECESSOR_ID_KEY ) ; keyArea . addKeyField ( PROJECT_TASK_PREDECESSOR_ID , Constants . ASCENDING ) ; keyArea . addKeyField ( PROJECT_TASK_ID , Constants . ASCENDING ) ;
public class MigrationPlanner { /** * the CheckStyle warnings are suppressed intentionally , because the algorithm is followed easier within fewer methods */ @ SuppressWarnings ( { } }
"checkstyle:npathcomplexity" , "checkstyle:cyclomaticcomplexity" , "checkstyle:methodlength" } ) void planMigrations ( int partitionId , PartitionReplica [ ] oldReplicas , PartitionReplica [ ] newReplicas , MigrationDecisionCallback callback ) { assert oldReplicas . length == newReplicas . length : "Replica addresses with different lengths! Old: " + Arrays . toString ( oldReplicas ) + ", New: " + Arrays . toString ( newReplicas ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( format ( "partitionId=%d, Initial state: %s" , partitionId , Arrays . toString ( oldReplicas ) ) ) ; logger . finest ( format ( "partitionId=%d, Final state: %s" , partitionId , Arrays . toString ( newReplicas ) ) ) ; } initState ( oldReplicas ) ; assertNoDuplicate ( partitionId , oldReplicas , newReplicas ) ; // fix cyclic partition replica movements if ( fixCycle ( oldReplicas , newReplicas ) ) { if ( logger . isFinestEnabled ( ) ) { logger . finest ( format ( "partitionId=%d, Final state (after cycle fix): %s" , partitionId , Arrays . toString ( newReplicas ) ) ) ; } } int currentIndex = 0 ; while ( currentIndex < oldReplicas . length ) { if ( logger . isFinestEnabled ( ) ) { logger . finest ( format ( "partitionId=%d, Current index: %d, state: %s" , partitionId , currentIndex , Arrays . toString ( state ) ) ) ; } assertNoDuplicate ( partitionId , oldReplicas , newReplicas ) ; if ( newReplicas [ currentIndex ] == null ) { if ( state [ currentIndex ] != null ) { // replica owner is removed and no one will own this replica trace ( "partitionId=%d, New address is null at index: %d" , partitionId , currentIndex ) ; callback . 
migrate ( state [ currentIndex ] , currentIndex , - 1 , null , - 1 , - 1 ) ; state [ currentIndex ] = null ; } currentIndex ++ ; continue ; } if ( state [ currentIndex ] == null ) { int i = getReplicaIndex ( state , newReplicas [ currentIndex ] ) ; if ( i == - 1 ) { // fresh replica copy is needed , so COPY replica to newReplicas [ currentIndex ] from partition owner trace ( "partitionId=%d, COPY %s to index: %d" , partitionId , newReplicas [ currentIndex ] , currentIndex ) ; callback . migrate ( null , - 1 , - 1 , newReplicas [ currentIndex ] , - 1 , currentIndex ) ; state [ currentIndex ] = newReplicas [ currentIndex ] ; currentIndex ++ ; continue ; } if ( i > currentIndex ) { // SHIFT UP replica from i to currentIndex , copy data from partition owner trace ( "partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d" , partitionId , state [ i ] , i , currentIndex ) ; callback . migrate ( null , - 1 , - 1 , state [ i ] , i , currentIndex ) ; state [ currentIndex ] = state [ i ] ; state [ i ] = null ; continue ; } throw new AssertionError ( "partitionId=" + partitionId + "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays . toString ( oldReplicas ) + ", CURRENT: " + Arrays . toString ( state ) + ", FINAL: " + Arrays . toString ( newReplicas ) ) ; } if ( newReplicas [ currentIndex ] . equals ( state [ currentIndex ] ) ) { // no change , no action needed currentIndex ++ ; continue ; } if ( getReplicaIndex ( newReplicas , state [ currentIndex ] ) == - 1 && getReplicaIndex ( state , newReplicas [ currentIndex ] ) == - 1 ) { // MOVE partition replica from its old owner to new owner trace ( "partitionId=%d, MOVE %s to index: %d" , partitionId , newReplicas [ currentIndex ] , currentIndex ) ; callback . 
migrate ( state [ currentIndex ] , currentIndex , - 1 , newReplicas [ currentIndex ] , - 1 , currentIndex ) ; state [ currentIndex ] = newReplicas [ currentIndex ] ; currentIndex ++ ; continue ; } if ( getReplicaIndex ( state , newReplicas [ currentIndex ] ) == - 1 ) { int newIndex = getReplicaIndex ( newReplicas , state [ currentIndex ] ) ; assert newIndex > currentIndex : "partitionId=" + partitionId + ", Migration decision algorithm failed during SHIFT DOWN! INITIAL: " + Arrays . toString ( oldReplicas ) + ", CURRENT: " + Arrays . toString ( state ) + ", FINAL: " + Arrays . toString ( newReplicas ) ; if ( state [ newIndex ] == null ) { // it is a SHIFT DOWN trace ( "partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d" , partitionId , state [ currentIndex ] , newIndex , newReplicas [ currentIndex ] , currentIndex ) ; callback . migrate ( state [ currentIndex ] , currentIndex , newIndex , newReplicas [ currentIndex ] , - 1 , currentIndex ) ; state [ newIndex ] = state [ currentIndex ] ; } else { trace ( "partitionId=%d, MOVE-3 %s to index: %d" , partitionId , newReplicas [ currentIndex ] , currentIndex ) ; callback . migrate ( state [ currentIndex ] , currentIndex , - 1 , newReplicas [ currentIndex ] , - 1 , currentIndex ) ; } state [ currentIndex ] = newReplicas [ currentIndex ] ; currentIndex ++ ; continue ; } planMigrations ( partitionId , oldReplicas , newReplicas , callback , currentIndex ) ; } assert Arrays . equals ( state , newReplicas ) : "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays . toString ( oldReplicas ) + " CURRENT: " + Arrays . toString ( state ) + ", FINAL: " + Arrays . toString ( newReplicas ) ;
public class Fingerprint { /** * Checks if the current user can Discover the item . * If yes , it may be displayed as a text in Fingerprint UIs . * @ param fullName Full name of the job * @ return { @ code true } if the user can discover the item */ private static boolean canDiscoverItem ( @ Nonnull final String fullName ) { } }
final Jenkins jenkins = Jenkins . getInstance ( ) ; // Fast check to avoid security context switches Item item = null ; try { item = jenkins . getItemByFullName ( fullName ) ; } catch ( AccessDeniedException ex ) { // ignore , we will fall - back later } if ( item != null ) { return true ; } // Probably it failed due to the missing Item . DISCOVER // We try to retrieve the job using SYSTEM user and to check permissions manually . final Authentication userAuth = Jenkins . getAuthentication ( ) ; try ( ACLContext acl = ACL . as ( ACL . SYSTEM ) ) { final Item itemBySystemUser = jenkins . getItemByFullName ( fullName ) ; if ( itemBySystemUser == null ) { return false ; } // To get the item existence fact , a user needs Item . DISCOVER for the item // and Item . READ for all container folders . boolean canDiscoverTheItem = itemBySystemUser . hasPermission ( userAuth , Item . DISCOVER ) ; if ( canDiscoverTheItem ) { ItemGroup < ? > current = itemBySystemUser . getParent ( ) ; do { if ( current instanceof Item ) { final Item i = ( Item ) current ; current = i . getParent ( ) ; if ( ! i . hasPermission ( userAuth , Item . READ ) ) { canDiscoverTheItem = false ; } } else { current = null ; } } while ( canDiscoverTheItem && current != null ) ; } return canDiscoverTheItem ; }
public class HerokuAPI { /** * Information about a specific release . * @ param appName App name . See { @ link # listApps } for a list of apps that can be used . * @ param releaseName Release name . See { @ link # listReleases } for a list of the app ' s releases . * @ return the release object */ public Release getReleaseInfo ( String appName , String releaseName ) { } }
return connection . execute ( new ReleaseInfo ( appName , releaseName ) , apiKey ) ;
public class NestedRuntimeException { /** * Retrieve the most specific cause of this exception , that is , * either the innermost cause ( root cause ) or this exception itself . * @ return * the most specific cause ( never < code > null < / code > ) */ public Throwable getMostSpecificCause ( ) { } }
Throwable rootCause = null ; Throwable cause = getCause ( ) ; while ( cause != null && cause != rootCause ) { rootCause = cause ; cause = cause . getCause ( ) ; } return ( rootCause == null ? this : rootCause ) ;
public class TangoEventsAdapter { public void addTangoPipeListener ( ITangoPipeListener listener , String attrName , boolean stateless ) throws DevFailed { } }
addTangoPipeListener ( listener , attrName , new String [ 0 ] , stateless ) ;
public class TrackView { /** * Set the track which must be displayed . * @ param track view model . */ public void setModel ( SoundCloudTrack track ) { } }
mModel = track ; if ( mModel != null ) { Picasso . with ( getContext ( ) ) . load ( SoundCloudArtworkHelper . getArtworkUrl ( mModel , SoundCloudArtworkHelper . XLARGE ) ) . placeholder ( R . color . grey_light ) . fit ( ) . centerInside ( ) . into ( mArtwork ) ; mArtist . setText ( mModel . getArtist ( ) ) ; mTitle . setText ( mModel . getTitle ( ) ) ; long min = mModel . getDurationInMilli ( ) / 60000 ; long sec = ( mModel . getDurationInMilli ( ) % 60000 ) / 1000 ; mDuration . setText ( String . format ( getResources ( ) . getString ( R . string . duration ) , min , sec ) ) ; }
public class SegmentGroupMarshaller { /** * Marshall the given parameter object . */ public void marshall ( SegmentGroup segmentGroup , ProtocolMarshaller protocolMarshaller ) { } }
if ( segmentGroup == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( segmentGroup . getDimensions ( ) , DIMENSIONS_BINDING ) ; protocolMarshaller . marshall ( segmentGroup . getSourceSegments ( ) , SOURCESEGMENTS_BINDING ) ; protocolMarshaller . marshall ( segmentGroup . getSourceType ( ) , SOURCETYPE_BINDING ) ; protocolMarshaller . marshall ( segmentGroup . getType ( ) , TYPE_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class Properties { /** * Gets a dynamic property value on an object * @ param object the object from which one wants to get the property value * @ param propertyName the property name */ public static < T > T getObjectDynamicProperty ( Object object , String propertyName ) { } }
return propertyValues . getObjectDynamicProperty ( object , propertyName ) ;
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link PolicyIssuerType } { @ code > } } */ @ XmlElementDecl ( namespace = "urn:oasis:names:tc:xacml:3.0:core:schema:wd-17" , name = "PolicyIssuer" ) public JAXBElement < PolicyIssuerType > createPolicyIssuer ( PolicyIssuerType value ) { } }
return new JAXBElement < PolicyIssuerType > ( _PolicyIssuer_QNAME , PolicyIssuerType . class , null , value ) ;
public class ConverterManager { /** * Register converters supported by default . */ protected void registerDefaults ( ) { } }
register ( Long . class , new LongConverter ( ) ) ; register ( Integer . class , new IntegerConverter ( ) ) ; register ( Short . class , new ShortConverter ( ) ) ; register ( Byte . class , new ByteConverter ( ) ) ; register ( Double . class , new DoubleConverter ( ) ) ; register ( Float . class , new FloatConverter ( ) ) ; register ( Character . class , new CharacterConverter ( ) ) ; register ( Boolean . class , new BooleanConverter ( ) ) ; register ( String . class , new StringConverter ( ) ) ; register ( URL . class , new UrlConverter ( ) ) ; register ( URI . class , new UriConverter ( ) ) ; register ( Charset . class , new CharsetConverter ( ) ) ; register ( File . class , new FileConverter ( ) ) ; register ( Path . class , new PathConverter ( ) ) ; register ( Locale . class , new LocaleConverter ( ) ) ; register ( Pattern . class , new PatternConverter ( ) ) ; register ( Long . TYPE , new LongConverter ( ) ) ; register ( Integer . TYPE , new IntegerConverter ( ) ) ; register ( Short . TYPE , new ShortConverter ( ) ) ; register ( Byte . TYPE , new ByteConverter ( ) ) ; register ( Character . TYPE , new CharacterConverter ( ) ) ; register ( Double . TYPE , new DoubleConverter ( ) ) ; register ( Float . TYPE , new FloatConverter ( ) ) ; register ( Boolean . TYPE , new BooleanConverter ( ) ) ;
public class AdigeUtilities {
    /**
     * Generates {@link HillSlope}s from the information gathered in the provided feature layers.
     *
     * The network layer is scanned once to collect features, their numeric link ids
     * ({@code NETNUMNAME}) and their Pfafstetter numbers ({@code PFAFNAME}), tracking the
     * most downstream reach. Hillslope features are then matched to network links by id,
     * the resulting hillslopes are connected into a tree, and the list of hillslopes
     * upstream of the outlet is returned.
     *
     * @param netFeatureCollection the network features
     * @param hillslopeFeatureCollection the hillslope features
     * @param out a progress monitor for logging
     * @return the list of ordered hillslopes, starting from the most downstream one
     * @throws Exception if the layers are inconsistent (no outlet hillslope can be found)
     */
    public static List<IHillSlope> generateHillSlopes(FeatureCollection<SimpleFeatureType, SimpleFeature> netFeatureCollection,
            FeatureCollection<SimpleFeatureType, SimpleFeature> hillslopeFeatureCollection, IHMProgressMonitor out)
            throws Exception {
        out.message("Analizing the network layer...");
        List<SimpleFeature> netFeaturesList = new ArrayList<SimpleFeature>();
        List<Integer> netIdsList = new ArrayList<Integer>();
        ArrayList<PfafstetterNumber> netPfaffsList = new ArrayList<PfafstetterNumber>();
        FeatureIterator<SimpleFeature> netFeatureIterator = netFeatureCollection.features();
        // Track the most downstream reach while collecting all network features.
        PfafstetterNumber mostDownStreamPNumber = null;
        SimpleFeature mostDownStreamNetFeature = null;
        Integer mostDownStreamLinkId = -1;
        while (netFeatureIterator.hasNext()) {
            SimpleFeature netFeature = (SimpleFeature) netFeatureIterator.next();
            String attribute = (String) netFeature.getAttribute(PFAFNAME);
            PfafstetterNumber current = new PfafstetterNumber(attribute);
            int tmpId = ((Number) netFeature.getAttribute(NETNUMNAME)).intValue();
            if (mostDownStreamPNumber == null) {
                // First feature seen becomes the provisional outlet.
                // NOTE(review): mostDownStreamLinkId/NetFeature are not set on this
                // branch, only when a feature further downstream is later found.
                mostDownStreamPNumber = current;
            } else {
                if (current.isDownStreamOf(mostDownStreamPNumber)) {
                    mostDownStreamLinkId = tmpId;
                    mostDownStreamNetFeature = netFeature;
                    mostDownStreamPNumber = current;
                }
            }
            netFeaturesList.add(netFeature);
            netIdsList.add(tmpId);
            netPfaffsList.add(current);
        }
        netFeatureIterator.close();
        /*
         * search subbasins: collect hillslope features and remember the one
         * attached to the most downstream network link.
         */
        out.message("Analyzing the hillslopes layer...");
        List<SimpleFeature> hillslopeFeaturesList = new ArrayList<SimpleFeature>();
        List<Integer> hillslopeIdsList = new ArrayList<Integer>();
        FeatureIterator<SimpleFeature> hillslopeIterator = hillslopeFeatureCollection.features();
        SimpleFeature mostDownstreamHillslopeFeature = null;
        while (hillslopeIterator.hasNext()) {
            SimpleFeature f = hillslopeIterator.next();
            int linkAttribute = ((Number) f.getAttribute(NETNUMNAME)).intValue();
            if (mostDownStreamLinkId == linkAttribute) {
                mostDownstreamHillslopeFeature = f;
            }
            hillslopeIdsList.add(linkAttribute);
            hillslopeFeaturesList.add(f);
        }
        /*
         * create all the hillslopes and connect them with their net feature and other hillslopes
         */
        out.message("Linking together network and hillslopes layers...");
        ArrayList<IHillSlope> hillslopeElements = new ArrayList<IHillSlope>();
        IHillSlope mostDownstreamHillslope = null;
        if (mostDownStreamPNumber.isEndPiece()) {
            // Single end-piece network: one hillslope, which is also the outlet.
            Integer basinId = hillslopeIdsList.get(0);
            IHillSlope tmpHslp = new HillSlope(mostDownStreamNetFeature, mostDownstreamHillslopeFeature,
                    mostDownStreamPNumber, basinId.intValue());
            hillslopeElements.add(tmpHslp);
            mostDownstreamHillslope = tmpHslp;
        } else {
            /*
             * almost there, now get from the basins list the ones with that netNums:
             * pair every hillslope with the network feature sharing its link id.
             */
            ArrayList<SimpleFeature> selectedNetFeatureList = new ArrayList<SimpleFeature>();
            ArrayList<Integer> selectedNetId = new ArrayList<Integer>();
            for( int i = 0; i < hillslopeFeaturesList.size(); i++ ) {
                SimpleFeature basinFeature = hillslopeFeaturesList.get(i);
                Integer link = hillslopeIdsList.get(i);
                for( int j = 0; j < netFeaturesList.size(); j++ ) {
                    Integer netNum = netIdsList.get(j);
                    if (netNum.equals(link)) {
                        SimpleFeature netFeature = netFeaturesList.get(j);
                        IHillSlope tmpHslp = new HillSlope(netFeature, basinFeature, netPfaffsList.get(j),
                                netNum.intValue());
                        hillslopeElements.add(tmpHslp);
                        selectedNetFeatureList.add(netFeature);
                        selectedNetId.add(netNum);
                        break;
                    }
                }
            }
            // Re-derive the outlet among only the matched network features.
            mostDownStreamPNumber = null;
            Integer mostDownStreamNetId = null;
            for( SimpleFeature feature : selectedNetFeatureList ) {
                String attribute = (String) feature.getAttribute(PFAFNAME);
                PfafstetterNumber current = new PfafstetterNumber(attribute);
                Integer tmpId = ((Number) feature.getAttribute(NETNUMNAME)).intValue();
                if (mostDownStreamPNumber == null) {
                    mostDownStreamPNumber = current;
                } else {
                    if (current.isDownStreamOf(mostDownStreamPNumber)) {
                        mostDownStreamNetId = tmpId;
                        mostDownStreamPNumber = current;
                    }
                }
            }
            // Find the hillslope element carrying the outlet's link id.
            for( int i = 0; i < hillslopeElements.size(); i++ ) {
                Integer hId = hillslopeIdsList.get(i);
                if (hId.equals(mostDownStreamNetId)) {
                    mostDownstreamHillslope = hillslopeElements.get(i);
                    break;
                }
            }
            // Degenerate case: a single element is trivially the outlet.
            if (hillslopeElements.size() == 1) {
                mostDownstreamHillslope = hillslopeElements.get(0);
            }
        }
        // Without an outlet the tree cannot be built: fail fast.
        if (mostDownstreamHillslope == null)
            throw new RuntimeException();
        HillSlope.connectElements(hillslopeElements);
        List<IHillSlope> orderedHillslopes = new ArrayList<IHillSlope>();
        mostDownstreamHillslope.getAllUpstreamElements(orderedHillslopes, null);
        return orderedHillslopes;
    }
}
public class MetatypeUtils { /** * Collapses contiguous sequences of whitespace to a single 0x20. * Leading and trailing whitespace is removed . */ @ Trivial private static String collapseWhitespace ( String value ) { } }
final int length = value . length ( ) ; for ( int i = 0 ; i < length ; ++ i ) { if ( isSpace ( value . charAt ( i ) ) ) { return collapse0 ( value , i , length ) ; } } return value ;
public class SimpleTrigger {
    /**
     * Get a {@link IScheduleBuilder} that is configured to produce a schedule
     * identical to this trigger's schedule.
     *
     * The builder is seeded with this trigger's repeat interval and count, and
     * the matching misfire-handling instruction is translated onto it. Misfire
     * instructions outside the handled set (e.g. the smart-policy default)
     * intentionally fall through and leave the builder's default in place.
     *
     * @return a {@link SimpleScheduleBuilder} mirroring this trigger's schedule
     * @see #getTriggerBuilder()
     */
    @Override
    public IScheduleBuilder<ISimpleTrigger> getScheduleBuilder() {
        final SimpleScheduleBuilder sb = SimpleScheduleBuilder.simpleSchedule()
                .withIntervalInMilliseconds(getRepeatInterval())
                .withRepeatCount(getRepeatCount());
        // Map this trigger's misfire instruction onto the builder.
        switch (getMisfireInstruction()) {
        case MISFIRE_INSTRUCTION_FIRE_NOW:
            sb.withMisfireHandlingInstructionFireNow();
            break;
        case MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_EXISTING_COUNT:
            sb.withMisfireHandlingInstructionNextWithExistingCount();
            break;
        case MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_REMAINING_COUNT:
            sb.withMisfireHandlingInstructionNextWithRemainingCount();
            break;
        case MISFIRE_INSTRUCTION_RESCHEDULE_NOW_WITH_EXISTING_REPEAT_COUNT:
            sb.withMisfireHandlingInstructionNowWithExistingCount();
            break;
        case MISFIRE_INSTRUCTION_RESCHEDULE_NOW_WITH_REMAINING_REPEAT_COUNT:
            sb.withMisfireHandlingInstructionNowWithRemainingCount();
            break;
        }
        return sb;
    }
}
public class LastaAction {
    /**
     * Convert to URL string to move the action.
     * <pre>
     * <span style="color: #3F7E5E">// /product/list/3</span>
     * String url = toActionUrl(ProductListAction.<span style="color: #70226C">class</span>, moreUrl(3));
     * </pre>
     *
     * Validates both arguments and delegates URL construction to the
     * configured {@code actionPathResolver}.
     *
     * @param actionType The class type of action that it redirects to. (NotNull)
     * @param chain The chain of URL to build additional info on URL. (NotNull)
     * @return The URL string to move to the action. (NotNull)
     */
    protected String toActionUrl(Class<?> actionType, UrlChain chain) {
        assertArgumentNotNull("actionType", actionType);
        assertArgumentNotNull("chain", chain);
        return actionPathResolver.toActionUrl(actionType, chain);
    }
}
public class LObjIntObjPredicateBuilder { /** * One of ways of creating builder . This might be the only way ( considering all _ functional _ builders ) that might be utilize to specify generic params only once . */ @ Nonnull public static < T1 , T2 > LObjIntObjPredicateBuilder < T1 , T2 > objIntObjPredicate ( Consumer < LObjIntObjPredicate < T1 , T2 > > consumer ) { } }
return new LObjIntObjPredicateBuilder ( consumer ) ;
public class TypeCompatibilityObligation { /** * Factory Method since we need to return null STOs ( which should be discarded * @ param exp * The expression to be checked * @ param etype * The expected type * @ param atype * The actual type * @ param ctxt * Context Information * @ param assistantFactory * @ return * @ throws AnalysisException */ public static TypeCompatibilityObligation newInstance ( PExp exp , PType etype , PType atype , IPOContextStack ctxt , IPogAssistantFactory assistantFactory ) throws AnalysisException { } }
TypeCompatibilityObligation sto = new TypeCompatibilityObligation ( exp , etype , atype , ctxt , assistantFactory ) ; if ( sto . getValueTree ( ) != null ) { return sto ; } return null ;
public class OptRuntime {
    /**
     * Implement x.property() call shrinking optimizer code.
     *
     * Resolves the property to a callable, then invokes it with no arguments.
     *
     * @param value the object whose property is being called
     * @param property the property name
     * @param cx the current context
     * @param scope the scope for resolution
     * @return the call's result
     */
    public static Object callProp0(Object value, String property, Context cx, Scriptable scope) {
        Callable f = getPropFunctionAndThis(value, property, cx, scope);
        // lastStoredScriptable retrieves the `this` object stashed by the
        // preceding call — it must be read immediately after it; do not
        // reorder these two statements.
        Scriptable thisObj = lastStoredScriptable(cx);
        return f.call(cx, scope, thisObj, ScriptRuntime.emptyArgs);
    }
}
public class ApiOvhOrder {
    /**
     * Get prices and contracts information.
     *
     * REST: GET /order/email/domain/new/{duration}
     *
     * Builds the path with the duration substituted, appends the domain and
     * offer as query parameters, executes the GET and deserializes the JSON
     * response into an {@link OvhOrder}.
     *
     * @param domain [required] Domain name which will be linked to this mx account
     * @param offer [required] Offer for your new mx account
     * @param duration [required] Duration
     * @return the order details
     * @throws IOException if the HTTP call or deserialization fails
     * @deprecated
     */
    public OvhOrder email_domain_new_duration_GET(String duration, String domain, OvhOfferEnum offer) throws IOException {
        String qPath = "/order/email/domain/new/{duration}";
        StringBuilder sb = path(qPath, duration);
        query(sb, "domain", domain);
        query(sb, "offer", offer);
        String resp = exec(qPath, "GET", sb.toString(), null);
        return convertTo(resp, OvhOrder.class);
    }
}
public class MaybeReachingVariableUse { /** * Removes the variable for the given name from the node value in the upward * exposed lattice . Do nothing if the variable name is one of the escaped * variable . */ private void removeFromUseIfLocal ( String name , ReachingUses use ) { } }
Var var = allVarsInFn . get ( name ) ; if ( var == null ) { return ; } if ( ! escaped . contains ( var ) ) { use . mayUseMap . removeAll ( var ) ; }
public class CollectorRegistry { /** * Returns the given value , or null if it doesn ' t exist . * This is inefficient , and intended only for use in unittests . */ public Double getSampleValue ( String name , String [ ] labelNames , String [ ] labelValues ) { } }
for ( Collector . MetricFamilySamples metricFamilySamples : Collections . list ( metricFamilySamples ( ) ) ) { for ( Collector . MetricFamilySamples . Sample sample : metricFamilySamples . samples ) { if ( sample . name . equals ( name ) && Arrays . equals ( sample . labelNames . toArray ( ) , labelNames ) && Arrays . equals ( sample . labelValues . toArray ( ) , labelValues ) ) { return sample . value ; } } } return null ;
public class ZooKeeperHelper {
    /**
     * Create an empty normal (persistent) Znode.
     *
     * The node is created with an empty byte array as data and the fully-open
     * {@code OPEN_ACL_UNSAFE} ACL.
     *
     * @param zookeeper ZooKeeper instance to work with.
     * @param znode Znode to create.
     * @throws KeeperException if the server rejects the creation (e.g. node exists)
     * @throws InterruptedException if the transaction is interrupted
     */
    static void create(ZooKeeper zookeeper, String znode) throws KeeperException, InterruptedException {
        zookeeper.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }
}
public class Boxing {
    /**
     * Transforms a primitive array into an array of boxed values.
     *
     * Delegates to {@code boxAll} and casts the result to the requested
     * array type; the cast is unchecked, so {@code type} must match the
     * boxed counterpart of {@code src}'s component type.
     *
     * @param <T> target array type
     * @param src source array
     * @param srcPos start position
     * @param len length
     * @param type target type
     * @return array of boxed values, typed as {@code T}
     */
    public static <T> T boxAllAs(Object src, int srcPos, int len, Class<T> type) {
        return (T) boxAll(type, src, srcPos, len);
    }
}
public class BPMSwitchYardScanner {
    /**
     * {@inheritDoc}
     *
     * Scans the input URLs for classes matched by the BPM filter, builds one
     * component per matched class, and wraps them in a composite inside a new
     * SwitchYard model. The composite is only attached to the model when at
     * least one component was produced.
     */
    @Override
    public ScannerOutput<SwitchYardModel> scan(ScannerInput<SwitchYardModel> input) throws IOException {
        SwitchYardNamespace switchyardNamespace = input.getSwitchyardNamespace();
        SwitchYardModel switchyardModel = new V1SwitchYardModel(switchyardNamespace.uri());
        CompositeModel compositeModel = new V1CompositeModel();
        compositeModel.setName(input.getCompositeName());
        // Collect candidate classes from every URL on the scan path.
        ClasspathScanner bpmScanner = new ClasspathScanner(_bpmFilter);
        for (URL url : input.getURLs()) {
            bpmScanner.scan(url);
        }
        List<Class<?>> bpmClasses = _bpmFilter.getMatchedTypes();
        for (Class<?> bpmClass : bpmClasses) {
            compositeModel.addComponent(scan(bpmClass, switchyardNamespace));
        }
        // An empty composite is deliberately left off the model.
        if (!compositeModel.getModelChildren().isEmpty()) {
            switchyardModel.setComposite(compositeModel);
        }
        return new ScannerOutput<SwitchYardModel>().setModel(switchyardModel);
    }
}
public class HFCAClient {
    /**
     * Create HFCAClient from a NetworkConfig.CAInfo using default crypto suite.
     *
     * Delegates to the (CAInfo, CryptoSuite) overload with the factory-default
     * suite. MalformedURLException is rethrown unchanged so callers keep the
     * declared exception type; any other failure (including crypto-suite
     * creation) is wrapped as InvalidArgumentException.
     *
     * @param caInfo created from NetworkConfig.getOrganizationInfo("org_name").getCertificateAuthorities()
     * @return HFCAClient
     * @throws MalformedURLException if the CA URL is malformed
     * @throws InvalidArgumentException for any other creation failure
     */
    public static HFCAClient createNewInstance(NetworkConfig.CAInfo caInfo) throws MalformedURLException, InvalidArgumentException {
        try {
            return createNewInstance(caInfo, CryptoSuite.Factory.getCryptoSuite());
        } catch (MalformedURLException e) {
            throw e;
        } catch (Exception e) {
            throw new InvalidArgumentException(e);
        }
    }
}
public class LocalRequestDispatcher {
    /**
     * Initialise this request dispatcher, which will register a {@link ConfigMBean} for easy external
     * access to the {@link HistoryStore} and {@link DebugStore}. Also a {@link JolokiaDiscoveryMBean}
     * is registered.
     *
     * Registration strategy: the primary Config MBean is registered under its
     * standard name; if another agent in the same JVM already owns that name,
     * a fallback name with a random UUID qualifier is tried. A legacy-named
     * Config MBean (for jmx4perl < 0.80) and the discovery MBean are each
     * registered best-effort, skipping silently when the name is taken.
     *
     * @param pHistoryStore history store to be managed from within an MBean
     * @param pDebugStore managed debug store
     * @throws MalformedObjectNameException if our MBean's name is wrong (which cannot happen)
     * @throws MBeanRegistrationException if registration fails
     * @throws NotCompliantMBeanException if we have a non compliant MBean (cannot happen, too)
     */
    public void initMBeans(HistoryStore pHistoryStore, DebugStore pDebugStore)
            throws MalformedObjectNameException, MBeanRegistrationException, NotCompliantMBeanException {
        // Register the Config MBean
        String oName = createObjectNameWithQualifier(Config.OBJECT_NAME);
        try {
            Config config = new Config(pHistoryStore, pDebugStore, oName);
            mBeanServerHandler.registerMBean(config, oName);
        } catch (InstanceAlreadyExistsException exp) {
            String alternativeOName = oName + ",uuid=" + UUID.randomUUID();
            try {
                // Another instance has already started a Jolokia agent within the JVM. We are trying to add the MBean nevertheless with
                // a dynamically generated ObjectName. Of course, it would be good to have a more semantic meaning instead of
                // a random number, but this can already be performed with a qualifier
                log.info(oName + " is already registered. Adding it with " + alternativeOName +
                         ", but you should revise your setup in " +
                         "order to either use a qualifier or ensure, that only a single agent gets registered (otherwise history functionality might not work)");
                Config config = new Config(pHistoryStore, pDebugStore, alternativeOName);
                mBeanServerHandler.registerMBean(config, alternativeOName);
            } catch (InstanceAlreadyExistsException e) {
                // Even the UUID-qualified name collided; give up on the primary MBean.
                log.error("Cannot even register fallback MBean with name " + alternativeOName +
                          ". Should never happen. Really.", e);
            }
        }
        // Register another Config MBean (which dispatched to the stores anyway) for access by
        // jmx4perl version < 0.80
        String legacyOName = createObjectNameWithQualifier(Config.LEGACY_OBJECT_NAME);
        try {
            Config legacyConfig = new Config(pHistoryStore, pDebugStore, legacyOName);
            mBeanServerHandler.registerMBean(legacyConfig, legacyOName);
        } catch (InstanceAlreadyExistsException exp) {
            log.info("Cannot register (legacy) MBean handler for config store with name " + legacyOName +
                     " since it already exists. " +
                     "This is the case if another agent has been already started within the same JVM. The registration is skipped.");
        }
        // Register the discovery MBean; a single instance per JVM suffices.
        try {
            mBeanServerHandler.registerMBean(new JolokiaDiscovery(agentId, log), JolokiaDiscoveryMBean.OBJECT_NAME);
        } catch (InstanceAlreadyExistsException e) {
            // Ignore since there is already one registered.
            log.info("Jolokia Discovery MBean registration is skipped because there is already one registered.");
        }
    }
}
public class DownloadDispatcher {
    /**
     * Updates a finished download: marks it successful, moves the temp file to
     * its destination, and delivers the success callback.
     *
     * NOTE(review): the renameTo() result is deliberately suppressed; if the
     * rename fails the success callback is still delivered even though the
     * destination file does not exist — confirm this best-effort behavior is
     * intended before changing it.
     */
    @SuppressWarnings("ResultOfMethodCallIgnored")
    private void updateSuccess(DownloadRequest request) {
        updateState(request, DownloadState.SUCCESSFUL);
        /* notify the request download finish */
        request.finish();
        // Promote the temporary file to its final destination path.
        File file = new File(request.tempFilePath());
        if (file.exists()) {
            file.renameTo(new File(request.destinationFilePath()));
        }
        /* deliver success message */
        delivery.postSuccess(request);
    }
}
public class SubscriberRegistry {
    /**
     * Registers all subscriber methods on the given listener object.
     *
     * Groups the listener's subscriber methods by event type and merges each
     * group into the shared subscriber map. The putIfAbsent + firstNonNull
     * dance is a lock-free way to create the per-event-type set exactly once
     * under concurrent registration; do not simplify it to a plain put.
     */
    void register(Object listener) {
        Multimap<Class<?>, Subscriber> listenerMethods = findAllSubscribers(listener);
        for (Map.Entry<Class<?>, Collection<Subscriber>> entry : listenerMethods.asMap().entrySet()) {
            Class<?> eventType = entry.getKey();
            Collection<Subscriber> eventMethodsInListener = entry.getValue();
            CopyOnWriteArraySet<Subscriber> eventSubscribers = subscribers.get(eventType);
            if (eventSubscribers == null) {
                // Race: another thread may install the set first; whichever
                // set ends up in the map is the one we add to.
                CopyOnWriteArraySet<Subscriber> newSet = new CopyOnWriteArraySet<Subscriber>();
                eventSubscribers = MoreObjects.firstNonNull(subscribers.putIfAbsent(eventType, newSet), newSet);
            }
            eventSubscribers.addAll(eventMethodsInListener);
        }
    }
}
public class SearchPlaylistsRequest {
    /**
     * Search for playlists.
     *
     * Fetches the JSON response and extracts the "playlists" paging object.
     *
     * @return A {@link PlaylistSimplified} paging.
     * @throws IOException In case of networking issues.
     * @throws SpotifyWebApiException The Web API returned an error further specified in this exception's root cause.
     */
    @SuppressWarnings("unchecked")
    public Paging<PlaylistSimplified> execute() throws IOException, SpotifyWebApiException {
        return new PlaylistSimplified.JsonUtil().createModelObjectPaging(getJson(), "playlists");
    }
}
public class SqlFunctionUtils { /** * Calculate the hash value of a given string . * @ param algorithm message digest algorithm . * @ param str string to hash . * @ param charsetName charset of string . * @ return hash value of string . */ public static String hash ( String algorithm , String str , String charsetName ) { } }
try { byte [ ] digest = MessageDigest . getInstance ( algorithm ) . digest ( strToBytesWithCharset ( str , charsetName ) ) ; return EncodingUtils . hex ( digest ) ; } catch ( NoSuchAlgorithmException e ) { throw new IllegalArgumentException ( "Unsupported algorithm: " + algorithm , e ) ; }
public class JSONReader {
    /**
     * Method for reading a JSON Object from input and building a {@link java.util.Map}
     * out of it. Note that if input does NOT contain a
     * JSON Object, {@link JSONObjectException} will be thrown.
     *
     * A JSON null token is mapped to a Java {@code null} return; anything other
     * than START_OBJECT or null is rejected with a descriptive exception.
     */
    public Map<Object, Object> readMap() throws IOException {
        if (_parser.isExpectedStartObjectToken()) {
            return AnyReader.std.readFromObject(this, _parser, _mapBuilder);
        }
        if (_parser.hasToken(JsonToken.VALUE_NULL)) {
            return null;
        }
        throw JSONObjectException.from(_parser,
                "Can not read a Map: expect to see START_OBJECT ('{'), instead got: " + ValueReader._tokenDesc(_parser));
    }
}
public class SpotifyApi {
    /**
     * Returns a builder that can be used to build requests for client credential grants.<br>
     * Requires client ID and client secret to be set.
     *
     * The builder is pre-populated with this instance's HTTP manager and
     * endpoint coordinates, and the grant type fixed to "client_credentials".
     *
     * @return A {@link ClientCredentialsRequest.Builder}.
     */
    public ClientCredentialsRequest.Builder clientCredentials() {
        return new ClientCredentialsRequest.Builder(clientId, clientSecret)
                .setDefaults(httpManager, scheme, host, port)
                .grant_type("client_credentials");
    }
}
public class CatalogUtil { /** * Given an index return its expressions or list of indexed columns * @ param index Catalog Index * @ param tableScan table * @ param indexedExprs index expressions . This list remains empty if the index is just on simple columns . * @ param indexedColRefs indexed columns . This list remains empty if indexedExprs is in use . * @ return true if this is a column based index */ public static boolean getCatalogIndexExpressions ( Index index , StmtTableScan tableScan , List < AbstractExpression > indexedExprs , List < ColumnRef > indexedColRefs ) { } }
String exprsjson = index . getExpressionsjson ( ) ; if ( exprsjson . isEmpty ( ) ) { CatalogUtil . getSortedCatalogItems ( index . getColumns ( ) , "index" , indexedColRefs ) ; } else { try { AbstractExpression . fromJSONArrayString ( exprsjson , tableScan , indexedExprs ) ; } catch ( JSONException e ) { e . printStackTrace ( ) ; assert ( false ) ; } } return exprsjson . isEmpty ( ) ;
public class JMXBridgeListener { /** * Creates JMX name for a producer . * @ param producerId target producerId . * @ param statName target statName . * @ return the name for JMXBean . */ private String createName ( String producerId , String statName ) { } }
String appName = encodeAppName ( MoskitoConfigurationHolder . getConfiguration ( ) . getApplicationName ( ) ) ; return "MoSKito." + ( appName . length ( ) > 0 ? appName + '.' : "" ) + "producers:type=" + producerId + '.' + statName ;
public class HeatChart { /** * Finds and returns the maximum value in a 2 - dimensional array of doubles . * @ param values the data to use . * @ return the largest value in the array . */ public static double max ( double [ ] [ ] values ) { } }
double max = 0 ; for ( int i = 0 ; i < values . length ; i ++ ) { for ( int j = 0 ; j < values [ i ] . length ; j ++ ) { max = ( values [ i ] [ j ] > max ) ? values [ i ] [ j ] : max ; } } return max ;
public class GeneratedMessage { /** * Calls Class . getMethod and throws a RuntimeException if it fails . */ @ SuppressWarnings ( "unchecked" ) private static Method getMethodOrDie ( final Class clazz , final String name , final Class ... params ) { } }
try { return clazz . getMethod ( name , params ) ; } catch ( NoSuchMethodException e ) { throw new RuntimeException ( "Generated message class \"" + clazz . getName ( ) + "\" missing method \"" + name + "\"." , e ) ; }
public class SettableDataSource { /** * Sets the value of this data source . * < p > This method will return { @ code true } if the value was successfully set , or * { @ code false } if the data source has already been set , failed or closed . * < p > Passed CloseableReference is cloned , caller of this method still owns passed reference * after the method returns . * @ param valueRef closeable reference to the value the data source should hold . * @ return true if the value was successfully set . */ public boolean set ( @ Nullable CloseableReference < T > valueRef ) { } }
CloseableReference < T > clonedRef = CloseableReference . cloneOrNull ( valueRef ) ; return super . setResult ( clonedRef , /* isLast */ true ) ;
public class EventTypeAwareListener { /** * Returns if the backing listener consumes this type of event . */ @ SuppressWarnings ( "PMD.SwitchStmtsShouldHaveDefault" ) public boolean isCompatible ( @ NonNull EventType eventType ) { } }
switch ( eventType ) { case CREATED : return ( listener instanceof CacheEntryCreatedListener < ? , ? > ) ; case UPDATED : return ( listener instanceof CacheEntryUpdatedListener < ? , ? > ) ; case REMOVED : return ( listener instanceof CacheEntryRemovedListener < ? , ? > ) ; case EXPIRED : return ( listener instanceof CacheEntryExpiredListener < ? , ? > ) ; } throw new IllegalStateException ( "Unknown event type: " + eventType ) ;
public class base_resource { /** * Use this method to perform a delete operation on netscaler resources . * @ param service nitro _ service object . * @ param resources Nitro resources to be deleted on netscaler . * @ param option options class object . * @ return status of the performed operation . * @ throws Exception Nitro exception is thrown . */ protected static base_responses delete_bulk_request ( nitro_service service , base_resource resources [ ] ) throws Exception { } }
if ( ! service . isLogin ( ) ) service . login ( ) ; options option = new options ( ) ; option . set_action ( "rm" ) ; String type = resources [ 0 ] . get_object_type ( ) ; if ( type . indexOf ( "_binding" ) > 0 ) { option . set_action ( "unbind" ) ; } String id = service . get_sessionid ( ) ; String onerror = service . get_onerror ( ) ; Boolean warning = service . get_warning ( ) ; String request = service . get_payload_formatter ( ) . resource_to_string ( resources , id , option , warning , onerror ) ; base_responses result = post_bulk_data ( service , request ) ; return result ;
public class Ifc2x3tc1FactoryImpl {
    /**
     * Converts an IfcDamperTypeEnum instance value to its string form, or
     * returns null for a null value. Generated EMF factory code — do not
     * hand-edit the logic.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public String convertIfcDamperTypeEnumToString(EDataType eDataType, Object instanceValue) {
        return instanceValue == null ? null : instanceValue.toString();
    }
}
public class JBossWSBusFactory {
    /**
     * Gets the default bus for the given classloader; if a new Bus is needed,
     * the creation is delegated to the specified ClientBusSelector instance.
     *
     * Creation and cache insertion happen under the {@code classLoaderBusses}
     * lock so each classloader gets at most one bus.
     *
     * @param classloader the classloader to look up / associate the bus with
     * @param clientBusSelector factory used when no bus exists yet
     * @return the cached or newly created bus
     */
    public static Bus getClassLoaderDefaultBus(final ClassLoader classloader, final ClientBusSelector clientBusSelector) {
        Bus classLoaderBus;
        synchronized (classLoaderBusses) {
            classLoaderBus = classLoaderBusses.get(classloader);
            if (classLoaderBus == null) {
                classLoaderBus = clientBusSelector.createNewBus();
                // register a listener for cleaning up the bus from the classloader association in the JBossWSBusFactory
                BusLifeCycleListener listener = new ClassLoaderDefaultBusLifeCycleListener(classLoaderBus);
                classLoaderBus.getExtension(BusLifeCycleManager.class).registerLifeCycleListener(listener);
                classLoaderBusses.put(classloader, classLoaderBus);
            }
        }
        return classLoaderBus;
    }
}
public class SimpleSolrPersistentProperty { /** * ( non - Javadoc ) * @ see org . springframework . data . solr . core . mapping . SolrPersistentProperty # getCopyFields ( ) */ @ SuppressWarnings ( "unchecked" ) @ Override public Collection < String > getCopyFields ( ) { } }
Indexed indexedAnnotation = getIndexAnnotation ( ) ; if ( indexedAnnotation != null && indexedAnnotation . copyTo ( ) . length > 0 ) { return CollectionUtils . arrayToList ( indexedAnnotation . copyTo ( ) ) ; } return Collections . emptyList ( ) ;
public class MockResponse { /** * Sets the response body to the UTF - 8 encoded bytes of { @ code body } . */ public MockResponse setBody ( String body ) { } }
try { return setBody ( body . getBytes ( "UTF-8" ) ) ; } catch ( UnsupportedEncodingException e ) { throw new AssertionError ( ) ; }
public class ConciseSet { /** * { @ inheritDoc } */ public ByteBuffer toByteBuffer ( ) { } }
ByteBuffer buffer = ByteBuffer . allocate ( ( lastWordIndex + 1 ) * 4 ) ; buffer . asIntBuffer ( ) . put ( Arrays . copyOf ( words , lastWordIndex + 1 ) ) ; return buffer ;
public class AES256Ciphertext { /** * Checks the length of a byte array . * @ param data * the data to check * @ param dataName * the name of the field ( to include in the exception ) * @ param expectedLength * the length the data should be * @ throws IllegalArgumentException * if the data is not of the correct length */ private static void validateLength ( byte [ ] data , String dataName , int expectedLength ) throws IllegalArgumentException { } }
if ( data . length != expectedLength ) { throw new IllegalArgumentException ( String . format ( "Invalid %s length. Expected %d bytes but found %d." , dataName , expectedLength , data . length ) ) ; }
public class MicrometerApnsClientMetricsListener {
    /**
     * Records a failed attempt to send a notification and updates metrics accordingly.
     *
     * Discards the notification's recorded start time (it will never complete)
     * and bumps the write-failure counter.
     *
     * @param apnsClient the client that failed to write the notification; note that this is ignored by
     * {@code MicrometerApnsClientMetricsListener} instances, which should always be used for exactly one client
     * @param notificationId an opaque, unique identifier for the notification that could not be written
     */
    @Override
    public void handleWriteFailure(final ApnsClient apnsClient, final long notificationId) {
        this.notificationStartTimes.remove(notificationId);
        this.writeFailures.increment();
    }
}
public class AutoJsonRpcServiceExporter { /** * Find a { @ link BeanDefinition } in the { @ link BeanFactory } or it ' s parents . */ private BeanDefinition findBeanDefinition ( ConfigurableListableBeanFactory beanFactory , String serviceBeanName ) { } }
if ( beanFactory . containsLocalBean ( serviceBeanName ) ) return beanFactory . getBeanDefinition ( serviceBeanName ) ; BeanFactory parentBeanFactory = beanFactory . getParentBeanFactory ( ) ; if ( parentBeanFactory != null && ConfigurableListableBeanFactory . class . isInstance ( parentBeanFactory ) ) return findBeanDefinition ( ( ConfigurableListableBeanFactory ) parentBeanFactory , serviceBeanName ) ; throw new RuntimeException ( format ( "Bean with name '%s' can no longer be found." , serviceBeanName ) ) ;
public class Years {
    /**
     * Creates a <code>Years</code> representing the number of whole years
     * between the two specified partial datetimes.
     *
     * The two partials must contain the same fields, for example you can specify
     * two <code>LocalDate</code> objects.
     *
     * A fast path handles the common LocalDate/LocalDate case via the
     * chronology's year field directly; all other partial types go through the
     * generic field-by-field comparison.
     *
     * @param start the start partial date, must not be null
     * @param end the end partial date, must not be null
     * @return the period in years
     * @throws IllegalArgumentException if the partials are null or invalid
     */
    public static Years yearsBetween(ReadablePartial start, ReadablePartial end) {
        if (start instanceof LocalDate && end instanceof LocalDate) {
            Chronology chrono = DateTimeUtils.getChronology(start.getChronology());
            int years = chrono.years().getDifference(
                    ((LocalDate) end).getLocalMillis(), ((LocalDate) start).getLocalMillis());
            return Years.years(years);
        }
        int amount = BaseSingleFieldPeriod.between(start, end, ZERO);
        return Years.years(amount);
    }
}
public class KnowledgeRuntimeLoggerFactory {
    /**
     * Creates a file logger that executes in a different thread, where information is written on given intervals (in milliseconds).
     * The file is in XML format, suitable for interpretation by Eclipse's Drools Audit View or other tools.
     *
     * Pure delegation to the configured logger provider.
     *
     * @param session the event manager to log
     * @param fileName ".log" is appended to this.
     * @param interval flush interval, in milliseconds.
     * @return the created runtime logger
     */
    public static KieRuntimeLogger newThreadedFileLogger(KieRuntimeEventManager session, String fileName, int interval) {
        return getKnowledgeRuntimeLoggerProvider().newThreadedFileLogger(session, fileName, interval);
    }
}
public class WebPage {
    /**
     * Prints the form that is used to login.
     *
     * Delegates rendering to the parent page, so the login form is defined
     * once at the top of the page hierarchy.
     *
     * @param page the page requesting the form
     * @param loginException the failure from a prior login attempt, if any
     * @param req the current site request
     * @param resp the response to render into
     */
    public void printLoginForm(WebPage page, LoginException loginException, WebSiteRequest req, HttpServletResponse resp) throws ServletException, IOException, SQLException {
        getParent().printLoginForm(page, loginException, req, resp);
    }
}
public class TypeUtils {
    /**
     * <p>Return a map of the type arguments of a class in the context of {@code toClass}.</p>
     *
     * Recursively walks the inheritance hierarchy from {@code cls} towards
     * {@code toClass}, accumulating type-variable assignments along the way.
     *
     * @param cls the class in question
     * @param toClass the context class
     * @param subtypeVarAssigns a map with type variables
     * @return the {@code Map} with type arguments, or {@code null} when
     *         {@code cls} is not assignable to {@code toClass}
     */
    private static Map<TypeVariable<?>, Type> getTypeArguments(Class<?> cls, final Class<?> toClass,
            final Map<TypeVariable<?>, Type> subtypeVarAssigns) {
        // make sure they're assignable
        if (!isAssignable(cls, toClass)) {
            return null;
        }
        // can't work with primitives
        if (cls.isPrimitive()) {
            // both classes are primitives?
            if (toClass.isPrimitive()) {
                // dealing with widening here. No type arguments to be
                // harvested with these two types.
                return new HashMap<>();
            }
            // work with the wrapper class instead of the primitive
            cls = ReflectionHelper.getWrapperClass(cls).orElse(cls);
        }
        // create a copy of the incoming map, or an empty one if it's null,
        // so recursion never mutates the caller's map
        final HashMap<TypeVariable<?>, Type> typeVarAssigns =
                subtypeVarAssigns == null ? new HashMap<TypeVariable<?>, Type>() : new HashMap<>(subtypeVarAssigns);
        // has target class been reached?
        if (toClass.equals(cls)) {
            return typeVarAssigns;
        }
        // walk the inheritance hierarchy until the target class is reached
        return getTypeArguments(getClosestParentType(cls, toClass), toClass, typeVarAssigns);
    }
}
public class GetApplicationRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( GetApplicationRequest getApplicationRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( getApplicationRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getApplicationRequest . getApplicationName ( ) , APPLICATIONNAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class ReorderReceiptRuleSetRequest { /** * A list of the specified receipt rule set ' s receipt rules in the order that you want to put them . * @ return A list of the specified receipt rule set ' s receipt rules in the order that you want to put them . */ public java . util . List < String > getRuleNames ( ) { } }
if ( ruleNames == null ) { ruleNames = new com . amazonaws . internal . SdkInternalList < String > ( ) ; } return ruleNames ;
public class HCatInputFormatBase {
    /**
     * Specifies that the InputFormat returns Flink tuples instead of
     * {@link org.apache.hive.hcatalog.data.HCatRecord}.
     *
     * <p>Note: Flink tuples might only support a limited number of fields (depending on the API).
     *
     * @return This InputFormat.
     * @throws org.apache.hive.hcatalog.common.HCatException
     * @throws IllegalArgumentException if the output schema has more fields than Flink tuples support
     */
    public HCatInputFormatBase<T> asFlinkTuples() throws HCatException {
        // build type information
        int numFields = outputSchema.getFields().size();
        if (numFields > this.getMaxFlinkTupleSize()) {
            throw new IllegalArgumentException("Only up to " + this.getMaxFlinkTupleSize() + " fields can be returned as Flink tuples.");
        }
        TypeInformation[] fieldTypes = new TypeInformation[numFields];
        fieldNames = new String[numFields];
        // Place each field's type and name at its schema position so tuple
        // field order matches the HCat schema order, not iteration order.
        for (String fieldName : outputSchema.getFieldNames()) {
            HCatFieldSchema field = outputSchema.get(fieldName);
            int fieldPos = outputSchema.getPosition(fieldName);
            TypeInformation fieldType = getFieldType(field);
            fieldTypes[fieldPos] = fieldType;
            fieldNames[fieldPos] = fieldName;
        }
        this.resultType = new TupleTypeInfo(fieldTypes);
        return this;
    }
}
public class LongTuples { /** * Returns a < i > view < / i > on the given tuple as an unmodifiable list . * Changes in the backing tuple will be visible in the returned list . * @ param t The tuple * @ return The list * @ throws NullPointerException If the given tuple is < code > null < / code > */ public static List < Long > asList ( final LongTuple t ) { } }
if ( t == null ) { throw new NullPointerException ( "The tuple may not be null" ) ; } return new AbstractList < Long > ( ) { @ Override public Long get ( int index ) { return t . get ( index ) ; } @ Override public int size ( ) { return t . getSize ( ) ; } } ;
public class PlanAssembler {
    /**
     * For each sub-query or CTE node in the plan tree,
     * attach the corresponding plans to the parent node.
     *
     * Recurses over non-scan nodes; for scan nodes, splices the best-cost
     * sub-plan in as the scan's child (subqueries) or as the base/recursive
     * plans (common table expressions).
     *
     * @param parentPlan root of the (sub)tree to process
     */
    private void connectChildrenBestPlans(AbstractPlanNode parentPlan) {
        if (parentPlan instanceof AbstractScanPlanNode) {
            AbstractScanPlanNode scanNode = (AbstractScanPlanNode) parentPlan;
            StmtTableScan tableScan = scanNode.getTableScan();
            if (tableScan instanceof StmtSubqueryScan) {
                CompiledPlan bestCostPlan = ((StmtSubqueryScan) tableScan).getBestCostPlan();
                assert (bestCostPlan != null);
                AbstractPlanNode subQueryRoot = bestCostPlan.rootPlanGraph;
                // Detach the sub-plan from wherever it was linked, then make it
                // the scan's only child.
                subQueryRoot.disconnectParents();
                scanNode.clearChildren();
                scanNode.addAndLinkChild(subQueryRoot);
            } else if (tableScan instanceof StmtCommonTableScan) {
                assert (parentPlan instanceof SeqScanPlanNode);
                SeqScanPlanNode scanPlanNode = (SeqScanPlanNode) parentPlan;
                StmtCommonTableScan cteScan = (StmtCommonTableScan) tableScan;
                CompiledPlan bestCostBasePlan = cteScan.getBestCostBasePlan();
                CompiledPlan bestCostRecursivePlan = cteScan.getBestCostRecursivePlan();
                assert (bestCostBasePlan != null);
                AbstractPlanNode basePlanRoot = bestCostBasePlan.rootPlanGraph;
                scanPlanNode.setCTEBaseNode(basePlanRoot);
                if (bestCostRecursivePlan != null) {
                    // Either the CTE is not recursive, or this is a recursive CTE but we
                    // got here during the planning of the recurse query when the recurse
                    // query plan is still being worked on.
                    AbstractPlanNode recursePlanRoot = bestCostRecursivePlan.rootPlanGraph;
                    assert (basePlanRoot instanceof CommonTablePlanNode);
                    CommonTablePlanNode ctePlanNode = (CommonTablePlanNode) basePlanRoot;
                    ctePlanNode.setRecursiveNode(recursePlanRoot);
                }
            }
        } else {
            // Not a scan: recurse into all children looking for nested scans.
            for (int i = 0; i < parentPlan.getChildCount(); ++i) {
                connectChildrenBestPlans(parentPlan.getChild(i));
            }
        }
    }
}
public class CommerceNotificationTemplatePersistenceImpl { /** * Returns the last commerce notification template in the ordered set where groupId = & # 63 ; . * @ param groupId the group ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching commerce notification template * @ throws NoSuchNotificationTemplateException if a matching commerce notification template could not be found */ @ Override public CommerceNotificationTemplate findByGroupId_Last ( long groupId , OrderByComparator < CommerceNotificationTemplate > orderByComparator ) throws NoSuchNotificationTemplateException { } }
CommerceNotificationTemplate commerceNotificationTemplate = fetchByGroupId_Last ( groupId , orderByComparator ) ; if ( commerceNotificationTemplate != null ) { return commerceNotificationTemplate ; } StringBundler msg = new StringBundler ( 4 ) ; msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ; msg . append ( "groupId=" ) ; msg . append ( groupId ) ; msg . append ( "}" ) ; throw new NoSuchNotificationTemplateException ( msg . toString ( ) ) ;
public class CreateCommitRequest { /** * The file modes to update for files in this commit . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setSetFileModes ( java . util . Collection ) } or { @ link # withSetFileModes ( java . util . Collection ) } if you want to * override the existing values . * @ param setFileModes * The file modes to update for files in this commit . * @ return Returns a reference to this object so that method calls can be chained together . */ public CreateCommitRequest withSetFileModes ( SetFileModeEntry ... setFileModes ) { } }
if ( this . setFileModes == null ) { setSetFileModes ( new java . util . ArrayList < SetFileModeEntry > ( setFileModes . length ) ) ; } for ( SetFileModeEntry ele : setFileModes ) { this . setFileModes . add ( ele ) ; } return this ;
public class ClientFactory {
    /**
     * Utility method to create client and load balancer (if enabled in client config) given the name and client config.
     * Instances are created using reflection (see {@link #instantiateInstanceWithClientConfig(String, IClientConfig)})
     *
     * @param restClientName
     * @param clientConfig
     * @return the newly created and registered client
     * @throws ClientException if any errors occurs in the process, or if the client with the same name already exists
     */
    public static synchronized IClient<?, ?> registerClientFromProperties(String restClientName, IClientConfig clientConfig) throws ClientException {
        IClient<?, ?> client = null;
        ILoadBalancer loadBalancer = null;
        // Reject duplicate registrations up front.
        if (simpleClientMap.get(restClientName) != null) {
            throw new ClientException(ClientException.ErrorType.GENERAL, "A Rest Client with this name is already registered. Please use a different name");
        }
        try {
            // Instantiate the client class named in the config via reflection.
            String clientClassName = clientConfig.getOrDefault(CommonClientConfigKey.ClientClassName);
            client = (IClient<?, ?>) instantiateInstanceWithClientConfig(clientClassName, clientConfig);
            // Optionally create and attach a named load balancer.
            boolean initializeNFLoadBalancer = clientConfig.getOrDefault(CommonClientConfigKey.InitializeNFLoadBalancer);
            if (initializeNFLoadBalancer) {
                loadBalancer = registerNamedLoadBalancerFromclientConfig(restClientName, clientConfig);
            }
            if (client instanceof AbstractLoadBalancerAwareClient) {
                // NOTE(review): when InitializeNFLoadBalancer is false this sets a null
                // load balancer on the client — confirm this is intentional.
                ((AbstractLoadBalancerAwareClient) client).setLoadBalancer(loadBalancer);
            }
        } catch (Throwable e) {
            // Any failure (including Errors) is wrapped as a configuration problem.
            String message = "Unable to InitializeAndAssociateNFLoadBalancer set for RestClient:" + restClientName;
            logger.warn(message, e);
            throw new ClientException(ClientException.ErrorType.CONFIGURATION, message, e);
        }
        // Register the client for lookup and monitoring only after full initialization.
        simpleClientMap.put(restClientName, client);
        Monitors.registerObject("Client_" + restClientName, client);
        logger.info("Client Registered:" + client.toString());
        return client;
    }
}
public class MethodWriter { /** * Writes a short value in the given byte array . * @ param b a byte array . * @ param index where the first byte of the short value must be written . * @ param s the value to be written in the given byte array . */ static void writeShort ( final byte [ ] b , final int index , final int s ) { } }
b [ index ] = ( byte ) ( s >>> 8 ) ; b [ index + 1 ] = ( byte ) s ;
public class NewJFrame { /** * Validate and set the datetime field on the screen given a date . * @ param dateTime The datetime object */ public void setDate ( Date date ) { } }
String dateString = "" ; if ( date != null ) dateString = dateFormat . format ( date ) ; jTextField2 . setText ( dateString ) ; jCalendarButton1 . setTargetDate ( date ) ;
public class MBeanRoutedNotificationHelper { /** * Post an event to EventAdmin , instructing the Target - Client Manager to register or unregister a listener for a given target . */ private void postRoutedNotificationListenerRegistrationEvent ( String operation , NotificationTargetInformation nti ) { } }
Map < String , Object > props = createListenerRegistrationEvent ( operation , nti ) ; safePostEvent ( new Event ( REGISTER_JMX_NOTIFICATION_LISTENER_TOPIC , props ) ) ;
public class AbstractCalculator { /** * Set decimal separator for entire expression * @ param decimalSeparator * @ return */ public CALC setDecimalSeparator ( char decimalSeparator ) { } }
getProperties ( ) . setInputDecimalSeparator ( decimalSeparator ) ; getProperties ( ) . setOutputDecimalSeparator ( decimalSeparator ) ; return getThis ( ) ;
public class MigrationResource {
    /**
     * <p>migrate.</p>
     *
     * Runs pending Flyway migrations for every configured database, recording
     * failures per database and returning success only if none failed.
     *
     * @param desc a {@link java.lang.String} object.
     * @param uuid a {@link java.lang.String} object.
     * @return a {@link ameba.util.Result} object.
     */
    @POST
    @Path("{uuid}")
    public Result migrate(@FormParam("description") String desc, @PathParam("uuid") String uuid) {
        MigrationFeature.checkMigrationId(uuid);
        // Default description; a non-blank caller-supplied one wins.
        String generatedDesc = (mode.isDev() ? "dev " : "") + "migrate";
        if (StringUtils.isNotBlank(desc)) {
            generatedDesc = desc;
        }
        Map<String, Migration> migrations = getMigrations();
        for (String dbName : migrations.keySet()) {
            Migration migration = migrations.get(dbName);
            ScriptInfo info = migration.generate();
            info.setDescription(generatedDesc);
            Flyway flyway = locator.getService(Flyway.class, dbName);
            flyway.setBaselineDescription(info.getDescription());
            flyway.setBaselineVersionAsString(info.getRevision());
            flyway.setValidateOnMigrate(false);
            try {
                flyway.migrate();
                migration.persist();
                migration.reset();
            } catch (Throwable err) {
                // Lazily create the shared failure map using double-checked locking.
                if (failMigrations == null) {
                    synchronized (this) {
                        if (failMigrations == null) {
                            failMigrations = Maps.newHashMap();
                        }
                    }
                }
                // NOTE(review): this put happens outside the synchronized block while
                // failMigrations appears to be shared instance state — confirm whether
                // concurrent requests can race here (and whether HashMap is safe for it).
                failMigrations.put(dbName, MigrationFail.create(flyway, err, migration));
            }
        }
        // NOTE(review): failMigrations seems to accumulate across requests; a failure
        // from an earlier call would make a later fully-successful call return failure —
        // confirm this is the intended semantics.
        if (failMigrations == null || failMigrations.isEmpty()) {
            return Result.success();
        } else {
            return Result.failure();
        }
    }
}
public class JSONWriter { /** * Begin appending a new array . All values until the balancing * < code > endArray < / code > will be appended to this array . The * < code > endArray < / code > method must be called to mark the array ' s end . * @ return this */ public JSONWriter array ( ) { } }
if ( this . mode == INIT || this . mode == OBJECT || this . mode == ARRAY ) { this . push ( ARRAY ) ; this . append ( "[" ) ; this . comma = false ; return this ; } throw new JSONException ( "Misplaced array: expected mode of INIT, OBJECT or ARRAY but was " + this . mode ) ;
public class VarianceOfVolume {
    /**
     * Compute variance of volumes.
     *
     * For each object, averages the volumes of its k nearest neighbors and
     * computes the (sample) variance of those volumes around that mean.
     *
     * @param knnq KNN query
     * @param ids IDs to process
     * @param vols Volumes
     * @param vovs Variance of Volume storage
     * @param vovminmax Score minimum / maximum tracker
     */
    private void computeVOVs(KNNQuery<O> knnq, DBIDs ids, DoubleDataStore vols, WritableDoubleDataStore vovs, DoubleMinMax vovminmax) {
        FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Variance of Volume", ids.size(), LOG) : null;
        boolean warned = false; // emit the precision warning at most once
        for (DBIDIter iter = ids.iter(); iter.valid(); iter.advance()) {
            KNNList knns = knnq.getKNNForDBID(iter, k);
            DoubleDBIDListIter it = knns.iter();
            // First pass: mean volume over the neighborhood.
            double vbar = 0.;
            for (; it.valid(); it.advance()) {
                vbar += vols.doubleValue(it);
            }
            vbar /= knns.size(); // Average
            // Second pass: sum of squared deviations from the mean.
            double vov = 0.;
            for (it.seek(0); it.valid(); it.advance()) {
                double v = vols.doubleValue(it) - vbar;
                vov += v * v;
            }
            // !(vov < +inf) also catches NaN, not just overflow.
            if (!(vov < Double.POSITIVE_INFINITY) && !warned) {
                LOG.warning("Variance of Volumes has hit double precision limits, results are not reliable.");
                warned = true;
            }
            // Sample variance (divide by n-1); keep +inf if overflowed.
            vov = (vov < Double.POSITIVE_INFINITY) ? vov / (knns.size() - 1) : Double.POSITIVE_INFINITY;
            vovs.putDouble(iter, vov);
            // update minimum and maximum
            vovminmax.put(vov);
            LOG.incrementProcessed(prog);
        }
        LOG.ensureCompleted(prog);
    }
}
public class PickleUtils { /** * Convert a signed integer to its 4 - byte representation . ( little endian ) */ public static byte [ ] integer_to_bytes ( int i ) { } }
final byte [ ] b = new byte [ 4 ] ; b [ 0 ] = ( byte ) ( i & 0xff ) ; i >>= 8 ; b [ 1 ] = ( byte ) ( i & 0xff ) ; i >>= 8 ; b [ 2 ] = ( byte ) ( i & 0xff ) ; i >>= 8 ; b [ 3 ] = ( byte ) ( i & 0xff ) ; return b ;
public class Specifier { /** * Accessor for the colon - separated , < tt > String < / tt > representation of the field names for this specifier . * @ return The combined field names . */ private String getFieldsString ( ) { } }
final StringBuilder fieldsString = new StringBuilder ( ) ; for ( final String field : fields ) { // XML namespaces require that the colon be percent - escaped but nothing else fieldsString . append ( ':' ) . append ( field . replace ( ":" , "%3A" ) ) ; } return fieldsString . toString ( ) ;
public class JSType { /** * Dereferences a type for property access . * Filters null / undefined and autoboxes the resulting type . * Never returns null . */ public JSType autobox ( ) { } }
JSType restricted = restrictByNotNullOrUndefined ( ) ; JSType autobox = restricted . autoboxesTo ( ) ; return autobox == null ? restricted : autobox ;
public class Log { /** * Checks to see whether or not a log for the specified tag is loggable at the specified level . * The default level of any tag is set to INFO . This means that any level above and including * INFO will be logged . Before you make any calls to a logging method you should check to see * if your tag should be logged . You can change the default level by setting a system property : * ' setprop log . tag . & lt ; YOUR _ LOG _ TAG > & lt ; LEVEL > ' * Where level is either VERBOSE , DEBUG , INFO , WARN , ERROR , ASSERT , or SUPPRESS . SUPPRESS will * turn off all logging for your tag . You can also create a local . prop file that with the * following in it : * ' log . tag . & lt ; YOUR _ LOG _ TAG > = & lt ; LEVEL > ' * and place that in / data / local . prop . * @ param tag The tag to check . * @ param level The level to check . * @ return Whether or not that this is allowed to be logged . * @ throws IllegalArgumentException is thrown if the tag . length ( ) > 23. */ public static boolean isLoggable ( String tag , int level ) { } }
Integer minimumLevel = tagLevels . get ( tag ) ; if ( minimumLevel != null ) { return level > minimumLevel . intValue ( ) ; } return true ; // Let java . util . logging filter it .
public class OutputStreamEncryption {
    /**
     * Writes <code>len</code> bytes from the specified byte array
     * starting at offset <code>off</code> to this output stream,
     * encrypting them first with either the AES cipher or the ARCFOUR
     * stream cipher depending on how this stream was configured.
     *
     * If <code>b</code> is <code>null</code>, a
     * <code>NullPointerException</code> is thrown.
     * If <code>off</code> is negative, or <code>len</code> is negative, or
     * <code>off+len</code> is greater than the length of the array
     * <code>b</code>, then an <tt>IndexOutOfBoundsException</tt> is thrown.
     *
     * @param b the data.
     * @param off the start offset in the data.
     * @param len the number of bytes to write.
     * @exception IOException if an I/O error occurs. In particular,
     *            an <code>IOException</code> is thrown if the output
     *            stream is closed.
     */
    public void write(byte[] b, int off, int len) throws IOException {
        if (aes) {
            // Block cipher path: cipher.update may buffer input and return
            // nothing until a full block is available.
            byte[] b2 = cipher.update(b, off, len);
            if (b2 == null || b2.length == 0)
                return;
            out.write(b2, 0, b2.length);
        } else {
            // Stream cipher path: encrypt in chunks through a scratch buffer.
            // NOTE(review): 4192 looks like a typo for 4096 — harmless either
            // way (it's only a buffer size), but confirm.
            byte[] b2 = new byte[Math.min(len, 4192)];
            while (len > 0) {
                int sz = Math.min(len, b2.length);
                arcfour.encryptARCFOUR(b, off, sz, b2, 0);
                out.write(b2, 0, sz);
                len -= sz;
                off += sz;
            }
        }
    }
}
public class TemplateLoader {
    /**
     * Creates a group containing all tokens not contained in any other
     * sections — an "all others" group.
     *
     * Also validates that no token appears in more than one group, and links
     * every group to its parent tokens.
     *
     * @param template the mutable template being loaded
     * @throws TemplateException if a token is listed in more than one group
     */
    private void processGroups(MutableTemplateInfo template) throws TemplateException {
        List<TemplateTokenGroupInfo> groupInfos = template.groupInfo;
        // Collect every token claimed by any group, rejecting duplicates.
        List<String> allGroupTokens = new ArrayList<String>();
        for (TemplateTokenGroupInfo groupInfo : groupInfos) {
            for (String newToken : groupInfo.getContainsString()) {
                if (allGroupTokens.contains(newToken)) {
                    throw new TemplateException("Token " + newToken + " in template group : '" + groupInfo.getName() + "' already exists in another group");
                } else {
                    allGroupTokens.add(newToken);
                }
            }
        }
        // Gather tokens that no group claimed.
        List<String> containsList = new ArrayList<String>();
        for (TemplateTokenInfo tokenInfo : template.tokenInfos) {
            if (!allGroupTokens.contains(tokenInfo.getName())) {
                // if its not already in another group, add
                containsList.add(tokenInfo.getName());
            }
        }
        if (containsList.size() != 0) {
            TemplateTokenGroupInfo allOthersGroup = new TemplateTokenGroupInfo(null, containsList, "", true);
            // this will have a group order of '0' by default and will appear at the top when sorted
            template.groupInfo.add(allOthersGroup);
        }
        // Link every group (including the new one) to its parent token infos.
        for (TemplateTokenGroupInfo groupInfo : template.groupInfo) {
            groupInfo.setParentGroups(template.tokenInfos);
        }
    }
}
public class DbsUtilities { /** * Quick method to convert a query to a map . * @ param db the db to use . * @ param sql the query to run . It has to have at least 2 parameters . The first will be used as key , the second as value . * @ param optionalType can be null . Optional parameter in case one needs a { @ link TreeMap } or something the like . * @ return the map of values from the query . * @ throws Exception */ public static Map < String , String > queryToMap ( ADb db , String sql , Map < String , String > optionalType ) throws Exception { } }
Map < String , String > map = optionalType ; if ( map == null ) { map = new HashMap < > ( ) ; } Map < String , String > _map = map ; return db . execOnConnection ( connection -> { try ( IHMStatement stmt = connection . createStatement ( ) ; IHMResultSet rs = stmt . executeQuery ( sql ) ) { while ( rs . next ( ) ) { String key = rs . getObject ( 1 ) . toString ( ) ; String value = rs . getObject ( 2 ) . toString ( ) ; _map . put ( key , value ) ; } return _map ; } } ) ;