signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ProjectiveStructureFromHomographies { /** * Identifies points which lie on the plane at infinity . That is done by computing x ' = H * x and seeing if the w * term is nearly zero , e . g . x ' = ( x , y , w ) */ void filterPointsOnPlaneAtInfinity ( List < DMatrixRMaj > homographies_view1_to_view0 , List < List < PointIndex2D_F64 > > observations , int totalFeatures ) { } }
filtered . clear ( ) ; for ( int viewIdx = 0 ; viewIdx < homographies_view1_to_view0 . size ( ) ; viewIdx ++ ) { List < PointIndex2D_F64 > filter = new ArrayList < > ( ) ; filtered . add ( filter ) ; DMatrixRMaj H = homographies_view1_to_view0 . get ( viewIdx ) ; List < PointIndex2D_F64 > obs = observations . get ( viewIdx ) ; for ( int i = 0 ; i < obs . size ( ) ; i ++ ) { PointIndex2D_F64 p = obs . get ( i ) ; if ( p . index < 0 || p . index >= totalFeatures ) throw new IllegalArgumentException ( "Feature index outside of bounds. Must be from 0 to " + ( totalFeatures - 1 ) ) ; GeometryMath_F64 . mult ( H , p , tmp ) ; // Homogenous coordinates are scale invariant . A scale // needs to be picked for consistency . I picked the largest x or y value double m = Math . max ( Math . abs ( tmp . x ) , Math . abs ( tmp . y ) ) ; if ( m == 0 ) m = 1 ; tmp . z /= m ; // See if it ' s zero or almost zero , meaning it ' s on the plane at infinity if ( Math . abs ( tmp . z ) > infinityThreshold ) { filter . add ( p ) ; } } }
public class KeyPath { /** * Returns a new KeyPath with the key added . * This is used during keypath resolution . Children normally don ' t know about all of their parent * elements so this is used to keep track of the fully qualified keypath . * This returns a key keypath because during resolution , the full keypath element tree is walked * and if this modified the original copy , it would remain after popping back up the element tree . */ @ CheckResult @ RestrictTo ( RestrictTo . Scope . LIBRARY ) public KeyPath addKey ( String key ) { } }
KeyPath newKeyPath = new KeyPath ( this ) ; newKeyPath . keys . add ( key ) ; return newKeyPath ;
public class WebUtils { /** * Gets the actual client IP address inspecting the X - Forwarded - For HTTP header or using the * provided ' remote IP address ' from the low level TCP connection from the client . * If multiple IP addresses are provided in the X - Forwarded - For header then the first one ( first * hop ) is used * @ param httpHeaders List of HTTP headers for the current request * @ param remoteAddr The client IP address and port from the current request ' s TCP connection * @ return The actual client IP address */ public static String getRealClientIpAddr ( final Map < String , String > httpHeaders , final String remoteAddr ) { } }
// If some upstream device added an X - Forwarded - For header // use it for the client ip // This will support scenarios where load balancers or gateways // front the Azkaban web server and a changing Ip address invalidates the session String clientIp = httpHeaders . getOrDefault ( X_FORWARDED_FOR_HEADER , null ) ; if ( clientIp == null ) { clientIp = remoteAddr ; } else { // header can contain comma separated list of upstream servers - get the first one final String [ ] ips = clientIp . split ( "," ) ; clientIp = ips [ 0 ] ; } // Strip off port and only get IP address final String [ ] parts = clientIp . split ( ":" ) ; clientIp = parts [ 0 ] ; return clientIp ;
public class CommonOps_DDF3 { /** * Returns the value of the element in the matrix that has the minimum value . < br > * < br > * Min { a < sub > ij < / sub > } for all i and j < br > * @ param a A matrix . Not modified . * @ return The value of element in the matrix with the minimum value . */ public static double elementMin ( DMatrix3x3 a ) { } }
double min = a . a11 ; if ( a . a12 < min ) min = a . a12 ; if ( a . a13 < min ) min = a . a13 ; if ( a . a21 < min ) min = a . a21 ; if ( a . a22 < min ) min = a . a22 ; if ( a . a23 < min ) min = a . a23 ; if ( a . a31 < min ) min = a . a31 ; if ( a . a32 < min ) min = a . a32 ; if ( a . a33 < min ) min = a . a33 ; return min ;
public class DescribeLifecycleHooksResult { /** * The lifecycle hooks for the specified group . * @ param lifecycleHooks * The lifecycle hooks for the specified group . */ public void setLifecycleHooks ( java . util . Collection < LifecycleHook > lifecycleHooks ) { } }
if ( lifecycleHooks == null ) { this . lifecycleHooks = null ; return ; } this . lifecycleHooks = new com . amazonaws . internal . SdkInternalList < LifecycleHook > ( lifecycleHooks ) ;
public class WebSocket { /** * Send a binary frame to the server . * This method is an alias of { @ link # sendFrame ( WebSocketFrame ) * sendFrame } { @ code ( WebSocketFrame . } { @ link * WebSocketFrame # createBinaryFrame ( byte [ ] ) * createBinaryFrame } { @ code ( payload ) . } { @ link * WebSocketFrame # setFin ( boolean ) setFin } { @ code ( fin ) ) } . * @ param payload * The payload of a binary frame . * @ param fin * The FIN bit value . * @ return * { @ code this } object . */ public WebSocket sendBinary ( byte [ ] payload , boolean fin ) { } }
return sendFrame ( WebSocketFrame . createBinaryFrame ( payload ) . setFin ( fin ) ) ;
public class UserCustomTableReader { /** * Read the table * @ param connection * GeoPackage connection * @ param tableName * table name * @ return table */ public static UserCustomTable readTable ( GeoPackageConnection connection , String tableName ) { } }
UserCustomConnection userDb = new UserCustomConnection ( connection ) ; return readTable ( userDb , tableName ) ;
public class Document { /** * Convenience method for creating a document using the default document factory . * @ param text the text content making up the document * @ param attributes the attributes , i . e . metadata , associated with the document * @ return the document */ public static Document create ( @ NonNull String text , @ NonNull Map < AttributeType , ? > attributes ) { } }
return DocumentFactory . getInstance ( ) . create ( text , Hermes . defaultLanguage ( ) , attributes ) ;
public class SurfaceConfig { /** * Create the surface data from node . * @ param root The root reference ( must not be < code > null < / code > ) . * @ return The surface data . * @ throws LionEngineException If unable to read node . */ public static SurfaceConfig imports ( Xml root ) { } }
Check . notNull ( root ) ; final Xml node = root . getChild ( NODE_SURFACE ) ; final String surface = node . readString ( ATT_IMAGE ) ; final String icon = node . readString ( null , ATT_ICON ) ; return new SurfaceConfig ( surface , icon ) ;
public class MisoScenePanel { /** * documentation inherited from interface */ public void repaintRect ( int x , int y , int width , int height ) { } }
// translate back into view coordinates x -= _vbounds . x ; y -= _vbounds . y ; repaint ( x , y , width , height ) ;
public class InstanceClient { /** * Starts an instance that was stopped using the instances ( ) . stop method . For more information , * see Restart an instance . * < p > Sample code : * < pre > < code > * try ( InstanceClient instanceClient = InstanceClient . create ( ) ) { * ProjectZoneInstanceName instance = ProjectZoneInstanceName . of ( " [ PROJECT ] " , " [ ZONE ] " , " [ INSTANCE ] " ) ; * InstancesStartWithEncryptionKeyRequest instancesStartWithEncryptionKeyRequestResource = InstancesStartWithEncryptionKeyRequest . newBuilder ( ) . build ( ) ; * Operation response = instanceClient . startWithEncryptionKeyInstance ( instance . toString ( ) , instancesStartWithEncryptionKeyRequestResource ) ; * < / code > < / pre > * @ param instance Name of the instance resource to start . * @ param instancesStartWithEncryptionKeyRequestResource * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final Operation startWithEncryptionKeyInstance ( String instance , InstancesStartWithEncryptionKeyRequest instancesStartWithEncryptionKeyRequestResource ) { } }
StartWithEncryptionKeyInstanceHttpRequest request = StartWithEncryptionKeyInstanceHttpRequest . newBuilder ( ) . setInstance ( instance ) . setInstancesStartWithEncryptionKeyRequestResource ( instancesStartWithEncryptionKeyRequestResource ) . build ( ) ; return startWithEncryptionKeyInstance ( request ) ;
public class Table { /** * Match two valid , equal length , columns arrays for type of columns * @ param col column array from this Table * @ param other the other Table object * @ param othercol column array from the other Table */ void checkColumnsMatch ( int [ ] col , Table other , int [ ] othercol ) { } }
for ( int i = 0 ; i < col . length ; i ++ ) { Type type = colTypes [ col [ i ] ] ; Type otherType = other . colTypes [ othercol [ i ] ] ; if ( type . typeComparisonGroup != otherType . typeComparisonGroup ) { throw Error . error ( ErrorCode . X_42562 ) ; } }
public class ValueWritable { /** * { @ inheritDoc } */ @ Override public void write ( DataOutput out ) throws IOException { } }
label . write ( out ) ; out . writeByte ( ObjectUtil . getId ( value . getClass ( ) ) ) ; value . write ( out ) ;
public class CreateLoadBalancerRequest { /** * The IDs of the public subnets . You can specify only one subnet per Availability Zone . You must specify either * subnets or subnet mappings . * [ Application Load Balancers ] You must specify subnets from at least two Availability Zones . * [ Network Load Balancers ] You can specify subnets from one or more Availability Zones . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setSubnets ( java . util . Collection ) } or { @ link # withSubnets ( java . util . Collection ) } if you want to override * the existing values . * @ param subnets * The IDs of the public subnets . You can specify only one subnet per Availability Zone . You must specify * either subnets or subnet mappings . < / p > * [ Application Load Balancers ] You must specify subnets from at least two Availability Zones . * [ Network Load Balancers ] You can specify subnets from one or more Availability Zones . * @ return Returns a reference to this object so that method calls can be chained together . */ public CreateLoadBalancerRequest withSubnets ( String ... subnets ) { } }
if ( this . subnets == null ) { setSubnets ( new java . util . ArrayList < String > ( subnets . length ) ) ; } for ( String ele : subnets ) { this . subnets . add ( ele ) ; } return this ;
public class SFTrustManager { /** * Calculates the tolerable validity time beyond the next update . * Sometimes CA ' s OCSP response update is delayed beyond the clock skew * as the update is not populated to all OCSP servers for certain period . * @ param thisUpdate the last update * @ param nextUpdate the next update * @ return the tolerable validity beyond the next update . */ private static long calculateTolerableVadility ( Date thisUpdate , Date nextUpdate ) { } }
return maxLong ( ( long ) ( ( float ) ( nextUpdate . getTime ( ) - thisUpdate . getTime ( ) ) * TOLERABLE_VALIDITY_RANGE_RATIO ) , MIN_CACHE_WARMUP_TIME_IN_MILLISECONDS ) ;
public class ConditionalCheck { /** * Ensures that an object reference passed as a parameter to the calling method is not { @ code null } . * @ param condition * condition must be { @ code true } ^ so that the check will be performed * @ param reference * an object reference * @ param name * name of object reference ( in source code ) * @ throws IllegalNullArgumentException * if the given argument { @ code reference } is { @ code null } */ @ Throws ( IllegalNullArgumentException . class ) public static < T > void notNull ( final boolean condition , @ Nonnull final T reference , @ Nullable final String name ) { } }
if ( condition ) { Check . notNull ( reference , name ) ; }
public class PhaseOneApplication { /** * Stage four semantic verification of the document . * @ param document BEL common document * @ return boolean true if success , false otherwise */ private boolean stage4 ( final Document document ) { } }
beginStage ( PHASE1_STAGE4_HDR , "4" , NUM_PHASES ) ; final StringBuilder bldr = new StringBuilder ( ) ; if ( hasOption ( NO_SEMANTIC_CHECK ) ) { bldr . append ( SEMANTIC_CHECKS_DISABLED ) ; markEndStage ( bldr ) ; stageOutput ( bldr . toString ( ) ) ; return true ; } bldr . append ( "Verifying semantics in " ) ; bldr . append ( document . getName ( ) ) ; stageOutput ( bldr . toString ( ) ) ; boolean warnings = false ; long t1 = currentTimeMillis ( ) ; try { p1 . stage4SemanticVerification ( document ) ; } catch ( SemanticFailure sf ) { warnings = true ; String resname = sf . getName ( ) ; if ( resname == null ) { sf . setName ( document . getName ( ) ) ; } else { sf . setName ( resname + " (" + document . getName ( ) + ")" ) ; } stageWarning ( sf . getUserFacingMessage ( ) ) ; } catch ( IndexingFailure e ) { stageError ( "Failed to process namespace index files for semantic" + " verification." ) ; } long t2 = currentTimeMillis ( ) ; bldr . setLength ( 0 ) ; if ( warnings ) { bldr . append ( "Semantic verification resulted in warnings in " ) ; bldr . append ( document . getName ( ) ) ; bldr . append ( "\n" ) ; } markTime ( bldr , t1 , t2 ) ; markEndStage ( bldr ) ; stageOutput ( bldr . toString ( ) ) ; if ( warnings ) { return false ; } return true ;
public class ICalParameters { /** * Sets the FBTYPE ( free busy type ) parameter value . * This parameter is used by the { @ link FreeBusy } property . It defines * whether the person is " free " or " busy " over the time periods that are * specified in the property value . If this parameter is not set , the user * should be considered " busy " during these times . * @ param freeBusyType the free busy type or null to remove * @ see < a href = " http : / / tools . ietf . org / html / rfc5545 # page - 20 " > RFC 5545 * p . 20 < / a > */ public void setFreeBusyType ( FreeBusyType freeBusyType ) { } }
replace ( FBTYPE , ( freeBusyType == null ) ? null : freeBusyType . getValue ( ) ) ;
public class JaxWsHttpServletRequestAdapter { /** * ( non - Javadoc ) * @ see javax . servlet . http . HttpServletRequest # getPathInfo ( ) */ @ Override public String getPathInfo ( ) { } }
try { collaborator . preInvoke ( componentMetaData ) ; return request . getPathInfo ( ) ; } finally { collaborator . postInvoke ( ) ; }
public class MisoScenePanel { /** * Computes the fringe tile for the specified coordinate . */ protected BaseTile computeFringeTile ( int tx , int ty ) { } }
return _ctx . getTileManager ( ) . getAutoFringer ( ) . getFringeTile ( _model , tx , ty , _fringes , _masks ) ;
public class MemcachedClient { /** * Flush all caches from all servers with a delay of application . * @ param delay the period of time to delay , in seconds * @ return whether or not the operation was accepted * @ throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ @ Override public OperationFuture < Boolean > flush ( final int delay ) { } }
final AtomicReference < Boolean > flushResult = new AtomicReference < Boolean > ( null ) ; final ConcurrentLinkedQueue < Operation > ops = new ConcurrentLinkedQueue < Operation > ( ) ; CountDownLatch blatch = broadcastOp ( new BroadcastOpFactory ( ) { @ Override public Operation newOp ( final MemcachedNode n , final CountDownLatch latch ) { Operation op = opFact . flush ( delay , new OperationCallback ( ) { @ Override public void receivedStatus ( OperationStatus s ) { flushResult . set ( s . isSuccess ( ) ) ; } @ Override public void complete ( ) { latch . countDown ( ) ; } } ) ; ops . add ( op ) ; return op ; } } ) ; return new OperationFuture < Boolean > ( null , blatch , flushResult , operationTimeout , executorService ) { @ Override public void set ( Boolean o , OperationStatus s ) { super . set ( o , s ) ; notifyListeners ( ) ; } @ Override public boolean cancel ( boolean ign ) { boolean rv = false ; for ( Operation op : ops ) { op . cancel ( ) ; rv |= op . getState ( ) == OperationState . WRITE_QUEUED ; } notifyListeners ( ) ; return rv ; } @ Override public Boolean get ( long duration , TimeUnit units ) throws InterruptedException , TimeoutException , ExecutionException { status = new OperationStatus ( true , "OK" , StatusCode . SUCCESS ) ; return super . get ( duration , units ) ; } @ Override public boolean isCancelled ( ) { boolean rv = false ; for ( Operation op : ops ) { rv |= op . isCancelled ( ) ; } return rv ; } @ Override public boolean isDone ( ) { boolean rv = true ; for ( Operation op : ops ) { rv &= op . getState ( ) == OperationState . COMPLETE ; } return rv || isCancelled ( ) ; } } ;
public class MarginalLogLikelihood { /** * Gets the " observed " feature counts . */ public static FeatureVector getObservedFeatureCounts ( FgExampleList data , FgInferencerFactory infFactory , FgModel model , double [ ] params ) { } }
model . updateModelFromDoubles ( params ) ; FgModel feats = model . getDenseCopy ( ) ; feats . zero ( ) ; for ( int i = 0 ; i < data . size ( ) ; i ++ ) { LFgExample ex = data . get ( i ) ; FactorGraph fgLat = getFgLat ( ex . getFactorGraph ( ) , ex . getGoldConfig ( ) ) ; fgLat . updateFromModel ( model ) ; FgInferencer infLat = infFactory . getInferencer ( fgLat ) ; infLat . run ( ) ; addExpectedPartials ( fgLat , infLat , 1.0 * ex . getWeight ( ) , feats ) ; } double [ ] f = new double [ model . getNumParams ( ) ] ; feats . updateDoublesFromModel ( f ) ; return new FeatureVector ( f ) ;
public class Pixel { /** * Sets an ARGB value at the position currently referenced by this Pixel . < br > * Each channel value is assumed to be within [ 0.0 . . 1.0 ] . Channel values * outside these bounds will be clamped to them . * @ param a normalized alpha * @ param r normalized red * @ param g normalized green * @ param b normalized blue * @ throws ArrayIndexOutOfBoundsException if this Pixel ' s index is not in * range of the Img ' s data array . * @ see # setRGB _ fromDouble ( double , double , double ) * @ see # setRGB _ fromDouble _ preserveAlpha ( double , double , double ) * @ see # setARGB ( int , int , int , int ) * @ see # a _ asDouble ( ) * @ see # r _ asDouble ( ) * @ see # g _ asDouble ( ) * @ see # b _ asDouble ( ) * @ since 1.2 */ public Pixel setARGB_fromDouble ( double a , double r , double g , double b ) { } }
return setValue ( Pixel . argb_fromNormalized ( a , r , g , b ) ) ;
public class EventManager { /** * This method is called to alert project listeners to the fact that * a resource assignment has been written to a project file . * @ param resourceAssignment resourceAssignment instance */ public void fireAssignmentWrittenEvent ( ResourceAssignment resourceAssignment ) { } }
if ( m_projectListeners != null ) { for ( ProjectListener listener : m_projectListeners ) { listener . assignmentWritten ( resourceAssignment ) ; } }
public class Area { /** * Returns true if this area is rectangular . */ public boolean isRectangular ( ) { } }
return ( _isPolygonal ) && ( _rulesSize <= 5 ) && ( _coordsSize <= 8 ) && ( _coords [ 1 ] == _coords [ 3 ] ) && ( _coords [ 7 ] == _coords [ 5 ] ) && ( _coords [ 0 ] == _coords [ 6 ] ) && ( _coords [ 2 ] == _coords [ 4 ] ) ;
public class BaseNDArrayFactory { /** * Create a random ndarray with the given shape using * the current time as the seed * @ param rows the number of rows in the matrix * @ param columns the number of columns in the matrix * @ return the random ndarray with the specified shape */ @ Override public INDArray rand ( long rows , long columns ) { } }
return rand ( new long [ ] { rows , columns } , System . currentTimeMillis ( ) ) ;
public class ESQuery { /** * Gets the top hits aggregation . * @ param selectStatement * the select statement * @ param size * the size * @ param entityMetadata * the entity metadata * @ return the top hits aggregation */ private TopHitsBuilder getTopHitsAggregation ( SelectStatement selectStatement , Integer size , EntityMetadata entityMetadata ) { } }
TopHitsBuilder topHitsBuilder = AggregationBuilders . topHits ( ESConstants . TOP_HITS ) ; if ( size != null ) { topHitsBuilder . setSize ( size ) ; } return topHitsBuilder ;
public class authenticationradiuspolicy_authenticationvserver_binding { /** * Use this API to fetch authenticationradiuspolicy _ authenticationvserver _ binding resources of given name . */ public static authenticationradiuspolicy_authenticationvserver_binding [ ] get ( nitro_service service , String name ) throws Exception { } }
authenticationradiuspolicy_authenticationvserver_binding obj = new authenticationradiuspolicy_authenticationvserver_binding ( ) ; obj . set_name ( name ) ; authenticationradiuspolicy_authenticationvserver_binding response [ ] = ( authenticationradiuspolicy_authenticationvserver_binding [ ] ) obj . get_resources ( service ) ; return response ;
public class Sphere3d { /** * Set the center properties with the properties in parameter . * @ param x * @ param y * @ param z */ public void setCenterProperties ( DoubleProperty x , DoubleProperty y , DoubleProperty z ) { } }
this . cxProperty = x ; this . cyProperty = y ; this . czProperty = z ;
public class MissingDoPrivDetectionSecurityManager { /** * The following method is used to print out the code base or code source * location . This would be the path / URL that a class is loaded from . This * information is useful when trying to debug AccessControlExceptions * because the AccessControlException stack trace does not include where the * class was loaded from . Where a class is loaded from is very important * because that is one of the essential items contributing to a policy in a * policy file . */ public String [ ] getCodeBaseLocForPerm ( Permission perm ) { } }
final Permission inPerm = perm ; return AccessController . doPrivileged ( new java . security . PrivilegedAction < String [ ] > ( ) { @ Override public String [ ] run ( ) { Class < ? > [ ] classes = getClassContext ( ) ; StringBuffer sb = new StringBuffer ( classes . length * 100 ) ; sb . append ( lineSep ) ; // one for offending class and the other for code base // location String [ ] retMsg = new String [ 2 ] ; ProtectionDomain pd2 = null ; for ( int i = 0 ; i < classes . length ; i ++ ) { Class < ? > clazz = classes [ i ] ; ProtectionDomain pd = clazz . getProtectionDomain ( ) ; // check for occurrence of checkPermission from stack if ( classes [ i ] . getName ( ) . indexOf ( "com.ibm.ws.kernel.launch.internal.MissingDoPrivDetectionSecurityManager" ) != - 1 ) { // found SecurityManager , start to go through // the stack starting next class for ( int j = i + 1 ; j < classes . length ; j ++ ) { pd2 = classes [ j ] . getProtectionDomain ( ) ; if ( isOffendingClass ( classes , j , pd2 , inPerm ) ) { retMsg [ 0 ] = lineSep + lineSep + " " + classes [ j ] . getName ( ) + " in " + "{" + getCodeSource ( pd2 ) + "}" + lineSep + lineSep ; StringBuffer sb2 = new StringBuffer ( classes . length * 100 ) ; sb2 . append ( lineSep ) ; sb2 . append ( classes [ j ] . getName ( ) ) . append ( " : " ) . append ( getCodeSource ( pd2 ) + lineSep ) ; sb2 . append ( " " ) . append ( permissionToString ( pd2 . getCodeSource ( ) , classes [ j ] . getClassLoader ( ) , pd2 . getPermissions ( ) ) ) . append ( lineSep ) ; break ; } } } java . security . CodeSource cs = pd . getCodeSource ( ) ; String csStr = getCodeSource ( pd ) ; // class name : location sb . append ( classes [ i ] . getName ( ) ) . append ( " : " ) . append ( csStr + lineSep ) ; sb . append ( " " ) . append ( permissionToString ( cs , clazz . getClassLoader ( ) , pd . getPermissions ( ) ) ) . append ( lineSep ) ; } Tr . info ( tc , "java.security.permdenied.class.info" , retMsg [ 0 ] ) ; Tr . 
info ( tc , "java.security.permdenied.codebaseloc.info" , sb . toString ( ) ) ; retMsg [ 1 ] = getCodeSource ( pd2 ) . concat ( lineSep ) ; return retMsg ; } } ) ;
public class StreamDescription { /** * Represents the current enhanced monitoring settings of the stream . * @ return Represents the current enhanced monitoring settings of the stream . */ public java . util . List < EnhancedMetrics > getEnhancedMonitoring ( ) { } }
if ( enhancedMonitoring == null ) { enhancedMonitoring = new com . amazonaws . internal . SdkInternalList < EnhancedMetrics > ( ) ; } return enhancedMonitoring ;
public class VListIterator { /** * ( non - Javadoc ) * @ see java . util . ListIterator # set ( java . lang . Object ) * Replaces the last element returned by next or previous with the specified * element ( optional operation ) . This call can be made only if neither * ListIterator . remove nor ListIterator . add have been called after the last * call to next or previous . */ public void set ( Versioned < E > element ) { } }
if ( element == null ) throw new NullPointerException ( "cannot set a null element" ) ; if ( _lastCall != LastCall . NEXT && _lastCall != LastCall . PREVIOUS ) throw new IllegalStateException ( "neither next() nor previous() has been called" ) ; _stack . setById ( _lastId , element ) ; afterSet ( element . getValue ( ) ) ;
public class CmsDetailPageDuplicateEliminatingSitemapGenerator { /** * Gets the contents for the given folder path and type name . < p > * @ param folderPath the content folder path * @ param type the type name * @ return the list of contents * @ throws CmsException if something goes wrong */ private List < CmsResource > getContents ( String folderPath , String type ) throws CmsException { } }
CmsPathMap < CmsResource > pathMap = getPathMapForType ( type ) ; return pathMap . getChildValues ( folderPath ) ;
public class FrameworkProjectConfig { /** * Create PropertyLookup for a project from the framework basedir * @ param filesystemFramework the filesystem */ private static PropertyLookup createDirectProjectPropertyLookup ( IFilesystemFramework filesystemFramework , String projectName ) { } }
PropertyLookup lookup ; final Properties ownProps = new Properties ( ) ; ownProps . setProperty ( "project.name" , projectName ) ; File projectsBaseDir = filesystemFramework . getFrameworkProjectsBaseDir ( ) ; // generic framework properties for a project final File propertyFile = getProjectPropertyFile ( new File ( projectsBaseDir , projectName ) ) ; final Properties projectProps = PropertyLookup . fetchProperties ( propertyFile ) ; lookup = PropertyLookup . create ( projectProps , PropertyLookup . create ( ownProps ) ) ; lookup . expand ( ) ; return lookup ;
public class JettyServer { /** * Creates a Jetty server with supplied thread pool * @ param threadPool thread pool * @ return a new jetty server instance */ @ Override public Server create ( ThreadPool threadPool ) { } }
return threadPool != null ? new Server ( threadPool ) : new Server ( ) ;
public class JcrRepository { /** * Terminate all active sessions . * @ return a future representing the asynchronous session termination process . */ Future < Boolean > shutdown ( ) { } }
// Create a simple executor that will do the backgrounding for us . . . final ExecutorService executor = Executors . newSingleThreadExecutor ( new NamedThreadFactory ( "modeshape-repository-stop" ) ) ; try { // Submit a runnable to terminate all sessions . . . return executor . submit ( ( ) -> doShutdown ( false ) ) ; } finally { // Now shutdown the executor and return the future . . . executor . shutdown ( ) ; }
public class HoconConfigurationFactory { /** * Loads , parses , binds , and validates a configuration object . * @ param provider the provider to to use for reading configuration files * @ param path the path of the configuration file * @ return a validated configuration object * @ throws IOException if there is an error reading the file * @ throws ConfigurationException if there is an error parsing or validating the file */ public T build ( ConfigurationSourceProvider provider , String path ) throws IOException , ConfigurationException { } }
try ( InputStream input = provider . open ( checkNotNull ( path ) ) ) { final JsonNode node = mapper . readTree ( hoconFactory . createParser ( input ) ) ; return build ( node , path ) ; } catch ( ConfigException e ) { ConfigurationParsingException . Builder builder = ConfigurationParsingException . builder ( "Malformed HOCON" ) . setCause ( e ) . setDetail ( e . getMessage ( ) ) ; ConfigOrigin origin = e . origin ( ) ; if ( origin != null ) { builder . setLocation ( origin . lineNumber ( ) , 0 ) ; } throw builder . build ( path ) ; }
public class DefaultQueryTreeComponent { /** * TODO : optimize by it but materializing ( and maintaining ) the results . */ @ Override public ImmutableSet < Variable > getVariables ( QueryNode node ) { } }
if ( node instanceof ExplicitVariableProjectionNode ) { return ( ( ExplicitVariableProjectionNode ) node ) . getVariables ( ) ; } else { return getProjectedVariableStream ( node ) . collect ( ImmutableCollectors . toSet ( ) ) ; }
public class SwingGui { /** * Records a new internal frame . */ void addTopLevel ( String key , JFrame frame ) { } }
if ( frame != this ) { toplevels . put ( key , frame ) ; }
public class OverlayIcon { /** * Adds an overlay icon to the base icon . * @ param overlays * the list of overlay icons to add . * @ return * the object itself , for method chaining . */ public OverlayIcon withOverlay ( ImageIcon ... overlays ) { } }
if ( overlays != null ) { for ( ImageIcon overlay : overlays ) { this . overlays . add ( overlay ) ; } } return this ;
public class SmsBase { /** * Handle http status error * @ param response raw http response * @ return response raw http response * @ throws HTTPException http status exception */ public HTTPResponse handleError ( HTTPResponse response ) throws HTTPException { } }
if ( response . statusCode < 200 || response . statusCode >= 300 ) { throw new HTTPException ( response . statusCode , response . reason ) ; } return response ;
public class Emitter { /** * syck _ emit _ map */ public void emitMap ( String tag , MapStyle style ) { } }
Level parent = parentLevel ( ) ; Level lvl = currentLevel ( ) ; if ( parent . status == LevelStatus . map && parent . ncount % 2 == 1 ) { write ( QUESTION_MARK_SPACE , 2 ) ; parent . status = LevelStatus . mapx ; } emitTag ( tag , "tag:yaml.org,2002:map" ) ; if ( style == MapStyle . Inline || ( parent . status == LevelStatus . imap || parent . status == LevelStatus . iseq ) ) { write ( CURLY_OPEN , 1 ) ; lvl . status = LevelStatus . imap ; } else { lvl . status = LevelStatus . map ; }
public class CPOptionPersistenceImpl { /** * Returns the first cp option in the ordered set where groupId = & # 63 ; . * @ param groupId the group ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching cp option * @ throws NoSuchCPOptionException if a matching cp option could not be found */ @ Override public CPOption findByGroupId_First ( long groupId , OrderByComparator < CPOption > orderByComparator ) throws NoSuchCPOptionException { } }
CPOption cpOption = fetchByGroupId_First ( groupId , orderByComparator ) ; if ( cpOption != null ) { return cpOption ; } StringBundler msg = new StringBundler ( 4 ) ; msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ; msg . append ( "groupId=" ) ; msg . append ( groupId ) ; msg . append ( "}" ) ; throw new NoSuchCPOptionException ( msg . toString ( ) ) ;
public class UpdateApplicationSettingsRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( UpdateApplicationSettingsRequest updateApplicationSettingsRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( updateApplicationSettingsRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( updateApplicationSettingsRequest . getApplicationId ( ) , APPLICATIONID_BINDING ) ; protocolMarshaller . marshall ( updateApplicationSettingsRequest . getWriteApplicationSettingsRequest ( ) , WRITEAPPLICATIONSETTINGSREQUEST_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class GanttProjectReader { /** * Read custom property definitions for tasks . * @ param gpTasks GanttProject tasks */ private void readTaskCustomPropertyDefinitions ( Tasks gpTasks ) { } }
for ( Taskproperty definition : gpTasks . getTaskproperties ( ) . getTaskproperty ( ) ) { // Ignore everything but custom values if ( ! "custom" . equals ( definition . getType ( ) ) ) { continue ; } // Find the next available field of the correct type . String type = definition . getValuetype ( ) ; FieldType fieldType = TASK_PROPERTY_TYPES . get ( type ) . getField ( ) ; // If we have run out of fields of the right type , try using a text field . if ( fieldType == null ) { fieldType = TASK_PROPERTY_TYPES . get ( "text" ) . getField ( ) ; } // If we actually have a field available , set the alias to match // the name used in GanttProject . if ( fieldType != null ) { CustomField field = m_projectFile . getCustomFields ( ) . getCustomField ( fieldType ) ; field . setAlias ( definition . getName ( ) ) ; String defaultValue = definition . getDefaultvalue ( ) ; if ( defaultValue != null && defaultValue . isEmpty ( ) ) { defaultValue = null ; } m_taskPropertyDefinitions . put ( definition . getId ( ) , new Pair < FieldType , String > ( fieldType , defaultValue ) ) ; } }
public class GenericTableColumnsModel { /** * Callback method for set the column names array from the generic given type . This method is * invoked in the constructor from the derived classes and can be overridden so users can * provide their own version of a column names */ protected void onSetColumnNames ( ) { } }
columnNames = ReflectionExtensions . getDeclaredFieldNames ( getType ( ) , "serialVersionUID" ) ; for ( int i = 0 ; i < columnNames . length ; i ++ ) { columnNames [ i ] = StringUtils . capitalize ( columnNames [ i ] ) ; }
public class AllConnectConnectionHolder { /** * 两次验证检查ClientTransport是否存活 * @ param interfaceId 接口 * @ param transport ClientTransport对象 * @ return 是否存活 */ protected boolean doubleCheck ( String interfaceId , ProviderInfo providerInfo , ClientTransport transport ) { } }
// First check: an unavailable transport is immediately dead.
if (!transport.isAvailable()) {
    return false;
}
try {
    // 睡一下下 防止被连上又被服务端踢下线 -> sleep briefly so that a connection
    // the server kicks off right after connecting is caught by the re-check.
    Thread.sleep(100);
} catch (InterruptedException e) {
    // Restore the interrupt status instead of silently swallowing it, so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
}
if (transport.isAvailable()) { // double check
    return true;
}
// The connection was closed within 100ms of being established — possibly the
// host is blacklisted, or the provider's connection limit was reached.
if (LOGGER.isWarnEnabled(consumerConfig.getAppName())) {
    LOGGER.warnWithApp(consumerConfig.getAppName(),
        "Connection has been closed after connected (in last 100ms)!"
            + " Maybe connectionNum of provider has been reached limit,"
            + " or your host is in the blacklist of provider {}/{}",
        interfaceId, transport.getConfig().getProviderInfo());
}
providerInfo.setDynamicAttr(ProviderInfoAttrs.ATTR_RC_PERIOD_COEFFICIENT, 5);
return false;
public class Duration { /** * Returns a copy of this duration with the specified duration in standard 24 hour days subtracted . * The number of days is multiplied by 86400 to obtain the number of seconds to subtract . * This is based on the standard definition of a day as 24 hours . * This instance is immutable and unaffected by this method call . * @ param daysToSubtract the days to subtract , positive or negative * @ return a { @ code Duration } based on this duration with the specified days subtracted , not null * @ throws ArithmeticException if numeric overflow occurs */ public Duration minusDays ( long daysToSubtract ) { } }
// Long.MIN_VALUE has no positive counterpart, so it cannot simply be
// negated; split the subtraction into two additions instead.
if (daysToSubtract == Long.MIN_VALUE) {
    return plusDays(Long.MAX_VALUE).plusDays(1);
}
return plusDays(-daysToSubtract);
public class FTPFileSystem { /** * Resolve against given working directory . * * @ param workDir * @ param path * @ return */ private Path makeAbsolute ( Path workDir , Path path ) { } }
if ( path . isAbsolute ( ) ) { return path ; } return new Path ( workDir , path ) ;
public class VirtualMachineScaleSetsInner { /** * Gets list of OS upgrades on a VM scale set instance . * @ param nextPageLink The NextLink from the previous successful call to List operation . * @ param serviceFuture the ServiceFuture object tracking the Retrofit calls * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < List < UpgradeOperationHistoricalStatusInfoInner > > getOSUpgradeHistoryNextAsync ( final String nextPageLink , final ServiceFuture < List < UpgradeOperationHistoricalStatusInfoInner > > serviceFuture , final ListOperationCallback < UpgradeOperationHistoricalStatusInfoInner > serviceCallback ) { } }
// Page through the OS upgrade history: fetch the page for nextPageLink, then
// keep requesting subsequent pages via the same single-page call, reporting
// results through the supplied callback until paging completes.
return AzureServiceFuture.fromPageResponse(
    getOSUpgradeHistoryNextSinglePageAsync(nextPageLink),
    new Func1<String, Observable<ServiceResponse<Page<UpgradeOperationHistoricalStatusInfoInner>>>>() {
        @Override
        public Observable<ServiceResponse<Page<UpgradeOperationHistoricalStatusInfoInner>>> call(String nextPageLink) {
            // Invoked once per follow-up page link.
            return getOSUpgradeHistoryNextSinglePageAsync(nextPageLink);
        }
    },
    serviceCallback);
public class Util { /** * Validate that a method is public , has no arguments . * @ param method the method to be tested * @ param errors a list to place the errors */ @ SuppressWarnings ( { } }
"ThrowableInstanceNeverThrown" }) // suppressed: the Exceptions below are collected into `errors`, never thrown
public static void validatePublicNoArg(Method method, List<Throwable> errors) {
    // Record (rather than throw) a failure when the method is not public.
    if (!Modifier.isPublic(method.getModifiers())) {
        errors.add(new Exception("Method " + method.getName() + "() should be public"));
    }
    // Record a failure when the method declares any parameters.
    if (method.getParameterTypes().length != 0) {
        errors.add(new Exception("Method " + method.getName() + " should have no parameters"));
    }
public class ExecutionEnvironment { /** * Creates a new data set that contains elements in the iterator . The iterator is splittable , allowing the * framework to create a parallel data source that returns the elements in the iterator . * < p > Because the iterator will remain unmodified until the actual execution happens , the type of data * returned by the iterator must be given explicitly in the form of the type information . * This method is useful for cases where the type is generic . In that case , the type class * ( as given in { @ link # fromParallelCollection ( SplittableIterator , Class ) } does not supply all type information . * @ param iterator The iterator that produces the elements of the data set . * @ param type The TypeInformation for the produced data set . * @ return A DataSet representing the elements in the iterator . * @ see # fromParallelCollection ( SplittableIterator , Class ) */ public < X > DataSource < X > fromParallelCollection ( SplittableIterator < X > iterator , TypeInformation < X > type ) { } }
return fromParallelCollection ( iterator , type , Utils . getCallLocationName ( ) ) ;
public class TouchManager { /** * This method should be called externally from touch event dispatcher to run the logic for * widget lib * @ param pickedObjectList list of picked objects * @ param event touch event code * @ return true if the input has been accepted and processed by some object , otherwise - false */ public boolean handleClick ( List < GVRPickedObject > pickedObjectList , int event ) { } }
// Dispatch a click/back event against the picked objects: interceptors run
// first, then per-object filters and registered handlers; falls back to the
// default left/right click action when nothing consumes the event.
Log.d(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): new click event");
boolean isClickableItem = false;
if (pickedObjectList == null) {
    Log.w(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): NULL pickedObjectList!");
    return event == LEFT_CLICK_EVENT ? takeDefaultLeftClickAction() : takeDefaultRightClickAction();
} else if (pickedObjectList.isEmpty()) {
    Log.w(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): EMPTY pickedObjectList!");
    return event == LEFT_CLICK_EVENT ? takeDefaultLeftClickAction() : takeDefaultRightClickAction();
}
// Process result(s)
for (GVRPickedObject pickedObject : pickedObjectList) {
    if (pickedObject == null) {
        Log.w(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): got a null reference in the pickedObject");
        continue;
    }
    GVRSceneObject sceneObject = pickedObject.getHitObject();
    if (sceneObject == null) {
        Log.w(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): got a null reference in the pickedObject.getHitObject()");
        continue;
    }
    Log.w(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): trying '%s' ...", Helpers.getFullName(sceneObject));
    final float[] hit = pickedObject.getHitLocation();
    // Interceptors get first refusal on the event; note the loop keeps the
    // LAST interceptor's verdict.
    synchronized (mOnTouchInterceptors) {
        for (OnTouch interceptor : mOnTouchInterceptors) {
            isClickableItem = event == LEFT_CLICK_EVENT ? interceptor.touch(sceneObject, hit) : interceptor.onBackKey(sceneObject, hit);
        }
    }
    if (!isClickableItem) {
        // Filters may veto dispatch to this scene object entirely.
        Set<TouchManagerFilter> filters = event == LEFT_CLICK_EVENT ? mTouchFilters : mBackKeyFilters;
        synchronized (mTouchFilters) {
            boolean processTouch = true;
            for (TouchManagerFilter filter : filters) {
                if (!filter.select(sceneObject)) {
                    processTouch = false;
                    break;
                }
            }
            if (!processTouch) {
                continue;
            }
        }
        // Handlers are held weakly; a cleared reference is pruned here.
        final WeakReference<OnTouch> handler = touchHandlers.get(sceneObject);
        final OnTouch h = null != handler ? handler.get() : null;
        if (null != h) {
            isClickableItem = event == LEFT_CLICK_EVENT ?
                h.touch(sceneObject, hit) : h.onBackKey(sceneObject, hit);
            Log.d(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): handler for '%s' hit = %s handled event: %b", sceneObject.getName(), hit, isClickableItem);
        } else {
            Log.e(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): No handler or displayID for %s", Helpers.getFullName(sceneObject));
            touchHandlers.remove(sceneObject);
        }
    }
    // Stop at the first object that consumed the event.
    if (isClickableItem) {
        Log.w(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): '%s' was clicked!", Helpers.getFullName(sceneObject));
        break;
    }
    Log.w(Log.SUBSYSTEM.INPUT, TAG, "handleClick(): '%s' not clickable", Helpers.getFullName(sceneObject));
}
if (!isClickableItem) {
    Log.d(Log.SUBSYSTEM.INPUT, TAG, "No clickable items");
    isClickableItem = event == LEFT_CLICK_EVENT ? takeDefaultLeftClickAction() : takeDefaultRightClickAction();
}
return isClickableItem;
public class MiniTemplator { /** * Reads the contents of a file into a string variable . */ private String readFileIntoString ( String fileName , InputStream is ) throws IOException { } }
FileInputStream stream = null ; InputStreamReader reader = null ; try { if ( is != null ) { reader = new InputStreamReader ( is , charset ) ; } else { stream = new FileInputStream ( fileName ) ; reader = new InputStreamReader ( stream , charset ) ; } return readStreamIntoString ( reader ) ; } finally { if ( reader != null ) { reader . close ( ) ; } if ( stream != null ) { stream . close ( ) ; } }
public class XmlReader { /** * Initialize XML reader for processing * @ param inputStream input stream */ private void init ( InputStream inputStream ) { } }
// Remember the stream and reset the traversal state.
this.inputStream = inputStream;
currentPath = new LinkedList<Node>();
nodeQueue = new LinkedList<XmlNode>();
// Coalesce adjacent character data and inline entity references so text
// content arrives as single events.
XMLInputFactory xmlFactory = XMLInputFactory.newInstance();
xmlFactory.setProperty("javax.xml.stream.isCoalescing", true);
xmlFactory.setProperty("javax.xml.stream.isReplacingEntityReferences", true);
try {
    reader = xmlFactory.createXMLStreamReader(inputStream);
} catch (XMLStreamException cause) {
    // Wrap the checked StAX failure in the reader's own unchecked type.
    throw new XmlReaderException(cause);
}
public class LDblToByteFunctionBuilder { /** * One of ways of creating builder . This is possibly the least verbose way where compiler should be able to guess the generic parameters . */ @ Nonnull public static LDblToByteFunction dblToByteFunctionFrom ( Consumer < LDblToByteFunctionBuilder > buildingFunction ) { } }
// Hand a fresh builder to the configuration function, then build the result.
final LDblToByteFunctionBuilder newBuilder = new LDblToByteFunctionBuilder();
buildingFunction.accept(newBuilder);
return newBuilder.build();
public class HeapElement { /** * Discards the current element and move to the next . * @ return true if there is a next element , false if not */ boolean shift ( ) { } }
// Advance to the next element when available, refreshing the cached
// comparison key; report false at end of iteration.
if (it.hasNext()) {
    head = it.next();
    comparable = DateValueComparison.comparable(head);
    return true;
}
return false;
public class Shape { /** * Serializes this shape as a { @ link JSONObject } * @ return JSONObject */ @ Override public JSONObject toJSONObject ( ) { } }
// Serialise type, optional non-empty metadata, and attributes.
final JSONObject json = new JSONObject();
json.put("type", new JSONString(getShapeType().getValue()));
if (hasMetaData()) {
    final MetaData meta = getMetaData();
    if (!meta.isEmpty()) {
        json.put("meta", new JSONObject(meta.getJSO()));
    }
}
json.put("attributes", new JSONObject(getAttributes().getJSO()));
return json;
public class ShutdownSystem { /** * Start the server shutdown */ public static void shutdownOutOfMemory ( String msg ) { } }
// Release buffers first so the shutdown path itself has memory to work with.
freeMemoryBuffers();
ShutdownSystem shutdown = _activeService.get();
if (shutdown == null || shutdown.isShutdownOnOutOfMemory()) {
    // No active service opting out: perform the OOM shutdown.
    shutdownActive(ExitCode.MEMORY, msg);
} else {
    // Shutdown-on-OOM disabled: just report the message.
    System.err.println(msg);
}
public class DoubleMatrix { /** * Returns new DoubleMatrix which is transpose of this . * @ return */ @ Override public DoubleMatrix transpose ( ) { } }
// Build a columns-by-rows matrix and copy each (r, c) item to (c, r).
final int rowCount = rows();
final int colCount = columns();
ItemSupplier source = supplier;
DoubleMatrix result = getInstance(colCount, rowCount);
ItemConsumer sink = result.consumer;
for (int r = 0; r < rowCount; r++) {
    for (int c = 0; c < colCount; c++) {
        sink.set(c, r, source.get(r, c));
    }
}
return result;
public class ReplicatedQueryAlloc { /** * Returns { @ code true } if at least one old deleted entry was removed . * @ param prevPos position to skip during cleanup ( because cleaned up separately ) */ public boolean forcedOldDeletedEntriesCleanup ( long prevPos ) { } }
// Bail out unless the map is configured to clean up removed entries.
ReplicatedChronicleMap<?, ?, ?> map = mh.m();
if (!map.cleanupRemovedEntries)
    return false;
// Walk every replicable entry of this segment via the cleanup action,
// counting how many deleted entries were removed completely; prevPos is
// skipped because the caller cleans it up separately.
try (MapSegmentContext<?, ?, ?> sc = map.segmentContext(s.segmentIndex)) {
    cleanupAction.removedCompletely = 0;
    cleanupAction.posToSkip = prevPos;
    cleanupAction.iterationContext = (IterationContext<?, ?, ?>) sc;
    ((ReplicatedHashSegmentContext<?, ?>) sc).forEachSegmentReplicableEntry(cleanupAction);
    // True when at least one old deleted entry was removed.
    return cleanupAction.removedCompletely > 0;
}
public class PackageManagerUtils { /** * Checks if the device has a WIFI . * @ param manager the package manager . * @ return { @ code true } if the device has a WIFI . */ @ TargetApi ( Build . VERSION_CODES . FROYO ) public static boolean hasWifiFeature ( PackageManager manager ) { } }
return manager . hasSystemFeature ( PackageManager . FEATURE_WIFI ) ;
public class AtomTypeAwareSaturationChecker { /** * This method tries to set the bond order on the current bond . * @ param atomContainer The molecule * @ param index The index of the current bond * @ throws CDKException when no suitable solution can be found */ private void checkBond ( IAtomContainer atomContainer , int index ) throws CDKException { } }
IBond bond = atomContainer.getBond(index);
// Only bonds flagged SINGLE_OR_DOUBLE are candidates for order assignment.
if (bond == null || !bond.getFlag(CDKConstants.SINGLE_OR_DOUBLE)) {
    return;
}
try {
    // Remember the current order so it can be restored on failure, then
    // start from SINGLE and let setMaxBondOrder raise it as far as allowed.
    oldBondOrder = bond.getOrder();
    bond.setOrder(IBond.Order.SINGLE);
    setMaxBondOrder(bond, atomContainer);
} catch (CDKException e) {
    // No valid assignment found: roll back to the previous order.
    bond.setOrder(oldBondOrder);
    logger.debug(e);
}
public class TargetVpnGatewayClient { /** * Retrieves an aggregated list of target VPN gateways . * < p > Sample code : * < pre > < code > * try ( TargetVpnGatewayClient targetVpnGatewayClient = TargetVpnGatewayClient . create ( ) ) { * ProjectName project = ProjectName . of ( " [ PROJECT ] " ) ; * for ( TargetVpnGatewaysScopedList element : targetVpnGatewayClient . aggregatedListTargetVpnGateways ( project ) . iterateAll ( ) ) { * / / doThingsWith ( element ) ; * < / code > < / pre > * @ param project Project ID for this request . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final AggregatedListTargetVpnGatewaysPagedResponse aggregatedListTargetVpnGateways ( ProjectName project ) { } }
// Convert the typed project name to its string form (null stays null),
// wrap it in a request, and delegate to the request-based overload.
final String projectName = (project == null) ? null : project.toString();
AggregatedListTargetVpnGatewaysHttpRequest request =
    AggregatedListTargetVpnGatewaysHttpRequest.newBuilder()
        .setProject(projectName)
        .build();
return aggregatedListTargetVpnGateways(request);
public class NBTInputStream { /** * Reads an NBT { @ link Tag } from the stream . * @ param depth The depth of this tag . * @ return The tag that was read . * @ throws java . io . IOException if an I / O error occurs . */ private Tag readTag ( int depth ) throws IOException { } }
// Read the unsigned tag type byte and resolve it.
int typeId = is.readByte() & 0xFF;
TagType type = TagType.getById(typeId);
// TAG_END carries no name; every other tag is prefixed with a
// length-counted name encoded in the NBT charset.
String name = "";
if (type != TagType.TAG_END) {
    int nameLength = is.readShort() & 0xFFFF;
    byte[] nameBytes = new byte[nameLength];
    is.readFully(nameBytes);
    name = new String(nameBytes, NBTConstants.CHARSET.name());
}
return readTagPayload(type, name, depth);
public class ServerFilter { /** * Method allow to find server that description contains one of specified keywords . * Matching is case insensitive . * @ param subStrings is list of not null keywords * @ return { @ link ServerFilter } */ public ServerFilter descriptionContains ( String ... subStrings ) { } }
// Validate that no keyword is null before building the predicate.
allItemsNotNull(subStrings, "Description keywords");
// Narrow the existing predicate: the server's description must satisfy the
// combined keyword check (containsIgnoreCase applied over the keyword list).
predicate = predicate.and(combine(ServerMetadata::getDescription, in(asList(subStrings), Predicates::containsIgnoreCase)));
// Return this filter to allow call chaining.
return this;
public class AccountTakeoverRiskConfigurationTypeMarshaller { /** * Marshall the given parameter object . */ public void marshall ( AccountTakeoverRiskConfigurationType accountTakeoverRiskConfigurationType , ProtocolMarshaller protocolMarshaller ) { } }
if ( accountTakeoverRiskConfigurationType == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( accountTakeoverRiskConfigurationType . getNotifyConfiguration ( ) , NOTIFYCONFIGURATION_BINDING ) ; protocolMarshaller . marshall ( accountTakeoverRiskConfigurationType . getActions ( ) , ACTIONS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class AWSSimpleSystemsManagementClient { /** * Retrieves information about a patch baseline . * @ param getPatchBaselineRequest * @ return Result of the GetPatchBaseline operation returned by the service . * @ throws DoesNotExistException * Error returned when the ID specified for a resource , such as a Maintenance Window or Patch baseline , * doesn ' t exist . < / p > * For information about resource limits in Systems Manager , see < a * href = " http : / / docs . aws . amazon . com / general / latest / gr / aws _ service _ limits . html # limits _ ssm " > AWS Systems * Manager Limits < / a > . * @ throws InvalidResourceIdException * The resource ID is not valid . Verify that you entered the correct ID and try again . * @ throws InternalServerErrorException * An error occurred on the server side . * @ sample AWSSimpleSystemsManagement . GetPatchBaseline * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / ssm - 2014-11-06 / GetPatchBaseline " target = " _ top " > AWS API * Documentation < / a > */ @ Override public GetPatchBaselineResult getPatchBaseline ( GetPatchBaselineRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetPatchBaseline ( request ) ;
public class UpdateRouteResponseRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( UpdateRouteResponseRequest updateRouteResponseRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( updateRouteResponseRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( updateRouteResponseRequest . getApiId ( ) , APIID_BINDING ) ; protocolMarshaller . marshall ( updateRouteResponseRequest . getModelSelectionExpression ( ) , MODELSELECTIONEXPRESSION_BINDING ) ; protocolMarshaller . marshall ( updateRouteResponseRequest . getResponseModels ( ) , RESPONSEMODELS_BINDING ) ; protocolMarshaller . marshall ( updateRouteResponseRequest . getResponseParameters ( ) , RESPONSEPARAMETERS_BINDING ) ; protocolMarshaller . marshall ( updateRouteResponseRequest . getRouteId ( ) , ROUTEID_BINDING ) ; protocolMarshaller . marshall ( updateRouteResponseRequest . getRouteResponseId ( ) , ROUTERESPONSEID_BINDING ) ; protocolMarshaller . marshall ( updateRouteResponseRequest . getRouteResponseKey ( ) , ROUTERESPONSEKEY_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class Sneaky { /** * Wrap a { @ link CheckedPredicate } in a { @ link Predicate } . * Example : * < code > < pre > * Stream . of ( " a " , " b " , " c " ) . filter ( Unchecked . predicate ( s - > { * if ( s . length ( ) > 10) * throw new Exception ( " Only short strings allowed " ) ; * return true ; * < / pre > < / code > */ public static < T > Predicate < T > predicate ( CheckedPredicate < T > predicate ) { } }
// Delegate to Unchecked.predicate with the RETHROW_ALL handler, so checked
// exceptions from the wrapped predicate are rethrown rather than wrapped.
return Unchecked.predicate(predicate, Unchecked.RETHROW_ALL);
public class FreightStreamer { /** * Print from src to stdout . */ private void printToStdout ( InputStream in ) throws IOException { } }
// Copy the stream to stdout without closing stdout (final `false` argument),
// closing the input when done. try-with-resources replaces the manual
// finally/close and additionally preserves the copy exception (the close
// failure becomes suppressed instead of masking it).
try (InputStream input = in) {
    IOUtils.copyBytes(input, System.out, getConf(), false);
}
public class SymbolTable { /** * Check whether a given name is registered . That is , whether or not there is a * corresponding name or not . * @ param name * @ return */ public boolean contains ( QualifiedName name ) { } }
Name unit = name . getUnit ( ) ; // Get information associated with this unit SymbolTable . Group group = symbolTable . get ( unit ) ; return group != null && group . isValid ( name . getName ( ) ) ;
public class ViewPoolImpl { /** * Generates an unique key according to the metadata information stored * in the passed UIViewRoot instance that can affect the way how the view is generated . * By default , the " view " params are the viewId , the locale , the renderKit and * the contracts associated to the view . * @ param facesContext * @ param root * @ return */ protected MetadataViewKey deriveViewKey ( FacesContext facesContext , UIViewRoot root ) { } }
// Key the view on viewId, render kit and locale; include the resource
// library contracts only when some are active.
List<String> contractList = facesContext.getResourceLibraryContracts();
if (contractList.isEmpty()) {
    return new MetadataViewKeyImpl(root.getViewId(), root.getRenderKitId(), root.getLocale());
}
String[] contracts = contractList.toArray(new String[contractList.size()]);
return new MetadataViewKeyImpl(root.getViewId(), root.getRenderKitId(), root.getLocale(), contracts);
public class VRPResourceManager { /** * Deletes a VRP with the given Id if it exists . During deletion , all of the child resource pools under the hubs * will be deleted . If there are any VMs under those resource pools , they will be moved directly under the hub . For * hubs that are cluster , those VMs will be moved to the root resource pool . * @ param vrpId The unique Id of the VRP * @ throws InvalidState * @ throws NotFound * @ throws RuntimeFault * @ throws RemoteException */ public void deleteVRP ( String vrpId ) throws InvalidState , NotFound , RuntimeFault , RemoteException { } }
// Delegate the VRP deletion to the VIM service for this managed object
// reference; declared exceptions propagate to the caller.
getVimService().deleteVRP(getMOR(), vrpId);
public class CredentialUtil { /** * Defined in rfc 2617 as H ( data ) = MD5 ( data ) ; * @ param data data * @ return MD5 ( data ) */ public static String encryptMD5 ( String data ) { } }
try { MessageDigest digest = MessageDigest . getInstance ( "MD5" ) ; return copyValueOf ( Hex . encodeHex ( digest . digest ( data . getBytes ( StandardCharsets . UTF_8 ) ) ) ) ; } catch ( final NoSuchAlgorithmException ex ) { throw new TechnicalException ( "Failed to instantiate an MD5 algorithm" , ex ) ; }
public class VorbisStyleComments { /** * Adds a comment for a given tag */ public void addComment ( String tag , String comment ) { } }
// Normalise the tag so variants map to the same comment bucket, creating
// the bucket on first use, then append the comment.
String key = normaliseTag(tag);
if (!comments.containsKey(key)) {
    comments.put(key, new ArrayList<String>());
}
comments.get(key).add(comment);
public class OptionalUtils { /** * Throws an { @ link IllegalArgumentException } if it is not true that exactly one of the provided * { @ link Optional } s { @ link Optional # isPresent ( ) } . */ public static < T > void exactlyOnePresentOrIllegalArgument ( Iterable < ? extends Optional < ? extends T > > optionals , final String msg ) { } }
// Exactly one present optional is the only acceptable count.
if (numPresent(optionals) == 1) {
    return;
}
throw new IllegalArgumentException(msg);
public class EnvironmentSettingsInner { /** * Create or replace an existing Environment Setting . This operation can take a while to complete . * @ param resourceGroupName The name of the resource group . * @ param labAccountName The name of the lab Account . * @ param labName The name of the lab . * @ param environmentSettingName The name of the environment Setting . * @ param environmentSetting Represents settings of an environment , from which environment instances would be created * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < EnvironmentSettingInner > createOrUpdateAsync ( String resourceGroupName , String labAccountName , String labName , String environmentSettingName , EnvironmentSettingInner environmentSetting , final ServiceCallback < EnvironmentSettingInner > serviceCallback ) { } }
// Start the (long-running) create/update and adapt its response stream into
// a ServiceFuture, notifying the supplied callback on success or failure.
return ServiceFuture.fromResponse(createOrUpdateWithServiceResponseAsync(resourceGroupName, labAccountName, labName, environmentSettingName, environmentSetting), serviceCallback);
public class RopertyImpl { /** * / * ( non - Javadoc ) * @ see com . parship . roperty . Roperty # get ( java . lang . String , com . parship . roperty . DomainResolver ) */ @ Override public < T > T get ( final String key , DomainResolver resolver ) { } }
// Delegate to the three-argument overload with a null second argument
// (presumably the default value — confirm against that overload's contract).
return get(key, null, resolver);
public class AttributeIndexRegistry { /** * Matches an index for the given attribute and match hint . * @ param attribute the attribute to match an index for . * @ param matchHint the match hint ; { @ link QueryContext . IndexMatchHint # EXACT _ NAME } * is not supported by this method . * @ return the matched index or { @ code null } if nothing matched . * @ see QueryContext . IndexMatchHint */ public InternalIndex match ( String attribute , QueryContext . IndexMatchHint matchHint ) { } }
Record record = registry.get(attribute);
if (record == null) {
    // No index registered for this attribute.
    return null;
}
if (matchHint == QueryContext.IndexMatchHint.PREFER_UNORDERED) {
    InternalIndex unordered = record.unordered;
    return unordered != null ? unordered : record.ordered;
}
if (matchHint == QueryContext.IndexMatchHint.NONE || matchHint == QueryContext.IndexMatchHint.PREFER_ORDERED) {
    // NONE behaves like PREFER_ORDERED: ordered indexes are preferred since
    // they are more universal in terms of supported fast queries.
    InternalIndex ordered = record.ordered;
    return ordered != null ? ordered : record.unordered;
}
throw new IllegalStateException("unexpected match hint: " + matchHint);
public class ManagedObject { /** * Driven when the object changes to commited state within a transaction . * Cannot be overriden by a subclass of ManagedObject . * @ param transaction commiting the update . * @ param serializedBytes the bytes that were given to the * transaction when the last update was made or null if the object has been deleted , * or just locked and not changed . * @ param savedSequenceNumber when the update was made and which has now been saved . * @ param requiresCurrentCheckpoint true if the managed object must be updated as part of the current checkpoint . * @ throws ObjectManagerException */ protected void commit ( Transaction transaction , ObjectManagerByteArrayOutputStream serializedBytes , long savedSequenceNumber , boolean requiresCurrentCheckpoint ) throws ObjectManagerException { } }
// Apply the transaction's commit to this object: move to the commit state,
// then either push the newest serialized version to the object store
// (add/replace) or remove the object and release its reserved space (delete).
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
    trace.entry(this, cclass, "commit",
                new Object[] { transaction, serializedBytes, new Long(savedSequenceNumber), new Boolean(requiresCurrentCheckpoint) });
// No need to synchronize when we copy the entry state as only the locking transaction can cause a state change.
int entryState = state;
setState(nextStateForCommit); // Make the state change.
// Update the object in its store.
// We do this only after the add/replace/delete records has been written and forced
// to the log as part of commit processing.
switch (entryState) {
    case stateAdded:
    case stateReplaced:
        synchronized (forcedUpdateSequenceLock) {
            // Now the object to give the ObjectStore, if we have a newer version.
            if (savedSequenceNumber > forcedUpdateSequence) {
                forcedUpdateSequence = savedSequenceNumber;
                latestSerializedBytes = serializedBytes;
                owningToken.objectStore.add(this, requiresCurrentCheckpoint);
            } // if (savedSequenceNumber > forcedUpdateSequence).
        } // synchronized (forcedUpdateSequenceNumberLock).
        break;
    case stateToBeDeleted:
    case stateMustBeDeleted:
        // An OptimisticReplace for another transaction might have been executed and commited after our delete but
        // but before this commit, in which case the commited version could legitimately be greater than the
        // one that deleted the Object.
        // Taking the forcedUpdateSequenceLock, ensures that any OptimisticReplace
        // updates see our state is now deleted.
        int tempLatestSerializedSize;
        synchronized (forcedUpdateSequenceLock) {
            if (savedSequenceNumber > forcedUpdateSequence) {
                forcedUpdateSequence = savedSequenceNumber;
                // The forcedSerializedBytes passed in may not be null if another OptimisticReplace happened to this
                // ManagedObject after Delete had been logged. This happens when two links are deleted in a LinkedList
                // under one transaction.
                // latestSerializedBytes might be null if they have been written by the store.
                if (latestSerializedBytes != null) {
                    owningToken.objectStore.getObjectManagerState().returnByteArrayOutputStreamToPool(latestSerializedBytes);
                    latestSerializedBytes = null;
                } // if (latestSerializedBytes != null).
            } // if (savedSequenceNumber > forcedUpdateSequenceNumber).
            // Remove the object from its object store. If an OptimisticReplace committed before we committed
            // the delete, then the log version will have already been bumped up, so delete the object
            // irrespective of whether it was changed by an optimisticReplace in another transaction after
            // we deleted it in this transaction. Also a checkpoint may have already noticed that the
            // logSequence number has moved on.
            owningToken.objectStore.remove(owningToken, requiresCurrentCheckpoint);
            tempLatestSerializedSize = latestSerializedSize;
            latestSerializedSize = 0;
        } // synchronized (forcedUpdateSequenceLock).
        // following statement moved out of lock in PM41418 to prevent deadlock
        owningToken.objectStore.reserve(-tempLatestSerializedSize, false);
        break;
} // switch.
beforeImmage = null; // Help garbage collector.
numberOfLocksTaken = 0;
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
    trace.exit(this, cclass, "commit");
public class HttpContext { /** * Perform a secure HTTPS POST at the given path sending in the given post * data returning the results of the response . * @ param host The hostname of the request * @ param path The path of the request * @ param postData The POST data to send in the request * @ param port The port of the request * @ param headers The headers to pass in the request * @ param timeout The timeout of the request in milliseconds * @ return The data of the resposne * @ throws UnknownHostException if the host cannot be found * @ throws ConnectException if the HTTP server does not respond * @ throws IOException if an I / O error occurs processing the request */ public String doSecurePost ( String host , String path , String postData , int port , Map < String , String > headers , int timeout ) throws UnknownHostException , ConnectException , IOException { } }
// Thin convenience wrapper: delegate to the shared HTTP implementation with the
// final flag set to true, which selects the secure (HTTPS) code path.
return doHttpCall ( host , path , postData , port , headers , timeout , true ) ;
public class Spider { /** * Checks if is stopped , i . e . a shutdown was issued or it is not running . * @ return true , if is stopped */ public boolean isStopped ( ) { } }
if ( ! stopped && this . spiderParam . getMaxDuration ( ) > 0 ) { // Check to see if the scan has exceeded the specified maxDuration if ( TimeUnit . MILLISECONDS . toMinutes ( System . currentTimeMillis ( ) - this . timeStarted ) > this . spiderParam . getMaxDuration ( ) ) { log . info ( "Spidering process has exceeded maxDuration of " + this . spiderParam . getMaxDuration ( ) + " minute(s)" ) ; this . complete ( ) ; } } return stopped ;
public class CollationKeys { /** * Writes the sort key bytes for minLevel up to the iterator data ' s strength . Optionally writes * the case level . Stops writing levels when callback . needToWrite ( level ) returns false . * Separates levels with the LEVEL _ SEPARATOR _ BYTE but does not write a TERMINATOR _ BYTE . */ public static void writeSortKeyUpToQuaternary ( CollationIterator iter , boolean [ ] compressibleBytes , CollationSettings settings , SortKeyByteSink sink , int minLevel , LevelCallback callback , boolean preflight ) { } }
// NOTE(review): ICU4J collation sort-key writer. The code below builds the
// primary bytes directly into `sink` while buffering case/secondary/tertiary/
// quaternary bytes in SortKeyLevel accumulators, applying per-level
// common-weight run compression. Statement order and the exact compression
// thresholds are contract-critical; code left byte-identical, comments only.
int options = settings . options ; // Set of levels to process and write . int levels = levelMasks [ CollationSettings . getStrength ( options ) ] ; if ( ( options & CollationSettings . CASE_LEVEL ) != 0 ) { levels |= Collation . CASE_LEVEL_FLAG ; } // Minus the levels below minLevel . levels &= ~ ( ( 1 << minLevel ) - 1 ) ; if ( levels == 0 ) { return ; } long variableTop ; if ( ( options & CollationSettings . ALTERNATE_MASK ) == 0 ) { variableTop = 0 ; } else { // + 1 so that we can use " < " and primary ignorables test out early . variableTop = settings . variableTop + 1 ; } int tertiaryMask = CollationSettings . getTertiaryMask ( options ) ; byte [ ] p234 = new byte [ 3 ] ; SortKeyLevel cases = getSortKeyLevel ( levels , Collation . CASE_LEVEL_FLAG ) ; SortKeyLevel secondaries = getSortKeyLevel ( levels , Collation . SECONDARY_LEVEL_FLAG ) ; SortKeyLevel tertiaries = getSortKeyLevel ( levels , Collation . TERTIARY_LEVEL_FLAG ) ; SortKeyLevel quaternaries = getSortKeyLevel ( levels , Collation . QUATERNARY_LEVEL_FLAG ) ; long prevReorderedPrimary = 0 ; // 0 = = no compression int commonCases = 0 ; int commonSecondaries = 0 ; int commonTertiaries = 0 ; int commonQuaternaries = 0 ; int prevSecondary = 0 ; int secSegmentStart = 0 ; for ( ; ; ) { // No need to keep all CEs in the buffer when we write a sort key . iter . clearCEsIfNoneRemaining ( ) ; long ce = iter . nextCE ( ) ; long p = ce >>> 32 ; if ( p < variableTop && p > Collation . MERGE_SEPARATOR_PRIMARY ) { // Variable CE , shift it to quaternary level . // Ignore all following primary ignorables , and shift further variable CEs . if ( commonQuaternaries != 0 ) { -- commonQuaternaries ; while ( commonQuaternaries >= QUAT_COMMON_MAX_COUNT ) { quaternaries . appendByte ( QUAT_COMMON_MIDDLE ) ; commonQuaternaries -= QUAT_COMMON_MAX_COUNT ; } // Shifted primary weights are lower than the common weight . quaternaries . 
appendByte ( QUAT_COMMON_LOW + commonQuaternaries ) ; commonQuaternaries = 0 ; } do { if ( ( levels & Collation . QUATERNARY_LEVEL_FLAG ) != 0 ) { if ( settings . hasReordering ( ) ) { p = settings . reorder ( p ) ; } if ( ( ( int ) p >>> 24 ) >= QUAT_SHIFTED_LIMIT_BYTE ) { // Prevent shifted primary lead bytes from // overlapping with the common compression range . quaternaries . appendByte ( QUAT_SHIFTED_LIMIT_BYTE ) ; } quaternaries . appendWeight32 ( p ) ; } do { ce = iter . nextCE ( ) ; p = ce >>> 32 ; } while ( p == 0 ) ; } while ( p < variableTop && p > Collation . MERGE_SEPARATOR_PRIMARY ) ; } // ce could be primary ignorable , or NO _ CE , or the merge separator , // or a regular primary CE , but it is not variable . // If ce = = NO _ CE , then write nothing for the primary level but // terminate compression on all levels and then exit the loop . if ( p > Collation . NO_CE_PRIMARY && ( levels & Collation . PRIMARY_LEVEL_FLAG ) != 0 ) { // Test the un - reordered primary for compressibility . boolean isCompressible = compressibleBytes [ ( int ) p >>> 24 ] ; if ( settings . hasReordering ( ) ) { p = settings . reorder ( p ) ; } int p1 = ( int ) p >>> 24 ; if ( ! isCompressible || p1 != ( ( int ) prevReorderedPrimary >>> 24 ) ) { if ( prevReorderedPrimary != 0 ) { if ( p < prevReorderedPrimary ) { // No primary compression terminator // at the end of the level or merged segment . if ( p1 > Collation . MERGE_SEPARATOR_BYTE ) { sink . Append ( Collation . PRIMARY_COMPRESSION_LOW_BYTE ) ; } } else { sink . Append ( Collation . PRIMARY_COMPRESSION_HIGH_BYTE ) ; } } sink . Append ( p1 ) ; if ( isCompressible ) { prevReorderedPrimary = p ; } else { prevReorderedPrimary = 0 ; } } byte p2 = ( byte ) ( p >>> 16 ) ; if ( p2 != 0 ) { p234 [ 0 ] = p2 ; p234 [ 1 ] = ( byte ) ( p >>> 8 ) ; p234 [ 2 ] = ( byte ) p ; sink . Append ( p234 , ( p234 [ 1 ] == 0 ) ? 1 : ( p234 [ 2 ] == 0 ) ? 
2 : 3 ) ; } // Optimization for internalNextSortKeyPart ( ) : // When the primary level overflows we can stop because we need not // calculate ( preflight ) the whole sort key length . if ( ! preflight && sink . Overflowed ( ) ) { // not used in Java - - if ( ! sink . IsOk ( ) ) { // Java porting note : U _ MEMORY _ ALLOCATION _ ERROR is set here in // C implementation . IsOk ( ) in Java always returns true , so this // is a dead code . return ; } } int lower32 = ( int ) ce ; if ( lower32 == 0 ) { continue ; } // completely ignorable , no secondary / case / tertiary / quaternary if ( ( levels & Collation . SECONDARY_LEVEL_FLAG ) != 0 ) { int s = lower32 >>> 16 ; // 16 bits if ( s == 0 ) { // secondary ignorable } else if ( s == Collation . COMMON_WEIGHT16 && ( ( options & CollationSettings . BACKWARD_SECONDARY ) == 0 || p != Collation . MERGE_SEPARATOR_PRIMARY ) ) { // s is a common secondary weight , and // backwards - secondary is off or the ce is not the merge separator . ++ commonSecondaries ; } else if ( ( options & CollationSettings . BACKWARD_SECONDARY ) == 0 ) { if ( commonSecondaries != 0 ) { -- commonSecondaries ; while ( commonSecondaries >= SEC_COMMON_MAX_COUNT ) { secondaries . appendByte ( SEC_COMMON_MIDDLE ) ; commonSecondaries -= SEC_COMMON_MAX_COUNT ; } int b ; if ( s < Collation . COMMON_WEIGHT16 ) { b = SEC_COMMON_LOW + commonSecondaries ; } else { b = SEC_COMMON_HIGH - commonSecondaries ; } secondaries . appendByte ( b ) ; commonSecondaries = 0 ; } secondaries . appendWeight16 ( s ) ; } else { if ( commonSecondaries != 0 ) { -- commonSecondaries ; // Append reverse weights . The level will be re - reversed later . int remainder = commonSecondaries % SEC_COMMON_MAX_COUNT ; int b ; if ( prevSecondary < Collation . COMMON_WEIGHT16 ) { b = SEC_COMMON_LOW + remainder ; } else { b = SEC_COMMON_HIGH - remainder ; } secondaries . appendByte ( b ) ; commonSecondaries -= remainder ; // commonSecondaries is now a multiple of SEC _ COMMON _ MAX _ COUNT . 
while ( commonSecondaries > 0 ) { // same as > = SEC _ COMMON _ MAX _ COUNT secondaries . appendByte ( SEC_COMMON_MIDDLE ) ; commonSecondaries -= SEC_COMMON_MAX_COUNT ; } // commonSecondaries = = 0 } if ( 0 < p && p <= Collation . MERGE_SEPARATOR_PRIMARY ) { // The backwards secondary level compares secondary weights backwards // within segments separated by the merge separator ( U + FFFE ) . byte [ ] secs = secondaries . data ( ) ; int last = secondaries . length ( ) - 1 ; while ( secSegmentStart < last ) { byte b = secs [ secSegmentStart ] ; secs [ secSegmentStart ++ ] = secs [ last ] ; secs [ last -- ] = b ; } secondaries . appendByte ( p == Collation . NO_CE_PRIMARY ? Collation . LEVEL_SEPARATOR_BYTE : Collation . MERGE_SEPARATOR_BYTE ) ; prevSecondary = 0 ; secSegmentStart = secondaries . length ( ) ; } else { secondaries . appendReverseWeight16 ( s ) ; prevSecondary = s ; } } } if ( ( levels & Collation . CASE_LEVEL_FLAG ) != 0 ) { if ( ( CollationSettings . getStrength ( options ) == Collator . PRIMARY ) ? p == 0 : ( lower32 >>> 16 ) == 0 ) { // Primary + caseLevel : Ignore case level weights of primary ignorables . // Otherwise : Ignore case level weights of secondary ignorables . // For details see the comments in the CollationCompare class . } else { int c = ( lower32 >>> 8 ) & 0xff ; // case bits & tertiary lead byte assert ( ( c & 0xc0 ) != 0xc0 ) ; if ( ( c & 0xc0 ) == 0 && c > Collation . LEVEL_SEPARATOR_BYTE ) { ++ commonCases ; } else { if ( ( options & CollationSettings . UPPER_FIRST ) == 0 ) { // lowerFirst : Compress common weights to nibbles 1 . . 7 . . 13 , mixed = 14, // upper = 15. // If there are only common ( = lowest ) weights in the whole level , // then we need not write anything . // Level length differences are handled already on the next - higher level . if ( commonCases != 0 && ( c > Collation . LEVEL_SEPARATOR_BYTE || ! cases . isEmpty ( ) ) ) { -- commonCases ; while ( commonCases >= CASE_LOWER_FIRST_COMMON_MAX_COUNT ) { cases . 
appendByte ( CASE_LOWER_FIRST_COMMON_MIDDLE << 4 ) ; commonCases -= CASE_LOWER_FIRST_COMMON_MAX_COUNT ; } int b ; if ( c <= Collation . LEVEL_SEPARATOR_BYTE ) { b = CASE_LOWER_FIRST_COMMON_LOW + commonCases ; } else { b = CASE_LOWER_FIRST_COMMON_HIGH - commonCases ; } cases . appendByte ( b << 4 ) ; commonCases = 0 ; } if ( c > Collation . LEVEL_SEPARATOR_BYTE ) { c = ( CASE_LOWER_FIRST_COMMON_HIGH + ( c >>> 6 ) ) << 4 ; // 14 or 15 } } else { // upperFirst : Compress common weights to nibbles 3 . . 15 , mixed = 2, // upper = 1. // The compressed common case weights only go up from the " low " value // because with upperFirst the common weight is the highest one . if ( commonCases != 0 ) { -- commonCases ; while ( commonCases >= CASE_UPPER_FIRST_COMMON_MAX_COUNT ) { cases . appendByte ( CASE_UPPER_FIRST_COMMON_LOW << 4 ) ; commonCases -= CASE_UPPER_FIRST_COMMON_MAX_COUNT ; } cases . appendByte ( ( CASE_UPPER_FIRST_COMMON_LOW + commonCases ) << 4 ) ; commonCases = 0 ; } if ( c > Collation . LEVEL_SEPARATOR_BYTE ) { c = ( CASE_UPPER_FIRST_COMMON_LOW - ( c >>> 6 ) ) << 4 ; // 2 or 1 } } // c is a separator byte 01, // or a left - shifted nibble 0x10 , 0x20 , . . . 0xf0. cases . appendByte ( c ) ; } } } if ( ( levels & Collation . TERTIARY_LEVEL_FLAG ) != 0 ) { int t = lower32 & tertiaryMask ; assert ( ( lower32 & 0xc000 ) != 0xc000 ) ; if ( t == Collation . COMMON_WEIGHT16 ) { ++ commonTertiaries ; } else if ( ( tertiaryMask & 0x8000 ) == 0 ) { // Tertiary weights without case bits . // Move lead bytes 06 . . 3F to C6 . . FF for a large common - weight range . if ( commonTertiaries != 0 ) { -- commonTertiaries ; while ( commonTertiaries >= TER_ONLY_COMMON_MAX_COUNT ) { tertiaries . appendByte ( TER_ONLY_COMMON_MIDDLE ) ; commonTertiaries -= TER_ONLY_COMMON_MAX_COUNT ; } int b ; if ( t < Collation . COMMON_WEIGHT16 ) { b = TER_ONLY_COMMON_LOW + commonTertiaries ; } else { b = TER_ONLY_COMMON_HIGH - commonTertiaries ; } tertiaries . 
appendByte ( b ) ; commonTertiaries = 0 ; } if ( t > Collation . COMMON_WEIGHT16 ) { t += 0xc000 ; } tertiaries . appendWeight16 ( t ) ; } else if ( ( options & CollationSettings . UPPER_FIRST ) == 0 ) { // Tertiary weights with caseFirst = lowerFirst . // Move lead bytes 06 . . BF to 46 . . FF for the common - weight range . if ( commonTertiaries != 0 ) { -- commonTertiaries ; while ( commonTertiaries >= TER_LOWER_FIRST_COMMON_MAX_COUNT ) { tertiaries . appendByte ( TER_LOWER_FIRST_COMMON_MIDDLE ) ; commonTertiaries -= TER_LOWER_FIRST_COMMON_MAX_COUNT ; } int b ; if ( t < Collation . COMMON_WEIGHT16 ) { b = TER_LOWER_FIRST_COMMON_LOW + commonTertiaries ; } else { b = TER_LOWER_FIRST_COMMON_HIGH - commonTertiaries ; } tertiaries . appendByte ( b ) ; commonTertiaries = 0 ; } if ( t > Collation . COMMON_WEIGHT16 ) { t += 0x4000 ; } tertiaries . appendWeight16 ( t ) ; } else { // Tertiary weights with caseFirst = upperFirst . // Do not change the artificial uppercase weight of a tertiary CE ( 0.0 . ut ) , // to keep tertiary CEs well - formed . // Their case + tertiary weights must be greater than those of // primary and secondary CEs . // Separator 01 - > 01 ( unchanged ) // Lowercase 02 . . 04 - > 82 . . 84 ( includes uncased ) // Common weight 05 - > 85 . . C5 ( common - weight compression range ) // Lowercase 06 . . 3F - > C6 . . FF // Mixed case 42 . . 7F - > 42 . . 7F // Uppercase 82 . . BF - > 02 . . 3F // Tertiary CE 86 . . BF - > C6 . . FF if ( t <= Collation . NO_CE_WEIGHT16 ) { // Keep separators unchanged . } else if ( ( lower32 >>> 16 ) != 0 ) { // Invert case bits of primary & secondary CEs . t ^= 0xc000 ; if ( t < ( TER_UPPER_FIRST_COMMON_HIGH << 8 ) ) { t -= 0x4000 ; } } else { // Keep uppercase bits of tertiary CEs . assert ( 0x8600 <= t && t <= 0xbfff ) ; t += 0x4000 ; } if ( commonTertiaries != 0 ) { -- commonTertiaries ; while ( commonTertiaries >= TER_UPPER_FIRST_COMMON_MAX_COUNT ) { tertiaries . 
appendByte ( TER_UPPER_FIRST_COMMON_MIDDLE ) ; commonTertiaries -= TER_UPPER_FIRST_COMMON_MAX_COUNT ; } int b ; if ( t < ( TER_UPPER_FIRST_COMMON_LOW << 8 ) ) { b = TER_UPPER_FIRST_COMMON_LOW + commonTertiaries ; } else { b = TER_UPPER_FIRST_COMMON_HIGH - commonTertiaries ; } tertiaries . appendByte ( b ) ; commonTertiaries = 0 ; } tertiaries . appendWeight16 ( t ) ; } } if ( ( levels & Collation . QUATERNARY_LEVEL_FLAG ) != 0 ) { int q = lower32 & 0xffff ; if ( ( q & 0xc0 ) == 0 && q > Collation . NO_CE_WEIGHT16 ) { ++ commonQuaternaries ; } else if ( q == Collation . NO_CE_WEIGHT16 && ( options & CollationSettings . ALTERNATE_MASK ) == 0 && quaternaries . isEmpty ( ) ) { // If alternate = non - ignorable and there are only common quaternary weights , // then we need not write anything . // The only weights greater than the merge separator and less than the common // weight // are shifted primary weights , which are not generated for // alternate = non - ignorable . // There are also exactly as many quaternary weights as tertiary weights , // so level length differences are handled already on tertiary level . // Any above - common quaternary weight will compare greater regardless . quaternaries . appendByte ( Collation . LEVEL_SEPARATOR_BYTE ) ; } else { if ( q == Collation . NO_CE_WEIGHT16 ) { q = Collation . LEVEL_SEPARATOR_BYTE ; } else { q = 0xfc + ( ( q >>> 6 ) & 3 ) ; } if ( commonQuaternaries != 0 ) { -- commonQuaternaries ; while ( commonQuaternaries >= QUAT_COMMON_MAX_COUNT ) { quaternaries . appendByte ( QUAT_COMMON_MIDDLE ) ; commonQuaternaries -= QUAT_COMMON_MAX_COUNT ; } int b ; if ( q < QUAT_COMMON_LOW ) { b = QUAT_COMMON_LOW + commonQuaternaries ; } else { b = QUAT_COMMON_HIGH - commonQuaternaries ; } quaternaries . appendByte ( b ) ; commonQuaternaries = 0 ; } quaternaries . appendByte ( q ) ; } } if ( ( lower32 >>> 24 ) == Collation . LEVEL_SEPARATOR_BYTE ) { break ; } // ce = = NO _ CE } // Append the beyond - primary levels . 
// NOTE(review): below, each buffered level is flushed to the sink (behind a
// LEVEL_SEPARATOR_BYTE), stopping early when callback.needToWrite() declines;
// case nibbles are packed two-per-byte here.
// not used in Java - - boolean ok = true ; if ( ( levels & Collation . SECONDARY_LEVEL_FLAG ) != 0 ) { if ( ! callback . needToWrite ( Collation . SECONDARY_LEVEL ) ) { return ; } // not used in Java - - ok & = secondaries . isOk ( ) ; sink . Append ( Collation . LEVEL_SEPARATOR_BYTE ) ; secondaries . appendTo ( sink ) ; } if ( ( levels & Collation . CASE_LEVEL_FLAG ) != 0 ) { if ( ! callback . needToWrite ( Collation . CASE_LEVEL ) ) { return ; } // not used in Java - - ok & = cases . isOk ( ) ; sink . Append ( Collation . LEVEL_SEPARATOR_BYTE ) ; // Write pairs of nibbles as bytes , except separator bytes as themselves . int length = cases . length ( ) - 1 ; // Ignore the trailing NO _ CE . byte b = 0 ; for ( int i = 0 ; i < length ; ++ i ) { byte c = cases . getAt ( i ) ; assert ( ( c & 0xf ) == 0 && c != 0 ) ; if ( b == 0 ) { b = c ; } else { sink . Append ( b | ( ( c >> 4 ) & 0xf ) ) ; b = 0 ; } } if ( b != 0 ) { sink . Append ( b ) ; } } if ( ( levels & Collation . TERTIARY_LEVEL_FLAG ) != 0 ) { if ( ! callback . needToWrite ( Collation . TERTIARY_LEVEL ) ) { return ; } // not used in Java - - ok & = tertiaries . isOk ( ) ; sink . Append ( Collation . LEVEL_SEPARATOR_BYTE ) ; tertiaries . appendTo ( sink ) ; } if ( ( levels & Collation . QUATERNARY_LEVEL_FLAG ) != 0 ) { if ( ! callback . needToWrite ( Collation . QUATERNARY_LEVEL ) ) { return ; } // not used in Java - - ok & = quaternaries . isOk ( ) ; sink . Append ( Collation . LEVEL_SEPARATOR_BYTE ) ; quaternaries . appendTo ( sink ) ; } // not used in Java - - if ( ! ok | | ! sink . IsOk ( ) ) { // Java porting note : U _ MEMORY _ ALLOCATION _ ERROR is set here in // C implementation . IsOk ( ) in Java always returns true , so this // is a dead code .
public class ServerHandshaker { /** * Fault detected during handshake . */ void handshakeAlert ( byte description ) throws SSLProtocolException { } }
// Convert the alert code to text, log it when handshake debugging is enabled,
// tolerate an expected no_certificate alert, and otherwise fail the handshake.
String alertText = Alerts.alertDescription(description);
if (debug != null && Debug.isOn("handshake")) {
    System.out.println("SSL -- handshake alert: " + alertText);
}
/*
 * A no_certificate alert is acceptable when client authentication was
 * merely *requested*; had it been *required*, this would be fatal.
 * getPeerCertificates() on such a session later throws
 * SSLPeerUnverifiedException.
 */
boolean toleratedNoCert = (description == Alerts.alert_no_certificate)
        && (doClientAuth == SSLEngineImpl.clauth_requested);
if (toleratedNoCert) {
    return;
}
throw new SSLProtocolException("handshake alert: " + alertText);
public class StorageBuilder { /** * Set the app informations repository . This setter must always be called during the build * @ param appInfoRepo The repository to use * @ param appName The application name * @ return The builder */ public StorageBuilder setAppInfoRepository ( AppInfoRepository appInfoRepo , String appName ) { } }
// Record the application name and the repository that serves its info,
// then return this builder so calls can be chained fluently.
this.appName = appName;
this.appInfoRepo = appInfoRepo;
return this;
public class ListMeshesRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListMeshesRequest listMeshesRequest , ProtocolMarshaller protocolMarshaller ) { } }
// Validate the request, then bind its fields (limit, nextToken) to the
// protocol marshaller in declaration order; any marshalling failure is
// re-thrown as an SdkClientException so callers see a uniform error type.
if ( listMeshesRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listMeshesRequest . getLimit ( ) , LIMIT_BINDING ) ; protocolMarshaller . marshall ( listMeshesRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class PMCImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public void eUnset ( int featureID ) { } }
// EMF @generated reflective un-setter (left byte-identical on purpose):
// restores the PM CID scalar feature to its default, clears the triplets
// list feature, and delegates any other feature ID to the superclass.
switch ( featureID ) { case AfplibPackage . PMC__PM_CID : setPMCid ( PM_CID_EDEFAULT ) ; return ; case AfplibPackage . PMC__TRIPLETS : getTriplets ( ) . clear ( ) ; return ; } super . eUnset ( featureID ) ;
public class MimeTypeUtils { /** * Get an appropriate extension for a MIME type . * @ param mimeType * the String MIME type * @ return the appropriate file name extension , or a default extension if * not found . The extension will not have the leading " . " character . */ public static String fileExtensionForMIMEType ( String mimeType ) { } }
// Ensure the MIME-type table is populated, then look up the extension for
// the given type, falling back to the generic "dat" extension when unknown.
loadMappings();
final String extension = mimeTypeToExtensionMap.get(mimeType);
return (extension != null) ? extension : "dat";
public class FragmentListController { /** * Returns a model of fragments - - > List < FragmentBean > , sorted by precedence ( default ) or by * fragment name depending on sort parameter , to be rendered by the jsonView . * @ param req the servlet request , bound via SpringWebMVC to GET method invocations of this * controller . * @ param sortParam PRECEDENCE , NAME , or null . * @ return ModelAndView with a List of FragmentBeans to be rendered by the jsonView . * @ throws ServletException on Exception in underlying attempt to get at the fragments * @ throws AuthorizationException if request is for any user other than a Portal Administrator . * @ throws IllegalArgumentException if sort parameter has an unrecognized value */ @ RequestMapping ( method = RequestMethod . GET ) public ModelAndView listFragments ( HttpServletRequest req , @ RequestParam ( value = "sort" , required = false ) String sortParam ) throws ServletException { } }
// NOTE(review): admin-only endpoint. Rejects non-admin callers with
// AuthorizationException, snapshots DLM fragment layout copies (wrapping any
// failure in ServletException), builds one FragmentBean per configured
// fragment -- including portlet titles resolved via XPath over the layout
// copy when one exists -- sorts by the 'sort' request parameter
// (Sort.valueOf throws IllegalArgumentException for unknown values, per the
// javadoc), and hands the list to the jsonView. Left byte-identical.
// Verify that the user is allowed to use this service IPerson user = personManager . getPerson ( req ) ; if ( ! AdminEvaluator . isAdmin ( user ) ) { throw new AuthorizationException ( "User " + user . getUserName ( ) + " not an administrator." ) ; } Map < String , Document > fragmentLayoutMap = null ; if ( userLayoutStore != null ) { try { fragmentLayoutMap = userLayoutStore . getFragmentLayoutCopies ( ) ; } catch ( Exception e ) { String msg = "Failed to access fragment layouts" ; log . error ( msg , e ) ; throw new ServletException ( msg , e ) ; } } List < FragmentBean > fragments = new ArrayList < FragmentBean > ( ) ; for ( FragmentDefinition frag : dlmConfig . getFragments ( ) ) { Document layout = fragmentLayoutMap != null ? fragmentLayoutMap . get ( frag . getOwnerId ( ) ) : null ; List < String > portlets = null ; if ( layout != null ) { portlets = new ArrayList < String > ( ) ; NodeList channelFNames = this . xpathOperations . evaluate ( CHANNEL_FNAME_XPATH , layout , XPathConstants . NODESET ) ; for ( int i = 0 ; i < channelFNames . getLength ( ) ; i ++ ) { String fname = channelFNames . item ( i ) . getTextContent ( ) ; IPortletDefinition pDef = portletRegistry . getPortletDefinitionByFname ( fname ) ; if ( null != pDef ) { portlets . add ( pDef . getTitle ( ) ) ; } } } fragments . add ( FragmentBean . create ( frag , portlets ) ) ; } // Determine & follow sorting preference . . . Sort sort = DEFAULT_SORT ; if ( sortParam != null ) { sort = Sort . valueOf ( sortParam ) ; } Collections . sort ( fragments , sort . getComparator ( ) ) ; return new ModelAndView ( "jsonView" , "fragments" , fragments ) ;
public class EsUtil { /** * Remove any index that doesn ' t have an alias and starts with * the given prefix * @ param prefixes Ignore indexes that have names that don ' t start * with any of these * @ return list of purged indexes * @ throws IndexException */ public List < String > purgeIndexes ( final Set < String > prefixes ) throws IndexException { } }
// Collect candidate indexes -- those whose name carries one of the given
// prefixes and that no alias still points to -- then delete them in one call
// and report which ones were removed.
final List<String> purged = new ArrayList<>();
final Set<IndexInfo> indexes = getIndexInfo();
if (Util.isEmpty(indexes)) {
    return purged;
}
for (final IndexInfo info : indexes) {
    final String indexName = info.getIndexName();
    // Skip indexes outside the managed prefixes.
    if (!hasPrefix(indexName, prefixes)) {
        continue;
    }
    // Never delete an index that an alias still references.
    if (!Util.isEmpty(info.getAliases())) {
        continue;
    }
    purged.add(indexName);
}
deleteIndexes(purged);
return purged;
public class GanttDesignerReader { /** * Read predecessors from a Gantt Designer file . * @ param gantt Gantt Designer file */ private void processPredecessors ( Gantt gantt ) { } }
// For every Gantt Designer task that lists predecessors, resolve each
// semicolon-separated predecessor ID and attach a finish-start relation
// carrying the task's lag value (getL).
for (Gantt.Tasks.Task ganttTask : gantt.getTasks().getTask()) {
    String predecessorList = ganttTask.getP();
    if (predecessorList == null || predecessorList.isEmpty()) {
        continue;
    }
    // The task's ID doubles as its WBS key into the task map.
    Task successor = m_taskMap.get(ganttTask.getID());
    for (String predecessorID : predecessorList.split(";")) {
        Task predecessorTask = m_projectFile.getTaskByID(Integer.valueOf(predecessorID));
        successor.addPredecessor(predecessorTask, RelationType.FINISH_START, ganttTask.getL());
    }
}
public class HttpServer { /** * Set the min , max number of worker threads ( simultaneous connections ) . */ public void setThreads ( int min , int max ) { } }
// The embedded Jetty server is backed by a QueuedThreadPool; adjust its
// worker-thread bounds in place (each worker services one connection).
QueuedThreadPool workerPool = (QueuedThreadPool) webServer.getThreadPool();
workerPool.setMinThreads(min);
workerPool.setMaxThreads(max);
public class LongTupleDistanceFunctions { /** * Computes the Chebyshev distance between the given tuples * when they are interpreted as points of a sphere with the specified * size ( that is , circumference ) . * @ param t0 The first tuple * @ param t1 The second tuple * @ param size The size of the sphere * @ return The distance * @ throws IllegalArgumentException If the given tuples do not * have the same { @ link Tuple # getSize ( ) size } */ static long computeWrappedChebyshev ( LongTuple t0 , LongTuple t1 , LongTuple size ) { } }
// Chebyshev (L-infinity) metric on a torus: the result is the largest
// absolute per-component wrapped distance between the two tuples.
// Throws IllegalArgumentException when the tuples differ in size.
Utils.checkForEqualSize(t0, t1);
long result = 0;
final int n = t0.getSize();
for (int i = 0; i < n; i++) {
    final long component =
        Math.abs(MathUtils.wrappedDistance(t0.get(i), t1.get(i), size.get(i)));
    if (component > result) {
        result = component;
    }
}
return result;
public class OmsLeastCostFlowDirections { /** * Checks if the node is ok . * < p > A node is ok if : < / p > * < ul > * < li > if the node is valid ( ! = null in surrounding ) < / li > * < li > if the node has not been processed already ( ! . isMarked ) < / li > * < / ul > */ private boolean nodeOk ( GridNode node ) { } }
// A node is eligible for processing only when it exists (non-null) and its
// cell has not already been marked in the assigned-flows map.
if (node == null) {
    return false;
}
return !assignedFlowsMap.isMarked(node.col, node.row);
public class FeatureGen { /** * Next feature . * @ return the feature */ public Feature nextFeature ( ) { } }
// S-features are consumed before E-features; when both streams are
// exhausted, null is returned.
if (hasNextSFeature()) {
    return nextSFeature();
}
if (hasNextEFeature()) {
    return nextEFeature();
}
return null;
public class FetchRequest { /** * Read a fetch request from buffer ( socket data ) * @ param buffer the buffer data * @ return a fetch request * @ throws IllegalArgumentException while error data format ( no topic ) */ public static FetchRequest readFrom ( ByteBuffer buffer ) { } }
// Decode the wire format in order: topic (length-prefixed short string),
// partition (int32), offset (int64) and max fetch size (int32).
final String topicName = Utils.readShortString(buffer);
final int partitionId = buffer.getInt();
final long startOffset = buffer.getLong();
final int maxSize = buffer.getInt();
return new FetchRequest(topicName, partitionId, startOffset, maxSize);
public class EventStreamReaderImpl { /** * If the last call was a checkpoint updates the reader group state to indicate it has completed * and releases segments . * If a checkpoint is pending its identifier is returned . ( The checkpoint will be considered * complete when this is invoked again . ) * Otherwise it checks for any segments that need to be acquired . * Segments can only be released on the next read call following a checkpoint because this is * the only point we can be sure the caller has persisted their position , which is needed to be * sure the segment is located in the position of one of the readers and not left out because it * was moved while the checkpoint was occurring , while at the same time guaranteeing that * another reader will not see events following the ones read by this reader until after they * have been persisted . */ @ GuardedBy ( "readers" ) private String updateGroupStateIfNeeded ( ) throws ReaderNotInReaderGroupException { } }
// Checkpoint state machine (left byte-identical; ordering is critical, see
// the method javadoc): first complete any checkpoint recorded on the
// previous call and release segments; then, if a new checkpoint is pending,
// either complete it immediately when silent (no Checkpoint event emitted)
// or remember it in atCheckpoint and return its id; otherwise clear the
// marker and try to acquire newly assigned segments.
if ( atCheckpoint != null ) { groupState . checkpoint ( atCheckpoint , getPosition ( ) ) ; releaseSegmentsIfNeeded ( ) ; } String checkpoint = groupState . getCheckpoint ( ) ; if ( checkpoint != null ) { log . info ( "{} at checkpoint {}" , this , checkpoint ) ; if ( groupState . isCheckpointSilent ( checkpoint ) ) { // Checkpoint the reader immediately with the current position . Checkpoint Event is not generated . groupState . checkpoint ( checkpoint , getPosition ( ) ) ; if ( atCheckpoint != null ) { // In case the silent checkpoint held up releasing segments releaseSegmentsIfNeeded ( ) ; atCheckpoint = null ; } return null ; } else { atCheckpoint = checkpoint ; return atCheckpoint ; } } else { atCheckpoint = null ; acquireSegmentsIfNeeded ( ) ; return null ; }
public class AWSLicenseManagerClient { /** * Returns the license configuration for a resource . * @ param listLicenseSpecificationsForResourceRequest * @ return Result of the ListLicenseSpecificationsForResource operation returned by the service . * @ throws InvalidParameterValueException * One or more parameter values are not valid . * @ throws ServerInternalException * The server experienced an internal error . Try again . * @ throws AuthorizationException * The AWS user account does not have permission to perform the action . Check the IAM policy associated with * this account . * @ throws AccessDeniedException * Access to resource denied . * @ throws RateLimitExceededException * Too many requests have been submitted . Try again after a brief wait . * @ sample AWSLicenseManager . ListLicenseSpecificationsForResource * @ see < a * href = " http : / / docs . aws . amazon . com / goto / WebAPI / license - manager - 2018-08-01 / ListLicenseSpecificationsForResource " * target = " _ top " > AWS API Documentation < / a > */ @ Override public ListLicenseSpecificationsForResourceResult listLicenseSpecificationsForResource ( ListLicenseSpecificationsForResourceRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeListLicenseSpecificationsForResource ( request ) ;
public class MultigetSliceIterator { /** * This method prepares keys for execution , determines whether to use * parallelism or not to query Cassandra , executes the query and collects the result */ private void runQuery ( ) { } }
// NOTE(review): left byte-identical -- thread-pool fan-out plus shared
// mutable state (queryResult, rowKeysIndex) make a restyle risky. With
// threadCount > 1, every key batch is submitted to a fixed pool, all
// futures are joined (errors wrapped in HectorException), and rowKeysIndex
// is jumped to the end so hasNext() stops issuing queries; otherwise one
// batch is run synchronously and the index advances by one. Results are
// then flattened under the queryResult lock into the iterator the caller
// consumes.
if ( this . rowKeysList != null && this . rowKeysList . size ( ) > 0 ) { // Check if there are rowkeys to query Cassandra if ( threadCount > 1 ) { // When thread count greater than 1 enables parallelism , use threads to query Cassandra // multiple times ExecutorService executor = Executors . newFixedThreadPool ( threadCount ) ; List < Future < ? > > futures = new LinkedList < Future < ? > > ( ) ; for ( final List < K > param : this . rowKeysList ) { Future < ? > future = executor . submit ( new Runnable ( ) { public void run ( ) { // Query Cassandra with the input keys provided runMultigetSliceQuery ( param ) ; } } ) ; futures . add ( future ) ; } for ( Future < ? > f : futures ) { // iterate through thread results try { f . get ( ) ; // wait for thread to complete } catch ( InterruptedException e ) { throw new HectorException ( "Failed to retrieve rows from Cassandra." , e ) ; } catch ( ExecutionException e ) { throw new HectorException ( "Failed to retrieve rows from Cassandra." , e ) ; } } // Safe to shutdown the threadpool and release the resources executor . shutdown ( ) ; // set the rowKeysIndex to size of input keys so as no further calls // will be made to Cassandra . // This ensures iterator . hasNext ( ) returns false when all keys are // queried rowKeysIndex = this . rowKeysList . size ( ) ; } else { // When thread count less than or equal to 1 ( 0 or negative ) disables // parallelism , set of ( maxRowCountPerQuery ) keys queries // Cassandra at a time runMultigetSliceQuery ( this . rowKeysList . get ( rowKeysIndex ) ) ; // Increment the rowKeyIndex instead of setting it to this . rowKeysList . size ( ) ; rowKeysIndex ++ ; } } ArrayList < Row < K , N , V > > resultList = new ArrayList < Row < K , N , V > > ( queryResult . size ( ) ) ; synchronized ( queryResult ) { // Ensure that runMultigetSliceQuery ( ) method call updates global // variable queryResult with query result ( if exists ) if ( queryResult != null && queryResult . 
size ( ) > 0 ) { for ( Rows < K , N , V > rows : queryResult ) { if ( rows != null && rows . getCount ( ) > 0 ) { for ( Row < K , N , V > row : rows ) { // prepare List < Row < K , N , V > > to return // the iterator of < Row < K , N , V > > to the caller resultList . add ( row ) ; } } } } } // assign global iterator with the result of multigetSliceQuery iterator = resultList . iterator ( ) ;