signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class Joining { /** * Returns a { @ code Collector } which behaves like this collector , but uses
* the specified ellipsis { @ code CharSequence } instead of default
* { @ code " . . . " } when the string limit ( if specified ) is reached .
* @ param ellipsis the sequence of characters to be used at the end of the
* joined result to designate that not all of the input elements are
* joined due to the specified string length restriction .
* @ return a new { @ code Collector } which will use the specified ellipsis
* instead of current setting . */
public Joining ellipsis ( CharSequence ellipsis ) { } } | return new Joining ( delimiter , ellipsis . toString ( ) , prefix , suffix , cutStrategy , lenStrategy , maxLength ) ; |
public class JSONObject { /** * Get the long value associated with a key . If the number value is too
* long for a long , it will be clipped .
* @ param key A key string .
* @ return The long value .
* @ throws JSONException if the key is not found or if the value cannot
* be converted to a long . */
public long getLong ( String key ) throws JSONException { } } | Object o = get ( key ) ; return o instanceof Number ? ( ( Number ) o ) . longValue ( ) : ( long ) getDouble ( key ) ; |
public class MultiPath { /** * Appends all paths from another multipath .
* @ param src
* The multipath to append to this multipath .
* @ param bReversePaths
* TRUE if the multipath is added should be added with its paths
* reversed . */
public void add ( MultiPath src , boolean bReversePaths ) { } } | m_impl . add ( ( MultiPathImpl ) src . _getImpl ( ) , bReversePaths ) ; |
public class AbstractConfigFile { /** * Loads xml - content from url / file
* @ throws IOException
* @ throws JAXBException */
public void load ( ) throws IOException , JAXBException { } } | Unmarshaller unmarshaller = jaxbCtx . createUnmarshaller ( ) ; content = ( JAXBElement < T > ) unmarshaller . unmarshal ( url ) ; |
public class QueueContainer { /** * Removes a reserved item with the given { @ code itemId } .
* @ param itemId the ID of the reserved item to be removed
* @ return if an item was reserved with the given { @ code itemId } */
public boolean txnRollbackOfferBackup ( long itemId ) { } } | QueueItem item = txMap . remove ( itemId ) ; if ( item == null ) { logger . warning ( "txnRollbackOffer operation-> No txn item for itemId: " + itemId ) ; return false ; } return true ; |
public class AbstractConnectionListener { /** * { @ inheritDoc } */
public void clearConnections ( ) { } } | if ( Tracer . isEnabled ( ) ) { for ( Object c : connectionHandles ) Tracer . returnConnection ( cm . getPool ( ) . getConfiguration ( ) . getId ( ) , mcp , this , c ) ; } if ( connectionTraces != null ) connectionTraces . clear ( ) ; connectionHandles . clear ( ) ; |
public class Authorizations { /** * Sets the roles assigned to the Account .
* @ param roles the roles assigned to the Account . */
public Authorizations setRoles ( Set < Role > roles ) { } } | this . roles . clear ( ) ; this . aggregatePermissions = null ; addRoles ( roles ) ; return this ; |
public class ConfusionMatrix { /** * Return recall for single label
* @ param label label
* @ return double */
public double getRecallForLabel ( String label ) { } } | int fnAndTp = 0 ; double recall = 0 ; int tp = 0 ; if ( map . containsKey ( label ) && map . get ( label ) . containsKey ( label ) ) { tp = this . map . get ( label ) . get ( label ) ; fnAndTp = getRowSum ( label ) ; } if ( fnAndTp > 0 ) { recall = ( double ) tp / ( double ) ( fnAndTp ) ; } return recall ; |
public class Tile { /** * Defines the number format that will be used to format the value
* in the gauge ( NOT USED AT THE MOMENT )
* @ param FORMAT */
public void setNumberFormat ( final NumberFormat FORMAT ) { } } | if ( null == numberFormat ) { _numberFormat = null == FORMAT ? NumberFormat . getInstance ( getLocale ( ) ) : FORMAT ; fireTileEvent ( RESIZE_EVENT ) ; } else { numberFormat . set ( FORMAT ) ; } |
public class DiscountCurveInterpolation { /** * Create a discount curve from forwards given by a LIBORMonteCarloModel . If the model uses multiple curves , return its discount curve .
* @ param forwardCurveName name of the forward curve .
* @ param model Monte Carlo model providing the forwards .
* @ param startTime time at which the curve starts , i . e . zero time for the curve
* @ return a discount curve from forwards given by a LIBORMonteCarloModel .
* @ throws CalculationException Thrown if the model failed to provide the forward rates . */
public static DiscountCurveInterface createDiscountCurveFromMonteCarloLiborModel ( String forwardCurveName , LIBORModelMonteCarloSimulationModel model , double startTime ) throws CalculationException { } } | // Check if the LMM uses a discount curve which is created from a forward curve
if ( model . getModel ( ) . getDiscountCurve ( ) == null || model . getModel ( ) . getDiscountCurve ( ) . getName ( ) . toLowerCase ( ) . contains ( "DiscountCurveFromForwardCurve" . toLowerCase ( ) ) ) { return new DiscountCurveFromForwardCurve ( ForwardCurveInterpolation . createForwardCurveFromMonteCarloLiborModel ( forwardCurveName , model , startTime ) ) ; } else { // i . e . forward curve of Libor Model not OIS . In this case return the OIS curve .
// Only at startTime 0!
return ( DiscountCurveInterface ) model . getModel ( ) . getDiscountCurve ( ) ; } |
public class ELParser { /** * 149852 - Ignore this generated FFDC because we expect it */
@ FFDCIgnore ( { } } | LookaheadSuccess . class } ) private boolean jj_2_8 ( int xla ) { jj_la = xla ; jj_lastpos = jj_scanpos = token ; try { return ! jj_3_8 ( ) ; } catch ( LookaheadSuccess ls ) { return true ; } finally { jj_save ( 7 , xla ) ; } |
public class ApiOvhCloud { /** * Get planned migrations
* REST : GET / cloud / project / { serviceName } / migration
* @ param serviceName [ required ] Service name
* API beta */
public ArrayList < OvhMigration > project_serviceName_migration_GET ( String serviceName ) throws IOException { } } | String qPath = "/cloud/project/{serviceName}/migration" ; StringBuilder sb = path ( qPath , serviceName ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , t10 ) ; |
public class TenantService { /** * Returns true if a tenant with the specified name exists , otherwise false .
* @ since 4.3 */
public boolean nameExists ( final String name ) { } } | boolean rslt = false ; // default
try { final ITenant tenant = this . tenantDao . getTenantByName ( name ) ; rslt = tenant != null ; } catch ( IllegalArgumentException iae ) { // This exception is completely fine ; it simply
// means there is no tenant with this name .
rslt = false ; } return rslt ; |
public class AttachDiskRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( AttachDiskRequest attachDiskRequest , ProtocolMarshaller protocolMarshaller ) { } } | if ( attachDiskRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( attachDiskRequest . getDiskName ( ) , DISKNAME_BINDING ) ; protocolMarshaller . marshall ( attachDiskRequest . getInstanceName ( ) , INSTANCENAME_BINDING ) ; protocolMarshaller . marshall ( attachDiskRequest . getDiskPath ( ) , DISKPATH_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class NetworkUtils { /** * Set the learning rate for a single layer in the network to the specified value . Note that if any learning rate
* schedules are currently present , these will be removed in favor of the new ( fixed ) learning rate . < br >
* < br >
* < b > Note < / b > : < i > This method not free from a performance point of view < / i > : a proper learning rate schedule
* should be used in preference to calling this method at every iteration . Note also that
* { @ link # setLearningRate ( MultiLayerNetwork , double ) } should also be used in preference , when all layers need to be set to a new LR
* @ param layerNumber Number of the layer to set the LR for
* @ param newLr New learning rate for a single layers */
public static void setLearningRate ( MultiLayerNetwork net , int layerNumber , double newLr ) { } } | setLearningRate ( net , layerNumber , newLr , null , true ) ; |
public class QEmuIdAllocator { /** * Returns a comma - separated list of new PCI addresses .
* @ param count How many addresses to return .
* @ param separator The separator to use between addresses .
* @ return A separated String of new PCI addresses . */
@ Nonnull public String newPciAddresses ( @ Nonnegative int count , @ Nonnull String separator ) { } } | StringBuilder buf = new StringBuilder ( ) ; for ( int i = 0 ; i < count ; i ++ ) { if ( i > 0 ) buf . append ( separator ) ; buf . append ( newPciAddress ( ) ) ; } return buf . toString ( ) ; |
public class AppServiceEnvironmentsInner { /** * Create or update an App Service Environment .
* Create or update an App Service Environment .
* @ param resourceGroupName Name of the resource group to which the resource belongs .
* @ param name Name of the App Service Environment .
* @ param hostingEnvironmentEnvelope Configuration details of the App Service Environment .
* @ param serviceCallback the async ServiceCallback to handle successful and failed responses .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the { @ link ServiceFuture } object */
public ServiceFuture < AppServiceEnvironmentResourceInner > updateAsync ( String resourceGroupName , String name , AppServiceEnvironmentPatchResource hostingEnvironmentEnvelope , final ServiceCallback < AppServiceEnvironmentResourceInner > serviceCallback ) { } } | return ServiceFuture . fromResponse ( updateWithServiceResponseAsync ( resourceGroupName , name , hostingEnvironmentEnvelope ) , serviceCallback ) ; |
public class AbcGrammar { /** * chord - or - text : : = % x22 ( chord / text - expression )
* * ( chord - newline ( chord / text - expression ) ) % x22 < p >
* < tt > " . . " < / tt > */
Rule ChordOrText ( ) { } } | return Sequence ( String ( "\"" ) , OptionalS ( FirstOfS ( Chord ( ) , TextExpression ( ) ) ) , ZeroOrMoreS ( SequenceS ( ChordOrTextNewline ( ) , FirstOfS ( Chord ( ) , TextExpression ( ) ) ) ) , String ( "\"" ) ) . label ( ChordOrText ) ; |
public class CompoundActivity { /** * Adds an activity to the list of sub - activities .
* @ param activity
* the sub - activity to add .
* @ return
* the activity itself , to enable method chaining . */
public CompoundActivity addActivity ( Activity activity ) { } } | if ( activity != null ) { logger . trace ( "adding sub-activity '{}' to list" , activity . getId ( ) ) ; activities . add ( activity ) ; } return this ; |
public class PdfDocument { /** * Implements an action in an area .
* @ param action the < CODE > PdfAction < / CODE >
* @ param llx the lower left x corner of the activation area
* @ param lly the lower left y corner of the activation area
* @ param urx the upper right x corner of the activation area
* @ param ury the upper right y corner of the activation area */
void setAction ( PdfAction action , float llx , float lly , float urx , float ury ) { } } | addAnnotation ( new PdfAnnotation ( writer , llx , lly , urx , ury , action ) ) ; |
public class ClientProxyImpl { /** * For JDK 9 + , we could use MethodHandles . privateLookupIn , which is not
* available in JDK 8. */
private static Object invokeDefaultMethodUsingPrivateLookup ( Class < ? > declaringClass , Object o , Method m , Object [ ] params ) throws WrappedException , NoSuchMethodException { } } | try { final Method privateLookup = MethodHandles . class . getDeclaredMethod ( "privateLookupIn" , Class . class , MethodHandles . Lookup . class ) ; return ( ( MethodHandles . Lookup ) privateLookup . invoke ( null , declaringClass , MethodHandles . lookup ( ) ) ) . unreflectSpecial ( m , declaringClass ) . bindTo ( o ) . invokeWithArguments ( params ) ; } catch ( NoSuchMethodException t ) { throw t ; } catch ( Throwable t ) { throw new WrappedException ( t ) ; } |
public class MetricResolver { /** * http : / / bugs . java . com / view _ bug . do ? bug _ id = 6294399 */
private String memberName ( Member member ) { } } | if ( member instanceof Constructor ) return member . getDeclaringClass ( ) . getSimpleName ( ) ; else return member . getName ( ) ; |
public class CheerleaderClient { /** * Define the log policy .
* Note : some log configuration can increase memory foot print and / or reduce the performance .
* Use them with caution .
* { @ link CheerleaderClient # LOG _ NONE }
* { @ link CheerleaderClient # LOG _ RETROFIT }
* { @ link CheerleaderClient # LOG _ OFFLINER }
* Different log policies can be combine :
* < pre >
* simpleSoundCloud . setLog ( SimpleSoundCloud . LOG _ OFFLINER | SimpleSoundCloud . LOG _ RETROFIT ) ;
* < / pre >
* @ param logLevel log policy . */
private void setLog ( int logLevel ) { } } | checkState ( ) ; if ( ( logLevel & LOG_RETROFIT ) != 0 ) { mHttpLoggingInterceptor . setLevel ( HttpLoggingInterceptor . Level . BODY ) ; } else { mHttpLoggingInterceptor . setLevel ( HttpLoggingInterceptor . Level . NONE ) ; } mOffliner . debug ( ( logLevel & LOG_OFFLINER ) != 0 ) ; |
public class AbstractOperationHandler { /** * Obtains a map of resource Attributes .
* @ param context
* the message context
* @ return map of resource Attributes
* @ throws OperationHandlerException
* @ throws URISyntaxException */
protected Map < URI , AttributeValue > getResources ( SOAPMessageContext context ) throws OperationHandlerException , URISyntaxException { } } | Object oMap = null ; String pid = null ; try { oMap = getSOAPRequestObjects ( context ) ; logger . debug ( "Retrieved SOAP Request Objects" ) ; } catch ( SoapFault af ) { logger . error ( "Error obtaining SOAP Request Objects" , af ) ; throw new OperationHandlerException ( "Error obtaining SOAP Request Objects" , af ) ; } try { pid = ( String ) callGetter ( "getPid" , oMap ) ; } catch ( Exception e ) { logger . error ( "Error obtaining parameters" , e ) ; throw new OperationHandlerException ( "Error obtaining parameters." , e ) ; } Map < URI , AttributeValue > resAttr = ResourceAttributes . getResources ( pid ) ; logger . debug ( "Extracted SOAP Request Objects" ) ; return resAttr ; |
public class DescribeByoipCidrsResult { /** * Information about your address ranges .
* @ return Information about your address ranges . */
public java . util . List < ByoipCidr > getByoipCidrs ( ) { } } | if ( byoipCidrs == null ) { byoipCidrs = new com . amazonaws . internal . SdkInternalList < ByoipCidr > ( ) ; } return byoipCidrs ; |
public class ZipFileHandleImpl { /** * Answer an input stream for an entry of a zip file . When the entry is a
* class entry which has 8K or fewer bytes , read all of the entry bytes immediately
* and cache the bytes in this handle . Subsequent input stream requests which
* locate cached bytes will answer a stream on those bytes .
* @ param useZipFile The zip file for which to answer an input stream
* @ param zipEntry The zip entry for which to answer the input stream .
* @ return An input stream on the bytes of the entry . Null for an directory
* type entry , or an entry which has zero bytes .
* @ throws IOException Thrown if the entry bytes could not be read . */
@ Override @ Trivial public InputStream getInputStream ( ZipFile useZipFile , ZipEntry zipEntry ) throws IOException { } } | String methodName = "getInputStream" ; String entryName = zipEntry . getName ( ) ; if ( zipEntry . isDirectory ( ) ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { debug ( methodName , "Entry [ " + entryName + " ] [ null ] (Not using cache: Directory entry)" ) ; } return null ; } long entrySize = zipEntry . getSize ( ) ; if ( entrySize == 0 ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { debug ( methodName , "Entry [ " + entryName + " ] [ empty stream ] (Not using cache: Empty entry)" ) ; } return EMPTY_STREAM ; } boolean doNotCache ; String doNotCacheReason ; if ( zipEntries == null ) { // No entry cache .
doNotCache = true ; doNotCacheReason = "Do not cache: Entry cache disabled" ; } else if ( entrySize > ZipCachingProperties . ZIP_CACHE_ENTRY_LIMIT ) { // Too big for the cache
doNotCache = true ; doNotCacheReason = "Do not cache: Too big" ; } else if ( entryName . equals ( "META-INF/MANIFEST.MF" ) ) { doNotCache = false ; doNotCacheReason = "Cache META-INF/MANIFEST.MF" ; } else if ( entryName . endsWith ( ".class" ) ) { doNotCache = false ; doNotCacheReason = "Cache .class resources" ; } else { doNotCache = true ; doNotCacheReason = "Do not cache: Not manifest or class resource" ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { debug ( methodName , "Entry [ " + entryName + " ] [ non-null ] [ " + doNotCacheReason + " ]" ) ; } if ( doNotCache ) { return useZipFile . getInputStream ( zipEntry ) ; // throws IOException
} // The addition of " : : : " * seems * to allow for non - unique cache keys . Duplicate
// keys * are not * possible because the CRC and last - modified values are numeric .
// Duplicate keys would be possible of the CRC or last - modified values , when
// converted to strings , could contain " : : : " character sequences .
String entryCacheKey = entryName + ":::" + Long . toString ( zipEntry . getCrc ( ) ) + ":::" + Long . toString ( getLastModified ( ) ) ; // Note that only the individual gets and puts are protected .
// That means that simultaneous get misses are possible , which
// will result in double reads and double puts .
// That is unfortunate , but is harmless .
// The simultaneous puts are allowed because they should be very
// rare .
// They are allowed because blocking entry gets while waiting for
// reads could create large delays .
byte [ ] entryBytes ; synchronized ( zipEntriesLock ) { entryBytes = zipEntries . get ( entryCacheKey ) ; } if ( entryBytes == null ) { InputStream inputStream = useZipFile . getInputStream ( zipEntry ) ; // throws IOException
try { entryBytes = read ( inputStream , ( int ) entrySize , entryName ) ; // throws IOException
} finally { inputStream . close ( ) ; // throws IOException
} synchronized ( zipEntriesLock ) { zipEntries . put ( entryCacheKey , entryBytes ) ; } } return new ByteArrayInputStream ( entryBytes ) ; |
public class BitReserve { /** * Rewind N bytes in Stream . */
public void rewindNbytes ( int N ) { } } | int bits = ( N << 3 ) ; totbit -= bits ; buf_byte_idx -= bits ; if ( buf_byte_idx < 0 ) buf_byte_idx += BUFSIZE ; |
public class HCSWFObject { /** * Add a < code > param < / code > tag to the created < code > object < / code > tag
* @ param sName
* Parameter name
* @ param sValue
* Parameter value
* @ return this */
@ Nonnull public final HCSWFObject addObjectParam ( @ Nonnull final String sName , final String sValue ) { } } | if ( ! JSMarshaller . isJSIdentifier ( sName ) ) throw new IllegalArgumentException ( "The name '" + sName + "' is not a legal JS identifier!" ) ; if ( m_aObjectParams == null ) m_aObjectParams = new CommonsLinkedHashMap < > ( ) ; m_aObjectParams . put ( sName , sValue ) ; return this ; |
public class DevicesManagementApi { /** * Deletes a device & # 39 ; s properties .
* Deletes a device & # 39 ; s properties .
* @ param did Device ID . ( required )
* @ return MetadataEnvelope
* @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */
public MetadataEnvelope deleteServerProperties ( String did ) throws ApiException { } } | ApiResponse < MetadataEnvelope > resp = deleteServerPropertiesWithHttpInfo ( did ) ; return resp . getData ( ) ; |
public class QrcodeAPI { /** * 下载二维码
* @ param ticket 内部自动 UrlEncode
* @ return BufferedImage */
public static BufferedImage showqrcode ( String ticket ) { } } | HttpUriRequest httpUriRequest = RequestBuilder . get ( ) . setUri ( MP_URI + "/cgi-bin/showqrcode" ) . addParameter ( "ticket" , ticket ) . build ( ) ; CloseableHttpResponse httpResponse = LocalHttpClient . execute ( httpUriRequest ) ; return getImage ( httpResponse ) ; |
public class LoadBalancersInner { /** * Creates or updates a load balancer .
* @ param resourceGroupName The name of the resource group .
* @ param loadBalancerName The name of the load balancer .
* @ param parameters Parameters supplied to the create or update load balancer operation .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable for the request */
public Observable < LoadBalancerInner > createOrUpdateAsync ( String resourceGroupName , String loadBalancerName , LoadBalancerInner parameters ) { } } | return createOrUpdateWithServiceResponseAsync ( resourceGroupName , loadBalancerName , parameters ) . map ( new Func1 < ServiceResponse < LoadBalancerInner > , LoadBalancerInner > ( ) { @ Override public LoadBalancerInner call ( ServiceResponse < LoadBalancerInner > response ) { return response . body ( ) ; } } ) ; |
public class WorkerNetAddress { /** * < code > optional string domainSocketPath = 5 ; < / code > */
public java . lang . String getDomainSocketPath ( ) { } } | java . lang . Object ref = domainSocketPath_ ; if ( ref instanceof java . lang . String ) { return ( java . lang . String ) ref ; } else { com . google . protobuf . ByteString bs = ( com . google . protobuf . ByteString ) ref ; java . lang . String s = bs . toStringUtf8 ( ) ; if ( bs . isValidUtf8 ( ) ) { domainSocketPath_ = s ; } return s ; } |
public class ICUHumanize { /** * Wraps the given operation on a context with the specified locale .
* @ param operation
* Operation to be performed
* @ param locale
* Target locale
* @ return Result of the operation */
private static < T > T withinLocale ( final Callable < T > operation , final Locale locale ) { } } | DefaultICUContext ctx = context . get ( ) ; Locale oldLocale = ctx . getLocale ( ) ; try { ctx . setLocale ( locale ) ; return operation . call ( ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } finally { ctx . setLocale ( oldLocale ) ; context . set ( ctx ) ; } |
public class BuiltinProducersConfig { /** * this method is for unit - test . */
public void disableAll ( ) { } } | javaMemoryProducers = javaMemoryPoolProducers = javaThreadingProducers = osProducer = runtimeProducer = mbeanProducers = gcProducer = errorProducer = false ; |
public class CPDefinitionOptionValueRelUtil { /** * Returns the first cp definition option value rel in the ordered set where key = & # 63 ; .
* @ param key the key
* @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > )
* @ return the first matching cp definition option value rel , or < code > null < / code > if a matching cp definition option value rel could not be found */
public static CPDefinitionOptionValueRel fetchByKey_First ( String key , OrderByComparator < CPDefinitionOptionValueRel > orderByComparator ) { } } | return getPersistence ( ) . fetchByKey_First ( key , orderByComparator ) ; |
public class EventHandler { /** * Function to remove old Snowflake Dump files to make room for new ones .
* @ param deleteOldest if true , always deletes the oldest file found if max
* number of dump files has been reached */
protected void cleanupSfDumps ( boolean deleteOldest ) { } } | // Check what the maximum number of dumpfiles and the max allowable
// aggregate dump file size is .
int maxDumpFiles = System . getProperty ( MAX_NUM_DUMP_FILES_PROP ) != null ? Integer . valueOf ( System . getProperty ( MAX_NUM_DUMP_FILES_PROP ) ) : DEFAULT_MAX_DUMP_FILES ; int maxDumpDirSizeMB = System . getProperty ( MAX_SIZE_DUMPS_MB_PROP ) != null ? Integer . valueOf ( System . getProperty ( MAX_SIZE_DUMPS_MB_PROP ) ) : DEFAULT_MAX_DUMPDIR_SIZE_MB ; File dumpDir = new File ( logDumpPathPrefix ) ; long dirSizeBytes = 0 ; if ( dumpDir . listFiles ( ) == null ) { return ; } // Keep a sorted list of files by size as we go in case we need to
// delete some
TreeSet < File > fileList = new TreeSet < > ( new Comparator < File > ( ) { @ Override public int compare ( File a , File b ) { return a . length ( ) < b . length ( ) ? - 1 : 1 ; } } ) ; // Loop over files in this directory and get rid of old ones
// while accumulating the total size
for ( File file : dumpDir . listFiles ( ) ) { if ( ( ! file . getName ( ) . startsWith ( LOG_DUMP_FILE_NAME ) && ! file . getName ( ) . startsWith ( IncidentUtil . INC_DUMP_FILE_NAME ) ) || ( System . currentTimeMillis ( ) - file . lastModified ( ) > FILE_EXPN_TIME_MS && file . delete ( ) ) ) { continue ; } dirSizeBytes += file . length ( ) ; fileList . add ( file ) ; } // If we ' re exceeding our max allotted disk usage , cut some stuff out ;
// else if we need to make space for a new dump delete the oldest .
if ( dirSizeBytes >= ( ( long ) maxDumpDirSizeMB << 20 ) ) { // While we take up more than half the allotted disk usage , keep deleting .
for ( File file : fileList ) { if ( dirSizeBytes < ( ( long ) maxDumpDirSizeMB << 19 ) ) { break ; } long victimSize = file . length ( ) ; if ( file . delete ( ) ) { dirSizeBytes -= victimSize ; } } } else if ( deleteOldest && fileList . size ( ) >= maxDumpFiles ) { fileList . first ( ) . delete ( ) ; } |
public class AbstractBlockBasedDataStore { /** * Run an integrity check on the store files and fix them as necessary
* @ throws DataStoreException if the files could not be fixed */
protected void integrityCheck ( ) throws DataStoreException { } } | try { // 1 - Check files sizes
// - - Allocation table
long atFileSize = allocationTableRandomAccessFile . length ( ) ; if ( atFileSize < AT_HEADER_SIZE + AT_BLOCK_SIZE ) /* Should have at least one entry */
throw new DataStoreException ( "Allocation table is truncated : " + allocationTableFile . getAbsolutePath ( ) ) ; // Read some header fields
FileInputStream inFile = new FileInputStream ( allocationTableFile ) ; DataInputStream in = new DataInputStream ( new BufferedInputStream ( inFile , 16384 ) ) ; int blockCount = in . readInt ( ) ; int blockSize = in . readInt ( ) ; int firstBlock = in . readInt ( ) ; // Fix AT size
long expectedATFileSize = AT_HEADER_SIZE + AT_BLOCK_SIZE * ( long ) blockCount ; if ( atFileSize != expectedATFileSize ) { log . error ( "[" + descriptor . getName ( ) + "] Allocation table has an invalid size (actual:" + atFileSize + ",expected:" + expectedATFileSize + "), fixing." ) ; allocationTableRandomAccessFile . setLength ( expectedATFileSize ) ; } // Fix data size
long dataFileSize = dataRandomAccessFile . length ( ) ; long expectedDataFileSize = ( long ) blockSize * blockCount ; if ( dataFileSize != expectedDataFileSize ) { log . error ( "[" + descriptor . getName ( ) + "] Data file has an invalid size (actual:" + dataFileSize + ",expected:" + expectedDataFileSize + "), fixing." ) ; dataRandomAccessFile . setLength ( expectedDataFileSize ) ; } // 2 - Check allocation table
// Read the AT into memory
byte [ ] flags = new byte [ blockCount ] ; int [ ] allocatedSize = new int [ blockCount ] ; int [ ] previousBlock = new int [ blockCount ] ; int [ ] nextBlock = new int [ blockCount ] ; int blocksInUse = 0 ; int msgCount = 0 ; for ( int n = 0 ; n < blockCount ; n ++ ) { flags [ n ] = in . readByte ( ) ; allocatedSize [ n ] = in . readInt ( ) ; previousBlock [ n ] = in . readInt ( ) ; nextBlock [ n ] = in . readInt ( ) ; if ( allocatedSize [ n ] != - 1 ) { blocksInUse ++ ; if ( ( flags [ n ] & FLAG_START_BLOCK ) > 0 ) msgCount ++ ; } } in . close ( ) ; log . debug ( "[" + descriptor . getName ( ) + "] Blocks in use before fix : " + blocksInUse ) ; log . debug ( "[" + descriptor . getName ( ) + "] Messages count before fix : " + msgCount ) ; // Fix first block index
boolean changed = false ; if ( firstBlock < - 1 || firstBlock >= blockCount ) { log . error ( "[" + descriptor . getName ( ) + "] Invalid allocation table first block index (" + firstBlock + "), guessing new one ..." ) ; firstBlock = guessFirstBlockIndex ( blockCount , allocatedSize , nextBlock ) ; log . debug ( "[" + descriptor . getName ( ) + "] Guessed first block index : " + firstBlock ) ; changed = true ; } // Recover table
if ( msgCount == 0 ) { if ( firstBlock == - 1 ) { // Table is empty , cleanup dirty entries
changed = changed || cleanupEmptyBlocks ( blockCount , flags , allocatedSize , previousBlock , nextBlock ) ; } else { log . error ( "[" + descriptor . getName ( ) + "] First block index should be -1, clearing ..." ) ; firstBlock = - 1 ; changed = true ; } } else { if ( firstBlock == - 1 ) { log . error ( "[" + descriptor . getName ( ) + "] Invalid first block index, guessing value ..." ) ; firstBlock = guessFirstBlockIndex ( blockCount , allocatedSize , nextBlock ) ; log . debug ( "[" + descriptor . getName ( ) + "] Guessed first block index : " + firstBlock ) ; changed = true ; } changed = changed || fixBlocks ( blockCount , blockSize , firstBlock , flags , allocatedSize , previousBlock , nextBlock ) ; changed = changed || cleanupEmptyBlocks ( blockCount , flags , allocatedSize , previousBlock , nextBlock ) ; } // Update the allocation file table
if ( changed ) { // Re - compute size
msgCount = 0 ; blocksInUse = 0 ; for ( int n = 0 ; n < blockCount ; n ++ ) { if ( allocatedSize [ n ] != - 1 ) { blocksInUse ++ ; if ( ( flags [ n ] & FLAG_START_BLOCK ) > 0 ) msgCount ++ ; } } log . debug ( "[" + descriptor . getName ( ) + "] Blocks in use after fix : " + blocksInUse ) ; log . debug ( "[" + descriptor . getName ( ) + "] Messages count after fix : " + msgCount ) ; log . debug ( "[" + descriptor . getName ( ) + "] Allocation table was altered, saving ..." ) ; allocationTableRandomAccessFile . seek ( AT_HEADER_FIRSTBLOCK_OFFSET ) ; allocationTableRandomAccessFile . writeInt ( firstBlock ) ; for ( int n = 0 ; n < blockCount ; n ++ ) { byte [ ] allocationBlock = new byte [ AT_BLOCK_SIZE ] ; // Regroup I / O to improve performance
allocationBlock [ AB_FLAGS_OFFSET ] = flags [ n ] ; allocationBlock [ AB_ALLOCSIZE_OFFSET ] = ( byte ) ( ( allocatedSize [ n ] >>> 24 ) & 0xFF ) ; allocationBlock [ AB_ALLOCSIZE_OFFSET + 1 ] = ( byte ) ( ( allocatedSize [ n ] >>> 16 ) & 0xFF ) ; allocationBlock [ AB_ALLOCSIZE_OFFSET + 2 ] = ( byte ) ( ( allocatedSize [ n ] >>> 8 ) & 0xFF ) ; allocationBlock [ AB_ALLOCSIZE_OFFSET + 3 ] = ( byte ) ( ( allocatedSize [ n ] >>> 0 ) & 0xFF ) ; allocationBlock [ AB_PREVBLOCK_OFFSET ] = ( byte ) ( ( previousBlock [ n ] >>> 24 ) & 0xFF ) ; allocationBlock [ AB_PREVBLOCK_OFFSET + 1 ] = ( byte ) ( ( previousBlock [ n ] >>> 16 ) & 0xFF ) ; allocationBlock [ AB_PREVBLOCK_OFFSET + 2 ] = ( byte ) ( ( previousBlock [ n ] >>> 8 ) & 0xFF ) ; allocationBlock [ AB_PREVBLOCK_OFFSET + 3 ] = ( byte ) ( ( previousBlock [ n ] >>> 0 ) & 0xFF ) ; allocationBlock [ AB_NEXTBLOCK_OFFSET ] = ( byte ) ( ( nextBlock [ n ] >>> 24 ) & 0xFF ) ; allocationBlock [ AB_NEXTBLOCK_OFFSET + 1 ] = ( byte ) ( ( nextBlock [ n ] >>> 16 ) & 0xFF ) ; allocationBlock [ AB_NEXTBLOCK_OFFSET + 2 ] = ( byte ) ( ( nextBlock [ n ] >>> 8 ) & 0xFF ) ; allocationBlock [ AB_NEXTBLOCK_OFFSET + 3 ] = ( byte ) ( ( nextBlock [ n ] >>> 0 ) & 0xFF ) ; allocationTableRandomAccessFile . seek ( AT_HEADER_SIZE + n * AT_BLOCK_SIZE ) ; allocationTableRandomAccessFile . write ( allocationBlock ) ; } allocationTableRandomAccessFile . getFD ( ) . sync ( ) ; } else log . debug ( "[" + descriptor . getName ( ) + "] Allocation table was not altered" ) ; } catch ( IOException e ) { throw new DataStoreException ( "Cannot check/fix store integrity : " + e ) ; } |
public class TileSet {
    /**
     * Creates a {@link Tile} object from this tileset corresponding to the specified tile id and
     * returns that tile. A null tile will never be returned, but one with an error image may be
     * returned if a problem occurs loading the underlying tileset image.
     *
     * @param tileIndex the index of the tile in the tileset. Tile indexes start with zero as the
     * upper left tile and increase by one as the tiles move left to right and top to bottom over
     * the source image.
     * @param rizer an entity that will be used to obtain colorizations for tilesets that are
     * recolorizable. Passing null if the tileset is known not to be recolorizable is valid.
     * @return the tile object.
     */
    public Tile getTile (int tileIndex, Colorizer rizer) {
        // Resolve this tile's colorizations (if any), then delegate to the
        // colorization-array overload which performs the actual tile creation.
        return getTile (tileIndex, getColorizations (tileIndex, rizer));
    }
}
public class ConnectorImpl { /** * { @ inheritDoc } */
public List < String > getRequiredWorkContexts ( ) { } } | List < String > result = new ArrayList < String > ( requiredWorkContexts . size ( ) ) ; for ( XsdString wc : requiredWorkContexts ) result . add ( wc . getValue ( ) ) ; return Collections . unmodifiableList ( result ) ; |
public class ClassGraph { /** * Annotate an field / argument with its type t */
private String typeAnnotation ( Options opt , Type t ) { } } | if ( t . typeName ( ) . equals ( "void" ) ) return "" ; return " : " + type ( opt , t , false ) + t . dimension ( ) ; |
public class Session {
    /**
     * Closes this Session.
     *
     * <p>Idempotent: returns immediately if already closed. Rolls back any open
     * transaction, best-effort logs the disconnect, releases session-scoped
     * resources, deregisters the session from the database, and finally nulls
     * out references so they can be garbage collected.</p>
     */
    @Override
    public synchronized void close () {
        if (isClosed) {
            return;
        }
        // abandon any in-flight transaction before tearing the session down
        rollback (false);
        try {
            database.logger.writeToLog (this, Tokens.T_DISCONNECT);
        } catch (HsqlException e) {
            // best-effort log write; failures while disconnecting are ignored
        }
        // release session-scoped data structures
        sessionData.closeAllNavigators ();
        sessionData.persistentStoreCollection.clearAllTables ();
        sessionData.closeResultCache ();
        // deregister this session from the database
        database.compiledStatementManager.removeSession (sessionId);
        database.sessionManager.removeSession (this);
        // shuts the database down if this was its last open session
        database.closeIfLast ();
        // drop references to help garbage collection; order preserved from original
        database = null;
        user = null;
        rowActionList = null;
        sessionContext.savepoints = null;
        intConnection = null;
        sessionContext = null;
        lastIdentity = null;
        isClosed = true;
    }
}
public class AbstractMaterialDialogBuilder { /** * Obtains the boolean value , which specified whether the dialog should be shown fullscreen , or
* not , from a specific theme .
* @ param themeResourceId
* The resource id of the theme , the boolean value should be obtained from , as an { @ link
* Integer } value */
private void obtainFullscreen ( @ StyleRes final int themeResourceId ) { } } | TypedArray typedArray = getContext ( ) . getTheme ( ) . obtainStyledAttributes ( themeResourceId , new int [ ] { R . attr . materialDialogFullscreen } ) ; setFullscreen ( typedArray . getBoolean ( 0 , false ) ) ; |
public class TransactionIdManager { /** * Given a transaction id , return the time of its creation
* by examining the embedded timestamp .
* @ param txnId The transaction id value to examine .
* @ return The Date object representing the time this transaction
* id was created . */
public static Date getDateFromTransactionId ( long txnId ) { } } | long time = txnId >> ( COUNTER_BITS + INITIATORID_BITS ) ; time += VOLT_EPOCH ; return new Date ( time ) ; |
public class CommerceShipmentItemPersistenceImpl { /** * Returns all the commerce shipment items where commerceShipmentId = & # 63 ; .
* @ param commerceShipmentId the commerce shipment ID
* @ return the matching commerce shipment items */
@ Override public List < CommerceShipmentItem > findByCommerceShipment ( long commerceShipmentId ) { } } | return findByCommerceShipment ( commerceShipmentId , QueryUtil . ALL_POS , QueryUtil . ALL_POS , null ) ; |
public class AWSSdkClient {
    /**
     * Get list of EC2 {@link Instance}s for an auto scaling group.
     *
     * @param groupName Auto scaling group name
     * @param status Instance status (eg. running); null matches any status
     * @return List of EC2 instances found for the input auto scaling group
     */
    public List<Instance> getInstancesForGroup (String groupName, String status) {
        final AmazonEC2 amazonEC2 = getEc2Client ();
        // auto scaling tags member instances with their group name; filter on that tag
        final DescribeInstancesResult instancesResult = amazonEC2.describeInstances (new DescribeInstancesRequest ().withFilters (new Filter ().withName ("tag:aws:autoscaling:groupName").withValues (groupName)));
        final List<Instance> instances = new ArrayList<> ();
        // reservations group instances; walk both levels to flatten the result
        for (Reservation reservation : instancesResult.getReservations ()) {
            for (Instance instance : reservation.getInstances ()) {
                // keep the instance when no status filter is given, when its state is
                // unavailable, or when the state name matches the requested status
                if (null == status || null == instance.getState () || status.equals (instance.getState ().getName ())) {
                    instances.add (instance);
                    LOGGER.info ("Found instance: " + instance + " which qualified filter: " + status);
                } else {
                    LOGGER.info ("Found instance: " + instance + " but did not qualify for filter: " + status);
                }
            }
        }
        return instances;
    }
}
public class FirefoxBinary {
    /**
     * Locates the firefox binary from a system property. Will throw an exception if the binary cannot
     * be found.
     *
     * @return an Executable wrapping the binary, or null when the system property is not set
     * @throws WebDriverException when the property is set but no usable binary exists at the path
     */
    private static Executable locateFirefoxBinaryFromSystemProperty () {
        String binaryName = System.getProperty (FirefoxDriver.SystemProperty.BROWSER_BINARY);
        if (binaryName == null) return null;
        File binary = new File (binaryName);
        // an exact path to an existing file wins immediately
        if (binary.exists () && !binary.isDirectory ()) return new Executable (binary);
        // otherwise massage the name with platform-specific suffixes and retry
        Platform current = Platform.getCurrent ();
        if (current.is (WINDOWS)) {
            if (!binaryName.endsWith (".exe")) {
                binaryName += ".exe";
            }
        } else if (current.is (MAC)) {
            // on macOS, point inside the .app bundle at the actual executable
            if (!binaryName.endsWith (".app")) {
                binaryName += ".app";
            }
            binaryName += "/Contents/MacOS/firefox-bin";
        }
        binary = new File (binaryName);
        if (binary.exists ()) return new Executable (binary);
        throw new WebDriverException (String.format ("'%s' property set, but unable to locate the requested binary: %s", FirefoxDriver.SystemProperty.BROWSER_BINARY, binaryName));
    }
}
public class SourceMapResolver {
    /**
     * For a given //# sourceMappingURL, this locates the appropriate sourcemap on disk. This is used
     * for sourcemap merging (--apply_input_source_maps) and for error resolution.
     *
     * @param jsFile the JS file whose sourcemap URL is being resolved
     * @param sourceMapURL the raw sourcemap URL (may be a Base64 data URL, absolute, or relative)
     * @param parseInlineSourceMaps Whether to parse Base64 encoded source maps.
     * @return the sourcemap as a SourceFile, or null when it cannot be resolved
     */
    static SourceFile extractSourceMap (SourceFile jsFile, String sourceMapURL, boolean parseInlineSourceMaps) {
        // inline (Base64 data URL) sourcemaps are decoded in place when requested
        if (parseInlineSourceMaps && sourceMapURL.startsWith (BASE64_URL_PREFIX)) {
            String extractedString = extractBase64String (sourceMapURL);
            if (extractedString != null) {
                return SourceFile.fromCode (jsFile.getName () + ".inline.map", extractedString);
            }
            // malformed Base64 payload: give up rather than guess
            return null;
        }
        // TODO(tdeegan): Handle absolute urls here. The compiler needs to come up with a scheme for
        // properly resolving absolute urls from http:// or the root /some/abs/path/... See b/62544959.
        if (isAbsolute (sourceMapURL)) {
            return null;
        }
        // If not absolute, its relative.
        // TODO(tdeegan): Handle urls relative to //# sourceURL. See the sourcemap spec.
        return getRelativePath (jsFile.getName (), sourceMapURL);
    }
}
public class CPFriendlyURLEntryUtil {
    /**
     * Returns the number of cp friendly url entries where groupId = &#63; and classNameId = &#63; and languageId = &#63; and urlTitle = &#63;.
     *
     * @param groupId the group ID
     * @param classNameId the class name ID
     * @param languageId the language ID
     * @param urlTitle the url title
     * @return the number of matching cp friendly url entries
     */
    public static int countByG_C_L_U (long groupId, long classNameId, String languageId, String urlTitle) {
        // static facade: forward straight to the persistence implementation
        return getPersistence ().countByG_C_L_U (groupId, classNameId, languageId, urlTitle);
    }
}
public class PortletCookieServiceImpl {
    /**
     * Locate the existing {@link IPortalCookie} for the request, or create a new one.
     * Resolution order: browser cookie, then HTTP session, then create-and-store.
     *
     * @param request the current HTTP request
     * @return the {@link IPortalCookie} - never null
     */
    protected IPortalCookie getOrCreatePortalCookie (HttpServletRequest request) {
        IPortalCookie result = null;
        // first check in request
        final Cookie cookie = this.getCookieFromRequest (this.cookieName, request);
        if (cookie != null) {
            // found a potential cookie, call off to the dao
            final String value = cookie.getValue ();
            result = this.portletCookieDao.getPortalCookie (value);
        }
        // still null? check in the session
        if (result == null) {
            result = locatePortalCookieInSession (request.getSession ());
        }
        // if by this point we still haven't found the portal cookie, create one
        if (result == null) {
            result = this.portletCookieDao.createPortalCookie (this.maxAge);
            // store the portal cookie value in the session; synchronize on the
            // session mutex so concurrent requests do not race on the write
            HttpSession session = request.getSession ();
            synchronized (WebUtils.getSessionMutex (session)) {
                session.setAttribute (SESSION_ATTRIBUTE__PORTAL_COOKIE_ID, result.getValue ());
            }
        }
        return result;
    }
}
public class CreateCertificateFromCsrRequestMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @throws SdkClientException if the request is null or if marshalling fails
     */
    public void marshall (CreateCertificateFromCsrRequest createCertificateFromCsrRequest, ProtocolMarshaller protocolMarshaller) {
        if (createCertificateFromCsrRequest == null) {
            throw new SdkClientException ("Invalid argument passed to marshall(...)");
        }
        try {
            // marshal each bound field against its protocol binding
            protocolMarshaller.marshall (createCertificateFromCsrRequest.getCertificateSigningRequest (), CERTIFICATESIGNINGREQUEST_BINDING);
            protocolMarshaller.marshall (createCertificateFromCsrRequest.getSetAsActive (), SETASACTIVE_BINDING);
        } catch (Exception e) {
            // wrap any marshalling failure in the SDK's client-side exception type
            throw new SdkClientException ("Unable to marshall request to JSON: " + e.getMessage (), e);
        }
    }
}
public class DefaultEmailModel {
    /**
     * Sets the reply-to address.
     *
     * @param address a valid email address
     * @param personal the real world name of the sender (can be null)
     * @throws AddressException in case of an invalid email address
     */
    public void setReplyTo (String address, String personal) throws AddressException {
        // parse and store as an InternetAddress; validation happens in the helper
        replyTo = toInternetAddress (address, personal);
    }
}
public class RegionInstanceGroupManagerClient { /** * Sets the instance template to use when creating new instances or recreating instances in this
* group . Existing instances are not affected .
* < p > Sample code :
* < pre > < code >
* try ( RegionInstanceGroupManagerClient regionInstanceGroupManagerClient = RegionInstanceGroupManagerClient . create ( ) ) {
* ProjectRegionInstanceGroupManagerName instanceGroupManager = ProjectRegionInstanceGroupManagerName . of ( " [ PROJECT ] " , " [ REGION ] " , " [ INSTANCE _ GROUP _ MANAGER ] " ) ;
* RegionInstanceGroupManagersSetTemplateRequest regionInstanceGroupManagersSetTemplateRequestResource = RegionInstanceGroupManagersSetTemplateRequest . newBuilder ( ) . build ( ) ;
* Operation response = regionInstanceGroupManagerClient . setInstanceTemplateRegionInstanceGroupManager ( instanceGroupManager , regionInstanceGroupManagersSetTemplateRequestResource ) ;
* < / code > < / pre >
* @ param instanceGroupManager The name of the managed instance group .
* @ param regionInstanceGroupManagersSetTemplateRequestResource
* @ throws com . google . api . gax . rpc . ApiException if the remote call fails */
@ BetaApi public final Operation setInstanceTemplateRegionInstanceGroupManager ( ProjectRegionInstanceGroupManagerName instanceGroupManager , RegionInstanceGroupManagersSetTemplateRequest regionInstanceGroupManagersSetTemplateRequestResource ) { } } | SetInstanceTemplateRegionInstanceGroupManagerHttpRequest request = SetInstanceTemplateRegionInstanceGroupManagerHttpRequest . newBuilder ( ) . setInstanceGroupManager ( instanceGroupManager == null ? null : instanceGroupManager . toString ( ) ) . setRegionInstanceGroupManagersSetTemplateRequestResource ( regionInstanceGroupManagersSetTemplateRequestResource ) . build ( ) ; return setInstanceTemplateRegionInstanceGroupManager ( request ) ; |
public class ImageUtil { /** * Appends rightImage to the right side of leftImage .
* The resulting image type is one of leftImage .
* @ param leftImage
* first image , must not be null
* @ param rightImage
* second image that is appended to the right side of leftImage ,
* must not be null
* @ return appended image */
public static BufferedImage appendImages ( BufferedImage leftImage , BufferedImage rightImage ) { } } | Preconditions . checkNotNull ( leftImage ) ; Preconditions . checkNotNull ( rightImage ) ; int width = leftImage . getWidth ( ) + rightImage . getWidth ( ) ; int height = Math . max ( leftImage . getHeight ( ) , rightImage . getHeight ( ) ) ; BufferedImage result = new BufferedImage ( width , height , leftImage . getType ( ) ) ; result . createGraphics ( ) . drawImage ( leftImage , 0 , 0 , null ) ; result . createGraphics ( ) . drawImage ( rightImage , leftImage . getWidth ( ) , 0 , null ) ; return result ; |
public class SimonConsoleFilter { /** * Wraps the HTTP request with Simon measuring . Separate Simons are created for different URIs ( parameters
* ignored ) .
* @ param servletRequest HTTP servlet request
* @ param servletResponse HTTP servlet response
* @ param filterChain filter chain
* @ throws java . io . IOException possibly thrown by other filter / serlvet in the chain
* @ throws javax . servlet . ServletException possibly thrown by other filter / serlvet in the chain */
public final void doFilter ( ServletRequest servletRequest , ServletResponse servletResponse , FilterChain filterChain ) throws IOException , ServletException { } } | HttpServletRequest request = ( HttpServletRequest ) servletRequest ; HttpServletResponse response = ( HttpServletResponse ) servletResponse ; String localPath = request . getRequestURI ( ) . substring ( request . getContextPath ( ) . length ( ) ) ; if ( localPath . startsWith ( requestProcessor . getUrlPrefix ( ) ) ) { requestProcessor . processRequest ( request , response ) ; return ; } filterChain . doFilter ( request , response ) ; |
public class OfferingManager { /** * Remove an offering
* @ param offeringName the name of the offering to remove
* @ return */
public boolean removeOffering ( String offeringName ) { } } | if ( offeringName == null ) throw new NullPointerException ( "The parameter \"cloudOfferingId\" cannot be null." ) ; BasicDBObject query = new BasicDBObject ( "offering_name" , offeringName ) ; Document removedOffering = this . offeringsCollection . findOneAndDelete ( query ) ; return removedOffering != null ; |
public class TypeLord {
    /**
     * Reverse parameterized-type inference. Given a parameterized source type and a
     * generic target type, maps the target's type variables from the source's actual
     * type arguments and returns the target parameterized accordingly.
     * Example (from original doc, presumably): sourceType {@code ArrayList<Foo>},
     * targetType {@code List<Foo>} via intermediate {@code ArrayList<Z>} — TODO confirm
     * the exact roles of the three types against callers.
     *
     * @return the parameterized target type, or null when inference is not possible
     */
    public static IType findParameterizedType_Reverse (IType sourceType, IType targetType) {
        if (sourceType == null || targetType == null) {
            return null;
        }
        // nothing to infer from a raw/non-parameterized source
        if (!sourceType.isParameterizedType ()) {
            return null;
        }
        // List<Z> — find the target's generic form in the source's hierarchy
        IType sourceTypeInHier = findParameterizedType (targetType, getPureGenericType (sourceType));
        if (sourceTypeInHier == null || !sourceTypeInHier.isParameterizedType ()) {
            return null;
        }
        // map each type variable in the hierarchy type to the source's actual argument
        // at the same position
        TypeVarToTypeMap map = new TypeVarToTypeMap ();
        IType[] params = sourceTypeInHier.getTypeParameters ();
        for (int iPos = 0; iPos < params.length; iPos++) {
            if (params[iPos] instanceof ITypeVariableType) {
                map.put ((ITypeVariableType) params[iPos], sourceType.getTypeParameters ()[iPos]);
            }
        }
        // ArrayList<Foo> — instantiate the target with the resolved assignments
        return getActualType (targetType, map, true);
    }
}
public class RandomSearch {
    /**
     * Part of init common to all initialization types.
     * Seeds the fitness function and the best-so-far tracking from the current
     * active configuration.
     */
    protected void initCommon () {
        this.fitness = problem.getDefaultFitness ();
        // the starting configuration is, trivially, the best seen so far
        this.bestConfiguration = this.activeConfiguration;
        this.bestFitness = this.fitness.getValue (this.activeConfiguration);
        // calculate fitness
        // NOTE(review): this evaluates the same configuration a second time; if
        // getValue is deterministic and expensive, activeFitness could reuse
        // bestFitness — confirm before changing.
        this.activeFitness = this.fitness.getValue (this.activeConfiguration);
        this.activeNormalizedFitness = this.fitness.normalize (activeFitness);
    }
}
public class PluginXmlParser { /** * Returns the ResourceType enum value corresponding to the resource tag .
* @ param resourceTag Tag name of resource
* @ return ResourceType enum . Returns an enum of unknown if tag is not a supported resource
* type . */
private ResourceType getResourceType ( String resourceTag ) { } } | if ( resourceTag == null || ! resourceTag . endsWith ( "-resource" ) ) { return ResourceType . unknown ; } try { return ResourceType . valueOf ( resourceTag . substring ( 0 , resourceTag . length ( ) - 9 ) ) ; } catch ( Exception e ) { return ResourceType . unknown ; } |
public class SliceUtf8 { /** * Removes all white space characters from the left side of the string .
* Note : Invalid UTF - 8 sequences are not trimmed . */
public static Slice leftTrim ( Slice utf8 ) { } } | int length = utf8 . length ( ) ; int position = firstNonWhitespacePosition ( utf8 ) ; return utf8 . slice ( position , length - position ) ; |
public class ZookeeperDependency { /** * Function that will replace the placeholder
* { @ link ZookeeperDependency # VERSION _ PLACEHOLDER _ REGEX } from the
* { @ link ZookeeperDependency # contentTypeTemplate } with value from
* { @ link ZookeeperDependency # version } .
* e . g . having :
* < li > contentTypeTemplate : { @ code ' application / vnd . some - service . $ version + json ' } < / li >
* < li > version : { @ code ' v1 ' } < / li >
* the result of the function will be { @ code ' application / vnd . some - service . v1 + json ' }
* @ return content type template with version */
public String getContentTypeWithVersion ( ) { } } | if ( ! StringUtils . hasText ( this . contentTypeTemplate ) || ! StringUtils . hasText ( this . version ) ) { return "" ; } return this . contentTypeTemplate . replaceAll ( VERSION_PLACEHOLDER_REGEX , this . version ) ; |
public class MersenneTwisterFast {
    /**
     * Reads the entire state of the MersenneTwister RNG from the stream.
     * Fields are read in a fixed order (state vector, mag01 table, index,
     * cached gaussian) — presumably mirroring the matching writeState; the
     * two must stay in sync.
     *
     * @param stream the stream to read the serialized state from
     * @throws IOException if reading from the stream fails
     */
    public void readState (DataInputStream stream) throws IOException {
        // the main state vector
        int len = mt.length;
        for (int x = 0; x < len; x++) mt[x] = stream.readInt ();
        // the mag01 lookup table
        len = mag01.length;
        for (int x = 0; x < len; x++) mag01[x] = stream.readInt ();
        // current index into the state vector
        mti = stream.readInt ();
        // cached state for the Box-Muller gaussian generator
        __nextNextGaussian = stream.readDouble ();
        __haveNextNextGaussian = stream.readBoolean ();
    }
}
public class JQLBuilder { /** * For each fields .
* @ param fields
* the fields
* @ param listener
* the listener
* @ return the string */
private static String forEachFields ( final Set < String > fields , OnFieldListener listener ) { } } | StringBuilder builder = new StringBuilder ( ) ; { String comma = "" ; for ( String item : fields ) { builder . append ( comma + listener . onField ( item ) ) ; comma = ", " ; } } return builder . toString ( ) ; |
public class PredefinedValidationStampController {
    /**
     * Gets the list of predefined validation stamps, wrapped as a HATEOAS
     * resource collection with a self URI and a link to the creation form.
     */
    @RequestMapping (value = "predefinedValidationStamps", method = RequestMethod.GET)
    public Resources<PredefinedValidationStamp> getPredefinedValidationStampList () {
        // self URI points back at this endpoint; CREATE link exposes the creation form
        return Resources.of (predefinedValidationStampService.getPredefinedValidationStamps (), uri (on (getClass ()).getPredefinedValidationStampList ())).with (Link.CREATE, uri (on (getClass ()).getPredefinedValidationStampCreationForm ()));
    }
}
public class ClassCacheManager { /** * Builds completely initialized list of callbacks for given context .
* @ param ctx contextual data given by execution service
* @ return */
public List < CommandCallback > buildCommandCallback ( CommandContext ctx , ClassLoader cl ) { } } | List < CommandCallback > callbackList = new ArrayList < CommandCallback > ( ) ; if ( ctx != null && ctx . getData ( "callbacks" ) != null ) { logger . debug ( "Callback: {}" , ctx . getData ( "callbacks" ) ) ; String [ ] callbacksArray = ( ( String ) ctx . getData ( "callbacks" ) ) . split ( "," ) ; List < String > callbacks = ( List < String > ) Arrays . asList ( callbacksArray ) ; for ( String callbackName : callbacks ) { CommandCallback handler = findCommandCallback ( callbackName . trim ( ) , cl ) ; callbackList . add ( handler ) ; } } return callbackList ; |
public class ChromeCast {
    /**
     * <p>If no application is running at the moment then exception is thrown.</p>
     *
     * @return current media status, state, time, playback rate, etc.
     * @throws IOException on communication failure, or a ChromeCastException
     *         (raised under the declared IOException) when no app is running
     */
    public final MediaStatus getMediaStatus () throws IOException {
        Application runningApp = getRunningApp ();
        if (runningApp == null) {
            throw new ChromeCastException ("No application is running in ChromeCast");
        }
        // query the media channel of the currently running application's transport
        return channel ().getMediaStatus (getTransportId (runningApp));
    }
}
public class CmsToolManager {
    /**
     * Configures a whole tool root with all its tools.<p>
     *
     * Scans every resource under the root URI carrying the handler-class property,
     * instantiates each declared handler via reflection, sets it up, and finally
     * registers the collected handler list.
     *
     * @param cms the cms context
     * @param toolRoot the tool root to configure
     * @throws CmsException if something goes wrong
     */
    private void configureToolRoot (CmsObject cms, CmsToolRootHandler toolRoot) throws CmsException {
        List<I_CmsToolHandler> handlers = new ArrayList<I_CmsToolHandler> ();
        // add tool root handler
        handlers.add (toolRoot);
        // look in every file under the root uri for valid
        // admin tools and register them
        List<CmsResource> resources = cms.readResourcesWithProperty (toolRoot.getUri (), HANDLERCLASS_PROPERTY);
        Iterator<CmsResource> itRes = resources.iterator ();
        while (itRes.hasNext ()) {
            CmsResource res = itRes.next ();
            CmsProperty prop = cms.readPropertyObject (res.getRootPath (), HANDLERCLASS_PROPERTY, false);
            if (!prop.isNullProperty ()) {
                try {
                    // instantiate the handler
                    Class<?> handlerClass = Class.forName (prop.getValue ());
                    I_CmsToolHandler handler = (I_CmsToolHandler) handlerClass.newInstance ();
                    if (!handler.setup (cms, toolRoot, res.getRootPath ())) {
                        // log failure
                        if (CmsLog.INIT.isWarnEnabled ()) {
                            CmsLog.INIT.warn (Messages.get ().getBundle ().key (Messages.INIT_TOOLMANAGER_TOOL_SETUP_ERROR_1, res.getRootPath ()));
                        }
                    }
                    // keep for later use
                    // NOTE(review): the handler is registered even when setup()
                    // returned false — confirm this is intentional
                    handlers.add (handler);
                    // log success
                    if (CmsLog.INIT.isDebugEnabled ()) {
                        if (!handler.getLink ().equals (VIEW_JSPPAGE_LOCATION)) {
                            CmsLog.INIT.debug (Messages.get ().getBundle ().key (Messages.INIT_TOOLMANAGER_NEWTOOL_FOUND_2, handler.getPath (), handler.getLink ()));
                        } else {
                            CmsLog.INIT.debug (Messages.get ().getBundle ().key (Messages.INIT_TOOLMANAGER_NEWTOOL_FOUND_2, handler.getPath (), res.getRootPath ()));
                        }
                    }
                } catch (Exception e) {
                    // log failure
                    if (CmsLog.INIT.isWarnEnabled ()) {
                        CmsLog.INIT.warn (Messages.get ().getBundle ().key (Messages.INIT_TOOLMANAGER_TOOL_SETUP_ERROR_1, res.getRootPath ()), e);
                    }
                }
            }
        }
        registerHandlerList (cms, toolRoot, 1, handlers);
    }
}
public class RecordEventsSpanImpl {
    /**
     * Returns an immutable representation of all the data from this {@code Span}.
     *
     * @return an immutable representation of all the data from this {@code Span}.
     * @throws IllegalStateException if the Span doesn't have RECORD_EVENTS option.
     */
    public SpanData toSpanData () {
        // snapshot all mutable span state under the instance lock
        synchronized (this) {
            // lazily-initialized collections may still be null; substitute empty equivalents
            SpanData.Attributes attributesSpanData = attributes == null ? SpanData.Attributes.create (Collections.<String, AttributeValue>emptyMap (), 0) : SpanData.Attributes.create (attributes, attributes.getNumberOfDroppedAttributes ());
            SpanData.TimedEvents<Annotation> annotationsSpanData = createTimedEvents (getInitializedAnnotations (), timestampConverter);
            SpanData.TimedEvents<io.opencensus.trace.MessageEvent> messageEventsSpanData = createTimedEvents (getInitializedNetworkEvents (), timestampConverter);
            // copy the link events so the returned data is decoupled from this span
            SpanData.Links linksSpanData = links == null ? SpanData.Links.create (Collections.<Link>emptyList (), 0) : SpanData.Links.create (new ArrayList<Link> (links.events), links.getNumberOfDroppedEvents ());
            // status and end timestamp are only populated once the span has ended
            return SpanData.create (getContext (), parentSpanId, hasRemoteParent, name, kind, timestampConverter.convertNanoTime (startNanoTime), attributesSpanData, annotationsSpanData, messageEventsSpanData, linksSpanData, numberOfChildren, hasBeenEnded ? getStatusWithDefault () : null, hasBeenEnded ? timestampConverter.convertNanoTime (endNanoTime) : null);
        }
    }
}
public class MetadataDao { /** * Delete the Metadata matching the prepared query , cascading
* @ param preparedDelete
* prepared delete query
* @ return deleted count
* @ throws SQLException
* upon failure */
public int deleteCascade ( PreparedQuery < Metadata > preparedDelete ) throws SQLException { } } | int count = 0 ; if ( preparedDelete != null ) { List < Metadata > metadataList = query ( preparedDelete ) ; count = deleteCascade ( metadataList ) ; } return count ; |
public class KeyStoreUtils {
    /**
     * Adds a private key to the specified key store from the passed private key file and certificate chain.
     *
     * @param keyStore The key store to receive the private key.
     * @param pemKeyFile A PEM format file containing the private key.
     * @param passwordChars The password that protects the private key.
     * @param certChain The certificate chain to associate with the private key.
     * @throws IOException if the key store file cannot be read
     * @throws GeneralSecurityException if a cryptography problem is encountered.
     */
    public static void addPrivateKey (KeyStore keyStore, File pemKeyFile, char[] passwordChars, List<Certificate> certChain) throws IOException, GeneralSecurityException {
        final String methodName = "addPrivateKey";
        logger.entry (methodName, pemKeyFile, certChain);
        // parse the PEM file, using the supplied password to decrypt it
        PrivateKey privateKey = createPrivateKey (pemKeyFile, passwordChars);
        // store under the fixed alias "key", protected by the same password
        keyStore.setKeyEntry ("key", privateKey, passwordChars, certChain.toArray (new Certificate[certChain.size ()]));
        logger.exit (methodName);
    }
}
public class CmsSitemapDNDController {
    /**
     * Handles the drop for a sitemap item which was dragged to a different position.<p>
     *
     * @param sitemapEntry the dropped item
     * @param target the drop target
     * @param parent the parent sitemap entry
     */
    private void handleDropSitemapEntry (final CmsSitemapTreeItem sitemapEntry, final I_CmsDropTarget target, CmsClientSitemapEntry parent) {
        if (isChangedPosition (sitemapEntry, target, true)) {
            // moving a tree entry around
            final CmsClientSitemapEntry entry = sitemapEntry.getSitemapEntry ();
            // the name must be unique below the new parent; resolution is asynchronous
            m_controller.ensureUniqueName (parent, entry.getName (), new I_CmsSimpleCallback<String> () {
                public void execute (String uniqueName) {
                    // re-check the position: it may have changed while waiting for the callback
                    if (!uniqueName.equals (entry.getName ()) && isChangedPosition (sitemapEntry, target, false)) {
                        // rename first, then move into the insertion point
                        m_controller.editAndChangeName (entry, uniqueName, Collections.<CmsPropertyModification>emptyList (), entry.isNew (), CmsReloadMode.none);
                        m_controller.move (entry, m_insertPath + uniqueName + "/", m_insertIndex);
                    } else {
                        m_controller.move (entry, m_insertPath + entry.getName () + "/", m_insertIndex);
                    }
                }
            });
        } else {
            // dropped back at its original position: restore the item's visual state
            sitemapEntry.resetEntry ();
        }
    }
}
public class CatalogUtil { /** * Check if a catalog compiled with the given version of VoltDB is
* compatible with the current version of VoltDB .
* @ param catalogVersionStr
* The version string of the VoltDB that compiled the catalog .
* @ return true if it ' s compatible , false otherwise . */
public static boolean isCatalogCompatible ( String catalogVersionStr ) { } } | if ( catalogVersionStr == null || catalogVersionStr . isEmpty ( ) ) { return false ; } // Check that it is a properly formed verstion string
Object [ ] catalogVersion = MiscUtils . parseVersionString ( catalogVersionStr ) ; if ( catalogVersion == null ) { throw new IllegalArgumentException ( "Invalid version string " + catalogVersionStr ) ; } if ( ! catalogVersionStr . equals ( VoltDB . instance ( ) . getVersionString ( ) ) ) { return false ; } return true ; |
public class AuditUtils {
    /**
     * Creates an audit entry for the 'contract broken' event.
     *
     * @param bean the bean
     * @param securityContext the security context
     * @return the audit entry
     */
    public static AuditEntryBean contractBrokenFromClient (ContractBean bean, ISecurityContext securityContext) {
        // the entry is recorded against the client's owning organization
        AuditEntryBean entry = newEntry (bean.getClient ().getClient ().getOrganization ().getId (), AuditEntityType.Client, securityContext);
        entry.setWhat (AuditEntryType.BreakContract);
        entry.setEntityId (bean.getClient ().getClient ().getId ());
        entry.setEntityVersion (bean.getClient ().getVersion ());
        // snapshot the contract details into the entry's JSON payload
        ContractData data = new ContractData (bean);
        entry.setData (toJSON (data));
        return entry;
    }
}
public class VariableNames { /** * Replies the name of an environment variable that corresponds to the name of a bootique variable .
* @ param bootiqueVariable the name of the bootique variable .
* @ return the name of the environment variable . */
public static String toEnvironmentVariableName ( String bootiqueVariable ) { } } | if ( Strings . isNullOrEmpty ( bootiqueVariable ) ) { return null ; } final StringBuilder name = new StringBuilder ( ) ; final Pattern pattern = Pattern . compile ( "((?:[a-z0_9_]+)|(?:[A-Z]+[^A-Z]*))" ) ; // $ NON - NLS - 1 $
for ( final String component : bootiqueVariable . split ( "[^a-zA-Z0_9_]+" ) ) { // $ NON - NLS - 1 $
final Matcher matcher = pattern . matcher ( component ) ; while ( matcher . find ( ) ) { final String word = matcher . group ( 1 ) ; if ( name . length ( ) > 0 ) { name . append ( "_" ) ; // $ NON - NLS - 1 $
} name . append ( word . toUpperCase ( ) ) ; } } return name . toString ( ) ; |
public class Bindings { /** * Associates the given source to the given handler .
* @ param source the source , must not be { @ code null }
* @ param handler the handler , must not be { @ code null } */
public static void bind ( Source source , RouteParameterHandler handler ) { } } | if ( BINDINGS . containsKey ( source ) ) { LoggerFactory . getLogger ( Bindings . class ) . warn ( "Replacing a route parameter binding for {} by {}" , source . name ( ) , handler ) ; } BINDINGS . put ( source , handler ) ; |
public class DOT { /** * Reads a DOT description from a reader and returns the PNG rendering result as a { @ link BufferedImage } .
* @ param dotReader
* the reader from which to read the description
* @ return the rendering result
* @ throws IOException
* if reading from the reader fails , or the pipe to the DOT process breaks . */
public static BufferedImage renderDOTImage ( Reader dotReader ) throws IOException { } } | InputStream pngIs = runDOT ( dotReader , "png" ) ; BufferedImage img = ImageIO . read ( pngIs ) ; pngIs . close ( ) ; return img ; |
public class AWSResourceGroupsClient { /** * Adds tags to a resource group with the specified ARN . Existing tags on a resource group are not changed if they
* are not specified in the request parameters .
* @ param tagRequest
* @ return Result of the Tag operation returned by the service .
* @ throws BadRequestException
* The request does not comply with validation rules that are defined for the request parameters .
* @ throws ForbiddenException
* The caller is not authorized to make the request .
* @ throws NotFoundException
* One or more resources specified in the request do not exist .
* @ throws MethodNotAllowedException
* The request uses an HTTP method which is not allowed for the specified resource .
* @ throws TooManyRequestsException
* The caller has exceeded throttling limits .
* @ throws InternalServerErrorException
* An internal error occurred while processing the request .
* @ sample AWSResourceGroups . Tag
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / resource - groups - 2017-11-27 / Tag " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public TagResult tag ( TagRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeTag ( request ) ; |
public class CmsSecurityManager { /** * Checks if the current user has the permissions to publish the given publish list
* ( which contains the information about the resources / project to publish ) . < p >
* @ param dbc the current OpenCms users database context
* @ param publishList the publish list to check ( contains the information about the resources / project to publish )
* @ throws CmsException if the user does not have the required permissions because of project lock state
* @ throws CmsMultiException if issues occur like a direct publish is attempted on a resource
* whose parent folder is new or deleted in the offline project ,
* or if the current user has no management access to the current project */
public void checkPublishPermissions ( CmsDbContext dbc , CmsPublishList publishList ) throws CmsException , CmsMultiException { } } | // is the current project an " offline " project ?
checkOfflineProject ( dbc ) ; // check if this is a " direct publish " attempt
if ( ! publishList . isDirectPublish ( ) ) { // check if the user is a manager of the current project , in this case he has publish permissions
checkManagerOfProjectRole ( dbc , dbc . getRequestContext ( ) . getCurrentProject ( ) ) ; } else { // direct publish , create exception containers
CmsMultiException resourceIssues = new CmsMultiException ( ) ; CmsMultiException permissionIssues = new CmsMultiException ( ) ; // iterate all resources in the direct publish list
Iterator < CmsResource > it = publishList . getDirectPublishResources ( ) . iterator ( ) ; List < String > parentFolders = new ArrayList < String > ( ) ; while ( it . hasNext ( ) ) { CmsResource res = it . next ( ) ; // the parent folder must not be new or deleted
String parentFolder = CmsResource . getParentFolder ( res . getRootPath ( ) ) ; if ( ( parentFolder != null ) && ! parentFolders . contains ( parentFolder ) ) { // check each parent folder only once
CmsResource parent = readResource ( dbc , parentFolder , CmsResourceFilter . ALL ) ; if ( parent . getState ( ) . isDeleted ( ) ) { if ( ! ( publishList . isUserPublishList ( ) && publishList . getDeletedFolderList ( ) . contains ( parent ) ) ) { // parent folder is deleted - direct publish not allowed
resourceIssues . addException ( new CmsVfsException ( Messages . get ( ) . container ( Messages . ERR_DIRECT_PUBLISH_PARENT_DELETED_2 , dbc . getRequestContext ( ) . removeSiteRoot ( res . getRootPath ( ) ) , parentFolder ) ) ) ; } } if ( parent . getState ( ) . isNew ( ) ) { if ( ! ( publishList . isUserPublishList ( ) && publishList . getFolderList ( ) . contains ( parent ) ) ) { // parent folder is new - direct publish not allowed
resourceIssues . addException ( new CmsVfsException ( Messages . get ( ) . container ( Messages . ERR_DIRECT_PUBLISH_PARENT_NEW_2 , dbc . removeSiteRoot ( res . getRootPath ( ) ) , parentFolder ) ) ) ; } } // add checked parent folder to prevent duplicate checks
parentFolders . add ( parentFolder ) ; } // check if the user has the explicit permission to direct publish the selected resource
if ( I_CmsPermissionHandler . PERM_ALLOWED != hasPermissions ( dbc . getRequestContext ( ) , res , CmsPermissionSet . ACCESS_DIRECT_PUBLISH , true , CmsResourceFilter . ALL ) ) { // the user has no " direct publish " permissions on the resource
permissionIssues . addException ( new CmsSecurityException ( Messages . get ( ) . container ( Messages . ERR_DIRECT_PUBLISH_NO_PERMISSIONS_1 , dbc . removeSiteRoot ( res . getRootPath ( ) ) ) ) ) ; } } if ( resourceIssues . hasExceptions ( ) || permissionIssues . hasExceptions ( ) ) { // there are issues , permission check has failed
resourceIssues . addExceptions ( permissionIssues . getExceptions ( ) ) ; throw resourceIssues ; } } // no issues have been found , permissions are granted |
public class OWLExistentialReasonerImpl { /** * Gets the fillers of the existential restrictions that are entailed to be superclasses the specified class
* expression and act along the specified property chain . In essence , this finds bindings for ? x with respect to
* the following template : < code > SubClassOf ( ce ObjectSomeValuesFrom ( p1 , ( ObjectSomeValuesFrom ( p2 ? x ) ) ) < / code > for
* arbitrary chains of properties .
* @ param ce The class expression . Not { @ code null } .
* @ param propertyList A list of property expressions that constitute a property chain .
* @ return A set of class expressions that are the entailed fillers of entailed existential restriction superclasses . */
public NodeSet < OWLClass > getFillers ( OWLClassExpression ce , List < OWLObjectPropertyExpression > propertyList ) { } } | Set < Node < OWLClass > > result = new HashSet < Node < OWLClass > > ( ) ; entailmentCheckCount = 0 ; computeExistentialFillers ( ce , propertyList , getDataFactory ( ) . getOWLThing ( ) , result , new HashSet < OWLClass > ( ) ) ; NodeSet < OWLClass > nodeSetResult = new OWLClassNodeSet ( result ) ; if ( fillerTreatment == FillerTreatment . ALL ) { return nodeSetResult ; } Set < Node < OWLClass > > removed = new HashSet < Node < OWLClass > > ( ) ; Set < Node < OWLClass > > finalResult = new HashSet < Node < OWLClass > > ( nodeSetResult . getNodes ( ) ) ; for ( Node < OWLClass > resultCls : result ) { OWLClass resultClsRep = resultCls . getRepresentativeElement ( ) ; if ( ! removed . contains ( resultCls ) ) { removed . add ( resultCls ) ; NodeSet < OWLClass > supers = reasoner . getSuperClasses ( resultClsRep , false ) ; removed . addAll ( supers . getNodes ( ) ) ; finalResult . removeAll ( supers . getNodes ( ) ) ; } } System . out . printf ( "For %s and %s, there were %d entailment checks.\n" , ce , propertyList , entailmentCheckCount ) ; return new OWLClassNodeSet ( finalResult ) ; |
public class RemoveBrownPtoN_F64 { /** * Removes radial distortion
* @ param x Distorted x - coordinate pixel
* @ param y Distorted y - coordinate pixel
* @ param out Undistorted normalized coordinate . */
@ Override public void compute ( double x , double y , Point2D_F64 out ) { } } | // initial estimate of undistorted point
out . x = a11 * x + a12 * y + a13 ; out . y = a22 * y + a23 ; removeRadial ( out . x , out . y , params . radial , params . t1 , params . t2 , out , tol ) ; |
public class IniFile { /** * Sets the specified long property .
* @ param pstrSection the INI section name .
* @ param pstrProp the property to be set .
* @ param plngVal the long value to be persisted . */
public void setLongProperty ( String pstrSection , String pstrProp , long plngVal , String pstrComments ) { } } | INISection objSec = null ; objSec = ( INISection ) this . mhmapSections . get ( pstrSection ) ; if ( objSec == null ) { objSec = new INISection ( pstrSection ) ; this . mhmapSections . put ( pstrSection , objSec ) ; } objSec . setProperty ( pstrProp , Long . toString ( plngVal ) , pstrComments ) ; |
public class CompareToBuilder { /** * < p > Compares two < code > Object < / code > s via reflection . < / p >
* < p > Fields can be private , thus < code > AccessibleObject . setAccessible < / code >
* is used to bypass normal access control checks . This will fail under a
* security manager unless the appropriate permissions are set . < / p >
* < ul >
* < li > Static fields will not be compared < / li >
* < li > If < code > compareTransients < / code > is < code > true < / code > ,
* compares transient members . Otherwise ignores them , as they
* are likely derived fields . < / li >
* < li > Superclass fields will be compared < / li >
* < / ul >
* < p > If both < code > lhs < / code > and < code > rhs < / code > are < code > null < / code > ,
* they are considered equal . < / p >
* @ param lhs left - hand object
* @ param rhs right - hand object
* @ param excludeFields Collection of String fields to exclude
* @ return a negative integer , zero , or a positive integer as < code > lhs < / code >
* is less than , equal to , or greater than < code > rhs < / code >
* @ throws NullPointerException if either < code > lhs < / code > or < code > rhs < / code >
* ( but not both ) is < code > null < / code >
* @ throws ClassCastException if < code > rhs < / code > is not assignment - compatible
* with < code > lhs < / code >
* @ since 2.2 */
@ GwtIncompatible ( "incompatible method" ) public static int reflectionCompare ( final Object lhs , final Object rhs , final Collection < String > excludeFields ) { } } | return reflectionCompare ( lhs , rhs , ReflectionToStringBuilder . toNoNullStringArray ( excludeFields ) ) ; |
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
@ Override public EClass getIfcMassMeasure ( ) { } } | if ( ifcMassMeasureEClass == null ) { ifcMassMeasureEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 836 ) ; } return ifcMassMeasureEClass ; |
public class SelectList { /** * Selects an option using optionLocator . Locator must be prefixed with one of the following : < li > label = < li > value =
* < li > index = < li > id =
* @ param optionLocator
* the select list option locator */
public void select ( String optionLocator ) { } } | getDispatcher ( ) . beforeSelect ( this , optionLocator ) ; if ( StringUtils . isBlank ( optionLocator ) ) { throw new IllegalArgumentException ( "Locator cannot be null or empty." ) ; } if ( optionLocator . split ( "=" ) . length != 2 ) { StringBuilder errMsg = new StringBuilder ( "Invalid locator specified :" ) ; errMsg . append ( optionLocator ) ; errMsg . append ( ". Locator should be of the form label=<value> (or) " ) ; errMsg . append ( "value=<value> (or) " ) ; errMsg . append ( "index=<value> (or) " ) ; errMsg . append ( "id=<value>." ) ; throw new IllegalArgumentException ( errMsg . toString ( ) ) ; } String locatorToUse = optionLocator . split ( "=" ) [ 1 ] . trim ( ) ; String tLocator = optionLocator . toLowerCase ( ) . split ( "=" ) [ 0 ] . trim ( ) ; if ( tLocator . indexOf ( "label" ) >= 0 ) { // label was given
new Select ( getElement ( ) ) . selectByVisibleText ( locatorToUse ) ; } else if ( tLocator . indexOf ( "value" ) >= 0 ) { // value was given
new Select ( getElement ( ) ) . selectByValue ( locatorToUse ) ; } else if ( tLocator . indexOf ( "index" ) >= 0 ) { // index was given
new Select ( getElement ( ) ) . selectByIndex ( Integer . parseInt ( locatorToUse ) ) ; } else if ( tLocator . indexOf ( "id" ) >= 0 ) { // id was given
getElement ( ) . findElementById ( locatorToUse ) . click ( ) ; } else { throw new NoSuchElementException ( "Unable to find " + optionLocator ) ; } getDispatcher ( ) . afterSelect ( this , optionLocator ) ; |
public class BuildStepsInner { /** * List the build arguments for a step including the secret arguments .
* ServiceResponse < PageImpl < BuildArgumentInner > > * @ param nextPageLink The NextLink from the previous successful call to List operation .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the PagedList & lt ; BuildArgumentInner & gt ; object wrapped in { @ link ServiceResponse } if successful . */
public Observable < ServiceResponse < Page < BuildArgumentInner > > > listBuildArgumentsNextSinglePageAsync ( final String nextPageLink ) { } } | if ( nextPageLink == null ) { throw new IllegalArgumentException ( "Parameter nextPageLink is required and cannot be null." ) ; } String nextUrl = String . format ( "%s" , nextPageLink ) ; return service . listBuildArgumentsNext ( nextUrl , this . client . acceptLanguage ( ) , this . client . userAgent ( ) ) . flatMap ( new Func1 < Response < ResponseBody > , Observable < ServiceResponse < Page < BuildArgumentInner > > > > ( ) { @ Override public Observable < ServiceResponse < Page < BuildArgumentInner > > > call ( Response < ResponseBody > response ) { try { ServiceResponse < PageImpl < BuildArgumentInner > > result = listBuildArgumentsNextDelegate ( response ) ; return Observable . just ( new ServiceResponse < Page < BuildArgumentInner > > ( result . body ( ) , result . response ( ) ) ) ; } catch ( Throwable t ) { return Observable . error ( t ) ; } } } ) ; |
public class systemuser_systemgroup_binding { /** * Use this API to fetch systemuser _ systemgroup _ binding resources of given name . */
public static systemuser_systemgroup_binding [ ] get ( nitro_service service , String username ) throws Exception { } } | systemuser_systemgroup_binding obj = new systemuser_systemgroup_binding ( ) ; obj . set_username ( username ) ; systemuser_systemgroup_binding response [ ] = ( systemuser_systemgroup_binding [ ] ) obj . get_resources ( service ) ; return response ; |
public class MoskitoHttpServlet { /** * Override this method to react on http head method . */
protected void moskitoDoHead ( HttpServletRequest req , HttpServletResponse res ) throws ServletException , IOException { } } | super . doHead ( req , res ) ; |
public class RepositoryManagerImpl { /** * Get a JCR Session for the named workspace in the named repository , using the supplied HTTP servlet request for
* authentication information .
* @ param request the servlet request ; may not be null or unauthenticated
* @ param repositoryName the name of the repository in which the session is created
* @ param workspaceName the name of the workspace to which the session should be connected
* @ return an active session with the given workspace in the named repository
* @ throws javax . jcr . RepositoryException if the named repository does not exist or there was a problem obtaining the named repository */
@ Override public Session getSession ( Request request , String repositoryName , String workspaceName ) throws RepositoryException { } } | // Go through all the RepositoryFactory instances and try to create one . . .
Repository repository = getRepository ( repositoryName ) ; // If there ' s no authenticated user , try an anonymous login
if ( request == null || request . username ( ) == null ) { return repository . login ( workspaceName ) ; } return repository . login ( new RequestCredentials ( request ) , workspaceName ) ; |
public class TrieIterator { /** * Finds the next supplementary element .
* For each entry in the trie , the value to be delivered is passed through
* extract ( ) .
* We always store the next element before it is requested .
* Called after calculateNextBMP ( ) completes its round of BMP characters .
* There is a slight difference in the usage of m _ currentCodepoint _
* here as compared to calculateNextBMP ( ) . Though both represents the
* lower bound of the next element , in calculateNextBMP ( ) it gets set
* at the start of any loop , where - else , in calculateNextSupplementary ( )
* since m _ currentCodepoint _ already contains the lower bound of the
* next element ( passed down from calculateNextBMP ( ) ) , we keep it till
* the end before resetting it to the new value .
* Note , if there are no more iterations , it will never get to here .
* Blocked out by next ( ) .
* @ param element return result object */
private final void calculateNextSupplementaryElement ( Element element ) { } } | int currentValue = m_nextValue_ ; m_nextCodepoint_ ++ ; m_nextBlockIndex_ ++ ; if ( UTF16 . getTrailSurrogate ( m_nextCodepoint_ ) != UTF16 . TRAIL_SURROGATE_MIN_VALUE ) { // this piece is only called when we are in the middle of a lead
// surrogate block
if ( ! checkNullNextTrailIndex ( ) && ! checkBlockDetail ( currentValue ) ) { setResult ( element , m_currentCodepoint_ , m_nextCodepoint_ , currentValue ) ; m_currentCodepoint_ = m_nextCodepoint_ ; return ; } // we have cleared one block
m_nextIndex_ ++ ; m_nextTrailIndexOffset_ ++ ; if ( ! checkTrailBlock ( currentValue ) ) { setResult ( element , m_currentCodepoint_ , m_nextCodepoint_ , currentValue ) ; m_currentCodepoint_ = m_nextCodepoint_ ; return ; } } int nextLead = UTF16 . getLeadSurrogate ( m_nextCodepoint_ ) ; // enumerate supplementary code points
while ( nextLead < TRAIL_SURROGATE_MIN_VALUE_ ) { // lead surrogate access
final int leadBlock = m_trie_ . m_index_ [ nextLead >> Trie . INDEX_STAGE_1_SHIFT_ ] << Trie . INDEX_STAGE_2_SHIFT_ ; if ( leadBlock == m_trie_ . m_dataOffset_ ) { // no entries for a whole block of lead surrogates
if ( currentValue != m_initialValue_ ) { m_nextValue_ = m_initialValue_ ; m_nextBlock_ = leadBlock ; // = = m _ trie _ . m _ dataOffset _
m_nextBlockIndex_ = 0 ; setResult ( element , m_currentCodepoint_ , m_nextCodepoint_ , currentValue ) ; m_currentCodepoint_ = m_nextCodepoint_ ; return ; } nextLead += DATA_BLOCK_LENGTH_ ; // number of total affected supplementary codepoints in one
// block
// this is not a simple addition of
// DATA _ BLOCK _ SUPPLEMENTARY _ LENGTH since we need to consider
// that we might have moved some of the codepoints
m_nextCodepoint_ = Character . toCodePoint ( ( char ) nextLead , ( char ) UTF16 . TRAIL_SURROGATE_MIN_VALUE ) ; continue ; } if ( m_trie_ . m_dataManipulate_ == null ) { throw new NullPointerException ( "The field DataManipulate in this Trie is null" ) ; } // enumerate trail surrogates for this lead surrogate
m_nextIndex_ = m_trie_ . m_dataManipulate_ . getFoldingOffset ( m_trie_ . getValue ( leadBlock + ( nextLead & Trie . INDEX_STAGE_3_MASK_ ) ) ) ; if ( m_nextIndex_ <= 0 ) { // no data for this lead surrogate
if ( currentValue != m_initialValue_ ) { m_nextValue_ = m_initialValue_ ; m_nextBlock_ = m_trie_ . m_dataOffset_ ; m_nextBlockIndex_ = 0 ; setResult ( element , m_currentCodepoint_ , m_nextCodepoint_ , currentValue ) ; m_currentCodepoint_ = m_nextCodepoint_ ; return ; } m_nextCodepoint_ += TRAIL_SURROGATE_COUNT_ ; } else { m_nextTrailIndexOffset_ = 0 ; if ( ! checkTrailBlock ( currentValue ) ) { setResult ( element , m_currentCodepoint_ , m_nextCodepoint_ , currentValue ) ; m_currentCodepoint_ = m_nextCodepoint_ ; return ; } } nextLead ++ ; } // deliver last range
setResult ( element , m_currentCodepoint_ , UCharacter . MAX_VALUE + 1 , currentValue ) ; |
public class AsynchronousRequest { /** * For more info on event detail API go < a href = " https : / / wiki . guildwars2 . com / wiki / API : 1 / event _ details " > here < / a > < br / >
* @ param id event id
* @ param callback callback that is going to be used for { @ link Call # enqueue ( Callback ) }
* @ throws GuildWars2Exception invalid id
* @ throws NullPointerException if given { @ link Callback } is null
* @ see EventDetail event detail */
public void getEventDetailedInfo ( String id , Callback < EventDetail > callback ) throws GuildWars2Exception , NullPointerException { } } | isParamValid ( new ParamChecker ( ParamType . ID , id ) ) ; gw2API . getEventDetailedInfo ( id , GuildWars2 . lang . getValue ( ) ) . enqueue ( callback ) ; |
public class DocClient { /** * read a Document , get document reader infomation .
* @ param documentId The document id .
* @ param expireInSeconds The expire time
* @ return A ReadDocumentResponse object containing the information returned by Document . */
public ReadDocumentResponse readDocument ( String documentId , long expireInSeconds ) { } } | ReadDocumentRequest request = new ReadDocumentRequest ( ) ; request . setDocumentId ( documentId ) ; request . setExpireInSeconds ( expireInSeconds ) ; return this . readDocument ( request ) ; |
public class RankingQualityHistogram { /** * Process a database
* @ param database Database to process
* @ param relation Relation to process
* @ return Histogram of ranking qualities */
public HistogramResult run ( Database database , Relation < O > relation ) { } } | final DistanceQuery < O > distanceQuery = database . getDistanceQuery ( relation , getDistanceFunction ( ) ) ; final KNNQuery < O > knnQuery = database . getKNNQuery ( distanceQuery , relation . size ( ) ) ; if ( LOG . isVerbose ( ) ) { LOG . verbose ( "Preprocessing clusters..." ) ; } // Cluster by labels
Collection < Cluster < Model > > split = ( new ByLabelOrAllInOneClustering ( ) ) . run ( database ) . getAllClusters ( ) ; DoubleHistogram hist = new DoubleHistogram ( numbins , 0.0 , 1.0 ) ; if ( LOG . isVerbose ( ) ) { LOG . verbose ( "Processing points..." ) ; } FiniteProgress progress = LOG . isVerbose ( ) ? new FiniteProgress ( "Computing ROC AUC values" , relation . size ( ) , LOG ) : null ; ROCEvaluation roc = new ROCEvaluation ( ) ; MeanVariance mv = new MeanVariance ( ) ; // sort neighbors
for ( Cluster < ? > clus : split ) { for ( DBIDIter iter = clus . getIDs ( ) . iter ( ) ; iter . valid ( ) ; iter . advance ( ) ) { KNNList knn = knnQuery . getKNNForDBID ( iter , relation . size ( ) ) ; double result = EvaluateClustering . evaluateRanking ( roc , clus , knn ) ; mv . put ( result ) ; hist . increment ( result , 1. / relation . size ( ) ) ; LOG . incrementProcessed ( progress ) ; } } LOG . ensureCompleted ( progress ) ; // Transform Histogram into a Double Vector array .
Collection < double [ ] > res = new ArrayList < > ( relation . size ( ) ) ; for ( DoubleHistogram . Iter iter = hist . iter ( ) ; iter . valid ( ) ; iter . advance ( ) ) { res . add ( new double [ ] { iter . getCenter ( ) , iter . getValue ( ) } ) ; } HistogramResult result = new HistogramResult ( "Ranking Quality Histogram" , "ranking-histogram" , res ) ; result . addHeader ( "Mean: " + mv . getMean ( ) + " Variance: " + mv . getSampleVariance ( ) ) ; return result ; |
public class JdbcCpoAdapter { /** * Updates a collection of Objects in the datasource . The assumption is that the objects contained in the collection
* exist in the datasource . This method stores the object in the datasource . The objects in the collection will be
* treated as one transaction , meaning that if one of the objects fail being updated in the datasource then the entire
* collection will be rolled back , if supported by the datasource .
* < pre > Example :
* < code >
* class SomeObject so = null ;
* class CpoAdapter cpo = null ;
* try {
* cpo = new JdbcCpoAdapter ( new JdbcDataSourceInfo ( driver , url , user , password , 1,1 , false ) ) ;
* } catch ( CpoException ce ) {
* / / Handle the error
* cpo = null ;
* if ( cpo ! = null ) {
* ArrayList al = new ArrayList ( ) ;
* for ( int i = 0 ; i < 3 ; i + + ) {
* so = new SomeObject ( ) ;
* so . setId ( 1 ) ;
* so . setName ( " SomeName " ) ;
* al . add ( so ) ;
* try {
* cpo . updateObjects ( al ) ;
* } catch ( CpoException ce ) {
* / / Handle the error
* < / code >
* < / pre >
* @ param coll This is a collection of objects that have been defined within the metadata of the datasource . If the
* class is not defined an exception will be thrown .
* @ return The number of objects updated in the datasource
* @ throws CpoException Thrown if there are errors accessing the datasource */
@ Override public < T > long updateObjects ( Collection < T > coll ) throws CpoException { } } | return processUpdateGroup ( coll , JdbcCpoAdapter . UPDATE_GROUP , null , null , null , null ) ; |
public class DefaultGroovyMethods { /** * Counts the number of occurrences which satisfy the given closure from the
* items within this Iterator .
* The iterator will become exhausted of elements after determining the count value .
* Example usage :
* < pre class = " groovyTestCase " > assert [ 2,4,2,1,3,5,2,4,3 ] . toSet ( ) . iterator ( ) . count { it % 2 = = 0 } = = 2 < / pre >
* @ param self the Iterator from which we count the number of matching occurrences
* @ param closure a closure condition
* @ return the number of occurrences
* @ since 1.8.0 */
public static < T > Number count ( Iterator < T > self , @ ClosureParams ( FirstParam . FirstGenericType . class ) Closure closure ) { } } | long answer = 0 ; BooleanClosureWrapper bcw = new BooleanClosureWrapper ( closure ) ; while ( self . hasNext ( ) ) { if ( bcw . call ( self . next ( ) ) ) { ++ answer ; } } // for b / c with Java return an int if we can
if ( answer <= Integer . MAX_VALUE ) return ( int ) answer ; return answer ; |
public class ChronicleMapBuilder { /** * Configures the marshallers , used to serialize / deserialize values to / from off - heap memory in
* maps , created by this builder .
* @ param valueReader the new bytes & rarr ; value object reader strategy
* @ param valueWriter the new value object & rarr ; bytes writer strategy
* @ return this builder back
* @ see # valueReaderAndDataAccess ( SizedReader , DataAccess )
* @ see # valueSizeMarshaller ( SizeMarshaller )
* @ see ChronicleHashBuilder # keyMarshallers ( BytesReader , BytesWriter ) */
public ChronicleMapBuilder < K , V > valueMarshallers ( @ NotNull BytesReader < V > valueReader , @ NotNull BytesWriter < ? super V > valueWriter ) { } } | valueBuilder . reader ( valueReader ) ; valueBuilder . writer ( valueWriter ) ; return this ; |
public class Iterators { /** * Returns the single element contained in { @ code iterator } .
* @ throws NoSuchElementException if the iterator is empty
* @ throws IllegalArgumentException if the iterator contains multiple
* elements . The state of the iterator is unspecified . */
@ CanIgnoreReturnValue // TODO ( kak ) : Consider removing this ?
public static < T > T getOnlyElement ( Iterator < T > iterator ) { } } | T first = iterator . next ( ) ; if ( ! iterator . hasNext ( ) ) { return first ; } StringBuilder sb = new StringBuilder ( ) . append ( "expected one element but was: <" ) . append ( first ) ; for ( int i = 0 ; i < 4 && iterator . hasNext ( ) ; i ++ ) { sb . append ( ", " ) . append ( iterator . next ( ) ) ; } if ( iterator . hasNext ( ) ) { sb . append ( ", ..." ) ; } sb . append ( '>' ) ; throw new IllegalArgumentException ( sb . toString ( ) ) ; |
public class WindowsJNIFaxClientSpi { /** * This function is invoked before any native call to set the
* native layer debug mode . */
protected void preNativeCall ( ) { } } | // get logger
Logger logger = this . getLogger ( ) ; // get log level
LogLevel logLevel = logger . getLogLevel ( ) ; boolean debugMode = false ; if ( logLevel . equals ( LogLevel . DEBUG ) ) { debugMode = true ; } // set debug mode
WindowsJNIFaxClientSpi . setDebugModeNative ( debugMode ) ; |
public class IntegrationAccountsInner { /** * Logs the integration account ' s tracking events .
* @ param resourceGroupName The resource group name .
* @ param integrationAccountName The integration account name .
* @ param logTrackingEvents The callback URL parameters .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the { @ link ServiceResponse } object if successful . */
public Observable < Void > logTrackingEventsAsync ( String resourceGroupName , String integrationAccountName , TrackingEventsDefinition logTrackingEvents ) { } } | return logTrackingEventsWithServiceResponseAsync ( resourceGroupName , integrationAccountName , logTrackingEvents ) . map ( new Func1 < ServiceResponse < Void > , Void > ( ) { @ Override public Void call ( ServiceResponse < Void > response ) { return response . body ( ) ; } } ) ; |
public class Discovery { /** * List configurations .
* Lists existing configurations for the service instance .
* @ param listConfigurationsOptions the { @ link ListConfigurationsOptions } containing the options for the call
* @ return a { @ link ServiceCall } with a response type of { @ link ListConfigurationsResponse } */
public ServiceCall < ListConfigurationsResponse > listConfigurations ( ListConfigurationsOptions listConfigurationsOptions ) { } } | Validator . notNull ( listConfigurationsOptions , "listConfigurationsOptions cannot be null" ) ; String [ ] pathSegments = { "v1/environments" , "configurations" } ; String [ ] pathParameters = { listConfigurationsOptions . environmentId ( ) } ; RequestBuilder builder = RequestBuilder . get ( RequestBuilder . constructHttpUrl ( getEndPoint ( ) , pathSegments , pathParameters ) ) ; builder . query ( "version" , versionDate ) ; Map < String , String > sdkHeaders = SdkCommon . getSdkHeaders ( "discovery" , "v1" , "listConfigurations" ) ; for ( Entry < String , String > header : sdkHeaders . entrySet ( ) ) { builder . header ( header . getKey ( ) , header . getValue ( ) ) ; } builder . header ( "Accept" , "application/json" ) ; if ( listConfigurationsOptions . name ( ) != null ) { builder . query ( "name" , listConfigurationsOptions . name ( ) ) ; } return createServiceCall ( builder . build ( ) , ResponseConverterUtils . getObject ( ListConfigurationsResponse . class ) ) ; |
public class PropertyInfoRegistry { /** * Returns a FieldPropertyInfo instance for the given field . */
static synchronized FieldPropertyInfo fieldPropertyFor ( Class < ? > type , Field field , Configuration configuration , String name ) { } } | PropertyInfoKey key = new PropertyInfoKey ( type , name , configuration ) ; FieldPropertyInfo fieldPropertyInfo = FIELD_CACHE . get ( key ) ; if ( fieldPropertyInfo == null ) { fieldPropertyInfo = new FieldPropertyInfo ( type , field , name ) ; FIELD_CACHE . put ( key , fieldPropertyInfo ) ; } return fieldPropertyInfo ; |
public class DefaultGroovyMethods { /** * Selects the maximum value found from the Iterator
* using the closure to determine the correct ordering .
* The iterator will become exhausted of elements after this operation .
* If the closure has two parameters
* it is used like a traditional Comparator . I . e . it should compare
* its two parameters for order , returning a negative integer ,
* zero , or a positive integer when the first parameter is less than ,
* equal to , or greater than the second respectively . Otherwise ,
* the Closure is assumed to take a single parameter and return a
* Comparable ( typically an Integer ) which is then used for
* further comparison .
* @ param self an Iterator
* @ param closure a Closure used to determine the correct ordering
* @ return the maximum value
* @ see # max ( java . util . Collection , groovy . lang . Closure )
* @ since 1.5.5 */
public static < T > T max ( Iterator < T > self , @ ClosureParams ( FirstParam . FirstGenericType . class ) Closure closure ) { } } | return max ( ( Iterable < T > ) toList ( self ) , closure ) ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.