signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class SkinConfigTool { /** * Default accessor for config properties . Instead of using { @ code $ config . get ( " myproperty " ) } , * one can utilise Velocity fallback onto the default getter and use { @ code $ config . myproperty } . * @ param property * the property of interest * @ return configuration node if found in the following sequence : * < ol > * < li > In page configuration < / li > * < li > In global configuration < / li > * < li > { @ code null } otherwise < / li > * < / ol > * @ since 1.0 */ public Xpp3Dom get ( String property ) { } }
// first try page properties Xpp3Dom propNode = getChild ( pageProperties , property ) ; if ( propNode == null ) { // try global propNode = getChild ( globalProperties , property ) ; } return propNode ;
public class BoneCPConfig { /** * Sets the number of ms to wait before attempting to obtain a connection again after a failure . * @ param acquireRetryDelay the acquireRetryDelay to set * @ param timeUnit time granularity */ public void setAcquireRetryDelay ( long acquireRetryDelay , TimeUnit timeUnit ) { } }
this . acquireRetryDelayInMs = TimeUnit . MILLISECONDS . convert ( acquireRetryDelay , timeUnit ) ;
public class J2clClinitPrunerPass { /** * Returns the qualifed name { @ code function } is being assigned to . * < p > This implementation abstracts over the various ways of naming a function . ASSIGN need not be * involved and there may not be a sequence of GETPROPs representing the name . * < p > TODO ( b / 123354857 ) : Delete this method when naming concepts have been unified . It was created * as a temporary measure to support ES6 in this pass without becoming blocked on name APIs . */ private static String getQualifiedNameOfFunction ( Node function ) { } }
checkArgument ( function . isFunction ( ) , function ) ; // The node representing the name ( e . g . GETPROP , MEMBER _ FUNCTION _ DEF , etc . ) . Node lValue = NodeUtil . getBestLValue ( function ) ; return NodeUtil . getBestLValueName ( lValue ) ;
public class AbstractColumn { /** * Builder methods used to set the { @ link Object default value } used when a { @ link Object value } * is not specified for this { @ link Column } . * @ param < S > { @ link Class Sub - type } of this { @ link Column } . * @ param defaultValue { @ link Object default value } used when a { @ link Object value } * is not specified for this { @ link Column } . * @ return this { @ link Column } . * @ see # setDefaultValue ( Object ) */ @ SuppressWarnings ( "unchecked" ) public < S extends AbstractColumn < T > > S usingDefaultValue ( T defaultValue ) { } }
setDefaultValue ( defaultValue ) ; return ( S ) this ;
public class MetricsUtils { /** * Creates the body of a POST request encoding the metric report for a single { @ link Event } . * @ param event The event to report . * @ param analyticsId the Google Analytics ID to receive the report . * @ param random Random number generator to use for cache busting . * @ return A URL - encoded POST request body , in the format expected by Google Analytics . */ static UrlEncodedFormEntity buildPostBody ( Event event , String analyticsId , Random random ) { } }
checkNotNull ( event ) ; checkNotNull ( analyticsId ) ; checkNotNull ( random ) ; String virtualPageName = buildVirtualPageName ( event . type ( ) , event . objectType ( ) , event . name ( ) ) ; String virtualPageTitle = buildVirtualPageTitle ( event . metadata ( ) ) ; String combinedEventType = buildCombinedType ( event . type ( ) , event . objectType ( ) ) ; return new UrlEncodedFormEntity ( buildParameters ( analyticsId , event . clientId ( ) , virtualPageName , virtualPageTitle , combinedEventType , event . name ( ) , event . isUserSignedIn ( ) , event . isUserInternal ( ) , event . isUserTrialEligible ( ) , event . projectNumberHash ( ) , event . billingIdHash ( ) , event . clientHostname ( ) , random ) , StandardCharsets . UTF_8 ) ;
public class ComponentUuidFactoryImpl { /** * Get UUID from database if it exists , otherwise generate a new one . */ @ Override public String getOrCreateForKey ( String key ) { } }
return uuidsByKey . computeIfAbsent ( key , k -> Uuids . create ( ) ) ;
public class PoolInfo { /** * Returns whether or not the given pool name is legal . * Legal pool names are of nonzero length and are formed only of alphanumeric * characters , underscores ( _ ) , and hyphens ( - ) . * @ param poolInfo the name of the pool to check * @ return true if the name is a valid pool name , false otherwise */ public static boolean isLegalPoolInfo ( PoolInfo poolInfo ) { } }
if ( poolInfo == null || poolInfo . getPoolGroupName ( ) == null || poolInfo . getPoolName ( ) == null ) { return false ; } if ( INVALID_REGEX_PATTERN . matcher ( poolInfo . getPoolGroupName ( ) ) . matches ( ) || poolInfo . getPoolGroupName ( ) . isEmpty ( ) ) { return false ; } if ( INVALID_REGEX_PATTERN . matcher ( poolInfo . getPoolName ( ) ) . matches ( ) || poolInfo . getPoolName ( ) . isEmpty ( ) ) { return false ; } return true ;
public class AfplibFactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertFontDescriptorSpecificationFtUsFlagsToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class Auth { /** * Set up table from given CREATE TABLE statement under system _ auth keyspace , if not already done so . * @ param name name of the table * @ param cql CREATE TABLE statement */ public static void setupTable ( String name , String cql ) { } }
if ( Schema . instance . getCFMetaData ( AUTH_KS , name ) == null ) { try { CFStatement parsed = ( CFStatement ) QueryProcessor . parseStatement ( cql ) ; parsed . prepareKeyspace ( AUTH_KS ) ; CreateTableStatement statement = ( CreateTableStatement ) parsed . prepare ( ) . statement ; CFMetaData cfm = statement . getCFMetaData ( ) . copy ( CFMetaData . generateLegacyCfId ( AUTH_KS , name ) ) ; assert cfm . cfName . equals ( name ) ; MigrationManager . announceNewColumnFamily ( cfm ) ; } catch ( Exception e ) { throw new AssertionError ( e ) ; } }
public class TimeZoneFormat { /** * Parses the input text using the default format patterns ( e . g . " UTC { 0 } " ) . * @ param text the input text * @ param start the start index * @ param parsedLen the parsed length , or 0 on failure * @ return the parsed offset in milliseconds . */ private int parseOffsetDefaultLocalizedGMT ( String text , int start , int [ ] parsedLen ) { } }
int idx = start ; int offset = 0 ; int parsed = 0 ; do { // check global default GMT alternatives int gmtLen = 0 ; for ( String gmt : ALT_GMT_STRINGS ) { int len = gmt . length ( ) ; if ( text . regionMatches ( true , idx , gmt , 0 , len ) ) { gmtLen = len ; break ; } } if ( gmtLen == 0 ) { break ; } idx += gmtLen ; // offset needs a sign char and a digit at minimum if ( idx + 1 >= text . length ( ) ) { break ; } // parse sign int sign = 1 ; char c = text . charAt ( idx ) ; if ( c == '+' ) { sign = 1 ; } else if ( c == '-' ) { sign = - 1 ; } else { break ; } idx ++ ; // offset part // try the default pattern with the separator first int [ ] lenWithSep = { 0 } ; int offsetWithSep = parseDefaultOffsetFields ( text , idx , DEFAULT_GMT_OFFSET_SEP , lenWithSep ) ; if ( lenWithSep [ 0 ] == text . length ( ) - idx ) { // maximum match offset = offsetWithSep * sign ; idx += lenWithSep [ 0 ] ; } else { // try abutting field pattern int [ ] lenAbut = { 0 } ; int offsetAbut = parseAbuttingOffsetFields ( text , idx , lenAbut ) ; if ( lenWithSep [ 0 ] > lenAbut [ 0 ] ) { offset = offsetWithSep * sign ; idx += lenWithSep [ 0 ] ; } else { offset = offsetAbut * sign ; idx += lenAbut [ 0 ] ; } } parsed = idx - start ; } while ( false ) ; parsedLen [ 0 ] = parsed ; return offset ;
public class SocketFactoryHelper { /** * Attempt a socket bind to the input address with the given re - use option * flag . * @ param address * @ param reuseflag * @ throws IOException */ private void attemptSocketBind ( ServerSocket serverSocket , SocketAddress address , boolean reuseflag , int backlog ) throws IOException { } }
serverSocket . setReuseAddress ( reuseflag ) ; serverSocket . bind ( address , backlog ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "ServerSocket bind worked, reuse=" + serverSocket . getReuseAddress ( ) ) ; }
public class TableFactor { /** * Super basic in - place array normalization * @ param arr the array to normalize */ private void normalizeLogArr ( double [ ] arr ) { } }
// Find the log - scale normalization value double max = Double . NEGATIVE_INFINITY ; for ( double d : arr ) { if ( d > max ) max = d ; } double expSum = 0.0 ; for ( double d : arr ) { expSum += Math . exp ( d - max ) ; } double logSumExp = max + Math . log ( expSum ) ; if ( Double . isInfinite ( logSumExp ) ) { // Just put in uniform probabilities if we are normalizing all 0s for ( int i = 0 ; i < arr . length ; i ++ ) { arr [ i ] = 1.0 / arr . length ; } } else { // Normalize in log - scale before exponentiation , to help with stability for ( int i = 0 ; i < arr . length ; i ++ ) { arr [ i ] = Math . exp ( arr [ i ] - logSumExp ) ; } }
public class LogPackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getNewObjectIDMUploaded ( ) { } }
if ( newObjectIDMUploadedEClass == null ) { newObjectIDMUploadedEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( LogPackage . eNS_URI ) . getEClassifiers ( ) . get ( 14 ) ; } return newObjectIDMUploadedEClass ;
public class SelenideProxyServer { /** * Start the server * It automatically adds one response filter " download " that can intercept downloaded files . */ public void start ( ) { } }
proxy . setTrustAllServers ( true ) ; if ( outsideProxy != null ) { proxy . setChainedProxy ( getProxyAddress ( outsideProxy ) ) ; } addRequestFilter ( "authentication" , new AuthenticationFilter ( ) ) ; addRequestFilter ( "requestSizeWatchdog" , new RequestSizeWatchdog ( ) ) ; addResponseFilter ( "responseSizeWatchdog" , new ResponseSizeWatchdog ( ) ) ; addResponseFilter ( "download" , new FileDownloadFilter ( config ) ) ; proxy . start ( config . proxyPort ( ) ) ; port = proxy . getPort ( ) ;
public class SQLParser { /** * Parse a date string . We parse the documented forms , which are : * < ul > * < li > YYYY - MM - DD < / li > * < li > YYYY - MM - DD HH : MM : SS < / li > * < li > YYYY - MM - DD HH : MM : SS . SSSSS < / li > * < / ul > * As it turns out , TimestampType takes string parameters in just this * format . So , we defer to TimestampType , and return what it * constructs . This has microsecond granularity . * @ param dateIn input date string * @ return TimestampType object * @ throws SQLParser . Exception */ public static TimestampType parseDate ( String dateIn ) { } }
// Remove any quotes around the timestamp value . ENG - 2623 String dateRepled = dateIn . replaceAll ( "^\"|\"$" , "" ) . replaceAll ( "^'|'$" , "" ) ; return new TimestampType ( dateRepled ) ;
public class FT12Connection { /** * Attempts to gets the available serial communication ports on the host . * At first , the Java system property " microedition . commports " is queried . If there is * no property with that key , and Calimero itself has access to serial ports , * the lowest 10 ports numbers are enumerated and checked if present . < br > * The empty array is returned if no ports are discovered . * @ return array of strings with found port IDs */ public static String [ ] getPortIdentifiers ( ) { } }
String ports = null ; try { ports = System . getProperty ( "microedition.commports" ) ; } catch ( final SecurityException e ) { } if ( ports != null ) { final StringTokenizer st = new StringTokenizer ( ports , "," ) ; final String [ ] portIDs = new String [ st . countTokens ( ) ] ; for ( int i = 0 ; i < portIDs . length ; ++ i ) portIDs [ i ] = st . nextToken ( ) ; return portIDs ; } if ( SerialCom . isLoaded ( ) ) { final String prefix = defaultPortPrefix ( ) ; final List l = new ArrayList ( 10 ) ; for ( int i = 0 ; i < 10 ; ++ i ) if ( SerialCom . portExists ( prefix + i ) ) l . add ( prefix + i ) ; return ( String [ ] ) l . toArray ( new String [ l . size ( ) ] ) ; } // skip other possible adapters for now , and return empty list . . . return new String [ 0 ] ;
public class CommerceWarehousePersistenceImpl { /** * Clears the cache for all commerce warehouses . * The { @ link EntityCache } and { @ link FinderCache } are both cleared by this method . */ @ Override public void clearCache ( ) { } }
entityCache . clearCache ( CommerceWarehouseImpl . class ) ; finderCache . clearCache ( FINDER_CLASS_NAME_ENTITY ) ; finderCache . clearCache ( FINDER_CLASS_NAME_LIST_WITH_PAGINATION ) ; finderCache . clearCache ( FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION ) ;
public class JSONCompare { /** * Compares JSON string provided to the expected JSON string using provided comparator , and returns the results of * the comparison . * @ param expectedStr Expected JSON string * @ param actualStr JSON string to compare * @ param comparator Comparator to use * @ return result of the comparison * @ throws JSONException JSON parsing error * @ throws IllegalArgumentException when type of expectedStr doesn ' t match the type of actualStr */ public static JSONCompareResult compareJSON ( String expectedStr , String actualStr , JSONComparator comparator ) throws JSONException { } }
Object expected = JSONParser . parseJSON ( expectedStr ) ; Object actual = JSONParser . parseJSON ( actualStr ) ; if ( ( expected instanceof JSONObject ) && ( actual instanceof JSONObject ) ) { return compareJSON ( ( JSONObject ) expected , ( JSONObject ) actual , comparator ) ; } else if ( ( expected instanceof JSONArray ) && ( actual instanceof JSONArray ) ) { return compareJSON ( ( JSONArray ) expected , ( JSONArray ) actual , comparator ) ; } else if ( expected instanceof JSONString && actual instanceof JSONString ) { return compareJson ( ( JSONString ) expected , ( JSONString ) actual ) ; } else if ( expected instanceof JSONObject ) { return new JSONCompareResult ( ) . fail ( "" , expected , actual ) ; } else { return new JSONCompareResult ( ) . fail ( "" , expected , actual ) ; }
public class DictionariesNERTagger { /** * { @ link Dictionaries } based Named Entity Detection and Classification . * @ param tokens * the tokenized sentence * @ return a list of detected { @ link SequenceLabel } objects */ public final List < SequenceLabel > getNames ( final String [ ] tokens ) { } }
final Span [ ] origSpans = nercToSpans ( tokens ) ; final Span [ ] neSpans = SequenceLabelerME . dropOverlappingSpans ( origSpans ) ; final List < SequenceLabel > names = getNamesFromSpans ( neSpans , tokens ) ; return names ;
public class PagerDuty { /** * Create a new instance using the specified API key . */ public static PagerDuty create ( String apiKey ) { } }
Retrofit retrofit = new Retrofit . Builder ( ) . baseUrl ( HOST ) . addConverterFactory ( GsonConverterFactory . create ( ) ) . build ( ) ; return create ( apiKey , retrofit ) ;
public class JobClientManager { /** * 得到 可用的 客户端节点 */ public JobClientNode getAvailableJobClient ( String nodeGroup ) { } }
Set < JobClientNode > jobClientNodes = NODE_MAP . get ( nodeGroup ) ; if ( CollectionUtils . isEmpty ( jobClientNodes ) ) { return null ; } List < JobClientNode > list = new ArrayList < JobClientNode > ( jobClientNodes ) ; while ( list . size ( ) > 0 ) { JobClientNode jobClientNode = loadBalance . select ( list , null ) ; if ( jobClientNode != null && ( jobClientNode . getChannel ( ) == null || jobClientNode . getChannel ( ) . isClosed ( ) ) ) { ChannelWrapper channel = appContext . getChannelManager ( ) . getChannel ( jobClientNode . getNodeGroup ( ) , NodeType . JOB_CLIENT , jobClientNode . getIdentity ( ) ) ; if ( channel != null ) { // 更新channel jobClientNode . setChannel ( channel ) ; } } if ( jobClientNode != null && jobClientNode . getChannel ( ) != null && ! jobClientNode . getChannel ( ) . isClosed ( ) ) { return jobClientNode ; } else { list . remove ( jobClientNode ) ; } } return null ;
public class DefaultConfigMethod { /** * Returns the configuration options for this configuration method . * @ return array of configuration options * @ throws IllegalAccessException - Re - thrown , from invoking the configuration method via reflection * @ throws InvocationTargetException - Re - thrown , from invoking the configuration method via reflection * @ throws InstantiationException - Re - thrown , from invoking the configuration method via reflection */ public Option [ ] getOptions ( ) throws IllegalAccessException , InvocationTargetException , InstantiationException { } }
if ( options == null ) { List < Option > options = new ArrayList < Option > ( ) ; Configuration config = getMethod ( ) . getAnnotation ( Configuration . class ) ; for ( Class < ? extends CompositeOption > option : config . extend ( ) ) { options . addAll ( Arrays . asList ( option . newInstance ( ) . getOptions ( ) ) ) ; } options . addAll ( Arrays . asList ( ( Option [ ] ) getMethod ( ) . invoke ( configInstance ) ) ) ; this . options = options . toArray ( new Option [ options . size ( ) ] ) ; } return options ;
public class ConfigProto { /** * < pre > * Map from device type name ( e . g . , " CPU " or " GPU " ) to maximum * number of devices of that type to use . If a particular device * type is not found in the map , the system picks an appropriate * number . * < / pre > * < code > map & lt ; string , int32 & gt ; device _ count = 1 ; < / code > */ public java . util . Map < java . lang . String , java . lang . Integer > getDeviceCountMap ( ) { } }
return internalGetDeviceCount ( ) . getMap ( ) ;
public class TimelineModel { /** * Merge the given one event with the given collection of events with UI update . Only events within one group can be merged . * Note : after merging , the merged event will get the same properties as the given one event except start and end dates . * @ param event given event to be merged with collection of events * @ param events collection of events * @ param timelineUpdater TimelineUpdater instance to update the merged events in UI * @ return TimelineEvent result event after merging * @ throws IllegalStateException thrown if not all events are within the same group */ public TimelineEvent merge ( TimelineEvent event , Collection < TimelineEvent > events , TimelineUpdater timelineUpdater ) { } }
if ( event == null ) { // nothing to merge return null ; } if ( events == null || events . isEmpty ( ) ) { // nothing to merge return event ; } // check whether all events within the same group String group = event . getGroup ( ) ; for ( TimelineEvent e : events ) { if ( ( group == null && e . getGroup ( ) != null ) || ( group != null && ! group . equals ( e . getGroup ( ) ) ) ) { throw new IllegalStateException ( "Events to be merged may be only belong to one and the same group!" ) ; } } // order events according to their start / end dates TreeSet < TimelineEvent > orderedEvents = new TreeSet < TimelineEvent > ( new TimelineEventComparator ( ) ) ; orderedEvents . add ( event ) ; orderedEvents . addAll ( events ) ; // find the largest end date Date endDate = null ; for ( TimelineEvent e : orderedEvents ) { if ( endDate == null && e . getEndDate ( ) != null ) { endDate = e . getEndDate ( ) ; } else if ( endDate != null && e . getEndDate ( ) != null && endDate . before ( e . getEndDate ( ) ) ) { endDate = e . getEndDate ( ) ; } } TimelineEvent mergedEvent = new TimelineEvent ( event . getData ( ) , orderedEvents . first ( ) . getStartDate ( ) , endDate , event . isEditable ( ) , event . getGroup ( ) , event . getStyleClass ( ) ) ; // merge . . . deleteAll ( events , timelineUpdater ) ; update ( mergedEvent , timelineUpdater ) ; return mergedEvent ;
public class QueryParser { /** * : not ( selector ) */ private void not ( ) { } }
tq . consume ( ":not" ) ; String subQuery = tq . chompBalanced ( '(' , ')' ) ; Validate . notEmpty ( subQuery , ":not(selector) subselect must not be empty" ) ; evals . add ( new StructuralEvaluator . Not ( parse ( subQuery ) ) ) ;
public class ConfigAccessorImpl { /** * must be called within synchronized ( this ) block */ private boolean checkCache ( ) { } }
if ( this . cacheValid ) { long now = System . nanoTime ( ) ; if ( ( now - this . cacheExpiryTime ) > 0 ) { this . cacheValid = false ; } } return this . cacheValid ;
public class MacOsWatchServiceFactory { /** * The default { @ link WatchService } . * @ return The watch service to use . * @ throws IOException if an error occurs creating the watch service */ @ Bean ( preDestroy = "close" ) @ Prototype @ Requires ( classes = { } }
MacOSXListeningWatchService . class , Library . class } ) @ Requires ( property = FileWatchConfiguration . ENABLED , value = StringUtils . TRUE , defaultValue = StringUtils . TRUE ) @ Requires ( property = FileWatchConfiguration . PATHS ) @ Primary protected WatchService macWatchService ( ) throws IOException { try { return new MacOSXListeningWatchService ( ) ; } catch ( Exception e ) { if ( LOG . isWarnEnabled ( ) ) { LOG . warn ( "Unable to create Mac OS X specific watch service. Falling back to default polling strategy: " + e . getMessage ( ) , e ) ; } return new WatchServiceFactory ( ) . watchService ( ) ; }
public class CorsFilters { /** * Apply CORS filter on request * @ param requestContext request context * @ param predicate must return { @ code true } if the input origin is allowed , else { @ code false } . */ public static void filterRequest ( ContainerRequestContext requestContext , Predicate < String > predicate ) { } }
// NOT a CORS request String requestOrigin = requestContext . getHeaderString ( Headers . ORIGIN ) ; if ( requestOrigin == null ) { return ; } if ( ! predicate . test ( requestOrigin ) ) { requestContext . abortWith ( Response . status ( Response . Status . BAD_REQUEST ) . build ( ) ) ; return ; } // It is a CORS pre - flight request , there is no route for it , just return 200 if ( requestContext . getMethod ( ) . equalsIgnoreCase ( HttpMethod . OPTIONS ) ) { requestContext . abortWith ( Response . status ( Response . Status . OK ) . build ( ) ) ; }
public class ServicesInner { /** * Stop service . * The services resource is the top - level resource that represents the Data Migration Service . This action stops the service and the service cannot be used for data migration . The service owner won ' t be billed when the service is stopped . * @ param groupName Name of the resource group * @ param serviceName Name of the service * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceResponse } object if successful . */ public Observable < Void > beginStopAsync ( String groupName , String serviceName ) { } }
return beginStopWithServiceResponseAsync ( groupName , serviceName ) . map ( new Func1 < ServiceResponse < Void > , Void > ( ) { @ Override public Void call ( ServiceResponse < Void > response ) { return response . body ( ) ; } } ) ;
public class CmsContextMenu { /** * Creates the context menu . < p > * @ param entries a list with all entries for the context menu */ private void createContextMenu ( List < I_CmsContextMenuEntry > entries ) { } }
Iterator < I_CmsContextMenuEntry > it = entries . iterator ( ) ; while ( it . hasNext ( ) ) { I_CmsContextMenuEntry entry = it . next ( ) ; if ( ! entry . isVisible ( ) ) { continue ; } if ( entry . isSeparator ( ) ) { addSeparator ( ) ; } else { A_CmsContextMenuItem item = entry . generateMenuItem ( ) ; if ( entry . hasSubMenu ( ) ) { CmsContextMenu submenu = new CmsContextMenu ( entry . getSubMenu ( ) , m_isFixed , m_popup ) ; item . setSubMenu ( submenu ) ; addItem ( item ) ; } else { addItem ( item ) ; } } }
public class BackupClientImpl { /** * getState . * @ param state * value of state * @ return String sate */ private String getRepositoryBackupToFullState ( int state ) { } }
String st = "" ; switch ( state ) { case RepositoryBackupChain . FINISHED : st = "finished" ; break ; case RepositoryBackupChain . WORKING : st = "working" ; break ; case RepositoryBackupChain . INITIALIZED : st = "initialized" ; break ; case RepositoryBackupChain . FULL_BACKUP_FINISHED_INCREMENTAL_BACKUP_WORKING : st = "finished" ; break ; default : break ; } return st ;
public class CertStore { /** * Returns the default { @ code CertStore } type as specified by the * { @ code certstore . type } security property , or the string * { @ literal " LDAP " } if no such property exists . * < p > The default { @ code CertStore } type can be used by applications * that do not want to use a hard - coded type when calling one of the * { @ code getInstance } methods , and want to provide a default * { @ code CertStore } type in case a user does not specify its own . * < p > The default { @ code CertStore } type can be changed by setting * the value of the { @ code certstore . type } security property to the * desired type . * @ see java . security . Security security properties * @ return the default { @ code CertStore } type as specified by the * { @ code certstore . type } security property , or the string * { @ literal " LDAP " } if no such property exists . */ public final static String getDefaultType ( ) { } }
String cstype ; cstype = AccessController . doPrivileged ( new PrivilegedAction < String > ( ) { public String run ( ) { return Security . getProperty ( CERTSTORE_TYPE ) ; } } ) ; if ( cstype == null ) { cstype = "LDAP" ; } return cstype ;
public class PinView { /** * Save current attributes in { @ link PinView # mPinViewSettings } */ private void saveSettings ( ) { } }
mPinViewSettings = new PinViewSettings . Builder ( ) . withColorSplit ( mColorSplit ) . withColorTextPinBox ( mColorTextPinBoxes ) . withColorTextTitles ( mColorTextTitles ) . withCustomDrawablePinBox ( mCustomDrawablePinBox ) . withDeleteOnClick ( isDeleteOnClick ( ) ) . withNativePinBox ( isNativePinBox ( ) ) . withSplit ( getSplit ( ) ) . withMaskPassword ( isMaskPassword ( ) ) . withKeyboardMandatory ( isKeyboardMandatory ( ) ) . withNumberCharacters ( getNumberCharacters ( ) ) . withSizeSplit ( getSizeSplit ( ) ) . withTextSizePinBox ( getTextSizePinBoxes ( ) ) . withTextSizeTitles ( getTextSizeTitles ( ) ) . withNumberPinBoxes ( getNumberPinBoxes ( ) ) . withPinTitles ( getPinTitles ( ) ) . build ( ) ;
public class PolicyConstraintsExtension { /** * Return an enumeration of names of attributes existing within this * attribute . */ public Enumeration < String > getElements ( ) { } }
AttributeNameEnumeration elements = new AttributeNameEnumeration ( ) ; elements . addElement ( REQUIRE ) ; elements . addElement ( INHIBIT ) ; return ( elements . elements ( ) ) ;
public class Symmetry454Chronology { /** * Obtains a local date in Symmetry454 calendar system from the * era , year - of - era and day - of - year fields . * @ param era the Symmetry454 era , not null * @ param yearOfEra the year - of - era * @ param dayOfYear the day - of - year * @ return the Symmetry454 local date , not null * @ throws DateTimeException if unable to create the date * @ throws ClassCastException if the { @ code era } is not a { @ code IsoEra } */ @ Override public Symmetry454Date dateYearDay ( Era era , int yearOfEra , int dayOfYear ) { } }
return dateYearDay ( prolepticYear ( era , yearOfEra ) , dayOfYear ) ;
public class ArchiveTransferManager { /** * Calculates the part size to use when uploading an archive of the * specified size using Glacier ' s multipart upload APIs . Because of the tree * hashing algorithm , part sizes must be aligned on 2 ^ n MB boundaries ( ex : * 1MB , 2MB , 4MB , 8MB , etc ) . All parts must be the same size , except for the * last part . * @ param fileSize * The size of the file being uploaded . * @ return The part size to use in the multipart upload . */ private long calculatePartSize ( long fileSize ) { } }
long partSize = MINIMUM_PART_SIZE ; int approxNumParts = 1 ; while ( partSize * approxNumParts < fileSize && partSize * 2 <= MAXIMUM_UPLOAD_PART_SIZE ) { partSize *= 2 ; approxNumParts *= 2 ; } return partSize ;
public class AbstractAmazonDynamoDBAsync { /** * Simplified method form for invoking the UpdateTable operation with an AsyncHandler . * @ see # updateTableAsync ( UpdateTableRequest , com . amazonaws . handlers . AsyncHandler ) */ @ Override public java . util . concurrent . Future < UpdateTableResult > updateTableAsync ( String tableName , ProvisionedThroughput provisionedThroughput , com . amazonaws . handlers . AsyncHandler < UpdateTableRequest , UpdateTableResult > asyncHandler ) { } }
return updateTableAsync ( new UpdateTableRequest ( ) . withTableName ( tableName ) . withProvisionedThroughput ( provisionedThroughput ) , asyncHandler ) ;
public class SocketStream { /** * Reads bytes from the socket . * @ param buf byte buffer receiving the bytes * @ param offset offset into the buffer * @ param length number of bytes to read * @ return number of bytes read or - 1 * @ exception throws ClientDisconnectException if the connection is dropped */ @ Override public int read ( byte [ ] buf , int offset , int length ) throws IOException { } }
try { if ( _is == null ) { if ( _s == null ) { return - 1 ; } _is = _s . getInputStream ( ) ; } int readLength = _is . read ( buf , offset , length ) ; if ( readLength >= 0 ) { _totalReadBytes += readLength ; } return readLength ; } catch ( InterruptedIOException e ) { e . printStackTrace ( ) ; if ( _throwReadInterrupts ) throw e ; log . log ( Level . FINEST , e . toString ( ) , e ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; if ( _throwReadInterrupts ) { throw e ; } if ( log . isLoggable ( Level . FINEST ) ) { log . log ( Level . FINEST , e . toString ( ) , e ) ; } else { log . finer ( e . toString ( ) ) ; } // server / 0611 /* try { close ( ) ; } catch ( IOException e1 ) { */ } return - 1 ;
public class QueryBuilder { /** * Provides a type hint for an expression , as in { @ code WHERE k = ( double ) 1/3 } . * < p > To create the data type , use the constants and static methods in { @ link DataTypes } , or * { @ link # udt ( CqlIdentifier ) } . */ @ NonNull public static Term typeHint ( @ NonNull Term term , @ NonNull DataType targetType ) { } }
return new TypeHintTerm ( term , targetType ) ;
public class CmsImagePreviewHandler { /** * Returns image tag attributes to set for editor plugins . < p > * @ param callback the callback to execute */ public void getImageAttributes ( I_CmsSimpleCallback < Map < String , String > > callback ) { } }
Map < String , String > result = new HashMap < String , String > ( ) ; result . put ( Attribute . hash . name ( ) , String . valueOf ( getImageIdHash ( ) ) ) ; m_formatHandler . getImageAttributes ( result ) ; m_previewDialog . getImageAttributes ( result , callback ) ;
public class AllChemCompProvider { /** * Load all { @ link ChemComp } definitions into memory . */ private void loadAllChemComps ( ) throws IOException { } }
String fileName = getLocalFileName ( ) ; logger . debug ( "Loading " + fileName ) ; InputStreamProvider isp = new InputStreamProvider ( ) ; InputStream inStream = isp . getInputStream ( fileName ) ; MMcifParser parser = new SimpleMMcifParser ( ) ; ChemCompConsumer consumer = new ChemCompConsumer ( ) ; // The Consumer builds up the BioJava - structure object . // you could also hook in your own and build up you own data model . parser . addMMcifConsumer ( consumer ) ; parser . parse ( new BufferedReader ( new InputStreamReader ( inStream ) ) ) ; dict = consumer . getDictionary ( ) ; inStream . close ( ) ;
public class FactoryBriefDefinition { /** * Creates a descriptor by randomly selecting points inside a square region using a Gaussian distribution * with a sigma of ( 5/2 ) * radius . This is done exactly as is described in the paper where twice * as many points are sampled as are compared . . * @ param rand Random number generator . * @ param radius Radius of the square region . width = 2 * radius + 1. * @ param numPairs Number of sample point pairs . * @ return Definition of a BRIEF feature . */ public static BinaryCompareDefinition_I32 gaussian ( Random rand , int radius , int numPairs ) { } }
BinaryCompareDefinition_I32 ret = new BinaryCompareDefinition_I32 ( radius , numPairs * 2 , numPairs ) ; double sigma = ( 2.0 * radius + 1.0 ) / 5.0 ; for ( int i = 0 ; i < numPairs ; i ++ ) { randomGaussian ( rand , sigma , radius , ret . samplePoints [ i ] ) ; randomGaussian ( rand , sigma , radius , ret . samplePoints [ i + numPairs ] ) ; ret . compare [ i ] . set ( i , i + numPairs ) ; } return ret ;
public class JacksonDBCollection {

    /**
     * Performs an update operation.
     *
     * @param query   search query for old object to update
     * @param object  object with which to update <tt>query</tt>
     * @param upsert  if the database should create the element if it does not exist
     * @param multi   if the update should be applied to all objects matching (db version 1.1.3
     *                and above). An object will not be inserted if it does not exist in the
     *                collection and upsert=true and multi=true.
     *                See <a href="http://www.mongodb.org/display/DOCS/Atomic+Operations">Atomic Operations</a>
     * @param concern the write concern
     * @return The write result
     * @throws MongoException If an error occurred
     */
    public WriteResult<T, K> update(T query, T object, boolean upsert, boolean multi,
                                    WriteConcern concern) throws MongoException {
        // Convert both typed objects into BasicDBObjects and delegate to the DBObject overload.
        return update(convertToBasicDbObject(query), convertToBasicDbObject(object), upsert, multi, concern);
    }
}
public class PropertiesUtil { /** * Loads properties from the file identified by the given { @ code fileString } , * which can be a regular file ( path ) , a classpath resource or a URL . * @ param fileString identifies the file * @ return the properties * @ throws IOException on errors reading the file / URL */ public static Properties loadProperties ( final String fileString ) throws IOException { } }
final File file = new File ( fileString ) ; // first try : regular file if ( file . exists ( ) ) { return loadProperties ( file ) ; } // second try : classpath resource final InputStream resourceStream = Thread . currentThread ( ) . getContextClassLoader ( ) . getResourceAsStream ( fileString ) ; if ( resourceStream != null ) { return loadProperties ( resourceStream ) ; } // third / last try : URL try { final URL url = new URL ( fileString ) ; return loadProperties ( url . openStream ( ) ) ; } catch ( final MalformedURLException e ) { // there is no guarantee that fileString is a URL at all } throw new IllegalArgumentException ( "Unable to find " + fileString ) ;
public class GVRSceneObject {

    /**
     * Visits all the components of the specified type attached to this scene
     * object and its descendants.
     *
     * <p>The {@code ComponentVisitor.visit} function is called for every eligible
     * component until it returns false, allowing safe traversal of the scene graph
     * without copying it. This performs much better than iterating
     * {@code children()} or {@code getChildren()}.
     *
     * @param visitor       ComponentVisitor interface implementing "visit"
     * @param componentType type of component to find
     * @see #children()
     * @see #getChildren()
     * @see #forAllDescendants(SceneVisitor)
     * @see #getComponent(long)
     */
    public void forAllComponents(ComponentVisitor visitor, long componentType) {
        // Check this node's own component first, under the component lock.
        synchronized (mComponents) {
            GVRComponent comp = getComponent(componentType);
            // Visitor returning false stops the traversal of this subtree.
            if ((comp != null) && !visitor.visit(comp)) {
                return;
            }
        }
        // Then recurse into children under the separate child-list lock;
        // the two locks are never held at the same time here.
        synchronized (mChildren) {
            for (int i = 0; i < mChildren.size(); ++i) {
                GVRSceneObject child = mChildren.get(i);
                child.forAllComponents(visitor, componentType);
            }
        }
    }
}
public class PaymentActivity {

    /**
     * Cleaning up all Rx subscriptions in onDestroy.
     */
    @Override
    protected void onDestroy() {
        // Stop all in-flight Rx work before tearing anything else down.
        mCompositeDisposable.dispose();
        // Detach the local broadcast receiver registered for this activity.
        LocalBroadcastManager.getInstance(this).unregisterReceiver(mBroadcastReceiver);
        // Let the payment session release its own resources.
        mPaymentSession.onDestroy();
        // Chain up last so our cleanup runs while the activity is still valid.
        super.onDestroy();
    }
}
public class GrailsASTUtils { /** * Returns true if the given class name is a parent class of the given class * @ param classNode The class node * @ param parentClassName the parent class name * @ return True if it is a subclass */ public static boolean isSubclassOf ( ClassNode classNode , String parentClassName ) { } }
ClassNode currentSuper = classNode . getSuperClass ( ) ; while ( currentSuper != null && ! currentSuper . getName ( ) . equals ( OBJECT_CLASS ) ) { if ( currentSuper . getName ( ) . equals ( parentClassName ) ) return true ; currentSuper = currentSuper . getSuperClass ( ) ; } return false ;
public class TextChunk { /** * Splits a TextChunk in two , at the position of the i - th TextElement */ public TextChunk [ ] splitAt ( int i ) { } }
if ( i < 1 || i >= this . getTextElements ( ) . size ( ) ) { throw new IllegalArgumentException ( ) ; } TextChunk [ ] rv = new TextChunk [ ] { new TextChunk ( this . getTextElements ( ) . subList ( 0 , i ) ) , new TextChunk ( this . getTextElements ( ) . subList ( i , this . getTextElements ( ) . size ( ) ) ) } ; return rv ;
public class CommerceCurrencyLocalServiceBaseImpl {

    /**
     * Adds the commerce currency to the database. Also notifies the appropriate
     * model listeners.
     *
     * @param commerceCurrency the commerce currency
     * @return the commerce currency that was added
     */
    @Indexable(type = IndexableType.REINDEX)
    @Override
    public CommerceCurrency addCommerceCurrency(CommerceCurrency commerceCurrency) {
        // Mark as new so the persistence layer performs an insert rather than an update.
        commerceCurrency.setNew(true);
        return commerceCurrencyPersistence.update(commerceCurrency);
    }
}
public class ProviderResourceInfo { /** * put anything here , such as J2EEName * @ param key * @ param obj */ public void putCustomizedProperty ( String key , Object obj ) { } }
if ( key == null || obj == null ) return ; customizedProperties . put ( key , obj ) ;
public class AttributeMarshaller { /** * Sorts attributes so that xsd : attribute ones come first */ private static Set < AttributeDefinition > sortAttributes ( AttributeDefinition [ ] attributes ) { } }
Set < AttributeDefinition > sortedAttrs = new LinkedHashSet < > ( attributes . length ) ; List < AttributeDefinition > elementAds = null ; for ( AttributeDefinition ad : attributes ) { if ( ad . getParser ( ) . isParseAsElement ( ) ) { if ( elementAds == null ) { elementAds = new ArrayList < > ( ) ; } elementAds . add ( ad ) ; } else { sortedAttrs . add ( ad ) ; } } if ( elementAds != null ) { sortedAttrs . addAll ( elementAds ) ; } return sortedAttrs ;
public class GCLINERGImpl {

    /**
     * Reports whether the given EMF structural feature is set to a non-default value.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public boolean eIsSet(int featureID) {
        switch (featureID) {
            // A feature counts as "set" when it differs from its EDEFAULT constant
            // (null-safe comparison on both sides).
            case AfplibPackage.GCLINERG__XPOS:
                return XPOS_EDEFAULT == null ? xpos != null : !XPOS_EDEFAULT.equals(xpos);
            case AfplibPackage.GCLINERG__YPOS:
                return YPOS_EDEFAULT == null ? ypos != null : !YPOS_EDEFAULT.equals(ypos);
        }
        // Features not declared on this class are delegated to the superclass.
        return super.eIsSet(featureID);
    }
}
public class FileStandardInfo {

    /**
     * Encodes this FILE_STANDARD_INFO structure into the destination buffer.
     *
     * {@inheritDoc}
     *
     * @see jcifs.Encodable#encode(byte[], int)
     * @return the number of bytes written
     */
    @Override
    public int encode(byte[] dst, int dstIndex) {
        int start = dstIndex;
        // Wire layout: AllocationSize (8 bytes), EndOfFile (8 bytes),
        // NumberOfLinks (4 bytes), DeletePending (1 byte), Directory (1 byte).
        SMBUtil.writeInt8(this.allocationSize, dst, dstIndex);
        dstIndex += 8;
        SMBUtil.writeInt8(this.endOfFile, dst, dstIndex);
        dstIndex += 8;
        SMBUtil.writeInt4(this.numberOfLinks, dst, dstIndex);
        dstIndex += 4;
        dst[dstIndex++] = (byte) (this.deletePending ? 1 : 0);
        dst[dstIndex++] = (byte) (this.directory ? 1 : 0);
        return dstIndex - start;
    }
}
public class ImageRetinaApiImpl { /** * { @ inheritDoc } */ @ Override public ByteArrayInputStream compare ( ImagePlotShape shape , ImageEncoding imageEncoding , Model model1 , Model model2 ) throws JsonProcessingException , ApiException { } }
return compare ( null , shape , imageEncoding , model1 , model2 ) ;
public class StringUtil { /** * returns true if the input string has letters and they are all UPPERCASE * @ param str * @ return */ public static boolean isAllUpperCase ( String str ) { } }
if ( str == null ) return false ; boolean hasLetters = false ; char c ; for ( int i = str . length ( ) - 1 ; i >= 0 ; i -- ) { c = str . charAt ( i ) ; if ( Character . isLetter ( c ) ) { if ( ! Character . isUpperCase ( c ) ) return false ; hasLetters = true ; } } return hasLetters ;
public class MathUtils {

    /**
     * Checks whether two {@link RectF} have the same aspect ratio.
     *
     * @param r1 the first rect.
     * @param r2 the second rect.
     * @return {@code true} if both rectangles have the same aspect ratio,
     *         {@code false} otherwise.
     */
    protected static boolean haveSameAspectRatio(RectF r1, RectF r2) {
        // Truncate to 3 decimals to avoid floating-point noise, then accept a
        // tolerance of 0.01 between the two ratios.
        final float ratio1 = MathUtils.truncate(MathUtils.getRectRatio(r1), 3);
        final float ratio2 = MathUtils.truncate(MathUtils.getRectRatio(r2), 3);
        return Math.abs(ratio1 - ratio2) <= 0.01f;
    }
}
public class LogWriter { /** * Sets the log format for writing process traces ( e . g . MXML ) . * @ param logFormat The log format to set . * @ throws PerspectiveException if the log format does not support the writers ' log perspective . * @ throws CompatibilityException if the charset of the log writer is not supported by the log format . */ private void setLogFormat ( AbstractLogFormat logFormat ) throws PerspectiveException , CompatibilityException { } }
if ( ! logFormat . supportsCharset ( charset ) ) throw new CompatibilityException ( "Log format \"" + logFormat . getName ( ) + "\" does not support charset \"" + charset . name ( ) + "\"" ) ; if ( ! logFormat . supportsLogPerspective ( logPerspective ) ) throw new PerspectiveException ( PerspectiveError . INCOMPATIBLE_LOGFORMAT ) ; this . logFormat = logFormat ;
public class DeletionTaskFailureReasonType { /** * A list of objects that contains details about the service - linked role deletion failure , if that information is * returned by the service . If the service - linked role has active sessions or if any resources that were used by the * role have not been deleted from the linked service , the role can ' t be deleted . This parameter includes a list of * the resources that are associated with the role and the region in which the resources are being used . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setRoleUsageList ( java . util . Collection ) } or { @ link # withRoleUsageList ( java . util . Collection ) } if you want * to override the existing values . * @ param roleUsageList * A list of objects that contains details about the service - linked role deletion failure , if that * information is returned by the service . If the service - linked role has active sessions or if any resources * that were used by the role have not been deleted from the linked service , the role can ' t be deleted . This * parameter includes a list of the resources that are associated with the role and the region in which the * resources are being used . * @ return Returns a reference to this object so that method calls can be chained together . */ public DeletionTaskFailureReasonType withRoleUsageList ( RoleUsageType ... roleUsageList ) { } }
if ( this . roleUsageList == null ) { setRoleUsageList ( new com . amazonaws . internal . SdkInternalList < RoleUsageType > ( roleUsageList . length ) ) ; } for ( RoleUsageType ele : roleUsageList ) { this . roleUsageList . add ( ele ) ; } return this ;
public class TransitionInflater { /** * Loads a { @ link TransitionManager } object from a resource * @ param resource The resource id of the transition manager to load * @ return The loaded TransitionManager object * @ throws android . content . res . Resources . NotFoundException when the * transition manager cannot be loaded */ @ Nullable public TransitionManager inflateTransitionManager ( int resource , @ NonNull ViewGroup sceneRoot ) { } }
XmlResourceParser parser = mContext . getResources ( ) . getXml ( resource ) ; try { return createTransitionManagerFromXml ( parser , Xml . asAttributeSet ( parser ) , sceneRoot ) ; } catch ( XmlPullParserException e ) { InflateException ex = new InflateException ( e . getMessage ( ) ) ; ex . initCause ( e ) ; throw ex ; } catch ( IOException e ) { InflateException ex = new InflateException ( parser . getPositionDescription ( ) + ": " + e . getMessage ( ) ) ; ex . initCause ( e ) ; throw ex ; } finally { parser . close ( ) ; }
public class ResourceLoader {

    /**
     * Returns an InputStream for the given resource, or for the literal content
     * itself when the argument is not a path.
     *
     * @param resource resource to load (path, URL, or inline content)
     * @return an InputStream of the resource if it exists
     * @throws MalformedURLException if url is malformed
     * @throws IOException           if it is impossible to open the stream
     */
    public static InputStream loadResource(String resource)
            throws MalformedURLException, IOException {
        String content = resource.trim();
        // If resource is inline content and not a path, stream it directly.
        if (!isPath(content))
            return new ByteArrayInputStream(content.getBytes("UTF-8"));
        URL url = Thread.currentThread().getContextClassLoader().getResource(content);
        // Could not find resource. Try with the classloader that loaded this class.
        if (isNull(url)) {
            ClassLoader classLoader = ResourceLoader.class.getClassLoader();
            if (classLoader != null)
                url = classLoader.getResource(content);
        }
        // Last ditch attempt searching classpath
        if (isNull(url))
            url = ClassLoader.getSystemResource(content);
        // One more time: strings containing ':' may be absolute URLs.
        if (isNull(url) && content.contains(":"))
            url = new URL(content);
        // NOTE(review): this assumes Error.xmlNotFound throws; if it merely logs,
        // url.openStream() below would NPE — confirm against Error's implementation.
        if (isNull(url))
            Error.xmlNotFound(content);
        return url.openStream();
    }
}
public class AttributeListImpl {

    /**
     * Records a namespace declaration. In canonical mode the declaration is
     * inserted in sorted position; otherwise it is simply appended.
     *
     * @param uri the namespace URI
     * @param pfx the namespace prefix
     */
    public void addNamespaceDeclaration(String uri, String pfx) {
        // Canonical EXI defines that namespace declarations MUST be sorted
        // lexicographically according to the NS prefix
        if (this.nsDecls.size() == 0 || !this.isCanonical) {
            // Non-canonical (or first entry): plain append, no ordering required.
            this.nsDecls.add(new NamespaceDeclaration(uri, pfx));
        } else {
            // Sorted insertion: start at the end and move left past every
            // existing declaration whose prefix is greater than the new one.
            int i = this.getNumberOfNamespaceDeclarations();
            while (i > 0 && isGreaterNS(i - 1, pfx)) {
                // move right
                i--;
            }
            // Insert at position i, shifting greater entries to the right.
            this.nsDecls.add(i, new NamespaceDeclaration(uri, pfx));
        }
    }
}
public class HtmlForm {

    /**
     * <p>Set the value of the <code>acceptcharset</code> property.</p>
     *
     * @param acceptcharset the character encodings accepted for form submission
     */
    public void setAcceptcharset(java.lang.String acceptcharset) {
        // JSF components store property values in the StateHelper for state saving.
        getStateHelper().put(PropertyKeys.acceptcharset, acceptcharset);
    }
}
public class AWSCloud9Client {

    /**
     * Changes the settings of an existing AWS Cloud9 development environment.
     *
     * @param request the update request
     * @return Result of the UpdateEnvironment operation returned by the service.
     * @throws BadRequestException          The target request is invalid.
     * @throws ConflictException            A conflict occurred.
     * @throws NotFoundException            The target resource cannot be found.
     * @throws ForbiddenException           An access permissions issue occurred.
     * @throws TooManyRequestsException     Too many service requests were made over the given time period.
     * @throws LimitExceededException       A service limit was exceeded.
     * @throws InternalServerErrorException An internal server error occurred.
     * @sample AWSCloud9.UpdateEnvironment
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/cloud9-2017-09-23/UpdateEnvironment"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public UpdateEnvironmentResult updateEnvironment(UpdateEnvironmentRequest request) {
        // Run pre-execution hooks (handlers may replace the request object),
        // then delegate to the generated execution method.
        request = beforeClientExecution(request);
        return executeUpdateEnvironment(request);
    }
}
public class TransTypes {

    /**
     * Lookup the method as a member of the type and compare the erasures.
     *
     * @param type    the class where to look for the method
     * @param method  the method to look for in class
     * @param erasure the erasure of method
     * @return true if the erased member type equals the given erasure
     */
    private boolean isSameMemberWhenErased(Type type, MethodSymbol method, Type erasure) {
        // Resolve the method's type as seen from 'type', erase it, and compare.
        return types.isSameType(erasure(types.memberType(type, method)), erasure);
    }
}
public class JsonWriter { /** * Generates a mapping between attribute names and data types . * @ param name name of the map * @ param types types to write */ private void writeAttributeTypes ( String name , FieldType [ ] types ) throws IOException { } }
m_writer . writeStartObject ( name ) ; for ( FieldType field : types ) { m_writer . writeNameValuePair ( field . name ( ) . toLowerCase ( ) , field . getDataType ( ) . getValue ( ) ) ; } m_writer . writeEndObject ( ) ;
public class FtRemote { /** * Make a random socket . * @ return Socket * @ throws IOException If fails */ private static ServerSocket random ( ) throws IOException { } }
final ServerSocket skt = new ServerSocket ( 0 ) ; skt . setReuseAddress ( true ) ; return skt ;
public class FutureTask {

    /**
     * Sets the result of this future to the given value unless
     * this future has already been set or has been cancelled.
     *
     * <p>This method is invoked internally by the {@link #run} method
     * upon successful completion of the computation.
     *
     * @param v the value
     */
    protected void set(V v) {
        // CAS NEW -> COMPLETING wins the race against concurrent set/setException/cancel;
        // losing simply means another outcome was already decided.
        if (U.compareAndSwapInt(this, STATE, NEW, COMPLETING)) {
            outcome = v;
            // Ordered write publishes 'outcome' before readers observe NORMAL.
            U.putOrderedInt(this, STATE, NORMAL); // final state
            finishCompletion();
        }
    }
}
public class AJP13InputStream {

    /**
     * Reads a single byte from the AJP13 request body.
     *
     * @return the next byte, or -1 at end of stream
     * @throws IOException on transport errors
     */
    public int read() throws IOException {
        if (_closed)
            return -1;
        // Refill from the underlying AJP packet stream when the current packet
        // is drained; an empty refill marks logical end of stream.
        if (_packet.unconsumedData() <= 0) {
            fillPacket();
            if (_packet.unconsumedData() <= 0) {
                _closed = true;
                return -1;
            }
        }
        return _packet.getByte();
    }
}
public class ByteArray { /** * Add data at the current position . * @ param source * Source data . */ public void put ( byte [ ] source ) { } }
// If the buffer is small . if ( mBuffer . capacity ( ) < ( mLength + source . length ) ) { expandBuffer ( mLength + source . length + ADDITIONAL_BUFFER_SIZE ) ; } mBuffer . put ( source ) ; mLength += source . length ;
public class CmsSerialDateValue {

    /**
     * Check, if all values used for calculating the series for a specific pattern are valid.
     *
     * @return <code>null</code> if the pattern is valid, a suitable error message otherwise.
     */
    private String validatePattern() {
        String error = null;
        switch (getPatternType()) {
            case DAILY:
                // "Every working day" needs no interval; otherwise validate it.
                error = isEveryWorkingDay() ? null : validateInterval();
                break;
            case WEEKLY:
                // Interval first, then the set of weekdays.
                error = validateInterval();
                if (null == error) {
                    error = validateWeekDaySet();
                }
                break;
            case MONTHLY:
                // Interval, then month set, then either week-of-month or day-of-month
                // depending on which variant of the monthly pattern is configured.
                error = validateInterval();
                if (null == error) {
                    error = validateMonthSet();
                    if (null == error) {
                        error = isWeekDaySet() ? validateWeekOfMonthSet() : validateDayOfMonth();
                    }
                }
                break;
            case YEARLY:
                error = isWeekDaySet() ? validateWeekOfMonthSet() : validateDayOfMonth();
                break;
            case INDIVIDUAL:
            case NONE:
            default:
                // Individual dates and "no pattern" have nothing to validate.
        }
        return error;
    }
}
public class DescribeMovingAddressesResult { /** * The status for each Elastic IP address . * @ param movingAddressStatuses * The status for each Elastic IP address . */ public void setMovingAddressStatuses ( java . util . Collection < MovingAddressStatus > movingAddressStatuses ) { } }
if ( movingAddressStatuses == null ) { this . movingAddressStatuses = null ; return ; } this . movingAddressStatuses = new com . amazonaws . internal . SdkInternalList < MovingAddressStatus > ( movingAddressStatuses ) ;
public class ImageStatistics { /** * Computes the variance of pixel intensity values inside the image . * @ param img Input image . Not modified . * @ param mean Mean pixel intensity value . * @ return Pixel variance */ public static double variance ( GrayS16 img , double mean ) { } }
if ( BoofConcurrency . USE_CONCURRENT ) { return ImplImageStatistics_MT . variance ( img , mean ) ; } else { return ImplImageStatistics . variance ( img , mean ) ; }
public class RouteUtils {

    /**
     * Given a {@link RouteProgress}, calculates the remaining waypoint names along
     * the route based on the route options' waypoint names and the number of
     * remaining coordinates. Returns null if no waypoint names are configured.
     *
     * @param routeProgress for route waypoint names and remaining coordinates
     * @return String array including the origin waypoint name and the remaining ones
     * @since 0.19.0
     */
    @Nullable
    public String[] calculateRemainingWaypointNames(RouteProgress routeProgress) {
        RouteOptions routeOptions = routeProgress.directionsRoute().routeOptions();
        if (routeOptions == null || TextUtils.isEmpty(routeOptions.waypointNames())) {
            return null;
        }
        // Waypoint names are stored as a single semicolon-separated string.
        String allWaypointNames = routeOptions.waypointNames();
        String[] names = allWaypointNames.split(SEMICOLON);
        int coordinatesSize = routeProgress.directionsRoute().routeOptions().coordinates().size();
        // Keep only the names for waypoints not yet reached:
        // indices [coordinatesSize - remaining, coordinatesSize).
        String[] remainingWaypointNames = Arrays.copyOfRange(names,
                coordinatesSize - routeProgress.remainingWaypoints(), coordinatesSize);
        // Prepend the origin's name so the result always starts with it.
        String[] waypointNames =
                new String[remainingWaypointNames.length + ORIGIN_WAYPOINT_NAME_THRESHOLD];
        waypointNames[ORIGIN_WAYPOINT_NAME] = names[ORIGIN_WAYPOINT_NAME];
        System.arraycopy(remainingWaypointNames, FIRST_POSITION, waypointNames, SECOND_POSITION,
                remainingWaypointNames.length);
        return waypointNames;
    }
}
public class Applications {

    /**
     * Add the <em>application</em> to the list.
     *
     * @param app the <em>application</em> to be added.
     */
    public void addApplication(Application app) {
        // Index by upper-cased name (ROOT locale) for case-insensitive lookup.
        appNameApplicationMap.put(app.getName().toUpperCase(Locale.ROOT), app);
        // Register the app's instances in both the plain and secure VIP maps.
        addInstancesToVIPMaps(app, this.virtualHostNameAppMap, this.secureVirtualHostNameAppMap);
        applications.add(app);
    }
}
public class XmlSerializer { /** * Deserializes a XML ( default XStream representation ) representation of an * object . */ public Object deserialize ( String string ) { } }
XStream xstream = new XStream ( ) ; return xstream . fromXML ( string ) ;
public class UtilDenoiseWavelet { /** * Robust median estimator of the noise standard deviation . Typically applied to the HH < sub > 1 < / sub > subband . * & sigma ; = Median ( | Y < sub > ij < / sub > | ) / 0.6745 < br > * where & sigma ; is the estimated noise standard deviation , and Median ( | Y < sub > ij < / sub > | ) * is the median absolute value of all the pixels in the subband . * D . L . Donoho and I . M . Johnstone , " Ideal spatial adaption via wavelet shrinkage . " * Biometrika , vol 81 , pp . 425-455 , 1994 * @ param subband The subband the image is being computed from . Not modified . * @ param storage Used to temporarily store the absolute value of each element in the subband . * @ return estimated noise variance . */ public static float estimateNoiseStdDev ( GrayF32 subband , float storage [ ] ) { } }
storage = subbandAbsVal ( subband , storage ) ; int N = subband . width * subband . height ; return QuickSelect . select ( storage , N / 2 , N ) / 0.6745f ;
public class SignalRsInner {

    /**
     * Create a new SignalR service or update an existing SignalR service.
     *
     * @param resourceGroupName The name of the resource group that contains the resource.
     *                          You can obtain this value from the Azure Resource Manager API or the portal.
     * @param resourceName      The name of the SignalR resource.
     * @param parameters        Parameters for the create or update operation
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<SignalRResourceInner> createOrUpdateAsync(String resourceGroupName,
            String resourceName, SignalRCreateParameters parameters) {
        // Delegate to the ServiceResponse variant and unwrap the response body.
        return createOrUpdateWithServiceResponseAsync(resourceGroupName, resourceName, parameters)
                .map(new Func1<ServiceResponse<SignalRResourceInner>, SignalRResourceInner>() {
                    @Override
                    public SignalRResourceInner call(ServiceResponse<SignalRResourceInner> response) {
                        return response.body();
                    }
                });
    }
}
public class ListT {

    /**
     * Cycles the sequence until the predicate matches.
     *
     * @see cyclops2.monads.transformers.values.ListT#cycleUntil(java.util.function.Predicate)
     * @param predicate stop condition for the cycling
     * @return a transformer cycling until the predicate holds
     */
    @Override
    public ListT<W, T> cycleUntil(final Predicate<? super T> predicate) {
        // Delegate to the default implementation; the cast narrows the
        // interface return type back to ListT.
        return (ListT<W, T>) FoldableTransformerSeq.super.cycleUntil(predicate);
    }
}
public class AmazonWebServiceRequest {

    /**
     * Sets the optional progress listener for receiving updates about the progress of
     * the request, and returns a reference to this object so that method calls can be
     * chained together.
     *
     * @param progressListener The new progress listener.
     * @return A reference to this updated object so that method calls can be chained together.
     */
    public <T extends AmazonWebServiceRequest> T withGeneralProgressListener(
            ProgressListener progressListener) {
        setGeneralProgressListener(progressListener);
        // Unchecked cast to the caller's declared subtype for fluent chaining.
        @SuppressWarnings("unchecked")
        T t = (T) this;
        return t;
    }
}
public class DeviceImpl {

    /**
     * Get device info.
     *
     * <p>Invoked when the client requests the info CORBA operation. It updates the
     * black box and returns a DevInfo object with miscellaneous device info.
     *
     * @return A DevInfo object
     */
    public DevInfo info() {
        Util.out4.println("DeviceImpl.info() arrived");
        final DevInfo back = new DevInfo();
        // Retrieve server host
        final Util tg = Util.instance();
        back.server_host = tg.get_host_name();
        // Fill-in remaining structure fields from the device class and library version.
        back.dev_class = device_class.get_name();
        back.server_id = tg.get_ds_real_name();
        back.server_version = Tango_DevVersion;
        back.doc_url = device_class.get_doc_url();
        // Record operation request in black box
        blackbox.insert_op(Op_Info);
        // Return to caller
        Util.out4.println("Leaving DeviceImpl.info()");
        return back;
    }
}
public class ChannelServiceImpl {

    /**
     * Switches the channel status, inside a transaction:
     * <pre>
     * 1. Verify the channel exists and its current status.
     * 2. Validate the ddl/home pipeline configuration.
     * 3. Update the status (database / arbitrate events).
     * 4. Push the channel to the nodes via the remote config service.
     * </pre>
     *
     * @param channelId     id of the channel to switch
     * @param channelStatus target status; null means "re-push with current status"
     */
    private void switchChannelStatus(final Long channelId, final ChannelStatus channelStatus) {
        transactionTemplate.execute(new TransactionCallbackWithoutResult() {

            @Override
            protected void doInTransactionWithoutResult(TransactionStatus status) {
                try {
                    final ChannelDO channelDo = channelDao.findById(channelId);
                    if (null == channelDo) {
                        String exceptionCause = "query channelId:" + channelId + " return null.";
                        logger.error("ERROR ## " + exceptionCause);
                        throw new ManagerException(exceptionCause);
                    }
                    ChannelStatus oldStatus = arbitrateManageService.channelEvent().status(channelDo.getId());
                    Channel channel = doToModel(channelDo);
                    // Validate ddl/home settings: with multiple pipelines it is invalid
                    // for ALL of them to have ddlSync or ALL of them to be home.
                    List<Pipeline> pipelines = channel.getPipelines();
                    if (pipelines.size() > 1) {
                        boolean ddlSync = true;
                        boolean homeSync = true;
                        for (Pipeline pipeline : pipelines) {
                            homeSync &= pipeline.getParameters().isHome();
                            ddlSync &= pipeline.getParameters().getDdlSync();
                        }
                        if (ddlSync) {
                            throw new InvalidConfigureException(INVALID_TYPE.DDL);
                        }
                        if (homeSync) {
                            throw new InvalidConfigureException(INVALID_TYPE.HOME);
                        }
                    }
                    channel.setStatus(oldStatus);
                    ChannelStatus newStatus = channelStatus;
                    if (newStatus != null) {
                        if (newStatus.equals(oldStatus)) {
                            // Requested status equals the current one: nothing to do (ignored).
                            return;
                        } else {
                            channel.setStatus(newStatus); // force the requested status
                        }
                    } else {
                        newStatus = oldStatus;
                    }
                    // For stop/pause, flip the arbitrate status FIRST so node worker
                    // threads stop promptly before the nodes are notified.
                    if (newStatus.isStop()) {
                        arbitrateManageService.channelEvent().stop(channelId);
                    } else if (newStatus.isPause()) {
                        arbitrateManageService.channelEvent().pause(channelId);
                    }
                    // Push the changed channel to the nodes.
                    boolean result = configRemoteService.notifyChannel(channel);
                    // Only after the clients acknowledge do we change the status for start.
                    if (result) {
                        // For start: notify clients first; once their threads are up,
                        // flip the channel to started.
                        if (newStatus.isStart()) {
                            arbitrateManageService.channelEvent().start(channelId);
                        }
                    }
                } catch (Exception e) {
                    // NOTE(review): this also re-wraps the ManagerException thrown above
                    // and InvalidConfigureException, losing their concrete types — confirm intended.
                    logger.error("ERROR ## switch the channel(" + channelId + ") status has an exception.");
                    throw new ManagerException(e);
                }
            }
        });
    }
}
public class CopyBroadcastReceiver {

    /**
     * Receive one message from the broadcast buffer.
     *
     * @param handler to be called for each message received.
     * @return the number of messages that have been received (0 or 1).
     */
    public int receive(final MessageHandler handler) {
        int messagesReceived = 0;
        final BroadcastReceiver receiver = this.receiver;
        // Snapshot the lapped count before reading; if it changes we were overrun.
        final long lastSeenLappedCount = receiver.lappedCount();
        if (receiver.receiveNext()) {
            if (lastSeenLappedCount != receiver.lappedCount()) {
                throw new IllegalStateException("unable to keep up with broadcast");
            }
            final int length = receiver.length();
            final int capacity = scratchBuffer.capacity();
            if (length > capacity && !scratchBuffer.isExpandable()) {
                throw new IllegalStateException(
                        "buffer required length of " + length + " but only has " + capacity);
            }
            final int msgTypeId = receiver.typeId();
            // Copy out of the broadcast buffer BEFORE validating: the writer may
            // overwrite the slot concurrently, and validate() detects that.
            scratchBuffer.putBytes(0, receiver.buffer(), receiver.offset(), length);
            if (!receiver.validate()) {
                throw new IllegalStateException("unable to keep up with broadcast");
            }
            // Only hand the (now validated) private copy to the handler.
            handler.onMessage(msgTypeId, scratchBuffer, 0, length);
            messagesReceived = 1;
        }
        return messagesReceived;
    }
}
public class CustomErrorPages { /** * Returns the default error page for a given status code . * Guaranteed to never be null . * @ param status * @ return String representation of the default error page . */ public String getDefaultFor ( int status ) { } }
String defaultPage = defaultPages . get ( status ) ; return ( defaultPage != null ) ? defaultPage : "<html><body><h2>HTTP Status " + status + "</h2></body></html>" ;
public class StructrMethodDefinition { /** * - - - - - static methods - - - - - */ static StructrMethodDefinition deserialize ( final StructrTypeDefinition parent , final String name , final Map < String , Object > source ) { } }
final StructrMethodDefinition newMethod = new StructrMethodDefinition ( parent , name ) ; newMethod . deserialize ( source ) ; return newMethod ;
public class JsiiRuntime { /** * Reads the next response from STDOUT of the child process . * @ return The parsed JSON response . * @ throws JsiiException if we couldn ' t parse the response . */ JsonNode readNextResponse ( ) { } }
try { String responseLine = this . stdout . readLine ( ) ; if ( responseLine == null ) { String error = this . stderr . lines ( ) . collect ( Collectors . joining ( "\n\t" ) ) ; throw new JsiiException ( "Child process exited unexpectedly: " + error ) ; } return JsiiObjectMapper . INSTANCE . readTree ( responseLine ) ; } catch ( IOException e ) { throw new JsiiException ( "Unable to read reply from jsii-runtime: " + e . toString ( ) , e ) ; }
public class StatisticsJDBCStorageConnection {

    /**
     * {@inheritDoc}
     *
     * <p>Delegates to the wrapped connection while recording timing statistics
     * for the call.
     */
    public ItemData getItemData(String identifier) throws RepositoryException, IllegalStateException {
        Statistics s = ALL_STATISTICS.get(GET_ITEM_DATA_BY_ID_DESCR);
        try {
            // Bracket the delegated call so the duration is recorded even on failure.
            s.begin();
            return wcs.getItemData(identifier);
        } finally {
            s.end();
        }
    }
}
public class CmsUploadButton { /** * Updates the CSS classes according to the button state . < p > * @ param styleDependent the dependent style name */ private void updateState ( String styleDependent ) { } }
if ( CmsStringUtil . isEmptyOrWhitespaceOnly ( styleDependent ) ) { // reseting to cmsState - up styleDependent = "up" ; } if ( ! styleDependent . equals ( m_styleDependent ) ) { m_main . removeStyleDependentName ( m_styleDependent ) ; m_main . setStyleDependentName ( styleDependent , true ) ; m_styleDependent = styleDependent ; }
public class AbstractDataStore { /** * ( non - Javadoc ) * @ see net . timewalker . ffmq4 . utils . store . LinkedDataStore # lock ( int ) */ @ Override public final void lock ( int handle ) throws DataStoreException { } }
if ( SAFE_MODE ) checkHandle ( handle ) ; if ( ! locks . flip ( handle ) ) { locks . flip ( handle ) ; // Restore state throw new DataStoreException ( "Handle already locked : " + handle ) ; }
public class RemoteInputChannel { /** * Assigns exclusive buffers to this input channel , and this method should be called only once * after this input channel is created . */ void assignExclusiveSegments ( List < MemorySegment > segments ) { } }
checkState ( this . initialCredit == 0 , "Bug in input channel setup logic: exclusive buffers have " + "already been set for this input channel." ) ; checkNotNull ( segments ) ; checkArgument ( segments . size ( ) > 0 , "The number of exclusive buffers per channel should be larger than 0." ) ; this . initialCredit = segments . size ( ) ; this . numRequiredBuffers = segments . size ( ) ; synchronized ( bufferQueue ) { for ( MemorySegment segment : segments ) { bufferQueue . addExclusiveBuffer ( new NetworkBuffer ( segment , this ) , numRequiredBuffers ) ; } }
public class ScaledIcon { /** * documentation inherited from interface Icon */ public void paintIcon ( Component c , Graphics g , int x , int y ) { } }
Graphics2D gfx = ( Graphics2D ) g ; AffineTransform otrans = gfx . getTransform ( ) ; RenderingHints ohints = gfx . getRenderingHints ( ) ; gfx . setRenderingHint ( RenderingHints . KEY_INTERPOLATION , RenderingHints . VALUE_INTERPOLATION_BILINEAR ) ; gfx . scale ( _scale , _scale ) ; _icon . paintIcon ( c , g , x , y ) ; gfx . setTransform ( otrans ) ; gfx . setRenderingHints ( ohints ) ;
public class Connection { /** * These key - value pairs define parameters for the connection : * < ul > * < li > * < code > HOST < / code > - The host URI : either the fully qualified domain name ( FQDN ) or the IPv4 address of the * database host . * < / li > * < li > * < code > PORT < / code > - The port number , between 1024 and 65535 , of the port on which the database host is listening * for database connections . * < / li > * < li > * < code > USER _ NAME < / code > - The name under which to log in to the database . The value string for * < code > USER _ NAME < / code > is " < code > USERNAME < / code > " . * < / li > * < li > * < code > PASSWORD < / code > - A password , if one is used , for the user name . * < / li > * < li > * < code > ENCRYPTED _ PASSWORD < / code > - When you enable connection password protection by setting * < code > ConnectionPasswordEncryption < / code > in the Data Catalog encryption settings , this field stores the * encrypted password . * < / li > * < li > * < code > JDBC _ DRIVER _ JAR _ URI < / code > - The Amazon S3 path of the JAR file that contains the JDBC driver to use . * < / li > * < li > * < code > JDBC _ DRIVER _ CLASS _ NAME < / code > - The class name of the JDBC driver to use . * < / li > * < li > * < code > JDBC _ ENGINE < / code > - The name of the JDBC engine to use . * < / li > * < li > * < code > JDBC _ ENGINE _ VERSION < / code > - The version of the JDBC engine to use . * < / li > * < li > * < code > CONFIG _ FILES < / code > - ( Reserved for future use ) . * < / li > * < li > * < code > INSTANCE _ ID < / code > - The instance ID to use . * < / li > * < li > * < code > JDBC _ CONNECTION _ URL < / code > - The URL for the JDBC connection . * < / li > * < li > * < code > JDBC _ ENFORCE _ SSL < / code > - A Boolean string ( true , false ) specifying whether Secure Sockets Layer ( SSL ) with * hostname matching will be enforced for the JDBC connection on the client . The default is false . 
* < / li > * < / ul > * @ param connectionProperties * These key - value pairs define parameters for the connection : < / p > * < ul > * < li > * < code > HOST < / code > - The host URI : either the fully qualified domain name ( FQDN ) or the IPv4 address of the * database host . * < / li > * < li > * < code > PORT < / code > - The port number , between 1024 and 65535 , of the port on which the database host is * listening for database connections . * < / li > * < li > * < code > USER _ NAME < / code > - The name under which to log in to the database . The value string for * < code > USER _ NAME < / code > is " < code > USERNAME < / code > " . * < / li > * < li > * < code > PASSWORD < / code > - A password , if one is used , for the user name . * < / li > * < li > * < code > ENCRYPTED _ PASSWORD < / code > - When you enable connection password protection by setting * < code > ConnectionPasswordEncryption < / code > in the Data Catalog encryption settings , this field stores the * encrypted password . * < / li > * < li > * < code > JDBC _ DRIVER _ JAR _ URI < / code > - The Amazon S3 path of the JAR file that contains the JDBC driver to * use . * < / li > * < li > * < code > JDBC _ DRIVER _ CLASS _ NAME < / code > - The class name of the JDBC driver to use . * < / li > * < li > * < code > JDBC _ ENGINE < / code > - The name of the JDBC engine to use . * < / li > * < li > * < code > JDBC _ ENGINE _ VERSION < / code > - The version of the JDBC engine to use . * < / li > * < li > * < code > CONFIG _ FILES < / code > - ( Reserved for future use ) . * < / li > * < li > * < code > INSTANCE _ ID < / code > - The instance ID to use . * < / li > * < li > * < code > JDBC _ CONNECTION _ URL < / code > - The URL for the JDBC connection . * < / li > * < li > * < code > JDBC _ ENFORCE _ SSL < / code > - A Boolean string ( true , false ) specifying whether Secure Sockets Layer * ( SSL ) with hostname matching will be enforced for the JDBC connection on the client . 
The default is false . * < / li > * @ return Returns a reference to this object so that method calls can be chained together . */ public Connection withConnectionProperties ( java . util . Map < String , String > connectionProperties ) { } }
setConnectionProperties ( connectionProperties ) ; return this ;
public class ClassHelper { /** * Check if the passed classes are convertible . Includes conversion checks * between primitive types and primitive wrapper types . * @ param aSrcClass * First class . May not be < code > null < / code > . * @ param aDstClass * Second class . May not be < code > null < / code > . * @ return < code > true < / code > if the classes are directly convertible . */ public static boolean areConvertibleClasses ( @ Nonnull final Class < ? > aSrcClass , @ Nonnull final Class < ? > aDstClass ) { } }
ValueEnforcer . notNull ( aSrcClass , "SrcClass" ) ; ValueEnforcer . notNull ( aDstClass , "DstClass" ) ; // Same class ? if ( aDstClass . equals ( aSrcClass ) ) return true ; // Default assignable if ( aDstClass . isAssignableFrom ( aSrcClass ) ) return true ; // Special handling for " int . class " = = " Integer . class " etc . if ( aDstClass == getPrimitiveWrapperClass ( aSrcClass ) ) return true ; if ( aDstClass == getPrimitiveClass ( aSrcClass ) ) return true ; // Not convertible return false ;
public class TcpIpConfig { /** * Adds a ' well known ' member . * Each HazelcastInstance will try to connect to at least one of the members , to find all other members , * and create a cluster . * A member can be a comma separated string , e . . g ' 10.11.12.1,10.11.12.2 ' which indicates multiple members * are going to be added . * @ param member the member to add * @ return the updated configuration * @ throws IllegalArgumentException if member is { @ code null } or empty * @ see # getMembers ( ) */ public TcpIpConfig addMember ( String member ) { } }
String memberText = checkHasText ( member , "member must contain text" ) ; StringTokenizer tokenizer = new StringTokenizer ( memberText , "," ) ; while ( tokenizer . hasMoreTokens ( ) ) { String s = tokenizer . nextToken ( ) ; this . members . add ( s . trim ( ) ) ; } return this ;
public class AsteriskVersion { /** * Determine the Asterisk version from the string returned by Asterisk . The * string should contain " Asterisk " followed by a version number . * @ param coreLine * @ return the detected version , or null if unknown */ public static AsteriskVersion getDetermineVersionFromString ( String coreLine ) { } }
for ( AsteriskVersion version : knownVersions ) { for ( Pattern pattern : version . patterns ) { if ( pattern . matcher ( coreLine ) . matches ( ) ) { return version ; } } } return null ;
public class AbstractFramedChannel {

    /**
     * Called when a source sub channel fails to fulfil its contract, and leaves
     * the channel in an inconsistent state. The underlying read side will be
     * forcibly closed.
     *
     * Idempotent: the CAS on the "reads broken" flag ensures the teardown
     * sequence runs at most once, no matter how many sub channels fail.
     *
     * @param cause The possibly null cause
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    protected void markReadsBroken(Throwable cause) {
        // Only the first caller to flip the flag 0 -> 1 performs the teardown.
        if (readsBrokenUpdater.compareAndSet(this, 0, 1)) {
            if (UndertowLogger.REQUEST_IO_LOGGER.isDebugEnabled()) {
                // The ClosedChannelException is created only to capture a stack
                // trace for the debug log; it is never thrown.
                UndertowLogger.REQUEST_IO_LOGGER.debugf(new ClosedChannelException(), "Marking reads broken on channel %s", this);
            }
            if (receiver != null) {
                receiver.markStreamBroken();
            }
            // Snapshot the receivers first: markStreamBroken() may mutate the
            // underlying collection while we iterate.
            for (AbstractFramedStreamSourceChannel<C, R, S> r : new ArrayList<>(getReceivers())) {
                r.markStreamBroken();
            }
            handleBrokenSourceChannel(cause);
            // Force-close the read side, then tear down any remaining sub channels.
            safeClose(channel.getSourceChannel());
            closeSubChannels();
        }
    }
}
public class MediaType { /** * Return a replica of this instance with the quality value of the given MediaType . * @ return the same instance if the given MediaType doesn ' t have a quality value , or a new one otherwise */ public MediaType copyQualityValue ( MediaType mediaType ) { } }
if ( ! mediaType . parameters . containsKey ( PARAM_QUALITY_FACTOR ) ) { return this ; } Map < String , String > params = new LinkedHashMap < String , String > ( this . parameters ) ; params . put ( PARAM_QUALITY_FACTOR , mediaType . parameters . get ( PARAM_QUALITY_FACTOR ) ) ; return new MediaType ( this , params ) ;
public class AnimaCache { /** * Get the column mapping based on the model Class type * Generated and stored in the Map when no column mapping exists * @ param modelType model class type * @ return model column mapping */ public static Map < String , String > computeModelColumnMappings ( Class < ? > modelType ) { } }
return MODEL_COLUMN_MAPPINGS . computeIfAbsent ( modelType , model -> { List < Field > fields = computeModelFields ( model ) ; return fields . stream ( ) . collect ( toMap ( AnimaCache :: getColumnName , Field :: getName ) ) ; } ) ;
public class AmazonIdentityManagementClient { /** * Deletes the permissions boundary for the specified IAM role . * < important > * Deleting the permissions boundary for a role might increase its permissions . For example , it might allow anyone * who assumes the role to perform all the actions granted in its permissions policies . * < / important > * @ param deleteRolePermissionsBoundaryRequest * @ return Result of the DeleteRolePermissionsBoundary operation returned by the service . * @ throws NoSuchEntityException * The request was rejected because it referenced a resource entity that does not exist . The error message * describes the resource . * @ throws UnmodifiableEntityException * The request was rejected because only the service that depends on the service - linked role can modify or * delete the role on your behalf . The error message includes the name of the service that depends on this * service - linked role . You must request the change through that service . * @ throws ServiceFailureException * The request processing has failed because of an unknown error , exception or failure . * @ sample AmazonIdentityManagement . DeleteRolePermissionsBoundary * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / iam - 2010-05-08 / DeleteRolePermissionsBoundary " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DeleteRolePermissionsBoundaryResult deleteRolePermissionsBoundary ( DeleteRolePermissionsBoundaryRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDeleteRolePermissionsBoundary ( request ) ;
public class HmacSignatureBuilder { /** * 仅包含请求头的数据运算 */ private void updateOnlyHeaderBuildDigest ( Mac digest ) throws IllegalStateException , UnsupportedEncodingException { } }
digest . update ( Validator . notNull ( apiKey , "apiKey not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ; digest . update ( Validator . notNull ( contentType , "contentType not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ; digest . update ( Validator . notNull ( date , "date not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ; digest . update ( Validator . notNull ( host , "host not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ; digest . update ( Validator . notNull ( method , "method not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ; digest . update ( Validator . notNull ( nonce , "nonce not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ; digest . update ( Validator . notNull ( resource , "resource not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ; digest . update ( Validator . notNull ( scheme , "scheme not null" ) . getBytes ( charset ) ) ; digest . update ( delimiter ) ;
public class FSDatasetAsyncDiskService { /** * Execute the task sometime in the future , using ThreadPools . */ synchronized void execute ( File root , Runnable task ) { } }
if ( executors == null ) { throw new RuntimeException ( "AsyncDiskService is already shutdown" ) ; } ThreadPoolExecutor executor = executors . get ( root ) ; if ( executor == null ) { throw new RuntimeException ( "Cannot find root " + root + " for execution of task " + task ) ; } else { executor . execute ( task ) ; }