signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AsyncFile { /** * Reads all bytes from this channel into the given buffer . * @ param path the path of the file to read */ public static Promise < ByteBuf > readFile ( Executor executor , Path path ) { } }
return openAsync ( executor , path , set ( READ ) ) . then ( file -> file . read ( ) . then ( buf -> file . close ( ) . whenException ( $ -> buf . recycle ( ) ) . map ( $ -> buf ) ) ) ;
public class BigDecimalParser {
    /**
     * Returns the lazily-initialized singleton instance of this parser.
     *
     * @return the shared {@code Parser<BigDecimal>} instance
     */
    public static final Parser<BigDecimal> instance() {
        // NOPMD — intentional double-checked locking.
        // NOTE(review): this pattern is only safe under the Java memory model if
        // the instanceParser field is declared volatile; the field declaration is
        // not visible in this chunk — please verify.
        if (BigDecimalParser.instanceParser == null) {
            synchronized (BigDecimalParser.class) {
                if (BigDecimalParser.instanceParser == null) {
                    BigDecimalParser.instanceParser = new BigDecimalParser();
                }
            }
        }
        return BigDecimalParser.instanceParser;
    }
}
public class S3CryptoModuleBase { /** * Takes the position of the rightmost desired byte of a user specified * range and returns the position of the end of the following cipher block ; * or { @ value Long # MAX _ VALUE } if the resultant position has a value that * exceeds { @ value Long # MAX _ VALUE } . */ private static long getCipherBlockUpperBound ( final long rightmostBytePosition ) { } }
long cipherBlockSize = JceEncryptionConstants . SYMMETRIC_CIPHER_BLOCK_SIZE ; long offset = cipherBlockSize - ( rightmostBytePosition % cipherBlockSize ) ; long upperBound = rightmostBytePosition + offset + cipherBlockSize ; return upperBound < 0 ? Long . MAX_VALUE : upperBound ;
public class AmazonKinesisAsyncClient { /** * Simplified method form for invoking the ListStreams operation . * @ see # listStreamsAsync ( ListStreamsRequest ) */ @ Override public java . util . concurrent . Future < ListStreamsResult > listStreamsAsync ( Integer limit , String exclusiveStartStreamName ) { } }
return listStreamsAsync ( new ListStreamsRequest ( ) . withLimit ( limit ) . withExclusiveStartStreamName ( exclusiveStartStreamName ) ) ;
public class ShuffleSecretManager { /** * Register an application with its secret . * Executors need to first authenticate themselves with the same secret before * fetching shuffle files written by other executors in this application . */ public void registerApp ( String appId , String shuffleSecret ) { } }
// Always put the new secret information to make sure it ' s the most up to date . // Otherwise we have to specifically look at the application attempt in addition // to the applicationId since the secrets change between application attempts on yarn . shuffleSecretMap . put ( appId , shuffleSecret ) ; logger . info ( "Registered shuffle secret for application {}" , appId ) ;
public class SarlEnumerationBuilderImpl { /** * Initialize the Ecore element when inside a script . */ public void eInit ( SarlScript script , String name , IJvmTypeProvider context ) { } }
setTypeResolutionContext ( context ) ; if ( this . sarlEnumeration == null ) { this . container = script ; this . sarlEnumeration = SarlFactory . eINSTANCE . createSarlEnumeration ( ) ; script . getXtendTypes ( ) . add ( this . sarlEnumeration ) ; this . sarlEnumeration . setAnnotationInfo ( XtendFactory . eINSTANCE . createXtendTypeDeclaration ( ) ) ; if ( ! Strings . isEmpty ( name ) ) { this . sarlEnumeration . setName ( name ) ; } }
public class AmazonMacieClient { /** * Removes the specified member account from Amazon Macie . * @ param disassociateMemberAccountRequest * @ return Result of the DisassociateMemberAccount operation returned by the service . * @ throws InvalidInputException * The request was rejected because an invalid or out - of - range value was supplied for an input parameter . * @ throws InternalException * Internal server error . * @ sample AmazonMacie . DisassociateMemberAccount * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / macie - 2017-12-19 / DisassociateMemberAccount " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DisassociateMemberAccountResult disassociateMemberAccount ( DisassociateMemberAccountRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDisassociateMemberAccount ( request ) ;
public class FirstElement {
    /**
     * Consumes and returns the first element from the passed iterator.
     *
     * @param consumable the iterator to be consumed
     * @return the consumed value
     * @throws IllegalArgumentException if the passed iterator is null or empty
     */
    @Override
    public E apply(Iterator<E> consumable) {
        // Validate before touching the iterator; dbc.precondition presumably
        // raises IllegalArgumentException on failure (per the documented @throws).
        dbc.precondition(consumable != null, "consuming a null iterator");
        dbc.precondition(consumable.hasNext(), "no element to consume");
        return consumable.next();
    }
}
public class Token { /** * Determines if this token is a special identifier with the given trigger . * If a list of < tt > contents < / tt > is given , this method checks that the content matches one of them . * @ param trigger the trigger of the special id * @ param contents the content to check for . If the list es empty , only the token type and the trigger is checked . * @ return < tt > true < / tt > if this token is a special identifier with the given trigger . * If < tt > contents < / tt > is not empty , the content must also match one of the elements . */ public boolean isSpecialIdentifierWithContent ( String trigger , String ... contents ) { } }
if ( ! matches ( TokenType . SPECIAL_ID , trigger ) ) { return false ; } if ( contents . length == 0 ) { return true ; } for ( String content : contents ) { if ( content != null && content . equals ( getContents ( ) ) ) { return true ; } } return false ;
public class ApiOvhCloud { /** * Activate private network in a new region * REST : POST / cloud / project / { serviceName } / network / private / { networkId } / region * @ param networkId [ required ] Network id * @ param region [ required ] Region to active on your network * @ param serviceName [ required ] Service name */ public OvhNetwork project_serviceName_network_private_networkId_region_POST ( String serviceName , String networkId , String region ) throws IOException { } }
String qPath = "/cloud/project/{serviceName}/network/private/{networkId}/region" ; StringBuilder sb = path ( qPath , serviceName , networkId ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "region" , region ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , OvhNetwork . class ) ;
public class DynamoDBService { /** * Perform a scan request and retry if ProvisionedThroughputExceededException occurs . */ private ScanResult scan ( ScanRequest scanRequest ) { } }
m_logger . debug ( "Performing scan() request on table {}" , scanRequest . getTableName ( ) ) ; Timer timer = new Timer ( ) ; boolean bSuccess = false ; ScanResult scanResult = null ; for ( int attempts = 1 ; ! bSuccess ; attempts ++ ) { try { scanResult = m_ddbClient . scan ( scanRequest ) ; if ( attempts > 1 ) { m_logger . info ( "scan() succeeded on attempt #{}" , attempts ) ; } bSuccess = true ; m_logger . debug ( "Time to scan table {}: {}" , scanRequest . getTableName ( ) , timer . toString ( ) ) ; } catch ( ProvisionedThroughputExceededException e ) { if ( attempts >= m_max_read_attempts ) { String errMsg = "All retries exceeded; abandoning scan() for table: " + scanRequest . getTableName ( ) ; m_logger . error ( errMsg , e ) ; throw new RuntimeException ( errMsg , e ) ; } m_logger . warn ( "scan() attempt #{} failed: {}" , attempts , e ) ; try { Thread . sleep ( attempts * m_retry_wait_millis ) ; } catch ( InterruptedException ex2 ) { // ignore } } } return scanResult ;
public class ValueOfTag { /** * This method is used to determine whether the parameter whose name is * stored in { @ code mParameterName } exists within the { @ code * PageContext . REQUEST _ SCOPE } scope . If the parameter does exist , * then this method will return { @ code true } , otherwise it returns * { @ code false } . This method has the side affect of loading the * parameter value into { @ code mParameterValue } if the parameter * does exist . * @ return { @ code true } if the parameter whose name is in { @ code * mParameterName } exists in the { @ code PageContext . REQUEST _ SCOPE * } scope , { @ code false } otherwise . */ private boolean parameterExists ( ) { } }
parameterValue = pageContext . getAttribute ( parameterName , PageContext . REQUEST_SCOPE ) ; // - - Harald K 20020726 if ( parameterValue == null ) { parameterValue = pageContext . getRequest ( ) . getParameter ( parameterName ) ; } return ( parameterValue != null ) ;
public class Link { /** * Parses the links attributes from the given source { @ link String } . * @ param source * @ return */ private static Map < String , String > getAttributeMap ( String source ) { } }
if ( ! StringUtils . hasText ( source ) ) { return Collections . emptyMap ( ) ; } Map < String , String > attributes = new HashMap < > ( ) ; Matcher matcher = KEY_AND_VALUE_PATTERN . matcher ( source ) ; while ( matcher . find ( ) ) { attributes . put ( matcher . group ( 1 ) , matcher . group ( 2 ) ) ; } return attributes ;
public class Entities {
    /**
     * Gets a random entity from the container (if there is one).
     *
     * @param container source container
     * @return a randomly chosen entity, or an empty {@code Optional} when none is found
     */
    public static Optional<Entity> randomEntity(final EntityContainer container) {
        // Delegates to the stream-based helper; the entity count is presumably
        // used there to bound the random selection — confirm in randomEntity0.
        return randomEntity0(container.streamEntities(), container.getEntityCount());
    }
}
public class ApplicationUpdateMarshaller {
    /**
     * Marshall the given parameter object, emitting each field against its
     * generated binding descriptor.
     *
     * @param applicationUpdate  the object to marshal; must not be {@code null}
     * @param protocolMarshaller the marshaller that receives each field
     * @throws SdkClientException if {@code applicationUpdate} is {@code null},
     *         or if any underlying marshalling call fails (original cause preserved)
     */
    public void marshall(ApplicationUpdate applicationUpdate, ProtocolMarshaller protocolMarshaller) {
        if (applicationUpdate == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Field order follows the generated protocol bindings.
            protocolMarshaller.marshall(applicationUpdate.getInputUpdates(), INPUTUPDATES_BINDING);
            protocolMarshaller.marshall(applicationUpdate.getApplicationCodeUpdate(), APPLICATIONCODEUPDATE_BINDING);
            protocolMarshaller.marshall(applicationUpdate.getOutputUpdates(), OUTPUTUPDATES_BINDING);
            protocolMarshaller.marshall(applicationUpdate.getReferenceDataSourceUpdates(), REFERENCEDATASOURCEUPDATES_BINDING);
            protocolMarshaller.marshall(applicationUpdate.getCloudWatchLoggingOptionUpdates(), CLOUDWATCHLOGGINGOPTIONUPDATES_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class MetadataSet { /** * Search all the given expressions and return the first one that matches the * given property and value ( if present ) . * If < code > value < / code > is { @ link Optional # absent ( ) } * , only the property is used in the lookup . * @ param metas * A set of metadata expressions to search * @ param property * The property of the searched expression * @ param value * The value of the searched expression , can be absent if the value * is not relevant in the search * @ return an { @ link Optional } containing an expression in the * < code > metas < / code > set matching the given property and value , or * { @ link Optional # absent ( ) } if none is found . */ public static Optional < Metadata > tryFind ( Set < Metadata > metas , final Property property , final Optional < String > value ) { } }
Preconditions . checkNotNull ( metas ) ; Preconditions . checkNotNull ( property ) ; Preconditions . checkNotNull ( value ) ; return Iterables . tryFind ( metas , new Predicate < Metadata > ( ) { @ Override public boolean apply ( Metadata meta ) { return property . equals ( meta . getProperty ( ) ) && ( ! value . isPresent ( ) || value . get ( ) . equals ( meta . getValue ( ) ) ) ; } } ) ;
public class UserController { /** * GET 获取 * @ param * @ return */ @ NoAuth @ RequestMapping ( value = "/session" , method = RequestMethod . GET ) @ ResponseBody public JsonObjectBase get ( ) { } }
VisitorVo visitorVo = userMgr . getCurVisitor ( ) ; if ( visitorVo != null ) { return buildSuccess ( "visitor" , visitorVo ) ; } else { // 没有登录啊 return buildGlobalError ( "syserror.inner" , ErrorCode . GLOBAL_ERROR ) ; }
public class CustomerArtifactPaths { /** * Comma - separated list of paths in the test execution environment where the artifacts generated by the customer ' s * tests will be pulled from . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setDeviceHostPaths ( java . util . Collection ) } or { @ link # withDeviceHostPaths ( java . util . Collection ) } if you * want to override the existing values . * @ param deviceHostPaths * Comma - separated list of paths in the test execution environment where the artifacts generated by the * customer ' s tests will be pulled from . * @ return Returns a reference to this object so that method calls can be chained together . */ public CustomerArtifactPaths withDeviceHostPaths ( String ... deviceHostPaths ) { } }
if ( this . deviceHostPaths == null ) { setDeviceHostPaths ( new java . util . ArrayList < String > ( deviceHostPaths . length ) ) ; } for ( String ele : deviceHostPaths ) { this . deviceHostPaths . add ( ele ) ; } return this ;
public class Strings {
    /**
     * Gets the string representation of the submitted object and never returns
     * {@code null}.
     *
     * @param o the object to get the string representation of
     * @return the string representation of the submitted object (never {@code null})
     */
    @SuppressWarnings("null")
    public static String valueOf(@Nullable Object o) {
        // The double call is deliberate: the inner String.valueOf(o) maps a null
        // object to "null", but can itself return null if o.toString() misbehaves
        // and returns null; the outer String.valueOf(...) maps that back to
        // "null", guaranteeing the documented non-null result.
        return String.valueOf(String.valueOf(o));
    }
}
public class NetworkWatchersInner {
    /**
     * Configures flow log on a specified resource.
     *
     * @param resourceGroupName  the name of the network watcher resource group
     * @param networkWatcherName the name of the network watcher resource
     * @param parameters         parameters that define the flow log configuration
     * @param serviceCallback    the async ServiceCallback for success/failure handling
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<FlowLogInformationInner> setFlowLogConfigurationAsync(String resourceGroupName, String networkWatcherName, FlowLogInformationInner parameters, final ServiceCallback<FlowLogInformationInner> serviceCallback) {
        // Delegates to the response-observable overload and bridges it to a
        // ServiceFuture, wiring the callback for completion notification.
        return ServiceFuture.fromResponse(setFlowLogConfigurationWithServiceResponseAsync(resourceGroupName, networkWatcherName, parameters), serviceCallback);
    }
}
public class JspRuntimeLibrary {
    /**
     * Decodes a URL-encoded string: '+' becomes a space and "%XX" hex escapes
     * become the corresponding byte value.
     *
     * @param encoded the string to decode; may be {@code null}
     * @return the decoded string, or {@code null} when the input is {@code null}
     */
    public static String decode(String encoded) {
        if (encoded == null) {
            return null;
        }
        // Fast path: nothing to decode.
        if (encoded.indexOf('%') == -1 && encoded.indexOf('+') == -1) {
            return encoded;
        }
        // One output byte per input char is an upper bound on the result size.
        byte[] decoded = new byte[encoded.length()];
        int length = 0;
        int i = 0;
        while (i < encoded.length()) {
            char c = encoded.charAt(i);
            if (c == '%') {
                // Consume "%XX"; throws (as the original did) when fewer than
                // two characters follow the percent sign.
                decoded[length++] = (byte) Integer.parseInt(encoded.substring(i + 1, i + 3), 16);
                i += 3;
            } else if (c == '+') {
                decoded[length++] = (byte) ' ';
                i++;
            } else {
                decoded[length++] = (byte) c;
                i++;
            }
        }
        // NOTE: decodes bytes using the platform default charset, matching the
        // original implementation.
        return new String(decoded, 0, length);
    }
}
public class RESTParameter { /** * Add a new child parameter with the given name and type to this parameter . The child * parameter will not be marked as required . This parameter is returned to support * builder syntax . * @ param childParamName Name of new child parameter . * @ param childParamType Type of new child parameter . * @ return This parameter object . */ public RESTParameter add ( String childParamName , String childParamType ) { } }
return add ( new RESTParameter ( childParamName , childParamType ) ) ;
public class JSSEProviderFactory {
    /**
     * Insert a provider into Security at the provided slot number, preserving
     * the relative order of the other registered providers. The full provider
     * list is rebuilt: all providers are removed and re-inserted with the new
     * provider occupying the requested slot.
     *
     * NOTE(review): {@code slot} is used directly as an index into a zero-based
     * array while re-insertion uses position {@code i + 1}, so slot appears to
     * be treated as 1-based (index 0 is never filled because newListIndex
     * starts at 1). A slot of 0, or greater than length + 1, would misbehave —
     * confirm what callers pass.
     *
     * @param newProvider the provider to insert
     * @param slot        the (apparently 1-based) position for the new provider
     */
    public static void insertProviderAt(Provider newProvider, int slot) {
        Provider[] provider_list = Security.getProviders();
        if (null == provider_list || 0 == provider_list.length) {
            // Nothing registered: nothing to reorder.
            return;
        }
        // Build the new ordering with the new provider at the requested slot.
        // NOTE(review): length + 2 leaves room for the new provider plus the
        // unused index 0 — confirm the sizing intent.
        Provider[] newList = new Provider[provider_list.length + 2];
        newList[slot] = newProvider;
        int newListIndex = 1;
        // Copy the existing providers (skipping any with the same name as the
        // new provider) into the remaining slots, keeping their order.
        for (int i = 0; i < provider_list.length; i++) {
            Provider currentProvider = provider_list[i];
            if (currentProvider != null && !currentProvider.getName().equals(newProvider.getName())) {
                // Keep incrementing until we find the first available slot.
                while (newList[newListIndex] != null) {
                    newListIndex++;
                }
                newList[newListIndex] = currentProvider;
                newListIndex++;
            }
        }
        removeAllProviders();
        // Re-register every provider in the new order (Security positions are 1-based).
        for (int i = 0; i < newList.length; i++) {
            Provider currentProvider = newList[i];
            if (currentProvider != null) {
                int position = Security.insertProviderAt(currentProvider, (i + 1));
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                    Tr.debug(tc, currentProvider.getName() + " provider added at position " + position);
            }
        }
    }
}
public class OAuth2SessionManager {
    /**
     * Refreshes the access token after expiration (before sending a request)
     * using the refresh token. The new access token is then stored in this
     * manager.
     * The critical section is synchronized on {@code refreshLock} so that no
     * two threads attempt to refresh at the same time. If a thread that waited
     * on the lock sees the token has already been refreshed, no refresh is
     * attempted either.
     * Not all providers support token refresh (ex: CloudMe).
     *
     * @throws CStorageException if the provider does not support refresh, no
     *         refresh token is available, or the HTTP exchange, JSON parsing,
     *         or credential persistence fails
     */
    public void refreshToken() throws CStorageException {
        if (refreshTokenUrl == null) {
            throw new CStorageException("Provider does not support token refresh");
        }
        // Snapshot the credentials BEFORE taking the lock so we can detect a
        // refresh performed by another thread while we were waiting.
        OAuth2Credentials currentCredentials = userCredentials.getCredentials();
        synchronized (refreshLock) {
            OAuth2Credentials afterLockCredentials = userCredentials.getCredentials();
            if (afterLockCredentials.getRefreshToken() == null) {
                throw new CStorageException("No refresh token available");
            }
            // TODO: better check again hasExpired()?
            if (!afterLockCredentials.equals(currentCredentials)) {
                // Credentials changed while waiting for the lock: another
                // thread already refreshed, so there is nothing left to do.
                LOGGER.debug("Not refreshed token in this thread, already done");
                return;
            }
            LOGGER.debug("Refreshing token");
            HttpResponse response;
            try {
                // Standard OAuth2 refresh_token grant, sent as a form-encoded POST.
                HttpPost post = new HttpPost(accessTokenUrl);
                List<NameValuePair> parameters = new ArrayList<NameValuePair>();
                parameters.add(new BasicNameValuePair(OAuth2.CLIENT_ID, appInfo.getAppId()));
                parameters.add(new BasicNameValuePair(OAuth2.CLIENT_SECRET, appInfo.getAppSecret()));
                parameters.add(new BasicNameValuePair(OAuth2.REFRESH_TOKEN, afterLockCredentials.getRefreshToken()));
                parameters.add(new BasicNameValuePair(OAuth2.SCOPE, getScopeForAuthorization()));
                parameters.add(new BasicNameValuePair(OAuth2.GRANT_TYPE, OAuth2.REFRESH_TOKEN));
                post.setEntity(new UrlEncodedFormEntity(parameters, PcsUtils.UTF8.name()));
                response = httpClient.execute(post);
            } catch (IOException ex) {
                throw new CStorageException("HTTP request while refreshing token has failed", ex);
            }
            // FIXME check status code here
            try {
                String data = EntityUtils.toString(response.getEntity(), PcsUtils.UTF8.name());
                // Update the credentials in place from the JSON token response.
                JSONObject json = new JSONObject(data);
                afterLockCredentials.update(json);
            } catch (IOException ex) {
                throw new CStorageException("Can't get string from HttpResponse: " + response.toString(), ex);
            } catch (JSONException ex) {
                throw new CStorageException("Error parsing the JSON response", ex);
            }
            // Persist the refreshed credentials.
            try {
                userCredentialsRepo.save(userCredentials);
            } catch (IOException ex) {
                throw new CStorageException("Can't save credentials", ex);
            }
        }
    }
}
public class GzipInputHandler {
    /**
     * Parse the gzip header information, if it exists, from the input buffer.
     * This is an incremental state machine: it handles running out of data at
     * any point in the gzip header sequence (progress is kept in instance
     * fields) and picks up where it left off with future buffers.
     *
     * @param data the next chunk of input
     * @return the offset into {@code data} of the first byte past the header,
     *         or {@code data.length} when the header is still incomplete
     * @throws DataFormatException if the header data is corrupt
     */
    private int parseHeader(byte[] data) throws DataFormatException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "Parsing gzip header; state=" + this.state);
        }
        int pos = 0;
        for (; pos < data.length && PARSE_STATE.DONE != this.state;) {
            byte b = data[pos++];
            if (PARSE_STATE.ID1 == this.state) {
                // First magic byte must be 0x1F (RFC 1952).
                if ((byte) 0x1F != b) {
                    throw new DataFormatException("Invalid gzip header, first byte=" + b);
                }
                this.state = PARSE_STATE.ID2;
            } else if (PARSE_STATE.ID2 == this.state) {
                // Second magic byte must be 0x8B.
                if ((byte) 0x8B != b) {
                    throw new DataFormatException("Invalid gzip header, second byte=" + b);
                }
                this.state = PARSE_STATE.COMPRESSION;
            } else if (PARSE_STATE.COMPRESSION == this.state) {
                // Only the DEFLATE compression method is valid in gzip.
                if (Deflater.DEFLATED != b) {
                    throw new DataFormatException("Invalid gzip compression method=" + b);
                }
                this.state = PARSE_STATE.FLAG;
            } else if (PARSE_STATE.FLAG == this.state) {
                if (-1 == this.gzipFlag) {
                    // First visit: save the flag byte, then skip the next 6 bytes.
                    // NOTE(review): skip() appears to record any still-unskipped
                    // count in this.parseInt (it is tested right below) — confirm.
                    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                        Tr.debug(tc, "gzip header flag=" + b);
                    }
                    this.gzipFlag = b;
                    pos = skip(data, pos, 6);
                } else {
                    // Previously saw the flag but didn't skip a full 6.
                    pos = skip(data, pos, this.parseInt);
                }
                if (0 == this.parseInt) {
                    // Fixed portion finished; route to the first optional section
                    // indicated by the flag bits, in header order.
                    if (isFExtraSet()) {
                        this.state = PARSE_STATE.FEXTRA;
                    } else if (isFNameSet()) {
                        this.state = PARSE_STATE.FNAME;
                    } else if (isFCommentSet()) {
                        this.state = PARSE_STATE.FCOMMENT;
                    } else if (isFHCRCSet()) {
                        this.state = PARSE_STATE.FHCRC;
                    } else {
                        this.state = PARSE_STATE.DONE;
                    }
                    this.parseOffset = 0;
                }
            } else if (PARSE_STATE.FEXTRA == this.state) {
                // FEXTRA has its length in 2 bytes (low byte first), then that many bytes.
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "Parsing FEXTRA data, offset=" + this.parseOffset);
                }
                if (0 == this.parseOffset) {
                    this.parseInt = b;
                    this.parseOffset++;
                    continue;
                } else if (1 == this.parseOffset) {
                    this.parseInt = (b << 8) | this.parseInt;
                    this.parseOffset++;
                }
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "FEXTRA length is " + this.parseInt);
                }
                pos = skip(data, pos, this.parseInt);
                if (0 == this.parseInt) {
                    if (isFNameSet()) {
                        this.state = PARSE_STATE.FNAME;
                    } else if (isFCommentSet()) {
                        this.state = PARSE_STATE.FCOMMENT;
                    } else if (isFHCRCSet()) {
                        this.state = PARSE_STATE.FHCRC;
                    } else {
                        this.state = PARSE_STATE.DONE;
                    }
                    this.parseOffset = 0;
                }
            } else if (PARSE_STATE.FNAME == this.state) {
                // FNAME is a zero-delimited file name; it is discarded.
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "Parsing FNAME data");
                }
                if (0 != b) {
                    pos = skipPast(data, pos, (byte) 0);
                }
                if (data.length == pos) {
                    // Ran out of data before the terminator; resume with the next buffer.
                    return pos;
                }
                if (isFCommentSet()) {
                    this.state = PARSE_STATE.FCOMMENT;
                } else if (isFHCRCSet()) {
                    this.state = PARSE_STATE.FHCRC;
                } else {
                    this.state = PARSE_STATE.DONE;
                }
            } else if (PARSE_STATE.FCOMMENT == this.state) {
                // FCOMMENT is a zero-delimited file comment; it is discarded.
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "Parsing FCOMMENT data");
                }
                if (0 != b) {
                    pos = skipPast(data, pos, (byte) 0);
                }
                if (data.length == pos) {
                    return pos;
                }
                if (isFHCRCSet()) {
                    this.state = PARSE_STATE.FHCRC;
                } else {
                    this.state = PARSE_STATE.DONE;
                }
            } else if (PARSE_STATE.FHCRC == this.state) {
                // FHCRC carries 2 extra bytes (checksum of the header bytes);
                // they are consumed here without being verified.
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "Parsing FHCRC data; offset=" + this.parseOffset);
                }
                this.parseOffset++;
                if (2 > this.parseOffset) {
                    continue;
                }
                this.state = PARSE_STATE.DONE;
            }
        } // end loop
        if (PARSE_STATE.DONE == this.state) {
            // Header complete: reset the scratch fields for later parsing.
            this.parseOffset = 0;
            this.parseInt = 0;
        }
        return pos;
    }
}
public class Sql { /** * Performs the given SQL query and return a " page " of rows from the result set . A page is defined as starting at * a 1 - based offset , and containing a maximum number of rows . * In addition , the < code > metaClosure < / code > will be called once passing in the * < code > ResultSetMetaData < / code > as argument . * The query may contain placeholder question marks which match the given list of parameters . * Note that the underlying implementation is based on either invoking < code > ResultSet . absolute ( ) < / code > , * or if the ResultSet type is < code > ResultSet . TYPE _ FORWARD _ ONLY < / code > , the < code > ResultSet . next ( ) < / code > method * is invoked equivalently . The first row of a ResultSet is 1 , so passing in an offset of 1 or less has no effect * on the initial positioning within the result set . * Note that different database and JDBC driver implementations may work differently with respect to this method . * Specifically , one should expect that < code > ResultSet . TYPE _ FORWARD _ ONLY < / code > may be less efficient than a * " scrollable " type . * This method supports named and named ordinal parameters by supplying such * parameters in the < code > params < / code > list . See the class Javadoc for more details . * Resource handling is performed automatically where appropriate . * @ param sql the SQL statement * @ param params a list of parameters * @ param offset the 1 - based offset for the first row to be processed * @ param maxRows the maximum number of rows to be processed * @ param metaClosure called for meta data ( only once after sql execution ) * @ return a list of GroovyRowResult objects * @ throws SQLException if a database access error occurs */ public List < GroovyRowResult > rows ( String sql , List < Object > params , int offset , int maxRows , @ ClosureParams ( value = SimpleType . class , options = "java.sql.ResultSetMetaData" ) Closure metaClosure ) throws SQLException { } }
AbstractQueryCommand command = createPreparedQueryCommand ( sql , params ) ; // for efficiency set maxRows ( adjusted for the first offset rows we are going to skip the cursor over ) command . setMaxRows ( offset + maxRows ) ; try { return asList ( sql , command . execute ( ) , offset , maxRows , metaClosure ) ; } finally { command . closeResources ( ) ; }
public class PlannerReader { /** * This method extracts calendar data from a Planner file . * @ param project Root node of the Planner file */ private void readCalendars ( Project project ) throws MPXJException { } }
Calendars calendars = project . getCalendars ( ) ; if ( calendars != null ) { for ( net . sf . mpxj . planner . schema . Calendar cal : calendars . getCalendar ( ) ) { readCalendar ( cal , null ) ; } Integer defaultCalendarID = getInteger ( project . getCalendar ( ) ) ; m_defaultCalendar = m_projectFile . getCalendarByUniqueID ( defaultCalendarID ) ; if ( m_defaultCalendar != null ) { m_projectFile . getProjectProperties ( ) . setDefaultCalendarName ( m_defaultCalendar . getName ( ) ) ; } }
public class CrawlController {
    /**
     * Adds a new seed URL — a URL fetched by the crawler to extract and follow
     * new URLs. A specific document id may be assigned to the seed; it must be
     * unique, and seeds must be added in increasing order of document ids
     * (adding seeds with ids 1, 2 and 7 means the next discovered URL gets id
     * 8). Specifying doc ids is mainly useful to give a new crawl the same ids
     * as a stored previous crawl.
     *
     * @param pageUrl the URL of the seed
     * @param docId   the document id to assign to this seed URL, or a negative
     *                value to let the server assign one
     * @throws InterruptedException
     * @throws IOException
     */
    public void addSeed(String pageUrl, int docId) throws IOException, InterruptedException {
        String canonicalUrl = URLCanonicalizer.getCanonicalURL(pageUrl);
        if (canonicalUrl == null) {
            // Unparseable seed: log and ignore it.
            logger.error("Invalid seed URL: {}", pageUrl);
        } else {
            if (docId < 0) {
                // No caller-supplied id: skip already-seen URLs, otherwise
                // allocate a fresh document id.
                docId = docIdServer.getDocId(canonicalUrl);
                if (docId > 0) {
                    logger.trace("This URL is already seen.");
                    return;
                }
                docId = docIdServer.getNewDocID(canonicalUrl);
            } else {
                try {
                    docIdServer.addUrlAndDocId(canonicalUrl, docId);
                } catch (RuntimeException e) {
                    if (config.isHaltOnError()) {
                        throw e;
                    } else {
                        // NOTE(review): after this failure the method still falls
                        // through and schedules the seed with the caller's docId —
                        // confirm this best-effort behavior is intended.
                        logger.error("Could not add seed: {}", e.getMessage());
                    }
                }
            }
            WebURL webUrl = new WebURL();
            webUrl.setTldList(tldList);
            webUrl.setURL(canonicalUrl);
            webUrl.setDocid(docId);
            webUrl.setDepth((short) 0);
            if (robotstxtServer.allows(webUrl)) {
                frontier.schedule(webUrl);
            } else {
                // using the WARN level here, as the user specifically asked to add this seed
                logger.warn("Robots.txt does not allow this seed: {}", pageUrl);
            }
        }
    }
}
public class CRFClassifier {
    /**
     * Convert an ObjectBank to arrays of data features and labels.
     *
     * @param documents the documents to convert
     * @return a Pair whose first element is an int[][][][] representing the
     *         data — data[i][j][k][m] is the index of the m-th feature active
     *         at window position k of the j-th clique/factor of document i —
     *         and whose second element is an int[][] of gold label indices per
     *         position per document
     */
    public Pair<int[][][][], int[][]> documentsToDataAndLabels(Collection<List<IN>> documents) {
        // Per-document feature tensors, accumulated as we convert each document.
        List<int[][][]> dataPerDoc = new ArrayList<int[][][]>();
        // Per-document label arrays (label index at each position).
        List<int[]> labelsPerDoc = new ArrayList<int[]>();
        int numDatums = 0;
        for (List<IN> document : documents) {
            Pair<int[][][], int[]> converted = documentToDataAndLabels(document);
            dataPerDoc.add(converted.first());
            labelsPerDoc.add(converted.second());
            numDatums += document.size();
        }
        System.err.println("numClasses: " + classIndex.size() + ' ' + classIndex);
        System.err.println("numDocuments: " + dataPerDoc.size());
        System.err.println("numDatums: " + numDatums);
        System.err.println("numFeatures: " + featureIndex.size());
        printFeatures();
        return new Pair<int[][][][], int[][]>(
                dataPerDoc.toArray(new int[0][][][]),
                labelsPerDoc.toArray(new int[0][]));
    }
}
public class ORBManager { /** * Check if the checkORBgiopMaxMsgSize has been set . This environment * variable should be set in Mega bytes . */ private static String checkORBgiopMaxMsgSize ( ) { } }
/* * JacORB definition ( see jacorb . properties file ) : * This is NOT the maximum buffer size that can be used , but just the * largest size of buffers that will be kept and managed . This value * will be added to an internal constant of 5 , so the real value in * bytes is 2 * * ( 5 + maxManagedBufSize - 1 ) . You only need to increase this * value if you are dealing with LOTS of LARGE data structures . You may * decrease it to make the buffer manager release large buffers * immediately rather than keeping them for later reuse . */ String str = "20" ; // Set to 16 Mbytes // Check if environment ask for bigger size . final String tmp = System . getProperty ( "ORBgiopMaxMsgSize" ) ; if ( tmp != null && checkBufferSize ( tmp ) != null ) { str = tmp ; } return str ;
public class QueryImpl {
    /**
     * Sorts a query by a column.
     *
     * @param strColumn column to sort
     * @param order sort type (Query.ORDER_ASC or Query.ORDER_DESC)
     * @throws PageException if the column cannot be resolved or the sort fails
     */
    @Override
    public synchronized void sort(String strColumn, int order) throws PageException {
        // disconnectCache ( ) ;
        // Resolve the column by name, then delegate to the column-based overload.
        sort(getColumn(strColumn), order);
    }
}
public class AbstractHibernateDao { /** * Setup a query with parameters and other configurations . * @ param query * @ param paramValues * @ param paramTypes * @ param offset * @ param limit */ private void setupQuery ( Query query , Object [ ] paramValues , Type [ ] paramTypes , Integer offset , Integer limit ) { } }
if ( paramValues != null && paramTypes != null ) { query . setParameters ( paramValues , paramTypes ) ; } if ( offset != null ) { query . setFirstResult ( offset ) ; } if ( limit != null ) { query . setMaxResults ( limit ) ; }
public class ZoneOperationId {
    /**
     * Returns a zone operation identity given project, zone and operation names.
     *
     * @param project the project name
     * @param zone the zone name
     * @param operation the operation name
     * @return a new identity wrapping the three names
     */
    public static ZoneOperationId of(String project, String zone, String operation) {
        return new ZoneOperationId(project, zone, operation);
    }
}
public class MaterialValueBox { /** * Updates the style of the field label according to the field value if the * field value is empty - null or " " - removes the label ' active ' style else * will add the ' active ' style to the field label . */ protected void updateLabelActiveStyle ( ) { } }
if ( this . valueBoxBase . getText ( ) != null && ! this . valueBoxBase . getText ( ) . isEmpty ( ) ) { label . addStyleName ( CssName . ACTIVE ) ; } else { label . removeStyleName ( CssName . ACTIVE ) ; }
public class I18nLoader { /** * looks for a file named cukedoctor . properties using @ baseDir as starting point */ private InputStream findCukedoctorProperties ( String baseDir ) { } }
List < String > files = FileUtil . findFiles ( baseDir , "cukedoctor.properties" , true ) ; if ( files != null && ! files . isEmpty ( ) ) { String path = files . get ( 0 ) ; log . fine ( "Loading cukedoctor resource bundle from: " + path ) ; File file = new File ( path ) ; try { return new FileInputStream ( file ) ; } catch ( Exception e ) { log . log ( Level . WARNING , "Could not load resource bundle from target folder" , e ) ; } } return null ;
public class Coordination {
    /**
     * Setter for resolved - sets the feature value.
     * NOTE(review): UIMA JCas generated code — do not hand-edit the feature
     * access logic.
     *
     * @generated
     * @param v value to set into the feature
     */
    public void setResolved(String v) {
        // Guard against a type system that lacks the "resolved" feature.
        if (Coordination_Type.featOkTst && ((Coordination_Type) jcasType).casFeat_resolved == null)
            jcasType.jcas.throwFeatMissing("resolved", "de.julielab.jules.types.Coordination");
        jcasType.ll_cas.ll_setStringValue(addr, ((Coordination_Type) jcasType).casFeatCode_resolved, v);
    }
}
public class IntObjectHashMap {
    /**
     * Get the stored value associated with the given key.
     * Uses open addressing: the initial slot is derived from the hash, and on
     * collision successive slots are visited via a perturbation-based probe
     * sequence until the key or a FREE slot is found.
     *
     * @param key key associated with the data
     * @return data associated with the key, or {@code missingEntries} when absent
     */
    public T get(final int key) {
        final int hash = hashOf(key);
        int index = hash & mask;
        // Fast path: key sits in its home slot.
        if (containsKey(key, index)) {
            return (T) values[index];
        }
        // Home slot never used: the key cannot be anywhere else.
        if (states[index] == FREE) {
            return missingEntries;
        }
        // Probe sequence: keep walking until the key or an empty slot appears.
        int j = index;
        for (int perturb = perturb(hash); states[index] != FREE; perturb >>= PERTURB_SHIFT) {
            j = probe(perturb, j);
            index = j & mask;
            if (containsKey(key, index)) {
                return (T) values[index];
            }
        }
        return missingEntries;
    }
}
public class MtasRequestHandler { /** * Finish status . * @ param item * the item * @ throws IOException * Signals that an I / O exception has occurred . */ public void finishStatus ( MtasSolrStatus item ) throws IOException { } }
running . remove ( item ) ; if ( item . error ( ) ) { error . add ( item ) ; }
public class ContainerDefinition { /** * The dependencies defined for container startup and shutdown . A container can contain multiple dependencies . When * a dependency is defined for container startup , for container shutdown it is reversed . * Your Amazon ECS container instances require at least version 1.26.0 of the container agent to enable container * dependencies . However , we recommend using the latest container agent version . For information about checking your * agent version and updating to the latest version , see < a * href = " http : / / docs . aws . amazon . com / AmazonECS / latest / developerguide / ecs - agent - update . html " > Updating the Amazon ECS * Container Agent < / a > in the < i > Amazon Elastic Container Service Developer Guide < / i > . If you are using an Amazon * ECS - optimized Linux AMI , your instance needs at least version 1.26.0-1 of the < code > ecs - init < / code > package . If * your container instances are launched from version < code > 20190301 < / code > or later , then they contain the required * versions of the container agent and < code > ecs - init < / code > . For more information , see < a * href = " http : / / docs . aws . amazon . com / AmazonECS / latest / developerguide / ecs - optimized _ AMI . html " > Amazon ECS - optimized * Linux AMI < / a > in the < i > Amazon Elastic Container Service Developer Guide < / i > . * @ param dependsOn * The dependencies defined for container startup and shutdown . A container can contain multiple * dependencies . When a dependency is defined for container startup , for container shutdown it is * reversed . < / p > * Your Amazon ECS container instances require at least version 1.26.0 of the container agent to enable * container dependencies . However , we recommend using the latest container agent version . For information * about checking your agent version and updating to the latest version , see < a * href = " http : / / docs . aws . amazon . 
com / AmazonECS / latest / developerguide / ecs - agent - update . html " > Updating the * Amazon ECS Container Agent < / a > in the < i > Amazon Elastic Container Service Developer Guide < / i > . If you are * using an Amazon ECS - optimized Linux AMI , your instance needs at least version 1.26.0-1 of the * < code > ecs - init < / code > package . If your container instances are launched from version < code > 20190301 < / code > * or later , then they contain the required versions of the container agent and < code > ecs - init < / code > . For * more information , see < a * href = " http : / / docs . aws . amazon . com / AmazonECS / latest / developerguide / ecs - optimized _ AMI . html " > Amazon * ECS - optimized Linux AMI < / a > in the < i > Amazon Elastic Container Service Developer Guide < / i > . */ public void setDependsOn ( java . util . Collection < ContainerDependency > dependsOn ) { } }
if ( dependsOn == null ) { this . dependsOn = null ; return ; } this . dependsOn = new com . amazonaws . internal . SdkInternalList < ContainerDependency > ( dependsOn ) ;
public class ClientRequestExecutor { /** * Null out current clientRequest before calling complete . timeOut and * complete must * not * be within a synchronized block since both eventually * check in the client request executor . Such a check in can trigger * additional synchronized methods deeper in the stack . */ private ClientRequest < ? > completeClientRequest ( ) { } }
ClientRequest < ? > local = atomicNullOutClientRequest ( ) ; if ( local == null ) { return null ; } if ( isExpired ) { local . timeOut ( ) ; } else { local . complete ( ) ; } if ( logger . isTraceEnabled ( ) ) logger . trace ( "Marked client associated with " + socketChannel . socket ( ) + " as complete" ) ; return local ;
public class WSJdbcResultSet {
    /**
     * Decide how to surface a RuntimeException that may indicate a closed wrapper.
     *
     * @param runtimeX a RuntimeException which occurred, indicating the wrapper may be closed.
     * @throws SQLException if the wrapper is closed and exception mapping is disabled.
     * @return the RuntimeException to throw if it isn't.
     */
    final protected RuntimeException runtimeXIfNotClosed(RuntimeException runtimeX) throws SQLException {
        // A closed result set takes priority over the original runtime error.
        if (state == State.CLOSED)
            throw createClosedException("ResultSet");
        return runtimeX;
    }
}
public class Disruptor {
    /**
     * Publish an event to the ring buffer.
     *
     * @param <A> Class of the user supplied argument.
     * @param eventTranslator the translator that will load data into the event.
     * @param arg A single argument to load into the event
     */
    public <A> void publishEvent(final EventTranslatorOneArg<T, A> eventTranslator, final A arg) {
        // Thin delegate: the ring buffer handles claiming and publishing.
        ringBuffer.publishEvent(eventTranslator, arg);
    }
}
public class InhibitAnyPolicyExtension { /** * Delete the attribute value . * @ param name name of attribute to delete . Must be SKIP _ CERTS . * @ throws IOException on error . In this case , IOException will always be * thrown , because the only attribute , SKIP _ CERTS , is * required . */ public void delete ( String name ) throws IOException { } }
if ( name . equalsIgnoreCase ( SKIP_CERTS ) ) throw new IOException ( "Attribute " + SKIP_CERTS + " may not be deleted." ) ; else throw new IOException ( "Attribute name not recognized by " + "CertAttrSet:InhibitAnyPolicy." ) ;
public class DefaultGroovyMethods {
    /**
     * Support the subscript operator with a range for a byte array.
     *
     * @param array a byte array
     * @param range a range indicating the indices for the items to retrieve
     * @return list of the retrieved bytes
     * @since 1.0
     */
    @SuppressWarnings("unchecked")
    public static List<Byte> getAt(byte[] array, Range range) {
        // Shared helper handles range normalization and boxing for all primitive arrays.
        return primitiveArrayGet(array, range);
    }
}
public class ApiImplementor {
    /**
     * Override if implementing one or more 'persistent connection' operations.
     * These are operations that maintain long running connections, potentially
     * staying alive as long as the client holds them open.
     * The default implementation rejects every endpoint; subclasses that
     * support pconn endpoints must override.
     *
     * @param msg the HTTP message containing the API request
     * @param httpIn the input stream
     * @param httpOut the output stream
     * @param name the name of the requested pconn endpoint
     * @param params the API request parameters
     * @throws ApiException always, with type BAD_PCONN, in this base implementation
     */
    public void handleApiPersistentConnection(HttpMessage msg, HttpInputStream httpIn,
            HttpOutputStream httpOut, String name, JSONObject params) throws ApiException {
        throw new ApiException(ApiException.Type.BAD_PCONN, name);
    }
}
public class GenericSorting {
    /**
     * Sorts the specified sub-array into ascending order.
     * This is an index-based variant of the classic JDK (Bentley-McIlroy)
     * three-way quicksort: all element access goes through the supplied
     * comparator and swapper, so the pivot must be tracked as an index and
     * updated whenever it is moved ("moving target" notes below).
     *
     * @param off start index of the range to sort
     * @param len number of elements to sort
     * @param comp compares elements by index
     * @param swapper swaps elements by index
     */
    private static void quickSort1(int off, int len, IntComparator comp, Swapper swapper) {
        // Insertion sort on smallest arrays
        if (len < SMALL) {
            for (int i = off; i < len + off; i++)
                for (int j = i; j > off && (comp.compare(j - 1, j) > 0); j--) {
                    swapper.swap(j, j - 1);
                }
            return;
        }
        // Choose a partition element, v
        int m = off + len / 2; // Small arrays, middle element
        if (len > SMALL) {
            int l = off;
            int n = off + len - 1;
            if (len > MEDIUM) {
                // Big arrays, pseudomedian of 9
                int s = len / 8;
                l = med3(l, l + s, l + 2 * s, comp);
                m = med3(m - s, m, m + s, comp);
                n = med3(n - 2 * s, n - s, n, comp);
            }
            m = med3(l, m, n, comp); // Mid-size, med of 3
        }
        // long v = x [ m ] ;
        // Establish Invariant: v* (<v)* (>v)* v*
        int a = off, b = a, c = off + len - 1, d = c;
        while (true) {
            int comparison;
            while (b <= c && ((comparison = comp.compare(b, m)) <= 0)) {
                if (comparison == 0) {
                    // Pivot equals b: move equal elements to the left end,
                    // keeping track of where the pivot index ends up.
                    if (a == m)
                        m = b; // moving target; DELTA to JDK!!!
                    else if (b == m)
                        m = a; // moving target; DELTA to JDK!!!
                    swapper.swap(a++, b);
                }
                b++;
            }
            while (c >= b && ((comparison = comp.compare(c, m)) >= 0)) {
                if (comparison == 0) {
                    if (c == m)
                        m = d; // moving target; DELTA to JDK!!!
                    else if (d == m)
                        m = c; // moving target; DELTA to JDK!!!
                    swapper.swap(c, d--);
                }
                c--;
            }
            if (b > c)
                break;
            if (b == m)
                m = d; // moving target; DELTA to JDK!!!
            else if (c == m)
                m = c; // moving target; DELTA to JDK!!!
            swapper.swap(b++, c--);
        }
        // Swap partition elements back to middle
        int s, n = off + len;
        s = java.lang.Math.min(a - off, b - a);
        vecswap(swapper, off, b - s, s);
        s = java.lang.Math.min(d - c, n - d - 1);
        vecswap(swapper, b, n - s, s);
        // Recursively sort non-partition-elements
        if ((s = b - a) > 1)
            quickSort1(off, s, comp, swapper);
        if ((s = d - c) > 1)
            quickSort1(n - s, s, comp, swapper);
    }
}
public class PlannerWriter { /** * Process the standard working hours for a given day . * @ param mpxjCalendar MPXJ Calendar instance * @ param uniqueID unique ID sequence generation * @ param day Day instance * @ param typeList Planner list of days */ private void processWorkingHours ( ProjectCalendar mpxjCalendar , Sequence uniqueID , Day day , List < OverriddenDayType > typeList ) { } }
if ( isWorkingDay ( mpxjCalendar , day ) ) { ProjectCalendarHours mpxjHours = mpxjCalendar . getCalendarHours ( day ) ; if ( mpxjHours != null ) { OverriddenDayType odt = m_factory . createOverriddenDayType ( ) ; typeList . add ( odt ) ; odt . setId ( getIntegerString ( uniqueID . next ( ) ) ) ; List < Interval > intervalList = odt . getInterval ( ) ; for ( DateRange mpxjRange : mpxjHours ) { Date rangeStart = mpxjRange . getStart ( ) ; Date rangeEnd = mpxjRange . getEnd ( ) ; if ( rangeStart != null && rangeEnd != null ) { Interval interval = m_factory . createInterval ( ) ; intervalList . add ( interval ) ; interval . setStart ( getTimeString ( rangeStart ) ) ; interval . setEnd ( getTimeString ( rangeEnd ) ) ; } } } }
public class Base64Coder {
    /**
     * Encodes a string into Base64 format.
     * No blanks or line breaks are inserted.
     * NOTE(review): the @Inline annotation mirrors this body for inlining, so
     * the two must be kept in sync. getBytes() uses the platform default
     * charset — presumably intentional for symmetry with decoding; verify.
     *
     * @param string a String to be encoded.
     * @return A String with the Base64 encoded data.
     */
    @Pure
    @Inline(value = "new String(Base64Coder.encode(($1).getBytes()))", imported = { Base64Coder.class })
    public static String encodeString(String string) {
        return new String(encode(string.getBytes()));
    }
}
public class JCusolverDn {
    /**
     * Queries the workspace size for generating one of the unitary matrices
     * Q or P**T determined by GEBRD.
     *
     * @param handle cuSOLVER dense handle
     * @param side which matrix to generate (Q or P**T)
     * @param m number of rows
     * @param n number of columns
     * @param k number of elementary reflectors
     * @param A pointer to the matrix from GEBRD
     * @param lda leading dimension of A
     * @param tau pointer to the scalar factors of the reflectors
     * @param lwork output: required workspace size in elements
     * @return the cuSOLVER status code, checked for success
     */
    public static int cusolverDnSorgbr_bufferSize(cusolverDnHandle handle, int side, int m, int n,
            int k, Pointer A, int lda, Pointer tau, int[] lwork) {
        // Thin JNI shim: delegate to the native call and validate the status.
        return checkResult(cusolverDnSorgbr_bufferSizeNative(handle, side, m, n, k, A, lda, tau, lwork));
    }
}
public class EntityPropertyDescFactory { /** * カラム名を表示するかどうかを処理します 。 * @ param entityDesc エンティティ記述 * @ param propertyDesc エンティティプロパティ記述 * @ param columnMeta カラムメタデータ */ protected void handleShowColumnName ( EntityDesc entityDesc , EntityPropertyDesc propertyDesc , ColumnMeta columnMeta ) { } }
if ( showColumnName || isNameDifferentBetweenPropertyAndColumn ( entityDesc , propertyDesc ) ) { propertyDesc . setShowColumnName ( true ) ; }
public class MapBasedDataMaster {
    /**
     * Returns list (null safe) of elements for desired key from dataSource files.
     *
     * @param key desired node key
     * @return list of elements for desired key
     * @throws IllegalArgumentException if no element for key has been found
     */
    @SuppressWarnings("unchecked")
    @Override
    public List<String> getStringList(String key) {
        // Delegate to the generic accessor; the raw List.class token forces
        // the unchecked conversion suppressed above.
        return getData(key, List.class);
    }
}
public class Probe { /** * Create and return a LinkedList of a combination of both scids and siids . * This is a method that is used primarily in the creation of split packets . * @ return combined LikedList */ public LinkedList < ProbeIdEntry > getCombinedIdentifierList ( ) { } }
LinkedList < ProbeIdEntry > combinedList = new LinkedList < ProbeIdEntry > ( ) ; for ( String id : this . getProbeWrapper ( ) . getServiceContractIDs ( ) ) { combinedList . add ( new ProbeIdEntry ( "scid" , id ) ) ; } for ( String id : this . getProbeWrapper ( ) . getServiceInstanceIDs ( ) ) { combinedList . add ( new ProbeIdEntry ( "siid" , id ) ) ; } return combinedList ;
public class Ifc4PackageImpl {
    /**
     * Lazily resolves the IfcBlobTexture EClass from the registered package.
     * NOTE(review): EMF generated accessor — the classifier index (41) is
     * fixed by the generator; do not hand-edit.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @Override
    public EClass getIfcBlobTexture() {
        if (ifcBlobTextureEClass == null) {
            ifcBlobTextureEClass = (EClass) EPackage.Registry.INSTANCE
                    .getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(41);
        }
        return ifcBlobTextureEClass;
    }
}
public class BatchMeterUsageResult { /** * Contains all UsageRecords processed by BatchMeterUsage . These records were either honored by AWS Marketplace * Metering Service or were invalid . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setResults ( java . util . Collection ) } or { @ link # withResults ( java . util . Collection ) } if you want to override * the existing values . * @ param results * Contains all UsageRecords processed by BatchMeterUsage . These records were either honored by AWS * Marketplace Metering Service or were invalid . * @ return Returns a reference to this object so that method calls can be chained together . */ public BatchMeterUsageResult withResults ( UsageRecordResult ... results ) { } }
if ( this . results == null ) { setResults ( new java . util . ArrayList < UsageRecordResult > ( results . length ) ) ; } for ( UsageRecordResult ele : results ) { this . results . add ( ele ) ; } return this ;
public class CacheProviderWrapper {
    /**
     * Adds an alias for the given key in the cache's mapping table. If the alias
     * is already associated with another key, it will be changed to associate
     * with the new key.
     * NOTE(review): this method currently performs no aliasing. When the
     * provider reports alias support it only traces that the feature "is not
     * implemented yet"; otherwise it logs DYNA1063E. The branch sense (debug
     * on supported, error on unsupported) looks inverted — confirm intent.
     *
     * @param key the key associated with alias
     * @param aliasArray the alias to use for lookups
     * @param askPermission True implies that execution must ask the coordinating CacheUnit for permission (No effect on CoreCache).
     * @param coordinate Indicates that the value should be set in other caches caching this value. (No effect on CoreCache)
     */
    @Override
    public void addAlias(Object key, Object[] aliasArray, boolean askPermission, boolean coordinate) {
        final String methodName = "addAlias()";
        if (this.featureSupport.isAliasSupported()) {
            if (tc.isDebugEnabled()) {
                Tr.debug(tc, methodName + " cacheName=" + cacheName + " ERROR because it is not implemented yet");
            }
        } else {
            Tr.error(tc, "DYNA1063E", new Object[] { methodName, cacheName, this.cacheProviderName });
        }
        return;
    }
}
public class LdiSrl { /** * Adjust initial character ( s ) as beans property . < br > * Basically same as initUncap ( ) method except only when * it starts with two upper case character , for example , ' EMecha ' * @ param capitalizedName The capitalized name for beans property . ( NotNull ) * @ return The name as beans property that initial is adjusted . ( NotNull ) */ public static String initBeansProp ( String capitalizedName ) { } }
// according to Java Beans rule assertObjectNotNull ( "capitalizedName" , capitalizedName ) ; if ( is_Null_or_TrimmedEmpty ( capitalizedName ) ) { return capitalizedName ; } if ( isInitTwoUpperCase ( capitalizedName ) ) { // for example , ' EMecha ' return capitalizedName ; } return initUncap ( capitalizedName ) ;
public class TransactionSignature {
    /**
     * Returns a decoded signature.
     *
     * @param bytes the Bitcoin-encoded signature: DER signature followed by a sighash byte
     * @param requireCanonicalEncoding if the encoding of the signature must be canonical.
     * @param requireCanonicalSValue if the S-value must be canonical (below half the order of the curve).
     * @throws SignatureDecodeException if the signature is unparseable in some way.
     * @throws VerificationException if the signature is invalid.
     */
    public static TransactionSignature decodeFromBitcoin(byte[] bytes, boolean requireCanonicalEncoding,
            boolean requireCanonicalSValue) throws SignatureDecodeException, VerificationException {
        // Bitcoin encoding is DER signature + sighash byte.
        if (requireCanonicalEncoding && !isEncodingCanonical(bytes))
            throw new VerificationException.NoncanonicalSignature();
        ECKey.ECDSASignature sig = ECKey.ECDSASignature.decodeFromDER(bytes);
        if (requireCanonicalSValue && !sig.isCanonical())
            throw new VerificationException("S-value is not canonical.");
        // In Bitcoin, any value of the final byte is valid, but not necessarily canonical. See javadocs for
        // isEncodingCanonical to learn more about this. So we must store the exact byte found.
        return new TransactionSignature(sig.r, sig.s, bytes[bytes.length - 1]);
    }
}
public class CommandExecutionSpec {
    /**
     * Check the exitStatus of previous command execution matches the expected one.
     *
     * @param expectedExitStatus the exit status the previous command is expected to have
     * @deprecated Success exit status is directly checked in the "execute remote command"
     *             method, so this is not needed anymore.
     */
    @Deprecated
    @Then("^the command exit status is '(.+?)'$")
    public void checkShellExitStatus(int expectedExitStatus) throws Exception {
        assertThat(commonspec.getCommandExitStatus()).as("Is equal to " + expectedExitStatus + ".").isEqualTo(expectedExitStatus);
    }
}
public class ListQualificationRequestsResult { /** * The Qualification request . The response includes one QualificationRequest element for each Qualification request * returned by the query . * @ param qualificationRequests * The Qualification request . The response includes one QualificationRequest element for each Qualification * request returned by the query . */ public void setQualificationRequests ( java . util . Collection < QualificationRequest > qualificationRequests ) { } }
if ( qualificationRequests == null ) { this . qualificationRequests = null ; return ; } this . qualificationRequests = new java . util . ArrayList < QualificationRequest > ( qualificationRequests ) ;
public class DescribeClusterTracksResult { /** * A list of maintenance tracks output by the < code > DescribeClusterTracks < / code > operation . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setMaintenanceTracks ( java . util . Collection ) } or { @ link # withMaintenanceTracks ( java . util . Collection ) } if * you want to override the existing values . * @ param maintenanceTracks * A list of maintenance tracks output by the < code > DescribeClusterTracks < / code > operation . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeClusterTracksResult withMaintenanceTracks ( MaintenanceTrack ... maintenanceTracks ) { } }
if ( this . maintenanceTracks == null ) { setMaintenanceTracks ( new com . amazonaws . internal . SdkInternalList < MaintenanceTrack > ( maintenanceTracks . length ) ) ; } for ( MaintenanceTrack ele : maintenanceTracks ) { this . maintenanceTracks . add ( ele ) ; } return this ;
public class CrsWktExtension { /** * Remove the extension . Leaves the column and values . * @ since 3.2.0 */ public void removeExtension ( ) { } }
try { if ( extensionsDao . isTableExists ( ) ) { extensionsDao . deleteByExtension ( EXTENSION_NAME ) ; } } catch ( SQLException e ) { throw new GeoPackageException ( "Failed to delete CRS WKT extension. GeoPackage: " + geoPackage . getName ( ) , e ) ; }
public class Importer {
    /**
     * Check whether a file with given path and checksum already exists.
     *
     * @param path the file path to match
     * @param checksum the checksum to match
     * @return the first matching File node, or null when none exists
     * @throws FrameworkException on query failure
     */
    private File fileExists(final String path, final long checksum) throws FrameworkException {
        final PropertyKey<Long> checksumKey = StructrApp.key(File.class, "checksum");
        final PropertyKey<String> pathKey = StructrApp.key(File.class, "path");
        // Both path and checksum must match for the file to count as existing.
        return app.nodeQuery(File.class).and(pathKey, path).and(checksumKey, checksum).getFirst();
    }
}
public class ImageScale3x { /** * Retrieve the scaled image . Note this is the method that actually * does the work so it may take some time to return * @ return The newly scaled image */ public BufferedImage getScaledImage ( ) { } }
RawScale3x scaler = new RawScale3x ( srcData , width , height ) ; BufferedImage image = new BufferedImage ( width * 3 , height * 3 , BufferedImage . TYPE_INT_ARGB ) ; image . setRGB ( 0 , 0 , width * 3 , height * 3 , scaler . getScaledData ( ) , 0 , width * 3 ) ; return image ;
public class BadAlias {
    /**
     * syck_badalias_cmp — compares two alias objects by delegating to the
     * {@code <=>} of their {@code @name} instance variables.
     *
     * @param alias1 left-hand alias
     * @param alias2 right-hand alias
     * @return the Ruby result of {@code @name1 <=> @name2}
     */
    @JRubyMethod(name = "<=>")
    public static IRubyObject cmp(IRubyObject alias1, IRubyObject alias2) {
        IRubyObject str1 = (IRubyObject) ((RubyObject) alias1).fastGetInstanceVariable("@name");
        IRubyObject str2 = (IRubyObject) ((RubyObject) alias2).fastGetInstanceVariable("@name");
        // Delegate the comparison to Ruby's spaceship operator on the names.
        return str1.callMethod(alias1.getRuntime().getCurrentContext(), "<=>", str2);
    }
}
public class Utils { /** * Append the second pathString to the first . The result will not end with a / . * In case two absolute paths are given , e . g . / A / B / , and / C / D / , then the result * will be / A / B / C / D * Multiple slashes will be shortened to a single slash , so / A / / / B is equivalent to / A / B */ @ NonNull public static String appendPath ( @ NonNull String first , @ NonNull String second ) { } }
String result = first + SEP + second ; while ( result . contains ( "//" ) ) { result = result . replaceAll ( "//" , "/" ) ; } if ( result . length ( ) > 1 && result . endsWith ( SEP ) ) { return result . substring ( 0 , result . length ( ) - 1 ) ; } else { return result ; }
public class KeyVaultClientBaseImpl {
    /**
     * List certificate issuers for a specified key vault.
     * The GetCertificateIssuers operation returns the set of certificate issuer
     * resources in the specified key vault. This operation requires the
     * certificates/manageissuers/getissuers permission.
     *
     * @param nextPageLink The NextLink from the previous successful call to List operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the PagedList&lt;CertificateIssuerItem&gt; object
     */
    public Observable<Page<CertificateIssuerItem>> getCertificateIssuersNextAsync(final String nextPageLink) {
        // Unwrap the service response envelope, keeping only the page body.
        return getCertificateIssuersNextWithServiceResponseAsync(nextPageLink)
                .map(new Func1<ServiceResponse<Page<CertificateIssuerItem>>, Page<CertificateIssuerItem>>() {
                    @Override
                    public Page<CertificateIssuerItem> call(ServiceResponse<Page<CertificateIssuerItem>> response) {
                        return response.body();
                    }
                });
    }
}
public class CompositeType { /** * Returns the flat field descriptors for the given field expression . * @ param fieldExpression The field expression for which the flat field descriptors are computed . * @ return The list of descriptors for the flat fields which are specified by the field expression . */ @ PublicEvolving public List < FlatFieldDescriptor > getFlatFields ( String fieldExpression ) { } }
List < FlatFieldDescriptor > result = new ArrayList < FlatFieldDescriptor > ( ) ; this . getFlatFields ( fieldExpression , 0 , result ) ; return result ;
public class ScalarFieldUpdater {
    /**
     * Add all Terms columns needed for our scalar field: tokenizes the value,
     * indexes the resulting terms, and records the term and field references.
     *
     * @param fieldValue the raw field value to tokenize and index
     */
    private void addTermColumns(String fieldValue) {
        Set<String> termSet = tokenize(fieldValue);
        indexTerms(termSet);
        addFieldTermReferences(termSet);
        addFieldReference();
    }
}
public class CPDefinitionOptionValueRelUtil {
    /**
     * Returns the first cp definition option value rel in the ordered set where uuid = &#63;.
     *
     * @param uuid the uuid
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the first matching cp definition option value rel, or <code>null</code>
     *         if a matching cp definition option value rel could not be found
     */
    public static CPDefinitionOptionValueRel fetchByUuid_First(String uuid,
            OrderByComparator<CPDefinitionOptionValueRel> orderByComparator) {
        // Static facade over the persistence layer.
        return getPersistence().fetchByUuid_First(uuid, orderByComparator);
    }
}
public class UpdateTeamMemberRequestMarshaller {
    /**
     * Marshall the given parameter object: each request field is written
     * through the protocol marshaller under its binding.
     *
     * @param updateTeamMemberRequest the request to marshall; must not be null
     * @param protocolMarshaller the marshaller to write into
     * @throws SdkClientException when the request is null or marshalling fails
     */
    public void marshall(UpdateTeamMemberRequest updateTeamMemberRequest, ProtocolMarshaller protocolMarshaller) {
        if (updateTeamMemberRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(updateTeamMemberRequest.getProjectId(), PROJECTID_BINDING);
            protocolMarshaller.marshall(updateTeamMemberRequest.getUserArn(), USERARN_BINDING);
            protocolMarshaller.marshall(updateTeamMemberRequest.getProjectRole(), PROJECTROLE_BINDING);
            protocolMarshaller.marshall(updateTeamMemberRequest.getRemoteAccessAllowed(), REMOTEACCESSALLOWED_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception, keeping the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class DeleteInstanceSnapshotRequestMarshaller {
    /**
     * Marshall the given parameter object: the snapshot name is written
     * through the protocol marshaller under its binding.
     *
     * @param deleteInstanceSnapshotRequest the request to marshall; must not be null
     * @param protocolMarshaller the marshaller to write into
     * @throws SdkClientException when the request is null or marshalling fails
     */
    public void marshall(DeleteInstanceSnapshotRequest deleteInstanceSnapshotRequest, ProtocolMarshaller protocolMarshaller) {
        if (deleteInstanceSnapshotRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(deleteInstanceSnapshotRequest.getInstanceSnapshotName(), INSTANCESNAPSHOTNAME_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception, keeping the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class AtomContainer {
    /**
     * {@inheritDoc}
     * Returns a lightweight Iterable view; each call to iterator() creates a
     * fresh AtomIterator over this container's atoms.
     */
    @Override
    public Iterable<IAtom> atoms() {
        return new Iterable<IAtom>() {
            @Override
            public Iterator<IAtom> iterator() {
                return new AtomIterator();
            }
        };
    }
}
public class GreenPepperXmlRpcClient {
    /**
     * {@inheritDoc}
     * Marshals the runner into an XML-RPC parameter vector and invokes the
     * createRunner remote method.
     *
     * @param runner the runner definition to create on the server
     * @param identifier the server/session identifier to execute against
     * @throws GreenPepperServerException if the remote call fails
     */
    @SuppressWarnings("unchecked")
    public void createRunner(Runner runner, String identifier) throws GreenPepperServerException {
        // Raw Vector is required by the XML-RPC transport, hence the suppression.
        Vector params = CollectionUtil.toVector(runner.marshallize());
        log.debug("Creating runner: " + runner.getName());
        execute(XmlRpcMethodName.createRunner, params, identifier);
    }
}
public class ManagedExecutorServiceImpl {
    /**
     * {@inheritDoc}
     * Wraps each task with life-cycle callbacks and delegates to a policy
     * executor chosen so the last task may run on the caller's thread.
     */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
        // createCallbacks may replace the task collection (wrapping each task),
        // so both the tasks and their callbacks come from the returned entry.
        Entry<Collection<? extends Callable<T>>, TaskLifeCycleCallback[]> entry = createCallbacks(tasks);
        tasks = entry.getKey();
        TaskLifeCycleCallback[] callbacks = entry.getValue();
        // Policy executor can optimize the last task in the list to run on the
        // current thread if we submit under the same executor,
        PolicyExecutor executor = callbacks.length > 0 ? callbacks[callbacks.length - 1].policyExecutor : policyExecutor;
        return (List) executor.invokeAll(tasks, callbacks);
    }
}
public class MicrosoftSQLServerHelper {
    /**
     * This returns a <code>PrintWriter</code> for a specific backend. The order of
     * printwriter lookup is as follows: first, the returned value from the
     * <code>externalhelper.getPrintWriter()</code>, which also can be overwritten by
     * extending the helper<br>
     * then, based on the trace writer (i.e. the Websphere trace setting)<br>
     *
     * @return <CODE>PrintWriter</CODE>
     * @exception ResourceException if something goes wrong.
     */
    @Override
    public PrintWriter getPrintWriter() throws ResourceException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            Tr.entry(this, tc, "getPrintWriter");
        // not synchronizing here since there will be one helper and most likely the
        // setting will happen serially; even if it doesn't, a duplicate writer is
        // harmless because this is only used for tracing.
        if (jdbcTraceWriter == null) {
            // second ctor arg 'true' enables auto-flush on println/printf/format
            jdbcTraceWriter = new PrintWriter(new TraceWriter(jdbcTC), true);
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            Tr.exit(this, tc, "getPrintWriter", jdbcTraceWriter);
        return jdbcTraceWriter;
    }
}
public class PutPipelineDefinitionRequest { /** * The parameter values used with the pipeline . * @ return The parameter values used with the pipeline . */ public java . util . List < ParameterValue > getParameterValues ( ) { } }
if ( parameterValues == null ) { parameterValues = new com . amazonaws . internal . SdkInternalList < ParameterValue > ( ) ; } return parameterValues ;
public class LoggerUtils { /** * 打印日志 * @ param logLevel 日志级别 * @ param clazz 指定类 * @ param message 消息 * @ param values 格式化参数 * @ since 1.0.9 */ public static void log ( LogLevel logLevel , Class < ? > clazz , String message , String ... values ) { } }
switch ( logLevel ) { case WARN : warn ( clazz , message , values ) ; break ; case ERROR : error ( clazz , message , values ) ; break ; case FATAL : fatal ( clazz , message , values ) ; break ; default : info ( clazz , message , values ) ; break ; }
public class ValidateSecurityProfileBehaviorsResult { /** * The list of any errors found in the behaviors . * @ param validationErrors * The list of any errors found in the behaviors . */ public void setValidationErrors ( java . util . Collection < ValidationError > validationErrors ) { } }
if ( validationErrors == null ) { this . validationErrors = null ; return ; } this . validationErrors = new java . util . ArrayList < ValidationError > ( validationErrors ) ;
public class KunderaCriteriaBuilder {
    /**
     * (non-Javadoc)
     *
     * @see javax.persistence.criteria.CriteriaBuilder#lt(javax.persistence.criteria
     *      .Expression, javax.persistence.criteria.Expression)
     */
    @Override
    public Predicate lt(Expression<? extends Number> arg0, Expression<? extends Number> arg1) {
        // TODO Auto-generated method stub
        // NOTE(review): unimplemented — always returns null, so any caller using
        // this predicate will likely fail with an NPE downstream. Needs a real
        // less-than Predicate implementation.
        return null;
    }
}
public class LoadBalancerProbesInner {
    /**
     * Gets load balancer probe.
     *
     * @param resourceGroupName The name of the resource group.
     * @param loadBalancerName The name of the load balancer.
     * @param probeName The name of the probe.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<ProbeInner> getAsync(String resourceGroupName, String loadBalancerName, String probeName, final ServiceCallback<ProbeInner> serviceCallback) {
        // Delegate to the reactive overload and adapt its response stream to a
        // ServiceFuture that notifies the supplied callback on completion.
        return ServiceFuture.fromResponse(getWithServiceResponseAsync(resourceGroupName, loadBalancerName, probeName), serviceCallback);
    }
}
public class PullerInternal {
    /**
     * This is compatible with CouchDB, but it only works for revs of generation 1 without attachments.
     */
    protected void pullBulkWithAllDocs(final List<RevisionInternal> bulkRevs) {
        // http://wiki.apache.org/couchdb/HTTP_Bulk_Document_API
        ++httpConnectionCount;
        // Track which requested revisions are still unresolved; matched ones are
        // removed inside the callback, leftovers are fetched individually.
        final RevisionList remainingRevs = new RevisionList(bulkRevs);
        Collection<String> keys = CollectionUtils.transform(bulkRevs, new CollectionUtils.Functor<RevisionInternal, String>() {
            public String invoke(RevisionInternal rev) {
                return rev.getDocID();
            }
        });
        Map<String, Object> body = new HashMap<String, Object>();
        body.put("keys", keys);
        Future future = sendAsyncRequest("POST", "_all_docs?include_docs=true", body, new RemoteRequestCompletion() {
            public void onCompletion(RemoteRequest remoteRequest, Response httpResponse, Object result, Throwable e) {
                Map<String, Object> res = (Map<String, Object>) result;
                if (e != null) {
                    setError(e);
                    // TODO: There is a known bug caused by the line below, which is
                    // TODO: causing testMockSinglePullCouchDb to fail when running on a Nexus5 device.
                    // TODO: (the batching behavior is different in that case)
                    // TODO: See https://github.com/couchbase/couchbase-lite-java-core/issues/271
                    // completedChangesCount.addAndGet(bulkRevs.size());
                } else {
                    // Process the resulting rows' documents.
                    // We only add a document if it doesn't have attachments, and if its
                    // revID matches the one we asked for.
                    List<Map<String, Object>> rows = (List<Map<String, Object>>) res.get("rows");
                    Log.v(TAG, "%s checking %d bulk-fetched remote revisions", this, rows.size());
                    for (Map<String, Object> row : rows) {
                        Map<String, Object> doc = (Map<String, Object>) row.get("doc");
                        if (doc != null && doc.get("_attachments") == null) {
                            RevisionInternal rev = new RevisionInternal(doc);
                            RevisionInternal removedRev = remainingRevs.removeAndReturnRev(rev);
                            if (removedRev != null) {
                                // Carry over the sequence from the originally requested
                                // revision before queueing the downloaded one.
                                rev.setSequence(removedRev.getSequence());
                                queueDownloadedRevision(rev);
                            }
                        } else {
                            // Row did not yield a usable document; if it reports an
                            // error, fail the matching pending revision now.
                            Status status = statusFromBulkDocsResponseItem(row);
                            if (status.isError() && row.containsKey("key") && row.get("key") != null) {
                                RevisionInternal rev = remainingRevs.revWithDocId((String) row.get("key"));
                                if (rev != null) {
                                    remainingRevs.remove(rev);
                                    revisionFailed(rev, new CouchbaseLiteException(status));
                                }
                            }
                        }
                    }
                }
                // Any leftover revisions that didn't get matched will be fetched individually:
                if (remainingRevs.size() > 0) {
                    Log.v(TAG, "%s bulk-fetch didn't work for %d of %d revs; getting individually", this, remainingRevs.size(), bulkRevs.size());
                    for (RevisionInternal rev : remainingRevs) {
                        queueRemoteRevision(rev);
                    }
                    pullRemoteRevisions();
                }
                --httpConnectionCount;
                // Start another task if there are still revisions waiting to be pulled:
                pullRemoteRevisions();
            }
        });
        pendingFutures.add(future);
    }
}
public class StandardDdlParser {
    /**
     * Method which extracts the table element string from a CREATE TABLE statement.
     *
     * @param tokens the {@link DdlTokenStream} representing the tokenized DDL content; may not be null
     * @param useTerminator if true, consume tokens up to the statement terminator;
     *        otherwise consume a parenthesized element list
     * @return the parsed table elements String.
     * @throws ParsingException
     */
    protected String getTableElementsString(DdlTokenStream tokens, boolean useTerminator) throws ParsingException {
        assert tokens != null;
        StringBuilder sb = new StringBuilder(100);
        if (useTerminator) {
            // Simple mode: gather everything up to the statement terminator.
            while (!isTerminator(tokens)) {
                sb.append(SPACE).append(tokens.consume());
            }
        } else {
            // Assume we start with open parenthesis '(', then we can count on walking through ALL tokens until we find the close
            // parenthesis ')'. If there are intermediate parenthesis, we can count on them being pairs.
            tokens.consume(L_PAREN); // EXPECTED
            // iParen tracks nesting depth of parentheses seen AFTER the opening one.
            int iParen = 0;
            while (tokens.hasNext()) {
                if (tokens.matches(L_PAREN)) {
                    // Nested open paren: bump depth, then fall through so the
                    // token itself is appended below.
                    iParen++;
                } else if (tokens.matches(R_PAREN)) {
                    if (iParen == 0) {
                        // Matching close of the outer list: consume it and stop.
                        tokens.consume(R_PAREN);
                        break;
                    }
                    // Close of a nested pair: drop depth and append it below.
                    iParen--;
                }
                if (isComment(tokens)) {
                    // Comments are consumed but not included in the result.
                    tokens.consume();
                } else {
                    sb.append(SPACE).append(tokens.consume());
                }
            }
        }
        return sb.toString();
    }
}
public class SoyGeneralOptions {
    /**
     * Sets the file containing compile-time globals.
     *
     * <p>Each line of the file should have the format
     * <pre>
     *     &lt;global_name&gt; = &lt;primitive_data&gt;
     * </pre>
     * where primitive_data is a valid Soy expression literal for a primitive type (null, boolean,
     * integer, float, or string). Empty lines and lines beginning with "//" are ignored. The file
     * should be encoded in UTF-8.
     *
     * <p>If you need to generate a file in this format from Java, consider using the utility
     * {@code SoyUtils.generateCompileTimeGlobalsFile()}.
     *
     * @param compileTimeGlobalsFile The file containing compile-time globals.
     * @return this, for chaining.
     * @throws IOException If there is an error reading the compile-time globals file.
     */
    public SoyGeneralOptions setCompileTimeGlobals(File compileTimeGlobalsFile) throws IOException {
        // Read the file as UTF-8, parse name=value lines, and store the result.
        setCompileTimeGlobalsInternal(SoyUtils.parseCompileTimeGlobals(Files.asCharSource(compileTimeGlobalsFile, UTF_8)));
        return this;
    }
}
public class AWSCertificateManagerClient { /** * Resends the email that requests domain ownership validation . The domain owner or an authorized representative * must approve the ACM certificate before it can be issued . The certificate can be approved by clicking a link in * the mail to navigate to the Amazon certificate approval website and then clicking < b > I Approve < / b > . However , the * validation email can be blocked by spam filters . Therefore , if you do not receive the original mail , you can * request that the mail be resent within 72 hours of requesting the ACM certificate . If more than 72 hours have * elapsed since your original request or since your last attempt to resend validation mail , you must request a new * certificate . For more information about setting up your contact email addresses , see < a * href = " https : / / docs . aws . amazon . com / acm / latest / userguide / setup - email . html " > Configure Email for your Domain < / a > . * @ param resendValidationEmailRequest * @ return Result of the ResendValidationEmail operation returned by the service . * @ throws ResourceNotFoundException * The specified certificate cannot be found in the caller ' s account or the caller ' s account cannot be * found . * @ throws InvalidStateException * Processing has reached an invalid state . * @ throws InvalidArnException * The requested Amazon Resource Name ( ARN ) does not refer to an existing resource . * @ throws InvalidDomainValidationOptionsException * One or more values in the < a > DomainValidationOption < / a > structure is incorrect . * @ sample AWSCertificateManager . ResendValidationEmail * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / acm - 2015-12-08 / ResendValidationEmail " target = " _ top " > AWS API * Documentation < / a > */ @ Override public ResendValidationEmailResult resendValidationEmail ( ResendValidationEmailRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeResendValidationEmail ( request ) ;
public class RemoteJTProxy {
    /**
     * Start corona job tracker on the machine provided by using the corona
     * task tracker API.
     *
     * @param jobConf The job configuration.
     * @param grant The grant that specifies the remote machine.
     * @return A boolean indicating success.
     * @throws InterruptedException
     */
    private boolean startRemoteJT(JobConf jobConf, ResourceGrant grant) throws InterruptedException {
        // Resolve the granted machine's address and connect to its task tracker.
        org.apache.hadoop.corona.InetAddress ttAddr = Utilities.appInfoToAddress(grant.appInfo);
        CoronaTaskTrackerProtocol coronaTT = null;
        try {
            coronaTT = jt.getTaskTrackerClient(ttAddr.getHost(), ttAddr.getPort());
        } catch (IOException e) {
            LOG.error("Error while trying to connect to TT at " + ttAddr.getHost() + ":" + ttAddr.getPort(), e);
            return false;
        }
        LOG.warn("Starting remote JT for " + attemptJobId + " on " + ttAddr.getHost());
        // Get a special map id for the JT task.
        Path systemDir = new Path(jt.getSystemDir());
        LOG.info("startRemoteJT:systemDir " + systemDir.toString());
        String jobFile = CoronaJobInProgress.getJobFile(systemDir, attemptJobId).toString();
        LOG.info("startRemoteJT:jobFile " + jobFile);
        // The remote JT is launched as a map task with an empty split.
        String splitClass = JobClient.RawSplit.class.getName();
        BytesWritable split = new BytesWritable();
        Task jobTask = new MapTask(jobFile, currentAttemptId, currentAttemptId.getTaskID().getId(), splitClass, split, 1, jobConf.getUser());
        CoronaSessionInfo info = new CoronaSessionInfo(jt.getSessionId(), jt.getJobTrackerAddress(), jt.getJobTrackerAddress());
        // Synchronized so the attempt counter and the RPC stay consistent with
        // any concurrent reportRemoteCoronaJobTracker() call.
        synchronized (this) {
            try {
                coronaTT.startCoronaJobTracker(jobTask, info);
            } catch (IOException e) {
                // Increment the attempt so that the older attempt will get an error
                // in reportRemoteCoronaJobTracker().
                incrementAttemptUnprotected();
                LOG.error("Error while performing RPC to TT at " + ttAddr.getHost() + ":" + ttAddr.getPort(), e);
                return false;
            }
        }
        // Now wait for the remote CJT to report its address.
        final long waitStart = System.currentTimeMillis();
        final long timeout = RemoteJTProxy.getRemotJTTimeout(jobConf);
        synchronized (this) {
            // 'client' is set by the callback that receives the remote JT's
            // address; poll with a 1s wait until it appears or we time out.
            while (client == null) {
                LOG.warn("Waiting for remote JT to start on " + ttAddr.getHost());
                this.wait(1000);
                if (client == null && System.currentTimeMillis() - waitStart > timeout) {
                    // Increment the attempt so that the older attempt will get an error
                    // in reportRemoteCoronaJobTracker().
                    incrementAttemptUnprotected();
                    LOG.warn("Could not start remote JT on " + ttAddr.getHost());
                    return false;
                }
            }
        }
        return true;
    }
}
public class MapMaker { /** * Builds a thread - safe map , without on - demand computation of values . This method does not alter * the state of this { @ code MapMaker } instance , so it can be invoked again to create multiple * independent maps . * < p > The bulk operations { @ code putAll } , { @ code equals } , and { @ code clear } are not guaranteed to * be performed atomically on the returned map . Additionally , { @ code size } and { @ code * containsValue } are implemented as bulk read operations , and thus may fail to observe concurrent * writes . * @ return a serializable concurrent map having the requested features */ @ Override public < K , V > ConcurrentMap < K , V > makeMap ( ) { } }
if ( ! useCustomMap ) { return new ConcurrentHashMap < K , V > ( getInitialCapacity ( ) , 0.75f , getConcurrencyLevel ( ) ) ; } return ( nullRemovalCause == null ) ? new MapMakerInternalMap < K , V > ( this ) : new NullConcurrentMap < K , V > ( this ) ;
public class SubjectReference { /** * Formats an IPTC string for this reference using information obtained from * Subject Reference System . * @ param srs * reference subject reference system * @ return IPTC formatted reference */ public String toIPTC ( SubjectReferenceSystem srs ) { } }
StringBuffer b = new StringBuffer ( ) ; b . append ( "IPTC:" ) ; b . append ( getNumber ( ) ) ; b . append ( ":" ) ; if ( getNumber ( ) . endsWith ( "000000" ) ) { b . append ( toIPTCHelper ( srs . getName ( this ) ) ) ; b . append ( "::" ) ; } else if ( getNumber ( ) . endsWith ( "000" ) ) { b . append ( toIPTCHelper ( srs . getName ( srs . get ( getNumber ( ) . substring ( 0 , 2 ) + "000000" ) ) ) ) ; b . append ( ":" ) ; b . append ( toIPTCHelper ( srs . getName ( this ) ) ) ; b . append ( ":" ) ; } else { b . append ( toIPTCHelper ( srs . getName ( srs . get ( getNumber ( ) . substring ( 0 , 2 ) + "000000" ) ) ) ) ; b . append ( ":" ) ; b . append ( toIPTCHelper ( srs . getName ( srs . get ( getNumber ( ) . substring ( 0 , 5 ) + "000" ) ) ) ) ; b . append ( ":" ) ; b . append ( toIPTCHelper ( srs . getName ( this ) ) ) ; } return b . toString ( ) ;
public class SingleThreadStage { /** * Closes the stage . * @ throws Exception */ @ Override public void close ( ) throws Exception { } }
if ( closed . compareAndSet ( false , true ) ) { interrupted . set ( true ) ; thread . interrupt ( ) ; }
public class TableResult { /** * Callback method used while the query is executed . */ public boolean newrow ( String rowdata [ ] ) { } }
if ( rowdata != null ) { if ( maxrows > 0 && nrows >= maxrows ) { atmaxrows = true ; return true ; } rows . addElement ( rowdata ) ; nrows ++ ; } return false ;
public class DateFormat { /** * Creates a { @ link DateFormat } object for the default locale that can be used * to format dates in the calendar system specified by < code > cal < / code > . * @ param cal The calendar system for which a date format is desired . * @ param dateStyle The type of date format desired . This can be * { @ link DateFormat # SHORT } , { @ link DateFormat # MEDIUM } , * etc . */ static final public DateFormat getDateInstance ( Calendar cal , int dateStyle ) { } }
return getDateInstance ( cal , dateStyle , ULocale . getDefault ( Category . FORMAT ) ) ;
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link GenericMetaDataType } { @ code > } * @ param value * Java instance representing xml element ' s value . * @ return * the new instance of { @ link JAXBElement } { @ code < } { @ link GenericMetaDataType } { @ code > } */ @ XmlElementDecl ( namespace = "http://www.opengis.net/gml" , name = "GenericMetaData" , substitutionHeadNamespace = "http://www.opengis.net/gml" , substitutionHeadName = "_MetaData" ) public JAXBElement < GenericMetaDataType > createGenericMetaData ( GenericMetaDataType value ) { } }
return new JAXBElement < GenericMetaDataType > ( _GenericMetaData_QNAME , GenericMetaDataType . class , null , value ) ;
public class WRadioButtonSelectExample { /** * adds a WRadioButtonSelect with LAYOUT _ COLUMN in 1 column simply by not setting the number of columns . This is * superfluous as you should use LAYOUT _ STACKED ( the default ) instead . */ private void addSingleColumnSelectExample ( ) { } }
add ( new WHeading ( HeadingLevel . H3 , "WRadioButtonSelect laid out in a single column" ) ) ; add ( new ExplanatoryText ( "When layout is COLUMN, setting the layoutColumnCount property to one, or forgetting to set it at all (default is " + "one) is a little bit pointless." ) ) ; final WRadioButtonSelect select = new WRadioButtonSelect ( "australian_state" ) ; select . setButtonLayout ( WRadioButtonSelect . LAYOUT_COLUMNS ) ; add ( new WLabel ( "One column" , select ) ) ; add ( select ) ;
public class CassandraDeepJobConfig { /** * { @ inheritDoc } */ @ Override public CassandraDeepJobConfig < T > createTableOnWrite ( Boolean createTableOnWrite ) { } }
this . createTableOnWrite = createTableOnWrite ; this . isWriteConfig = createTableOnWrite ; return this ;
public class Client { /** * Returns a cursor of datapoints specified by series with multiple rollups . * < p > The system default timezone is used for the returned DateTimes . * @ param series The series * @ param interval An interval of time for the query ( start / end datetimes ) * @ param rollup The MultiRollup for the read query . * @ return A Cursor of DataPoints . The cursor . iterator ( ) . next ( ) may throw a { @ link TempoDBException } if an error occurs while making a request . * @ see Cursor * @ see MultiRollup * @ since 1.1.0 */ public Cursor < MultiDataPoint > readMultiRollupDataPoints ( Series series , Interval interval , MultiRollup rollup ) { } }
return readMultiRollupDataPoints ( series , interval , DateTimeZone . getDefault ( ) , rollup , null ) ;
public class StunAttributeFactory { /** * Create a UsernameAttribute . * @ param username * username value * @ return newly created UsernameAttribute */ public static UsernameAttribute createUsernameAttribute ( byte username [ ] ) { } }
UsernameAttribute attribute = new UsernameAttribute ( ) ; attribute . setUsername ( username ) ; return attribute ;
public class CmsJspDeviceSelectorDesktopMobileTablet { /** * Checks if a template context is compatible with this device selector . < p > * @ param templateContext the template context to check * @ return true if the template context is compatible */ protected boolean isTemplateContextCompatible ( CmsTemplateContext templateContext ) { } }
Set < String > contextKeys = new HashSet < String > ( templateContext . getProvider ( ) . getAllContexts ( ) . keySet ( ) ) ; return contextKeys . equals ( new HashSet < String > ( TYPES ) ) ;
public class Postcard {
    /**
     * Inserts a SparseArray of Parcelable values into the mapping of this
     * Bundle, replacing any existing value for the given key. Either key
     * or value may be null.
     *
     * @param key a String, or null
     * @param value a SparseArray of Parcelable objects, or null
     * @return current
     */
    public Postcard withSparseParcelableArray(@Nullable String key, @Nullable SparseArray<? extends Parcelable> value) {
        mBundle.putSparseParcelableArray(key, value);
        // Fluent API: return this so calls can be chained.
        return this;
    }
}
public class vpnsessionaction { /** * Use this API to fetch all the vpnsessionaction resources that are configured on netscaler . */ public static vpnsessionaction [ ] get ( nitro_service service ) throws Exception { } }
vpnsessionaction obj = new vpnsessionaction ( ) ; vpnsessionaction [ ] response = ( vpnsessionaction [ ] ) obj . get_resources ( service ) ; return response ;
public class ClassUtil { /** * loads a class from a String classname * @ param clazz class to load * @ param args * @ return matching Class */ public static Object loadInstance ( Class clazz , Object [ ] args , Object defaultValue ) { } }
if ( args == null || args . length == 0 ) return loadInstance ( clazz , defaultValue ) ; try { Class [ ] cArgs = new Class [ args . length ] ; for ( int i = 0 ; i < args . length ; i ++ ) { if ( args [ i ] == null ) cArgs [ i ] = Object . class ; else cArgs [ i ] = args [ i ] . getClass ( ) ; } Constructor c = clazz . getConstructor ( cArgs ) ; return c . newInstance ( args ) ; } catch ( Throwable t ) { ExceptionUtil . rethrowIfNecessary ( t ) ; return defaultValue ; }
public class AbstractJsonArray { /** * Adds a value to the receiver ' s collection * @ param value the new value */ public void add ( V value ) { } }
values . add ( value ) ; if ( value instanceof JsonEntity ) { ( ( JsonEntity ) value ) . addPropertyChangeListener ( propListener ) ; } firePropertyChange ( "#" + ( values . size ( ) - 1 ) , null , value ) ;