signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class Triple {
    /**
     * Returns a new {@code Triple<R, M, L>} with the end elements swapped:
     * the right element becomes the first, the middle stays in place, and
     * the left becomes the last. This triple is not modified.
     *
     * @return a new instance of {@code Triple<R, M, L>} built from
     *         (right, middle, left)
     */
    @Beta
    public Triple<R, M, L> reversed() {
        return new Triple<>(this.right, this.middle, this.left);
    }
}
public class Resulting {
    /**
     * Callback from InvocationService.ResultListener, invoked when the service
     * delivers a result for a processed request. Forwards the result to
     * {@code requestCompleted} after narrowing it to {@code T}.
     *
     * @param result the raw result delivered by the service; assumed to be an
     *               instance of {@code T} — TODO confirm: the cast below is
     *               unchecked, so an incompatible type only fails later at the
     *               use site with a ClassCastException
     */
    public final void requestProcessed(Object result) {
        // Unchecked: type erasure means the cast cannot be verified here.
        @SuppressWarnings("unchecked")
        T casted = (T) result;
        requestCompleted(casted);
    }
}
public class RestService {
    /**
     * Check if the index name is part of the requested indices or the result of an alias.
     * If the index is the result of an alias, the filters and routing values of the alias
     * are added to the provided {@link SearchRequestBuilder}.
     *
     * @param version              Elasticsearch major version; pre-2.x needs a
     *                             constant-score workaround (see below)
     * @param aliases              alias definitions, keyed by alias name
     * @param searchRequestBuilder the builder to enrich with alias filters/routing
     * @param index                the concrete index being read
     * @param indicesOrAliases     the names the caller explicitly requested
     * @return the (possibly enriched) {@code searchRequestBuilder}
     */
    static SearchRequestBuilder applyAliasMetadata(EsMajorVersion version, Map<String, IndicesAliases.Alias> aliases,
            SearchRequestBuilder searchRequestBuilder, String index, String... indicesOrAliases) {
        // The index itself was requested directly: no alias metadata applies.
        if (QueryUtils.isExplicitlyRequested(index, indicesOrAliases)) {
            return searchRequestBuilder;
        }
        Set<String> routing = new HashSet<String>();
        List<QueryBuilder> aliasFilters = new ArrayList<QueryBuilder>();
        for (IndicesAliases.Alias alias : aliases.values()) {
            if (QueryUtils.isExplicitlyRequested(alias.getName(), indicesOrAliases)) {
                // The alias is explicitly requested
                if (StringUtils.hasLength(alias.getSearchRouting())) {
                    // Search routing is a comma-separated list; collect trimmed values.
                    for (String value : alias.getSearchRouting().split(",")) {
                        routing.add(value.trim());
                    }
                }
                if (alias.getFilter() != null) {
                    try {
                        aliasFilters.add(new RawQueryBuilder(alias.getFilter(), false));
                    } catch (IOException e) {
                        throw new EsHadoopIllegalArgumentException("Failed to parse alias filter: [" + alias.getFilter() + "]");
                    }
                }
            }
        }
        if (aliasFilters.size() > 0) {
            QueryBuilder aliasQuery;
            if (aliasFilters.size() == 1) {
                aliasQuery = aliasFilters.get(0);
            } else {
                // Several matching aliases: a document may satisfy any one of them.
                aliasQuery = new BoolQueryBuilder();
                for (QueryBuilder filter : aliasFilters) {
                    ((BoolQueryBuilder) aliasQuery).should(filter);
                }
            }
            if (searchRequestBuilder.query() == null) {
                searchRequestBuilder.query(aliasQuery);
            } else {
                // Combine the pre-existing query with the alias filter.
                BoolQueryBuilder mainQuery = new BoolQueryBuilder();
                mainQuery.must(searchRequestBuilder.query());
                if (version.after(EsMajorVersion.V_1_X)) {
                    mainQuery.filter(aliasQuery);
                } else {
                    // 1.x has no bool "filter" clause; emulate it with a
                    // zero-boost constant-score wrapper so scoring is unaffected.
                    mainQuery.must(new ConstantScoreQueryBuilder().filter(aliasQuery).boost(0.0f));
                }
                searchRequestBuilder.query(mainQuery);
            }
        }
        if (routing.size() > 0) {
            searchRequestBuilder.routing(StringUtils.concatenate(routing, ","));
        }
        return searchRequestBuilder;
    }
}
public class ManagementLocksInner {
    /**
     * Deletes a management lock at the resource group level.
     * To delete management locks, you must have access to Microsoft.Authorization/*
     * or Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner
     * and User Access Administrator are granted those actions.
     *
     * @param resourceGroupName The name of the resource group containing the lock.
     * @param lockName The name of lock to delete.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceResponse} object if successful.
     */
    public Observable<ServiceResponse<Void>> deleteAtResourceGroupLevelWithServiceResponseAsync(String resourceGroupName, String lockName) {
        // Validate all required inputs eagerly, before issuing the network call.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (lockName == null) {
            throw new IllegalArgumentException("Parameter lockName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (this.client.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
        }
        // Issue the REST call and map the raw HTTP response to a typed
        // ServiceResponse; delegate errors into the Observable stream.
        return service.deleteAtResourceGroupLevel(resourceGroupName, lockName, this.client.subscriptionId(), this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
                @Override
                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
                    try {
                        ServiceResponse<Void> clientResponse = deleteAtResourceGroupLevelDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class AWSStorageGatewayClient { /** * Configures one or more gateway local disks as working storage for a gateway . This operation is only supported in * the stored volume gateway type . This operation is deprecated in cached volume API version 20120630 . Use * < a > AddUploadBuffer < / a > instead . * < note > * Working storage is also referred to as upload buffer . You can also use the < a > AddUploadBuffer < / a > operation to * add upload buffer to a stored volume gateway . * < / note > * In the request , you specify the gateway Amazon Resource Name ( ARN ) to which you want to add working storage , and * one or more disk IDs that you want to configure as working storage . * @ param addWorkingStorageRequest * A JSON object containing one or more of the following fields : < / p > * < ul > * < li > * < a > AddWorkingStorageInput $ DiskIds < / a > * < / li > * @ return Result of the AddWorkingStorage operation returned by the service . * @ throws InvalidGatewayRequestException * An exception occurred because an invalid gateway request was issued to the service . For more information , * see the error and message fields . * @ throws InternalServerErrorException * An internal server error has occurred during the request . For more information , see the error and message * fields . * @ sample AWSStorageGateway . AddWorkingStorage * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / storagegateway - 2013-06-30 / AddWorkingStorage " * target = " _ top " > AWS API Documentation < / a > */ @ Override public AddWorkingStorageResult addWorkingStorage ( AddWorkingStorageRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeAddWorkingStorage ( request ) ;
public class A_CmsToolHandler {
    /**
     * Sets the needed properties from the {@link #ARGS_PROPERTY_DEFINITION}
     * property of the given resource.<p>
     *
     * The property value is a list of {@code name=value} arguments separated by
     * {@code ARGUMENT_SEPARATOR}; recognized argument names are
     * {@code ARG_PATH_NAME}, {@code ARG_CONFIRMATION_NAME} and {@code ARG_PARAM_NAME}.
     *
     * @param cms the cms context
     * @param resourcePath the path to the resource to read the property from
     */
    protected void setParameters(CmsObject cms, String resourcePath) {
        try {
            CmsProperty prop = cms.readPropertyObject(resourcePath, ARGS_PROPERTY_DEFINITION, false);
            if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(prop.getValue())) {
                Map<String, String> argsMap = new HashMap<String, String>();
                Iterator<String> itArgs = CmsStringUtil.splitAsList(prop.getValue(), ARGUMENT_SEPARATOR).iterator();
                while (itArgs.hasNext()) {
                    String arg = "";
                    try {
                        arg = itArgs.next();
                        // Split each entry at the first VALUE_SEPARATOR into key/value.
                        int pos = arg.indexOf(VALUE_SEPARATOR);
                        // NOTE(review): if VALUE_SEPARATOR is missing, pos is -1 and
                        // substring(0, -1) throws — caught and logged just below.
                        argsMap.put(arg.substring(0, pos), arg.substring(pos + 1));
                    } catch (StringIndexOutOfBoundsException e) {
                        LOG.error("sep: " + VALUE_SEPARATOR + "arg: " + arg);
                        throw e;
                    }
                }
                // Apply the recognized arguments, each only when present.
                if (argsMap.get(ARG_PATH_NAME) != null) {
                    setPath(argsMap.get(ARG_PATH_NAME));
                }
                if (argsMap.get(ARG_CONFIRMATION_NAME) != null) {
                    setConfirmationMessage(argsMap.get(ARG_CONFIRMATION_NAME));
                }
                if (argsMap.get(ARG_PARAM_NAME) != null) {
                    setParameterString(argsMap.get(ARG_PARAM_NAME));
                }
            }
        } catch (CmsException e) {
            // should never happen
            if (LOG.isErrorEnabled()) {
                LOG.error(e.getLocalizedMessage(), e);
            }
        }
    }
}
public class DefaultServiceRegistry { /** * Returns the mapping for a given { @ link CouchbaseRequest } . * @ param request the request to check . * @ return the mapping for the request . */ private static ServiceType serviceTypeFor ( final CouchbaseRequest request ) { } }
if ( request instanceof BinaryRequest ) { return ServiceType . BINARY ; } else if ( request instanceof ConfigRequest ) { return ServiceType . CONFIG ; } else if ( request instanceof ViewRequest ) { return ServiceType . VIEW ; } else if ( request instanceof QueryRequest ) { return ServiceType . QUERY ; } else if ( request instanceof SearchRequest ) { return ServiceType . SEARCH ; } else if ( request instanceof AnalyticsRequest ) { return ServiceType . ANALYTICS ; } else { throw new IllegalStateException ( "Unknown Request: " + request ) ; }
public class TextUtil {
    /**
     * Split the given string according to curly brackets and parse each group
     * as a UUID. The brackets delimit the groups of characters.
     * <p>Examples (for the generic string variant):
     * <ul>
     * <li><code>splitBrackets("{a}{b}{cd}")</code> returns <code>["a","b","cd"]</code></li>
     * <li><code>splitBrackets("abcd")</code> returns <code>["abcd"]</code></li>
     * <li><code>splitBrackets("a{bcd")</code> returns <code>["a","bcd"]</code></li>
     * </ul>
     *
     * @param str is the elements enclosed by brackets.
     * @return the groups parsed as UUIDs
     */
    @Pure
    @Inline(value = "textUtil.splitAsUUIDs('{', '}', $1)", imported = {TextUtil.class})
    public static List<UUID> splitBracketsAsUUIDs(String str) {
        // Delegates to the generic splitter with '{' / '}' as delimiters.
        return splitAsUUIDs('{', '}', str);
    }
}
public class EnvelopesApi {
    /**
     * Gets the Electronic Record and Signature Disclosure associated with the account.
     * Retrieves the disclosure, with html formatting, associated with the account.
     * You can use an optional query string to set the language for the disclosure.
     *
     * @param accountId The external account number (int) or account ID Guid. (required)
     * @param envelopeId The envelopeId Guid of the envelope being accessed. (required)
     * @param recipientId The ID of the recipient being accessed. (required)
     * @return ConsumerDisclosure
     * @throws ApiException if the underlying API call fails
     */
    public ConsumerDisclosure getConsumerDisclosureDefault(String accountId, String envelopeId, String recipientId) throws ApiException {
        // Delegate to the full overload with no extra options (null options object).
        return getConsumerDisclosureDefault(accountId, envelopeId, recipientId, null);
    }
}
public class LZEncoder {
    /**
     * Copies new data into the LZEncoder's buffer.
     *
     * @param buffer source of input bytes; its position advances by the number
     *               of bytes actually consumed
     * @return the number of bytes copied from {@code buffer} into the window
     */
    public int fillWindow(ByteBuffer buffer) {
        assert !finishing;

        // Move the sliding window if needed.
        if (readPos >= bufSize - keepSizeAfter)
            moveWindow();

        // Try to fill the dictionary buffer. If it becomes full,
        // some of the input bytes may be left unused.
        int len = buffer.remaining();
        if (len > bufSize - writePos)
            len = bufSize - writePos;

        buffer.get(buf, writePos, len);
        writePos += len;

        // Set the new readLimit but only if there's enough data to allow
        // encoding of at least one more byte.
        if (writePos >= keepSizeAfter)
            readLimit = writePos - keepSizeAfter;

        processPendingBytes();

        // Tell the caller how much input we actually copied into
        // the dictionary.
        return len;
    }
}
public class LinuxTaskController {
    /**
     * Launch a task JVM that will run as the owner of the job.
     * This method launches a task JVM by executing a setuid executable that
     * will switch to the user and run the task.
     *
     * @param context carries the JVM environment, working dir and shell state
     * @throws IOException if the task-controller process fails to execute
     */
    @Override
    void launchTaskJVM(TaskController.TaskControllerContext context) throws IOException {
        JvmEnv env = context.env;
        // get the JVM command line.
        String cmdLine = TaskLog.buildCommandLine(env.setup, env.vargs, env.stdout, env.stderr, env.logSize, true);

        StringBuffer sb = new StringBuffer();
        // export out all the environment variable before child command as
        // the setuid/setgid binaries would not be getting, any environmental
        // variables which begin with LD_*.
        for (Entry<String, String> entry : env.env.entrySet()) {
            sb.append("export ");
            sb.append(entry.getKey());
            sb.append("=");
            sb.append(entry.getValue());
            sb.append("\n");
        }
        sb.append(cmdLine);

        // write the command to a file in the
        // task specific cache directory
        writeCommand(sb.toString(), getTaskCacheDirectory(context));

        // Call the taskcontroller with the right parameters.
        List<String> launchTaskJVMArgs = buildLaunchTaskArgs(context);
        ShellCommandExecutor shExec = buildTaskControllerExecutor(TaskCommands.LAUNCH_TASK_JVM, env.conf.getUser(), launchTaskJVMArgs, env.workDir, env.env);
        // Expose the executor on the context so callers can inspect/kill it.
        context.shExec = shExec;
        try {
            shExec.execute();
        } catch (Exception e) {
            // Log diagnostics before propagating; the exit code and captured
            // output are the main clues for setuid-launch failures.
            LOG.warn("Exception thrown while launching task JVM : " + StringUtils.stringifyException(e));
            LOG.warn("Exit code from task is : " + shExec.getExitCode());
            LOG.warn("Output from task-contoller is : " + shExec.getOutput());
            throw new IOException(e);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("output after executing task jvm = " + shExec.getOutput());
        }
    }
}
public class DcpMessageHandler {
    /**
     * Reads server responses and uses them to fulfill promises returned by
     * {@link #sendRequest}. Dispatches other incoming messages to either the
     * data or the control feeds.
     *
     * @param ctx the channel handler context
     * @param msg the inbound message; expected to be a {@code ByteBuf}
     * @throws Exception if downstream handling fails
     */
    @Override
    public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception {
        final ByteBuf message = (ByteBuf) msg;
        final byte magic = message.getByte(0);

        // The majority of messages are likely to be stream requests, not responses.
        if (magic != MessageUtil.MAGIC_RES) {
            handleRequest(ctx, message);
            return;
        }

        // "The current protocol dictates that the server won't start
        // processing the next command until the current command is completely
        // processed (due to the lack of barriers or any other primitives to
        // enforce execution order). The protocol defines some "quiet commands"
        // which won't send responses in certain cases (success for mutations,
        // not found for gets etc). The client would know that such commands
        // was executed when it encounters the response for the next command
        // requested issued by the client."
        // -- https://github.com/couchbase/memcached/blob/master/docs/BinaryProtocol.md#introduction-1
        //
        // The DCP client does not send any "quiet commands", so we assume
        // a 1:1 relationship between requests and responses, and FIFO order.
        final OutstandingRequest request = outstandingRequests.poll();
        if (request == null || MessageUtil.getOpaque(message) != request.opaque) {
            // Should never happen so long as all requests are made via sendRequest()
            // and successfully written to the channel.
            LOGGER.error("Unexpected response with opaque {} (expected {}); closing connection",
                MessageUtil.getOpaque(message), request == null ? "none" : request.opaque);
            ctx.close();
            return;
        }

        // Fulfill the promise handed out by sendRequest with this response.
        request.promise.setSuccess(new DcpResponse(message));
    }
}
public class LocationAttributes { /** * Returns the { @ link Location } of an element ( DOM flavor ) . * @ param elem * the element that holds the location information * @ param description * a description for the location ( if < code > null < / code > , the * element ' s name is used ) * @ return a { @ link Location } object */ public static Location getLocation ( Element elem , String description ) { } }
Attr srcAttr = elem . getAttributeNodeNS ( URI , SRC_ATTR ) ; if ( srcAttr == null ) { return LocationImpl . UNKNOWN ; } return new LocationImpl ( description == null ? elem . getNodeName ( ) : description , srcAttr . getValue ( ) , getLine ( elem ) , getColumn ( elem ) ) ;
public class CacheStats { /** * Returns a new { @ code CacheStats } representing the sum of this { @ code CacheStats } and * { @ code other } . * @ param other the statistics to add with * @ return the sum of the statistics */ @ NonNull public CacheStats plus ( @ NonNull CacheStats other ) { } }
return new CacheStats ( hitCount + other . hitCount , missCount + other . missCount , loadSuccessCount + other . loadSuccessCount , loadFailureCount + other . loadFailureCount , totalLoadTime + other . totalLoadTime , evictionCount + other . evictionCount , evictionWeight + other . evictionWeight ) ;
public class ChoiceFormat {
    /**
     * After reading an object from the input stream, do a simple verification
     * to maintain class invariants: the limits and formats arrays must have
     * equal length.
     *
     * @param in the stream to read from
     * @throws InvalidObjectException if the object read from the stream is invalid.
     * @throws IOException if reading the stream fails
     * @throws ClassNotFoundException if a serialized class cannot be resolved
     */
    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        // Invariant: each limit pairs with exactly one format.
        if (choiceLimits.length != choiceFormats.length) {
            throw new InvalidObjectException("limits and format arrays of different length.");
        }
    }
}
public class PrcManufactureSave {
    /**
     * <p>Make use material reverse: creates a negating UseMaterialEntry for the
     * reversed manufacture, restores the manufacturing process remainder, and
     * cross-links both entries via their reversed IDs and descriptions.</p>
     *
     * @param pAddParam additional param
     * @param pEntity Manufacture
     * @throws Exception - an exception
     */
    public final void useMaterialReverse(final Map<String, Object> pAddParam, final Manufacture pEntity) throws Exception {
        // reverse draw product in process from manufacturing process:
        // load the original entry that the reversed manufacture drew from.
        UseMaterialEntry dies = getSrvOrm().retrieveEntityWithConditions(pAddParam, UseMaterialEntry.class,
            " where DRAWINGTYPE=" + pEntity.constTypeCode() + " and DRAWINGID=" + pEntity.getReversedId());
        // Build the reversing entry: same source, negated totals/quantity.
        UseMaterialEntry die = new UseMaterialEntry();
        die.setItsDate(pEntity.getItsDate());
        die.setIdDatabaseBirth(getSrvOrm().getIdDatabase());
        die.setSourceType(dies.getSourceType());
        die.setSourceId(dies.getSourceId());
        die.setDrawingType(pEntity.constTypeCode());
        die.setDrawingId(pEntity.getItsId());
        die.setDrawingOwnerId(null);
        die.setDrawingOwnerType(null);
        die.setSourceOwnerId(dies.getSourceOwnerId());
        die.setSourceOwnerType(dies.getSourceOwnerType());
        die.setItsCost(dies.getItsCost());
        die.setItsTotal(dies.getItsTotal().negate());
        die.setUnitOfMeasure(dies.getUnitOfMeasure());
        die.setInvItem(dies.getInvItem());
        die.setItsQuantity(dies.getItsQuantity().negate());
        // NOTE(review): die.getItsId() is presumably still null here (the entity
        // is inserted only below), so this sets reversedId to null — confirm
        // whether dies.getItsId() was intended instead.
        die.setReversedId(die.getItsId());
        String langDef = (String) pAddParam.get("langDef");
        DateFormat dateFormat = DateFormat.getDateTimeInstance(DateFormat.MEDIUM, DateFormat.SHORT, new Locale(langDef));
        // Description references the original entry being reversed.
        die.setDescription(makeDescription(pEntity, langDef, dateFormat) + " " + getSrvI18n().getMsg("reversed_entry_n", langDef) + dies.getIdDatabaseBirth() + "-" + dies.getItsId());
        getSrvOrm().insertEntity(pAddParam, die);
        die.setIsNew(false);
        // Give the drawn quantity back to the manufacturing process remainder.
        pEntity.getManufacturingProcess().setTheRest(pEntity.getManufacturingProcess().getTheRest().add(dies.getItsQuantity()));
        getSrvOrm().updateEntity(pAddParam, pEntity.getManufacturingProcess());
        // Back-link the original entry to its reversing entry.
        dies.setReversedId(die.getItsId());
        dies.setDescription(dies.getDescription() + " " + getSrvI18n().
            getMsg("reversing_entry_n", langDef) + die.getIdDatabaseBirth() + "-" + die.getItsId()); // local
        getSrvOrm().updateEntity(pAddParam, dies);
    }
}
public class RocksDBStore { /** * Creates database if it doesn ' t exist . */ protected RocksDB openDatabase ( String location , Options options ) throws IOException , RocksDBException { } }
File dir = new File ( location ) ; dir . mkdirs ( ) ; return RocksDB . open ( options , location ) ;
public class AuthenticationProviderFacade {
    /**
     * Logs a warning that this authentication provider is being skipped due to
     * an internal error. If debug-level logging is enabled, the full details
     * of the internal error are also logged.
     *
     * @param e
     *     The internal error that occurred which has resulted in this
     *     authentication provider being skipped.
     */
    private void warnAuthProviderSkipped(Throwable e) {
        // Warn with only the message; the stack trace is reserved for debug level
        // to keep production logs readable.
        logger.warn("The \"{}\" authentication provider has been skipped due "
            + "to an internal error. If this is unexpected or you are the "
            + "developer of this authentication provider, you may wish to "
            + "enable debug-level logging: {}", getIdentifier(), e.getMessage());
        logger.debug("Authentication provider skipped due to an internal failure.", e);
    }
}
public class StencilEngine { /** * Renders template loaded from the given path with provided parameters to * the given character stream . * @ param path Path to load template from * @ param parameters Parameters to pass to template * @ param out Character stream to write to * @ param extraGlobalScopes Any extra global scopes to make available * @ throws IOException * @ throws ParseException */ public void render ( String path , Map < String , Object > parameters , Writer out , GlobalScope ... extraGlobalScopes ) throws IOException , ParseException { } }
render ( load ( path ) , parameters , out ) ;
public class OutputPropertyUtils {
    /**
     * Searches for the int property with the specified key in the property list.
     * If the key is not found in this property list, the default property list,
     * and its defaults, recursively, are then checked (via
     * {@link Properties#getProperty}).
     *
     * @param key the property key.
     * @param props the list of properties that will be searched.
     * @return the value in this property list as an int value, or 0
     *         if the property is missing or not a valid number.
     */
    public static int getIntProperty(String key, Properties props) {
        String s = props.getProperty(key);
        if (null == s) {
            return 0;
        }
        try {
            // Trim to tolerate stray whitespace around the numeric value.
            return Integer.parseInt(s.trim());
        } catch (NumberFormatException e) {
            // Fix: the documented contract promises 0 for non-numeric values,
            // but the original let parseInt throw. (The original javadoc also
            // carried copy-pasted text from a boolean-property helper.)
            return 0;
        }
    }
}
public class MMElementRule {
    /**
     * Get the map linking each element symbol to its maximum allowed number of
     * occurrences, for analysis with the DNP database and mass lower than 3000 Da.
     *
     * @return a HashMap from element symbol to maximum occurrence count
     */
    private HashMap<String, Integer> getDNP_3000() {
        final HashMap<String, Integer> occurrenceLimits = new HashMap<String, Integer>();
        // Element symbols and their DNP (<3000 Da) maxima, kept in parallel order.
        final String[] symbols = {"C", "H", "N", "O", "P", "S", "F", "Cl", "Br"};
        final int[] maxima = {162, 208, 48, 78, 6, 9, 16, 11, 8};
        for (int i = 0; i < symbols.length; i++) {
            occurrenceLimits.put(symbols[i], maxima[i]);
        }
        return occurrenceLimits;
    }
}
public class TopologyUtils { /** * Parses the value in Config . TOPOLOGY _ COMPONENT _ RAMMAP , * and returns a map containing only component specified . * Returns a empty map if the Config is not set * @ param topology the topology def * @ return a map ( componentName - & gt ; RAM required ) */ public static Map < String , ByteAmount > getComponentRamMapConfig ( TopologyAPI . Topology topology ) throws RuntimeException { } }
Map < String , String > configMap = getComponentConfigMap ( topology , Config . TOPOLOGY_COMPONENT_RAMMAP ) ; Map < String , ByteAmount > ramMap = new HashMap < > ( ) ; for ( Map . Entry < String , String > entry : configMap . entrySet ( ) ) { long requiredRam = Long . parseLong ( entry . getValue ( ) ) ; ramMap . put ( entry . getKey ( ) , ByteAmount . fromBytes ( requiredRam ) ) ; } return ramMap ;
public class EmbeddedJmxTrans { /** * return a String and not an embedded - jmxtrans class / enum to be portable and usable in JMX tools such as VisualVM */ @ Nullable public String getState ( ) { } }
lifecycleLock . readLock ( ) . lock ( ) ; try { return state == null ? null : state . toString ( ) ; } finally { lifecycleLock . readLock ( ) . unlock ( ) ; }
public class CQJDBCStorageConnection {
    /**
     * Create NodeData from TempNodeData content.
     *
     * @param tempData  the temporary node data to convert
     * @param parentPath path of the parent node
     * @param parentACL  access control list inherited from the parent
     * @return the persisted node data built from the temporary fields
     * @throws RepositoryException on repository-level failures
     * @throws SQLException on database access failures
     * @throws IOException on stream access failures
     */
    protected PersistedNodeData loadNodeFromTemporaryNodeData(TempNodeData tempData, QPath parentPath, AccessControlList parentACL) throws RepositoryException, SQLException, IOException {
        // Pure delegation: unpack the temp-data fields into the record loader.
        return loadNodeRecord(parentPath, tempData.cname, tempData.cid, tempData.cpid, tempData.cindex, tempData.cversion, tempData.cnordernumb, tempData.properties, parentACL);
    }
}
public class Member { /** * Apply a hedge to the member . * @ param hedge is the hedge * @ return the hedged member */ public Member applyHedge ( Hedge hedge ) { } }
String newMemberName = hedge . getName ( ) + "$" + name ; Member member = new Member ( newMemberName , functionCall , hedge ) ; this . points . stream ( ) . forEach ( point -> member . points . add ( point ) ) ; return member ;
public class SnowflakeS3Client {
    /**
     * Download a file from remote storage.
     *
     * @param connection connection object
     * @param command command to download file
     * @param parallelism number of threads for parallel downloading
     * @param remoteStorageLocation remote storage location, i.e. bucket for s3
     * @param stageFilePath stage file path
     * @param stageRegion region name where the stage persists
     * @return input file stream (decrypted when client-side encryption applies)
     * @throws SnowflakeSQLException when download failure
     */
    @Override
    public InputStream downloadToStream(SFSession connection, String command, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion) throws SnowflakeSQLException {
        int retryCount = 0;
        // Retry loop: handleS3Exception either swallows the error (allowing a
        // retry) or rethrows; the loop exits on success or exhausted retries.
        do {
            try {
                S3Object file = amazonClient.getObject(remoteStorageLocation, stageFilePath);
                ObjectMetadata meta = amazonClient.getObjectMetadata(remoteStorageLocation, stageFilePath);
                InputStream stream = file.getObjectContent();
                Map<String, String> metaMap = meta.getUserMetadata();
                // Encryption key and IV travel in the object's user metadata.
                String key = metaMap.get(AMZ_KEY);
                String iv = metaMap.get(AMZ_IV);
                // NOTE(review): decryption is applied only when the key size is
                // < 256 — presumably larger keys mean server-side handling; confirm.
                if (this.isEncrypting() && this.getEncryptionKeySize() < 256) {
                    if (key == null || iv == null) {
                        throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "File metadata incomplete");
                    }
                    try {
                        return EncryptionProvider.decryptStream(stream, key, iv, encMat);
                    } catch (Exception ex) {
                        logger.error("Error in decrypting file", ex);
                        throw ex;
                    }
                } else {
                    return stream;
                }
            } catch (Exception ex) {
                // Decides retry vs. rethrow based on the error and retry count.
                handleS3Exception(ex, ++retryCount, "download", connection, command, this);
            }
        } while (retryCount <= getMaxRetries());
        // Defensive: should be unreachable — handleS3Exception throws on exhaustion.
        throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Unexpected: download unsuccessful without exception!");
    }
}
public class VoiceApi { /** * Release a call * Release the specified call . * @ param id The connection ID of the call . ( required ) * @ param releaseData Request parameters . ( optional ) * @ return ApiSuccessResponse * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiSuccessResponse release ( String id , ReleaseData releaseData ) throws ApiException { } }
ApiResponse < ApiSuccessResponse > resp = releaseWithHttpInfo ( id , releaseData ) ; return resp . getData ( ) ;
public class PGPUtils {
    /**
     * Extracts the PGP private key from an encoded stream.
     *
     * @param keyStream stream providing the encoded private key
     * @param keyId id of the secret key to extract
     * @param password passphrase for the secret key
     * @return the private key object, or {@code null} if no secret key with
     *         the given id exists in the stream
     * @throws IOException if there is an error reading from the stream
     * @throws PGPException if the secret key cannot be extracted
     */
    protected static PGPPrivateKey findSecretKey(InputStream keyStream, long keyId, char[] password) throws Exception {
        // Parse the (possibly armored) stream into a key ring collection.
        PGPSecretKeyRingCollection keyRings = new PGPSecretKeyRingCollection(PGPUtil.getDecoderStream(keyStream), new BcKeyFingerprintCalculator());
        PGPSecretKey secretKey = keyRings.getSecretKey(keyId);
        if (secretKey == null) {
            return null;
        }
        // Build a passphrase-based decryptor and unlock the private key material.
        PBESecretKeyDecryptor decryptor = new JcePBESecretKeyDecryptorBuilder(new JcaPGPDigestCalculatorProviderBuilder().setProvider(PROVIDER).build()).setProvider(PROVIDER).build(password);
        return secretKey.extractPrivateKey(decryptor);
    }
}
public class Codecs {
    /**
     * The subset {@code Codec} can be used for problems where it is required to
     * find the best <b>variable-sized</b> subset from a given basic set. A
     * typical usage example of the returned {@code Codec} is the Knapsack
     * problem: the genotype is one {@code BitChromosome} whose set bits select
     * the corresponding elements of {@code basicSet}.
     *
     * @param <T> the element type of the basic set
     * @param basicSet the basic set, from where to choose the <i>optimal</i> subset.
     * @return a new codec which can be used for modelling <i>subset</i> problems.
     * @throws NullPointerException if the given {@code basicSet} is
     *         {@code null}; {@code null} elements are allowed.
     * @throws IllegalArgumentException if the {@code basicSet} size is smaller
     *         than one.
     */
    public static <T> Codec<ISeq<T>, BitGene> ofSubSet(final ISeq<? extends T> basicSet) {
        requireNonNull(basicSet);
        require.positive(basicSet.length());
        // One bit per basic-set element; decoding maps the indexes of the set
        // ("one") bits back onto the corresponding elements.
        return Codec.of(
            Genotype.of(BitChromosome.of(basicSet.length())),
            gt -> gt.getChromosome()
                .as(BitChromosome.class).ones()
                .<T>mapToObj(basicSet)
                .collect(ISeq.toISeq()));
    }
}
public class HttpBody { /** * Sets the given { @ code contents } as the body . * If the { @ code contents } are { @ code null } the call to this method has no effect . * @ param contents the new contents of the body , might be { @ code null } */ public void setBody ( byte [ ] contents ) { } }
if ( contents == null ) { return ; } cachedString = null ; body = new byte [ contents . length ] ; System . arraycopy ( contents , 0 , body , 0 , contents . length ) ; pos = body . length ;
public class ProbeUtils { /** * Gets the accessible object probe type for this class object type . * accessible object probe class object * TYPE _ PRIMITIVE _ LONG = 1 byte , short , int , long * TYPE _ LONG _ NUMBER = 2 Byte , Short , Integer , Long , AtomicInteger , AtomicLong * TYPE _ DOUBLE _ PRIMITIVE = 3 double , float * TYPE _ DOUBLE _ NUMBER = 4 Double , Float * TYPE _ COLLECTION = 5 Collection * TYPE _ MAP = 6 Map * TYPE _ COUNTER = 7 Counter * @ param classType the class object type . * @ return the accessible object probe type . */ static int getType ( Class classType ) { } }
Integer type = TYPES . get ( classType ) ; if ( type != null ) { return type ; } List < Class < ? > > flattenedClasses = new ArrayList < Class < ? > > ( ) ; flatten ( classType , flattenedClasses ) ; for ( Class < ? > clazz : flattenedClasses ) { type = TYPES . get ( clazz ) ; if ( type != null ) { return type ; } } return - 1 ;
public class GsonJsonEngine { protected void setupFieldPolicy ( GsonBuilder builder ) { } }
final JsonFieldNaming naming = option . getFieldNaming ( ) . orElse ( getDefaultFieldNaming ( ) ) ; builder . setFieldNamingPolicy ( deriveFieldNamingPolicy ( naming ) ) ;
public class RaftSessionSequencer { /** * Completes all sequenced responses . */ private void completeResponses ( ) { } }
// Iterate through queued responses and complete as many as possible . ResponseCallback response = responseCallbacks . get ( responseSequence + 1 ) ; while ( response != null ) { // If the response was completed , remove the response callback from the response queue , // increment the response sequence number , and check the next response . if ( completeResponse ( response . response , response . callback ) ) { responseCallbacks . remove ( ++ responseSequence ) ; response = responseCallbacks . get ( responseSequence + 1 ) ; } else { break ; } } // Once we ' ve completed as many responses as possible , if no more operations are outstanding // and events remain in the event queue , complete the events . if ( requestSequence == responseSequence ) { EventCallback eventCallback = eventCallbacks . poll ( ) ; while ( eventCallback != null ) { log . trace ( "Completing {}" , eventCallback . request ) ; eventCallback . run ( ) ; eventIndex = eventCallback . request . eventIndex ( ) ; eventCallback = eventCallbacks . poll ( ) ; } }
public class PreferenceFragment { /** * Returns , whether items should be disabled , depending on the app ' s settings , or not . * @ return True , if items should be disabled , false otherwise . */ private boolean shouldItemsBeDisabled ( ) { } }
SharedPreferences sharedPreferences = PreferenceManager . getDefaultSharedPreferences ( getActivity ( ) ) ; String key = getString ( R . string . disable_items_preference_key ) ; boolean defaultValue = getResources ( ) . getBoolean ( R . bool . disable_items_preference_default_value ) ; return sharedPreferences . getBoolean ( key , defaultValue ) ;
public class ArrayOfDoublesIntersection {
    /**
     * Updates the internal set by intersecting it with the given sketch.
     * <p>
     * On the first call the input sketch's entries are copied wholesale; on
     * subsequent calls only keys present in both the internal sketch and the
     * input survive, with their value arrays merged via the combiner.
     * A null input marks the intersection empty.
     *
     * @param sketchIn Input sketch to intersect with the internal set.
     * @param combiner Method of combining two arrays of double values
     */
    public void update(final ArrayOfDoublesSketch sketchIn, final ArrayOfDoublesCombiner combiner) {
        // Capture and clear the first-call flag up front so every exit path below
        // leaves subsequent calls on the intersection code path.
        final boolean isFirstCall = isFirstCall_;
        isFirstCall_ = false;
        if (sketchIn == null) {
            // Intersecting with nothing yields the empty set.
            isEmpty_ = true;
            sketch_ = null;
            return;
        }
        Util.checkSeedHashes(seedHash_, sketchIn.getSeedHash());
        // Theta of an intersection is the minimum of the operand thetas.
        theta_ = min(theta_, sketchIn.getThetaLong());
        isEmpty_ |= sketchIn.isEmpty();
        if (isEmpty_ || sketchIn.getRetainedEntries() == 0) {
            sketch_ = null;
            return;
        }
        if (isFirstCall) {
            // First operand: copy all retained entries verbatim.
            sketch_ = createSketch(sketchIn.getRetainedEntries(), numValues_, seed_);
            final ArrayOfDoublesSketchIterator it = sketchIn.iterator();
            while (it.next()) {
                sketch_.insert(it.getKey(), it.getValues());
            }
        } else { // not the first call
            // Collect the keys common to both sketches, combining their values.
            final int matchSize = min(sketch_.getRetainedEntries(), sketchIn.getRetainedEntries());
            final long[] matchKeys = new long[matchSize];
            final double[][] matchValues = new double[matchSize][];
            int matchCount = 0;
            final ArrayOfDoublesSketchIterator it = sketchIn.iterator();
            while (it.next()) {
                final double[] values = sketch_.find(it.getKey());
                if (values != null) {
                    matchKeys[matchCount] = it.getKey();
                    matchValues[matchCount] = combiner.combine(values, it.getValues());
                    matchCount++;
                }
            }
            // Rebuild the internal sketch from the surviving entries only.
            sketch_ = null;
            if (matchCount > 0) {
                sketch_ = createSketch(matchCount, numValues_, seed_);
                for (int i = 0; i < matchCount; i++) {
                    sketch_.insert(matchKeys[i], matchValues[i]);
                }
            }
            if (sketch_ != null) {
                sketch_.setThetaLong(theta_);
                sketch_.setNotEmpty();
            }
        }
    }
}
public class Request { /** * Returns the length descriptor for a data packet . Length descriptor consists of a descriptor * length byte whose high nybble is the number of additional bytes in the descriptor and whose * low nybble is the low nybble of the data length value . The additional descriptor bytes ( if * any ) form the rest of the data length value from low byte to high byte . * @ param length The length to be encoded . * @ return The length descriptor . */ private byte [ ] getLengthDescriptor ( int length ) { } }
int c = 0 ; int j = length & 15 ; length = length >> 4 ; int max = 9 ; byte [ ] result = new byte [ max + 1 ] ; while ( length > 0 ) { byte b = ( byte ) ( length & 255 ) ; result [ max - c ] = b ; length = length >> 8 ; c ++ ; } byte b = ( byte ) ( ( c << 4 ) + j ) ; result [ max - c ] = b ; return Arrays . copyOfRange ( result , max - c , max + 1 ) ;
public class AssociationOverviewMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param associationOverview the model object to marshall; must not be null.
     * @param protocolMarshaller the protocol marshaller that receives each field.
     * @throws SdkClientException if the argument is null or any field fails to marshall.
     */
    public void marshall(AssociationOverview associationOverview, ProtocolMarshaller protocolMarshaller) {
        if (associationOverview == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each field with its generated marshalling binding.
            protocolMarshaller.marshall(associationOverview.getStatus(), STATUS_BINDING);
            protocolMarshaller.marshall(associationOverview.getDetailedStatus(), DETAILEDSTATUS_BINDING);
            protocolMarshaller.marshall(associationOverview.getAssociationStatusAggregatedCount(), ASSOCIATIONSTATUSAGGREGATEDCOUNT_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class FpUtils {
    /**
     * Return {@code d} &times; 2<sup>{@code scale_factor}</sup> rounded as if performed
     * by a single correctly rounded floating-point multiply to a member of the
     * double value set. If the exponent of the result is between the
     * {@code double}'s minimum and maximum exponent, the answer is calculated
     * exactly. If the result's exponent would exceed the maximum, an infinity is
     * returned. If the result is subnormal, precision may be lost. When the
     * result is non-NaN, it has the same sign as {@code d}.
     * <p>
     * Special cases: NaN returns NaN; an infinity returns an infinity of the
     * same sign; a zero returns a zero of the same sign.
     *
     * @param d number to be scaled by a power of two.
     * @param scale_factor power of 2 used to scale {@code d}
     * @return {@code d *} 2<sup>{@code scale_factor}</sup>
     * @author Joseph D. Darcy
     */
    public static double scalb(double d, int scale_factor) {
        /*
         * This method does not need to be declared strictfp to compute the same
         * correct result on all platforms. When scaling up, the order of the
         * multiply-store operations does not matter. When scaling down, however,
         * the multiply-stores are sequenced so that no two consecutive
         * multiply-stores can both produce subnormal results: we first multiply
         * by 2^(scale_factor % 512) and then repeatedly by 2^±512, so at most one
         * real rounding error occurs. It would NOT be valid to first multiply by
         * 2^MIN_EXPONENT and then by the remainder, since double rounding on
         * underflow could occur.
         */

        // magnitude of a power of two so large that scaling a finite
        // nonzero value by it would be guaranteed to over or
        // underflow; due to rounding, scaling down takes takes an
        // additional power of two which is reflected here
        final int MAX_SCALE = DoubleConsts.MAX_EXPONENT + -DoubleConsts.MIN_EXPONENT + DoubleConsts.SIGNIFICAND_WIDTH + 1;
        int exp_adjust = 0;
        int scale_increment = 0;
        double exp_delta = Double.NaN;

        // Make sure scaling factor is in a reasonable range
        if (scale_factor < 0) {
            scale_factor = Math.max(scale_factor, -MAX_SCALE);
            scale_increment = -512;
            exp_delta = twoToTheDoubleScaleDown;
        } else {
            scale_factor = Math.min(scale_factor, MAX_SCALE);
            scale_increment = 512;
            exp_delta = twoToTheDoubleScaleUp;
        }

        // Calculate (scale_factor % +/-512), 512 = 2^9, using
        // technique from "Hacker's Delight" section 10-2.
        int t = (scale_factor >> 9 - 1) >>> 32 - 9;
        exp_adjust = ((scale_factor + t) & (512 - 1)) - t;

        // Apply the remainder first, then whole 2^±512 steps (see comment above
        // for why this ordering is required when scaling down).
        d *= powerOfTwoD(exp_adjust);
        scale_factor -= exp_adjust;

        while (scale_factor != 0) {
            d *= exp_delta;
            scale_factor -= scale_increment;
        }
        return d;
    }
}
public class hqlParser {
    /**
     * hql.g:242:1: selectFrom : (s= selectClause )? (f= fromClause )?
     *   -> {$f.tree == null &amp;&amp; filter}? ^( SELECT_FROM FROM["{filter-implied FROM}"] ( selectClause )? )
     *   -> ^( SELECT_FROM ( fromClause )? ( selectClause )? ) ;
     * <p>
     * NOTE: ANTLR-generated parser code — regenerate from the grammar rather
     * than editing by hand.
     */
    public final hqlParser.selectFrom_return selectFrom() throws RecognitionException {
        hqlParser.selectFrom_return retval = new hqlParser.selectFrom_return();
        retval.start = input.LT(1);

        CommonTree root_0 = null;

        ParserRuleReturnScope s = null;
        ParserRuleReturnScope f = null;

        RewriteRuleSubtreeStream stream_selectClause = new RewriteRuleSubtreeStream(adaptor, "rule selectClause");
        RewriteRuleSubtreeStream stream_fromClause = new RewriteRuleSubtreeStream(adaptor, "rule fromClause");

        try {
            // hql.g:243:2: ( (s= selectClause )? (f= fromClause )? -> ... )
            // hql.g:243:5: (s= selectClause )? (f= fromClause )?
            {
                // hql.g:243:5: (s= selectClause )? — optional select clause, predicted by SELECT lookahead.
                int alt15 = 2;
                int LA15_0 = input.LA(1);
                if ((LA15_0 == SELECT)) {
                    alt15 = 1;
                }
                switch (alt15) {
                    case 1:
                        // hql.g:243:6: s= selectClause
                        {
                            pushFollow(FOLLOW_selectClause_in_selectFrom962);
                            s = selectClause();
                            state._fsp--;
                            stream_selectClause.add(s.getTree());
                        }
                        break;
                }

                // hql.g:243:23: (f= fromClause )? — optional from clause, predicted by FROM lookahead.
                int alt16 = 2;
                int LA16_0 = input.LA(1);
                if ((LA16_0 == FROM)) {
                    alt16 = 1;
                }
                switch (alt16) {
                    case 1:
                        // hql.g:243:24: f= fromClause
                        {
                            pushFollow(FOLLOW_fromClause_in_selectFrom969);
                            f = fromClause();
                            state._fsp--;
                            stream_fromClause.add(f.getTree());
                        }
                        break;
                }

                // A missing FROM clause is only legal for filter queries.
                if ((f != null ? ((CommonTree) f.getTree()) : null) == null && !filter) {
                    assert false : "FROM expected (non-filter queries must contain a FROM clause)";
                    throw new RecognitionException(); // ("FROM expected (non-filter queries must contain a FROM clause)");
                }

                // AST REWRITE
                // elements: selectClause, selectClause, fromClause
                // token labels:
                // rule labels: retval
                // token list labels:
                // rule list labels:
                // wildcard labels:
                retval.tree = root_0;
                RewriteRuleSubtreeStream stream_retval = new RewriteRuleSubtreeStream(adaptor, "rule retval", retval != null ? retval.getTree() : null);

                root_0 = (CommonTree) adaptor.nil();
                // 251:3: -> {$f.tree == null && filter}? ^( SELECT_FROM FROM["{filter-implied FROM}"] ( selectClause )? )
                if ((f != null ? ((CommonTree) f.getTree()) : null) == null && filter) {
                    // hql.g:251:35: ^( SELECT_FROM FROM["{filter-implied FROM}"] ( selectClause )? )
                    {
                        CommonTree root_1 = (CommonTree) adaptor.nil();
                        root_1 = (CommonTree) adaptor.becomeRoot(adaptor.create(SELECT_FROM, "SELECT_FROM"), root_1);
                        adaptor.addChild(root_1, adaptor.create(FROM, "{filter-implied FROM}"));
                        // hql.g:251:79: ( selectClause )?
                        if (stream_selectClause.hasNext()) {
                            adaptor.addChild(root_1, stream_selectClause.nextTree());
                        }
                        stream_selectClause.reset();

                        adaptor.addChild(root_0, root_1);
                    }
                } else
                // 252:3: -> ^( SELECT_FROM ( fromClause )? ( selectClause )? )
                {
                    // hql.g:252:6: ^( SELECT_FROM ( fromClause )? ( selectClause )? )
                    {
                        CommonTree root_1 = (CommonTree) adaptor.nil();
                        root_1 = (CommonTree) adaptor.becomeRoot(adaptor.create(SELECT_FROM, "SELECT_FROM"), root_1);
                        // hql.g:252:20: ( fromClause )?
                        if (stream_fromClause.hasNext()) {
                            adaptor.addChild(root_1, stream_fromClause.nextTree());
                        }
                        stream_fromClause.reset();
                        // hql.g:252:32: ( selectClause )?
                        if (stream_selectClause.hasNext()) {
                            adaptor.addChild(root_1, stream_selectClause.nextTree());
                        }
                        stream_selectClause.reset();

                        adaptor.addChild(root_0, root_1);
                    }
                }

                retval.tree = root_0;
            }

            retval.stop = input.LT(-1);

            retval.tree = (CommonTree) adaptor.rulePostProcessing(root_0);
            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
        } catch (RecognitionException re) {
            reportError(re);
            recover(input, re);
            retval.tree = (CommonTree) adaptor.errorNode(input, retval.start, input.LT(-1), re);
        } finally {
            // do for sure before leaving
        }
        return retval;
    }
}
public class AWSOrganizationsClient { /** * Cancels a handshake . Canceling a handshake sets the handshake state to < code > CANCELED < / code > . * This operation can be called only from the account that originated the handshake . The recipient of the handshake * can ' t cancel it , but can use < a > DeclineHandshake < / a > instead . After a handshake is canceled , the recipient can no * longer respond to that handshake . * After you cancel a handshake , it continues to appear in the results of relevant APIs for only 30 days . After that * it is deleted . * @ param cancelHandshakeRequest * @ return Result of the CancelHandshake operation returned by the service . * @ throws AccessDeniedException * You don ' t have permissions to perform the requested operation . The user or role that is making the * request must have at least one IAM permissions policy attached that grants the required permissions . For * more information , see < a href = " https : / / docs . aws . amazon . com / IAM / latest / UserGuide / access . html " > Access * Management < / a > in the < i > IAM User Guide < / i > . * @ throws ConcurrentModificationException * The target of the operation is currently being modified by a different request . Try again later . * @ throws HandshakeNotFoundException * We can ' t find a handshake with the < code > HandshakeId < / code > that you specified . * @ throws InvalidHandshakeTransitionException * You can ' t perform the operation on the handshake in its current state . For example , you can ' t cancel a * handshake that was already accepted or accept a handshake that was already declined . * @ throws HandshakeAlreadyInStateException * The specified handshake is already in the requested state . For example , you can ' t accept a handshake that * was already accepted . * @ throws InvalidInputException * The requested operation failed because you provided invalid values for one or more of the request * parameters . 
This exception includes a reason that contains additional information about the violated * limit : < / p > < note > * Some of the reasons in the following list might not be applicable to this specific API or operation : * < / note > * < ul > * < li > * IMMUTABLE _ POLICY : You specified a policy that is managed by AWS and can ' t be modified . * < / li > * < li > * INPUT _ REQUIRED : You must include a value for all required parameters . * < / li > * < li > * INVALID _ ENUM : You specified a value that isn ' t valid for that parameter . * < / li > * < li > * INVALID _ FULL _ NAME _ TARGET : You specified a full name that contains invalid characters . * < / li > * < li > * INVALID _ LIST _ MEMBER : You provided a list to a parameter that contains at least one invalid value . * < / li > * < li > * INVALID _ PARTY _ TYPE _ TARGET : You specified the wrong type of entity ( account , organization , or email ) as a * party . * < / li > * < li > * INVALID _ PAGINATION _ TOKEN : Get the value for the < code > NextToken < / code > parameter from the response to a * previous call of the operation . * < / li > * < li > * INVALID _ PATTERN : You provided a value that doesn ' t match the required pattern . * < / li > * < li > * INVALID _ PATTERN _ TARGET _ ID : You specified a policy target ID that doesn ' t match the required pattern . * < / li > * < li > * INVALID _ ROLE _ NAME : You provided a role name that isn ' t valid . A role name can ' t begin with the reserved * prefix < code > AWSServiceRoleFor < / code > . * < / li > * < li > * INVALID _ SYNTAX _ ORGANIZATION _ ARN : You specified an invalid Amazon Resource Name ( ARN ) for the * organization . * < / li > * < li > * INVALID _ SYNTAX _ POLICY _ ID : You specified an invalid policy ID . * < / li > * < li > * MAX _ FILTER _ LIMIT _ EXCEEDED : You can specify only one filter parameter for the operation . * < / li > * < li > * MAX _ LENGTH _ EXCEEDED : You provided a string parameter that is longer than allowed . 
* < / li > * < li > * MAX _ VALUE _ EXCEEDED : You provided a numeric parameter that has a larger value than allowed . * < / li > * < li > * MIN _ LENGTH _ EXCEEDED : You provided a string parameter that is shorter than allowed . * < / li > * < li > * MIN _ VALUE _ EXCEEDED : You provided a numeric parameter that has a smaller value than allowed . * < / li > * < li > * MOVING _ ACCOUNT _ BETWEEN _ DIFFERENT _ ROOTS : You can move an account only between entities in the same root . * < / li > * @ throws ServiceException * AWS Organizations can ' t complete your request because of an internal service error . Try again later . * @ throws TooManyRequestsException * You ' ve sent too many requests in too short a period of time . The limit helps protect against * denial - of - service attacks . Try again later . < / p > * For information on limits that affect Organizations , see < a * href = " https : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ reference _ limits . html " > Limits of * AWS Organizations < / a > in the < i > AWS Organizations User Guide < / i > . * @ sample AWSOrganizations . CancelHandshake * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / organizations - 2016-11-28 / CancelHandshake " target = " _ top " > AWS * API Documentation < / a > */ @ Override public CancelHandshakeResult cancelHandshake ( CancelHandshakeRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCancelHandshake ( request ) ;
public class GrailsPrintWriter { /** * Write a single character . * @ param c int specifying a character to be written . */ @ Override public void write ( final int c ) { } }
usageFlag = true ; if ( trouble ) return ; try { getOut ( ) . write ( c ) ; } catch ( IOException e ) { handleIOException ( e ) ; }
public class MonthlyCalendar { /** * Determine whether the given time ( in milliseconds ) is ' included ' by the * Calendar . * Note that this Calendar is only has full - day precision . */ @ Override public boolean isTimeIncluded ( final long timeStamp ) { } }
if ( m_aExcludeAll == true ) { return false ; } // Test the base calendar first . Only if the base calendar not already // excludes the time / date , continue evaluating this calendar instance . if ( super . isTimeIncluded ( timeStamp ) == false ) { return false ; } final Calendar cl = createJavaCalendar ( timeStamp ) ; final int day = cl . get ( Calendar . DAY_OF_MONTH ) ; return ! ( isDayExcluded ( day ) ) ;
public class UCharacterName {
    /**
     * Gets the name of the argument group index.
     * <p>
     * UnicodeData.txt uses ';' as a field separator, so no field can contain ';'
     * as part of its contents. In unames.icu, token[';'] == -1 only if the
     * semicolon is used in the data file — which is iff we have Unicode 1.0
     * names or ISO comments or aliases. So token[';'] may be -1 even though a
     * semicolon can never be part of a name. Equivalent to ICU4C's expandName.
     *
     * @param index of the group name string in byte count
     * @param length of the group name string
     * @param choice of Unicode 1.0 name or the most current name
     * @return name of the group, or null when empty
     */
    public String getGroupName(int index, int length, int choice) {
        // For non-modern name choices, first skip past the modern-name field(s)
        // to reach the requested alternate field — but only when ';' is stored
        // as a literal byte (i.e. alternate fields exist in the data at all).
        if (choice != UCharacterNameChoice.UNICODE_CHAR_NAME && choice != UCharacterNameChoice.EXTENDED_CHAR_NAME) {
            if (';' >= m_tokentable_.length || m_tokentable_[';'] == 0xFFFF) {
                /*
                 * skip the modern name if it is not requested _and_
                 * if the semicolon byte value is a character, not a token number
                 */
                int fieldIndex = choice == UCharacterNameChoice.ISO_COMMENT_ ? 2 : choice;
                do {
                    int oldindex = index;
                    index += UCharacterUtility.skipByteSubString(m_groupstring_, index, length, (byte) ';');
                    length -= (index - oldindex);
                } while (--fieldIndex > 0);
            } else {
                // the semicolon byte is a token number, therefore only modern
                // names are stored in unames.dat and there is no such
                // requested alternate name here
                length = 0;
            }
        }

        // The shared string buffer is a member, so decoding must be serialized.
        synchronized (m_utilStringBuffer_) {
            m_utilStringBuffer_.setLength(0);
            byte b;
            char token;
            for (int i = 0; i < length;) {
                b = m_groupstring_[index + i];
                i++;

                if (b >= m_tokentable_.length) {
                    if (b == ';') {
                        break;
                    }
                    m_utilStringBuffer_.append(b); // implicit letter
                } else {
                    token = m_tokentable_[b & 0x00ff];
                    if (token == 0xFFFE) {
                        // this is a lead byte for a double-byte token
                        token = m_tokentable_[b << 8 | (m_groupstring_[index + i] & 0x00ff)];
                        i++;
                    }
                    if (token == 0xFFFF) {
                        if (b == ';') {
                            // skip the semicolon if we are seeking extended
                            // names and there was no 2.0 name but there
                            // is a 1.0 name.
                            if (m_utilStringBuffer_.length() == 0 && choice == UCharacterNameChoice.EXTENDED_CHAR_NAME) {
                                continue;
                            }
                            break;
                        }
                        // explicit letter
                        m_utilStringBuffer_.append((char) (b & 0x00ff));
                    } else {
                        // write token word
                        UCharacterUtility.getNullTermByteSubString(m_utilStringBuffer_, m_tokenstring_, token);
                    }
                }
            }

            if (m_utilStringBuffer_.length() > 0) {
                return m_utilStringBuffer_.toString();
            }
        }
        // Empty decode result: the requested field does not exist.
        return null;
    }
}
public class MapApi { /** * Associates new value in map placed at path . New nodes are created with same class as map if needed . * @ param map subject original map * @ param path nodes to walk in map path to place new value * @ param value new value * @ return original map */ public static Map assoc ( final Map map , final Object [ ] path , final Object value ) { } }
return assoc ( map , map . getClass ( ) , path , value ) ;
public class JobsInner { /** * Deletes the specified Batch AI job . * @ param resourceGroupName Name of the resource group to which the resource belongs . * @ param jobName The name of the job within the specified resource group . Job names can only contain a combination of alphanumeric characters along with dash ( - ) and underscore ( _ ) . The name must be from 1 through 64 characters long . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent */ public void beginDelete ( String resourceGroupName , String jobName ) { } }
beginDeleteWithServiceResponseAsync ( resourceGroupName , jobName ) . toBlocking ( ) . single ( ) . body ( ) ;
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EEnum getIfcElectricApplianceTypeEnum ( ) { } }
if ( ifcElectricApplianceTypeEnumEEnum == null ) { ifcElectricApplianceTypeEnumEEnum = ( EEnum ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 822 ) ; } return ifcElectricApplianceTypeEnumEEnum ;
public class WindowsProcessFaxClientSpi { /** * Creates the process command from the fax job data . * @ param faxJob * The fax job object * @ return The process command to execute */ @ Override protected String createSuspendFaxJobProcessCommand ( FaxJob faxJob ) { } }
// get arguments String commandArguments = this . createProcessCommandArgumentsForExistingFaxJob ( Fax4jExeConstants . SUSPEND_ACTION_FAX4J_EXE_COMMAND_LINE_ARGUMENT_VALUE . toString ( ) , faxJob ) ; // create command String command = this . createProcessCommand ( commandArguments ) ; return command ;
public class WhileyFileParser {
    /**
     * Attempt to match any of the given token kinds, whilst ignoring whitespace
     * in between. If no kind matches, the index is left unchanged — this is
     * important so we never accidentally gobble up significant indentation.
     *
     * @param terminated Indicates whether the current construct is known to be
     *        terminated. If so, newlines carry no meaning and can be greedily
     *        consumed along with other whitespace; otherwise only same-line
     *        spacing may be skipped.
     * @param kinds the token kinds to try, in order.
     * @return the matched token, or null if none of the kinds matched.
     */
    private Token tryAndMatch(boolean terminated, Token.Kind... kinds) {
        // Skip whitespace (including newlines) only for terminated constructs;
        // otherwise newlines are significant and only line-space is skipped.
        int lookahead;
        if (terminated) {
            lookahead = skipWhiteSpace(index);
        } else {
            lookahead = skipLineSpace(index);
        }

        if (lookahead >= tokens.size()) {
            return null;
        }

        Token candidate = tokens.get(lookahead);
        for (Token.Kind kind : kinds) {
            if (candidate.kind == kind) {
                // Commit: advance past the matched token.
                index = lookahead + 1;
                return candidate;
            }
        }
        // No match — leave index untouched.
        return null;
    }
}
public class Index { /** * Get a query rule * @ param objectID the objectID of the query rule to get * @ param requestOptions Options to pass to this request */ public JSONObject getRule ( String objectID , RequestOptions requestOptions ) throws AlgoliaException { } }
if ( objectID == null || objectID . length ( ) == 0 ) { throw new AlgoliaException ( "Invalid objectID" ) ; } try { return client . getRequest ( "/1/indexes/" + encodedIndexName + "/rules/" + URLEncoder . encode ( objectID , "UTF-8" ) , true , requestOptions ) ; } catch ( UnsupportedEncodingException e ) { throw new RuntimeException ( e ) ; }
public class ContractsApi { /** * Get public contract bids Lists bids on a public auction contract - - - This * route is cached for up to 300 seconds * @ param contractId * ID of a contract ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param page * Which page of results to return ( optional , default to 1) * @ return List & lt ; PublicContractsBidsResponse & gt ; * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public List < PublicContractsBidsResponse > getContractsPublicBidsContractId ( Integer contractId , String datasource , String ifNoneMatch , Integer page ) throws ApiException { } }
ApiResponse < List < PublicContractsBidsResponse > > resp = getContractsPublicBidsContractIdWithHttpInfo ( contractId , datasource , ifNoneMatch , page ) ; return resp . getData ( ) ;
public class IntegrationAccountsInner { /** * Updates an integration account . * @ param resourceGroupName The resource group name . * @ param integrationAccountName The integration account name . * @ param integrationAccount The integration account . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the IntegrationAccountInner object if successful . */ public IntegrationAccountInner update ( String resourceGroupName , String integrationAccountName , IntegrationAccountInner integrationAccount ) { } }
return updateWithServiceResponseAsync ( resourceGroupName , integrationAccountName , integrationAccount ) . toBlocking ( ) . single ( ) . body ( ) ;
public class RetentionPolicyOnTime { /** * Sets the retention duration period of this policy . * @ param duration - the retention duration * @ param timeUnit - the time unit as defined by { @ link TimeUnit } */ public void setRetention ( long duration , TimeUnit timeUnit ) { } }
this . _duration = Math . max ( 1 , duration ) ; this . _timeUnit = timeUnit ; long millis = TimeUnit . MILLISECONDS == timeUnit ? _duration : TimeUnit . MILLISECONDS . convert ( _duration , timeUnit ) ; _timeMillis = Math . max ( millis , MIN_DURATION_MILLIS ) ;
public class JsonWriter { /** * Encodes an object that can be a : * < ul > * < li > primitive types : String , Number , Boolean < / li > * < li > java . util . Date : encoded as datetime ( see { @ link # valueDateTime ( java . util . Date ) } < / li > * < li > { @ code Map < Object , Object > } . Method toString is called for the key . < / li > * < li > Iterable < / li > * < / ul > * @ throws org . sonar . api . utils . text . WriterException on any failure */ public JsonWriter valueObject ( @ Nullable Object value ) { } }
try { if ( value == null ) { stream . nullValue ( ) ; return this ; } valueNonNullObject ( value ) ; return this ; } catch ( IllegalArgumentException e ) { throw e ; } catch ( Exception e ) { throw rethrow ( e ) ; }
public class SSSRFinder { /** * Finds the Set of Relevant Rings . * These rings are contained in every possible SSSR . * The returned set is uniquely defined . * @ return a RingSet containing the Relevant Rings */ public IRingSet findRelevantRings ( ) { } }
if ( atomContainer == null ) { return null ; } IRingSet ringSet = toRingSet ( atomContainer , cycleBasis ( ) . relevantCycles ( ) . keySet ( ) ) ; // atomContainer . setProperty ( CDKConstants . RELEVANT _ RINGS , ringSet ) ; return ringSet ;
public class DefaultSentryClientFactory { /** * Additional tags to send with { @ link io . sentry . event . Event } s . * @ param dsn Sentry server DSN which may contain options . * @ return Additional tags to send with { @ link io . sentry . event . Event } s . */ protected Map < String , String > getTags ( Dsn dsn ) { } }
return Util . parseTags ( Lookup . lookup ( TAGS_OPTION , dsn ) ) ;
public class servicegroup {
    /**
     * Use this API to update servicegroup.
     * <p>
     * Copies every updatable attribute from the supplied resource onto a fresh
     * request object and issues the update against the NITRO service.
     *
     * @param client the NITRO service connection to use.
     * @param resource the servicegroup carrying the desired attribute values.
     * @return the base response returned by the NITRO service.
     * @throws Exception if the update request fails.
     */
    public static base_response update(nitro_service client, servicegroup resource) throws Exception {
        servicegroup updateresource = new servicegroup();
        // Identity / server binding attributes.
        updateresource.servicegroupname = resource.servicegroupname;
        updateresource.servername = resource.servername;
        updateresource.port = resource.port;
        updateresource.weight = resource.weight;
        updateresource.customserverid = resource.customserverid;
        updateresource.serverid = resource.serverid;
        updateresource.hashid = resource.hashid;
        // Monitoring attributes.
        updateresource.monitor_name_svc = resource.monitor_name_svc;
        updateresource.dup_weight = resource.dup_weight;
        // Connection limit attributes.
        updateresource.maxclient = resource.maxclient;
        updateresource.maxreq = resource.maxreq;
        updateresource.healthmonitor = resource.healthmonitor;
        updateresource.cacheable = resource.cacheable;
        // Client IP insertion settings.
        updateresource.cip = resource.cip;
        updateresource.cipheader = resource.cipheader;
        updateresource.usip = resource.usip;
        updateresource.pathmonitor = resource.pathmonitor;
        updateresource.pathmonitorindv = resource.pathmonitorindv;
        updateresource.useproxyport = resource.useproxyport;
        updateresource.sc = resource.sc;
        updateresource.sp = resource.sp;
        updateresource.rtspsessionidremap = resource.rtspsessionidremap;
        // Timeout settings.
        updateresource.clttimeout = resource.clttimeout;
        updateresource.svrtimeout = resource.svrtimeout;
        // TCP / compression feature flags.
        updateresource.cka = resource.cka;
        updateresource.tcpb = resource.tcpb;
        updateresource.cmp = resource.cmp;
        // Bandwidth and monitoring thresholds.
        updateresource.maxbandwidth = resource.maxbandwidth;
        updateresource.monthreshold = resource.monthreshold;
        updateresource.downstateflush = resource.downstateflush;
        // Profiles and miscellaneous settings.
        updateresource.tcpprofilename = resource.tcpprofilename;
        updateresource.httpprofilename = resource.httpprofilename;
        updateresource.comment = resource.comment;
        updateresource.appflowlog = resource.appflowlog;
        updateresource.netprofile = resource.netprofile;
        return updateresource.update_resource(client);
    }
}
public class JBBPDslBuilder {
    /**
     * Add anonymous bit array with size calculated through expression.
     *
     * @param bits length in bits of each array element, must not be null
     * @param sizeExpression expression to be used to calculate array size, must not be null
     * @return the builder instance, must not be null
     */
    public JBBPDslBuilder BitArray ( final JBBPBitNumber bits , final String sizeExpression ) {
        // Delegate to the named variant with a null name (anonymous field);
        // the expression is validated for illegal characters before delegation.
        return this . BitArray ( null , bits , assertExpressionChars ( sizeExpression ) ) ;
    }
}
public class Maps { /** * Returns the value associated with the specified { @ code key } if it exists in the specified { @ code map } contains , or the new put { @ code List } if it ' s absent . * @ param map * @ param key * @ return */ public static < K , E > List < E > getAndPutListIfAbsent ( final Map < K , List < E > > map , final K key ) { } }
List < E > v = map . get ( key ) ; if ( v == null ) { v = new ArrayList < > ( ) ; v = map . put ( key , v ) ; } return v ;
public class PrettyTime {
    /**
     * Formats the given date relative to the current date of the reference clock
     * {@link #getReferenceClock()} either as a relative duration or as an absolute date.
     *
     * @param date calendar date whose deviation from clock is to be printed
     * @param tzid time zone identifier for getting current reference date
     * @param maxRelativeUnit maximum calendar unit which will still be printed in a relative way
     * @param formatter used for printing absolute date if the leading unit is bigger than maxRelativeUnit
     * @return formatted output of relative date, either in past or in future
     * @since 3.7/4.5
     */
    public String printRelativeOrDate ( PlainDate date , TZID tzid , CalendarUnit maxRelativeUnit , TemporalFormatter < PlainDate > formatter ) {
        if ( maxRelativeUnit == null ) {
            throw new NullPointerException ( "Missing max relative unit." ) ;
        }
        // Determine "today" in the requested zone from the reference clock.
        Moment refTime = Moment . from ( this . getReferenceClock ( ) . currentTime ( ) ) ;
        PlainDate refDate = refTime . toZonalTimestamp ( tzid ) . toDate ( ) ;
        // Measure the calendar distance; weekToDays folds weeks into days.
        Duration < CalendarUnit > duration ;
        if ( this . weekToDays ) {
            duration = Duration . inYearsMonthsDays ( ) . between ( refDate , date ) ;
        } else {
            CalendarUnit [ ] stdUnits = { YEARS , MONTHS , WEEKS , DAYS } ;
            duration = Duration . in ( stdUnits ) . between ( refDate , date ) ;
        }
        // Same date as reference: print the "today"-style empty-duration text.
        if ( duration . isEmpty ( ) ) {
            return this . getEmptyRelativeString ( TimeUnit . DAYS ) ;
        }
        // The leading (largest) component decides how to render.
        TimeSpan . Item < CalendarUnit > item = duration . getTotalLength ( ) . get ( 0 ) ;
        long amount = item . getAmount ( ) ;
        CalendarUnit unit = item . getUnit ( ) ;
        if ( Double . compare ( unit . getLength ( ) , maxRelativeUnit . getLength ( ) ) > 0 ) {
            // Leading unit exceeds the relative threshold: fall back to an absolute date.
            return formatter . format ( date ) ;
        } else if ( unit . equals ( CalendarUnit . DAYS ) ) {
            // Day distances may have idiomatic words ("yesterday", "tomorrow", ...).
            String replacement = this . getRelativeReplacement ( date , duration . isNegative ( ) , amount ) ;
            if ( ! replacement . isEmpty ( ) ) {
                return replacement ;
            }
        }
        // Negative duration means the date lies in the past relative to the clock.
        String pattern = ( duration . isNegative ( ) ? this . getPastPattern ( amount , unit ) : this . getFuturePattern ( amount , unit ) ) ;
        return this . format ( pattern , amount ) ;
    }
}
public class ProjectCalendar { /** * Copy the settings from another calendar to this calendar . * @ param cal calendar data source */ public void copy ( ProjectCalendar cal ) { } }
setName ( cal . getName ( ) ) ; setParent ( cal . getParent ( ) ) ; System . arraycopy ( cal . getDays ( ) , 0 , getDays ( ) , 0 , getDays ( ) . length ) ; for ( ProjectCalendarException ex : cal . m_exceptions ) { addCalendarException ( ex . getFromDate ( ) , ex . getToDate ( ) ) ; for ( DateRange range : ex ) { ex . addRange ( new DateRange ( range . getStart ( ) , range . getEnd ( ) ) ) ; } } for ( ProjectCalendarHours hours : getHours ( ) ) { if ( hours != null ) { ProjectCalendarHours copyHours = cal . addCalendarHours ( hours . getDay ( ) ) ; for ( DateRange range : hours ) { copyHours . addRange ( new DateRange ( range . getStart ( ) , range . getEnd ( ) ) ) ; } } }
public class DescribeDirectConnectGatewayAssociationProposalsResult { /** * Describes the Direct Connect gateway association proposals . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setDirectConnectGatewayAssociationProposals ( java . util . Collection ) } or * { @ link # withDirectConnectGatewayAssociationProposals ( java . util . Collection ) } if you want to override the existing * values . * @ param directConnectGatewayAssociationProposals * Describes the Direct Connect gateway association proposals . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeDirectConnectGatewayAssociationProposalsResult withDirectConnectGatewayAssociationProposals ( DirectConnectGatewayAssociationProposal ... directConnectGatewayAssociationProposals ) { } }
if ( this . directConnectGatewayAssociationProposals == null ) { setDirectConnectGatewayAssociationProposals ( new com . amazonaws . internal . SdkInternalList < DirectConnectGatewayAssociationProposal > ( directConnectGatewayAssociationProposals . length ) ) ; } for ( DirectConnectGatewayAssociationProposal ele : directConnectGatewayAssociationProposals ) { this . directConnectGatewayAssociationProposals . add ( ele ) ; } return this ;
public class SchemaValidationUtil {
    /**
     * Returns true if the Parser contains any Error symbol, indicating that it may fail
     * for some inputs.
     *
     * @param symbol the parser symbol to inspect recursively
     * @return true if an error action is reachable from this symbol
     */
    private static boolean hasErrors ( Symbol symbol ) {
        // Recurse structurally over the symbol tree; the two-argument overload
        // (defined elsewhere in this class) presumably walks a production array
        // while tracking the visited parent — TODO confirm against the sibling method.
        switch ( symbol . kind ) {
        case ALTERNATIVE :
            return hasErrors ( symbol , ( ( Symbol . Alternative ) symbol ) . symbols ) ;
        case EXPLICIT_ACTION :
            // Explicit actions never encode an error.
            return false ;
        case IMPLICIT_ACTION :
            // Only the ErrorAction subtype of implicit actions signals failure.
            return symbol instanceof Symbol . ErrorAction ;
        case REPEATER :
            Symbol . Repeater r = ( Symbol . Repeater ) symbol ;
            // Check both the terminating symbol and the repeated production.
            return hasErrors ( r . end ) || hasErrors ( symbol , r . production ) ;
        case ROOT :
        case SEQUENCE :
            return hasErrors ( symbol , symbol . production ) ;
        case TERMINAL :
            return false ;
        default :
            // Unknown kinds indicate a parser-internal inconsistency.
            throw new RuntimeException ( "unknown symbol kind: " + symbol . kind ) ;
        }
    }
}
public class TaskQueue { /** * Add a Task to the queue and wait until it ' s run . The Task will be executed after " inMs " milliseconds . It is guaranteed that the Runnable will be processed * when this method returns . * @ param the runnable to be executed * @ param inMs The time after which the task should be processed * @ return the runnable , as a convenience method */ public < T extends Runnable > T executeSyncTimed ( T runnable , long inMs ) { } }
try { Thread . sleep ( inMs ) ; this . executeSync ( runnable ) ; } catch ( InterruptedException e ) { e . printStackTrace ( ) ; } return runnable ;
public class PersistentPropertyBinder { /** * @ see # bind ( ObjectProperty , String ) * @ param property * { @ link Property } to bind * @ param key * unique application store key */ public void bind ( IntegerProperty property , String key ) { } }
if ( prefs . get ( validateKey ( key ) , null ) != null ) { property . set ( prefs . getInt ( key , Integer . MIN_VALUE ) ) ; } property . addListener ( o -> prefs . putInt ( key , property . getValue ( ) ) ) ;
public class TFormatConversionProvider { /** * $ $ fb2000-10-04 : use AudioSystem . NOT _ SPECIFIED for all fields . */ @ Override public AudioInputStream getAudioInputStream ( AudioFormat . Encoding targetEncoding , AudioInputStream audioInputStream ) { } }
AudioFormat sourceFormat = audioInputStream . getFormat ( ) ; AudioFormat targetFormat = new AudioFormat ( targetEncoding , AudioSystem . NOT_SPECIFIED , // sample rate AudioSystem . NOT_SPECIFIED , // sample size in bits AudioSystem . NOT_SPECIFIED , // channels AudioSystem . NOT_SPECIFIED , // frame size AudioSystem . NOT_SPECIFIED , // frame rate sourceFormat . isBigEndian ( ) ) ; // big endian LOG . log ( Level . FINE , "TFormatConversionProvider.getAudioInputStream(AudioFormat.Encoding, AudioInputStream):" ) ; LOG . log ( Level . FINE , "trying to convert to {0}" , targetFormat ) ; return getAudioInputStream ( targetFormat , audioInputStream ) ;
public class DiscriminationProcessImpl { /** * remove the discriminatorNode from the linkedList . * @ param d * @ throws DiscriminationProcessException */ private void removeDiscriminatorNode ( Discriminator d ) throws DiscriminationProcessException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . entry ( tc , "removeDiscriminatorNode: " + d ) ; } if ( d == null ) { DiscriminationProcessException e = new DiscriminationProcessException ( "Can't remove a null discriminator" ) ; FFDCFilter . processException ( e , getClass ( ) . getName ( ) + ".removeDiscriminatorNode" , "484" , this ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "removeDiscriminatorNode" ) ; } throw e ; } if ( discriminators . disc . equals ( d ) ) { // removing the first discriminator discriminators = discriminators . next ; if ( discriminators != null ) { discriminators . prev = null ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "removeDiscriminatorNode" ) ; } return ; } // search through the list of discriminators DiscriminatorNode thisDN = discriminators . next , lastDN = discriminators ; while ( thisDN . next != null ) { if ( thisDN . disc . equals ( d ) ) { thisDN . next . prev = lastDN ; lastDN . next = thisDN . next ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "removeDiscriminatorNode" ) ; } return ; } // somewhere in the middle lastDN = thisDN ; thisDN = thisDN . next ; } if ( thisDN . disc . equals ( d ) ) { // found it ! lastDN . next = null ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "removeDiscriminatorNode" ) ; } return ; } // Does not exist ? if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "removeDiscriminatorNode: not found" ) ; } throw new NoSuchElementException ( ) ;
public class CheckPreCommitHook { /** * Checks if all oid ' s of the given JPAObjects are not existing yet . Returns a list of objects where the JPAObject * already exists . */ private List < JPAObject > checkInserts ( List < JPAObject > inserts ) { } }
List < JPAObject > failedObjects = new ArrayList < JPAObject > ( ) ; for ( JPAObject insert : inserts ) { String oid = insert . getOID ( ) ; if ( checkIfActiveOidExisting ( oid ) ) { failedObjects . add ( insert ) ; } else { insert . addEntry ( new JPAEntry ( EDBConstants . MODEL_VERSION , "1" , Integer . class . getName ( ) , insert ) ) ; } } return failedObjects ;
public class OCommandExecutorSQLInsert {
    /**
     * Execute the INSERT and return the created record(s).
     *
     * @param iArgs positional/named command parameters to bind
     * @return when inserting into an index, an ODocument built from the last bound entry;
     *         otherwise the single created ODocument, or the list of created documents
     */
    public Object execute ( final Map < Object , Object > iArgs ) {
        if ( newRecords == null )
            throw new OCommandExecutionException ( "Cannot execute the command because it has not been parsed yet" ) ;
        final OCommandParameters commandParameters = new OCommandParameters ( iArgs ) ;
        if ( indexName != null ) {
            // INSERT INTO index:... — put key/value pairs directly into the index.
            final OIndex < ? > index = getDatabase ( ) . getMetadata ( ) . getIndexManager ( ) . getIndex ( indexName ) ;
            if ( index == null )
                throw new OCommandExecutionException ( "Target index '" + indexName + "' not found" ) ;
            // BIND VALUES
            Map < String , Object > result = null ;
            for ( Map < String , Object > candidate : newRecords ) {
                index . put ( getIndexKeyValue ( commandParameters , candidate ) , getIndexValue ( commandParameters , candidate ) ) ;
                result = candidate ;
            }
            // RETURN LAST ENTRY
            return new ODocument ( result ) ;
        } else {
            // CREATE NEW DOCUMENTS
            final List < ODocument > docs = new ArrayList < ODocument > ( ) ;
            for ( Map < String , Object > candidate : newRecords ) {
                // className == null yields a schemaless document.
                final ODocument doc = className != null ? new ODocument ( className ) : new ODocument ( ) ;
                OSQLHelper . bindParameters ( doc , candidate , commandParameters ) ;
                // Honour an explicit target cluster when one was parsed.
                if ( clusterName != null ) {
                    doc . save ( clusterName ) ;
                } else {
                    doc . save ( ) ;
                }
                docs . add ( doc ) ;
            }
            // Single insert returns the document itself; multi-insert returns the list.
            if ( docs . size ( ) == 1 ) {
                return docs . get ( 0 ) ;
            } else {
                return docs ;
            }
        }
    }
}
public class BeforeAudioRendition {
    /**
     * Be sure each part has its proper key signature.
     * Imagine... you have
     * <code>K:Bb
     * P:A
     * P:B
     * P:C
     * K:G
     * ...</code>
     * in a <tt>ABCA</tt> structure, the last part A will have
     * the G key of part C.
     * This method corrects this: the key was Bb before the
     * first part A, so before the second part A we insert a Bb
     * key signature.
     *
     * @param music the music to correct; it is modified in place
     * @return the same (corrected) music instance
     */
    public static Music correctPartsKeys ( Music music ) {
        // We may have key/clef changes. Repeat the operation for each voice:
        // the key applies to all voices but the clef can differ per voice.
        for ( Object o : music . getVoices ( ) ) {
            Voice voice = ( Voice ) o ;
            if ( voice . hasObject ( KeySignature . class ) && voice . hasObject ( PartLabel . class ) ) {
                // Maps part label (as String) -> key signature first seen for that part.
                Hashtable partsKey = new Hashtable ( ) ;
                KeySignature tuneKey = null ;
                // NOTE: size and i are mutated inside the loop when an element is
                // inserted, so a plain while-loop is used instead of an iterator.
                int size = voice . size ( ) ;
                int i = 0 ;
                while ( i < size ) {
                    MusicElement me = ( MusicElement ) voice . elementAt ( i ) ;
                    if ( me instanceof KeySignature ) {
                        // Track the currently effective key.
                        tuneKey = ( KeySignature ) me ;
                    } else if ( me instanceof PartLabel ) {
                        PartLabel pl = ( PartLabel ) me ;
                        if ( ( tuneKey != null ) && ( partsKey . get ( pl . getLabel ( ) + "" ) == null ) ) {
                            // first time we see this part, store the key
                            partsKey . put ( pl . getLabel ( ) + "" , tuneKey ) ;
                        } else {
                            // Not the first time we see this part: restore its stored key.
                            // NOTE(review): if tuneKey was still null at the first occurrence,
                            // the lookup below can return null — presumably labels always follow
                            // an initial key signature; confirm against the parser.
                            tuneKey = ( KeySignature ) partsKey . get ( pl . getLabel ( ) + "" ) ;
                            if ( i < ( size - 1 ) ) {
                                if ( ! ( voice . elementAt ( i + 1 ) instanceof KeySignature ) ) {
                                    // If the next element is already a key, no need to insert one;
                                    // on the next loop step it will become the effective tuneKey.
                                    voice . insertElementAt ( tuneKey , i + 1 ) ;
                                    // Skip over the inserted element and grow the scan bound.
                                    i ++ ;
                                    size ++ ;
                                }
                            }
                        }
                    }
                    i ++ ;
                } // end for each element of voice
            } // end voice has key(s) and part(s)
        } // end for each voice of music
        // else nothing to do
        return music ;
    }
}
public class JSONValue { /** * Escape quotes , \ , / , \ r , \ n , \ b , \ f , \ t and other control characters * ( U + 0000 through U + 001F ) . */ public static String escape ( String s , JSONStyle compression ) { } }
if ( s == null ) return null ; StringBuilder sb = new StringBuilder ( ) ; compression . escape ( s , sb ) ; return sb . toString ( ) ;
public class PropertyChangeUtils { /** * Add the given PropertyChangeListener to the given target object . * If the given target object does not * { @ link # maintainsNamedPropertyChangeListeners ( Class ) * maintain named PropertyChangeListeners } , or the invocation of * the method to add such a listener caused an error , a * { @ link IllegalArgumentException } will be thrown . * @ param target The target object * @ param propertyName The property name * @ param propertyChangeListener The PropertyChangeListener to add * @ throws IllegalArgumentException If the given object does not maintain * named PropertyChangeListener , or the attempt to invoke the method * for adding the given listener failed . */ private static void addNamedPropertyChangeListenerUnchecked ( Object target , String propertyName , PropertyChangeListener propertyChangeListener ) { } }
Class < ? > c = target . getClass ( ) ; if ( ! maintainsNamedPropertyChangeListeners ( c ) ) { throw new IllegalArgumentException ( "Class " + c + " does not maintain " + "named PropertyChangeListeners" ) ; } Method addMethod = Methods . getMethodUnchecked ( c , "addPropertyChangeListener" , String . class , PropertyChangeListener . class ) ; Methods . invokeUnchecked ( addMethod , target , propertyName , propertyChangeListener ) ;
public class XPathContext { /** * Set the ErrorListener where errors and warnings are to be reported . * @ param listener A non - null ErrorListener reference . */ public void setErrorListener ( ErrorListener listener ) throws IllegalArgumentException { } }
if ( listener == null ) throw new IllegalArgumentException ( XSLMessages . createXPATHMessage ( XPATHErrorResources . ER_NULL_ERROR_HANDLER , null ) ) ; // " Null error handler " ) ; m_errorListener = listener ;
public class ChangeObjects { /** * method to add an annotation to a PolymerNotation * @ param polymer * PolymerNotation * @ param annotation * new annotation * @ return PolymerNotation with the annotation */ public final static PolymerNotation addAnnotationToPolymer ( final PolymerNotation polymer , final String annotation ) { } }
if ( polymer . getAnnotation ( ) != null ) { return new PolymerNotation ( polymer . getPolymerID ( ) , polymer . getPolymerElements ( ) , polymer . getAnnotation ( ) + " | " + annotation ) ; } return new PolymerNotation ( polymer . getPolymerID ( ) , polymer . getPolymerElements ( ) , annotation ) ;
public class ExecutionPipeline { /** * Checks if the pipeline is currently finishing its execution , i . e . all vertices contained in the pipeline have * switched to the < code > FINISHING < / code > or < code > FINISHED < / code > state . * @ return < code > true < / code > if the pipeline is currently finishing , < code > false < / code > otherwise */ public boolean isFinishing ( ) { } }
final Iterator < ExecutionVertex > it = this . vertices . iterator ( ) ; while ( it . hasNext ( ) ) { final ExecutionState state = it . next ( ) . getExecutionState ( ) ; if ( state != ExecutionState . FINISHING && state != ExecutionState . FINISHED ) { return false ; } } return true ;
public class AmazonChimeClient { /** * Updates user details within the < a > UpdateUserRequestItem < / a > object for up to 20 users for the specified Amazon * Chime account . Currently , only < code > LicenseType < / code > updates are supported for this action . * @ param batchUpdateUserRequest * @ return Result of the BatchUpdateUser operation returned by the service . * @ throws UnauthorizedClientException * The client is not currently authorized to make the request . * @ throws NotFoundException * One or more of the resources in the request does not exist in the system . * @ throws ForbiddenException * The client is permanently forbidden from making the request . For example , when a user tries to create an * account from an unsupported region . * @ throws BadRequestException * The input parameters don ' t match the service ' s restrictions . * @ throws ThrottledClientException * The client exceeded its request rate limit . * @ throws ServiceUnavailableException * The service is currently unavailable . * @ throws ServiceFailureException * The service encountered an unexpected error . * @ sample AmazonChime . BatchUpdateUser * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / chime - 2018-05-01 / BatchUpdateUser " target = " _ top " > AWS API * Documentation < / a > */ @ Override public BatchUpdateUserResult batchUpdateUser ( BatchUpdateUserRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeBatchUpdateUser ( request ) ;
public class DelegateEbeanServer { /** * - - refresh - - - - - */ @ Override public void refresh ( Object bean ) { } }
methodCalls . add ( MethodCall . of ( "refresh" ) . with ( "bean" , bean ) ) ; find . refresh ( bean ) ;
public class UnsynchronizedRateLimiter {
    /**
     * Creates a {@code UnsynchronizedRateLimiter} with the specified stable throughput, given as
     * "permits per second" (commonly referred to as <i>QPS</i>, queries per second), and a
     * <i>warmup period</i>, during which the {@code UnsynchronizedRateLimiter} smoothly ramps up its rate,
     * until it reaches its maximum rate at the end of the period (as long as there are enough
     * requests to saturate it). Similarly, if the {@code UnsynchronizedRateLimiter} is left <i>unused</i> for
     * a duration of {@code warmupPeriod}, it will gradually return to its "cold" state,
     * i.e. it will go through the same warming up process as when it was first created.
     *
     * <p>The returned {@code UnsynchronizedRateLimiter} is intended for cases where the resource that actually
     * fulfills the requests (e.g., a remote server) needs "warmup" time, rather than
     * being immediately accessed at the stable (maximum) rate.
     *
     * <p>The returned {@code UnsynchronizedRateLimiter} starts in a "cold" state (i.e. the warmup period
     * will follow), and if it is left unused for long enough, it will return to that state.
     *
     * @param permitsPerSecond the rate of the returned {@code UnsynchronizedRateLimiter}, measured in
     *        how many permits become available per second. Must be positive
     * @param warmupPeriod the duration of the period where the {@code UnsynchronizedRateLimiter} ramps up its
     *        rate, before reaching its stable (maximum) rate
     * @param unit the time unit of the warmupPeriod argument
     * @return a new warming-up rate limiter driven by the system ticker
     */
    public static UnsynchronizedRateLimiter create ( double permitsPerSecond , long warmupPeriod , TimeUnit unit ) {
        // Delegate to the ticker-parameterised factory using the real system clock.
        return create ( SleepingTicker . SYSTEM_TICKER , permitsPerSecond , warmupPeriod , unit ) ;
    }
}
public class UpdateTableRequestMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param updateTableRequest the request to marshall, must not be null
     * @param protocolMarshaller receives each field/binding pair
     * @throws SdkClientException if the request is null or any field fails to marshall
     */
    public void marshall ( UpdateTableRequest updateTableRequest , ProtocolMarshaller protocolMarshaller ) {
        if ( updateTableRequest == null ) {
            throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ;
        }
        try {
            // Emit each request field with its protocol binding; null fields are
            // handled by the protocol marshaller itself.
            protocolMarshaller . marshall ( updateTableRequest . getAttributeDefinitions ( ) , ATTRIBUTEDEFINITIONS_BINDING ) ;
            protocolMarshaller . marshall ( updateTableRequest . getTableName ( ) , TABLENAME_BINDING ) ;
            protocolMarshaller . marshall ( updateTableRequest . getBillingMode ( ) , BILLINGMODE_BINDING ) ;
            protocolMarshaller . marshall ( updateTableRequest . getProvisionedThroughput ( ) , PROVISIONEDTHROUGHPUT_BINDING ) ;
            protocolMarshaller . marshall ( updateTableRequest . getGlobalSecondaryIndexUpdates ( ) , GLOBALSECONDARYINDEXUPDATES_BINDING ) ;
            protocolMarshaller . marshall ( updateTableRequest . getStreamSpecification ( ) , STREAMSPECIFICATION_BINDING ) ;
            protocolMarshaller . marshall ( updateTableRequest . getSSESpecification ( ) , SSESPECIFICATION_BINDING ) ;
        } catch ( Exception e ) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ;
        }
    }
}
public class GISTreeSetUtil { /** * Computes the area covered by a child node of an icosep - node . * @ param region is the id of the region for which the area must be computed * @ param area is the parent icosep area . * @ return the area covered by the child node . */ private static Rectangle2d computeIcosepSubarea ( IcosepQuadTreeZone region , Rectangle2d area ) { } }
if ( area == null || area . isEmpty ( ) ) { return area ; } final double demiWidth = area . getWidth ( ) / 2. ; final double demiHeight = area . getHeight ( ) / 2. ; switch ( region ) { case ICOSEP : return area ; case SOUTH_WEST : return new Rectangle2d ( area . getMinX ( ) , area . getMinY ( ) , demiWidth , demiHeight ) ; case NORTH_WEST : return new Rectangle2d ( area . getMinX ( ) , area . getCenterY ( ) , demiWidth , demiHeight ) ; case NORTH_EAST : return new Rectangle2d ( area . getCenterX ( ) , area . getMinY ( ) , demiWidth , demiHeight ) ; case SOUTH_EAST : return new Rectangle2d ( area . getMinX ( ) , area . getMinY ( ) , demiWidth , demiHeight ) ; default : } throw new IllegalStateException ( ) ;
public class LegacyRegionXmlMetadataBuilder { /** * Loads region metadata from file location specified in * { @ link # REGIONS _ FILE _ OVERRIDE } property . * Returns null if no such property exists . * @ throws SdkClientException if any error occurs while loading the * metadata file . */ private RegionMetadata loadFromStream ( final InputStream stream ) { } }
try { return LegacyRegionXmlLoadUtils . load ( stream ) ; } catch ( IOException exception ) { throw new SdkClientException ( "Error parsing region metadata from input stream" , exception ) ; }
public class AfplibPackageImpl {
    /**
     * Returns the EClass for the BAG structured field, lazily resolving it from the
     * registered AFPLIB package.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getBAG ( ) {
        // Lazy lookup: resolve the classifier once from the package registry and cache it.
        // NOTE(review): index 193 is the generated position of BAG in the classifier
        // list — it must stay in sync with the generated model.
        if ( bagEClass == null ) {
            bagEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( AfplibPackage . eNS_URI ) . getEClassifiers ( ) . get ( 193 ) ;
        }
        return bagEClass ;
    }
}
public class CPRuleUserSegmentRelPersistenceImpl { /** * Creates a new cp rule user segment rel with the primary key . Does not add the cp rule user segment rel to the database . * @ param CPRuleUserSegmentRelId the primary key for the new cp rule user segment rel * @ return the new cp rule user segment rel */ @ Override public CPRuleUserSegmentRel create ( long CPRuleUserSegmentRelId ) { } }
CPRuleUserSegmentRel cpRuleUserSegmentRel = new CPRuleUserSegmentRelImpl ( ) ; cpRuleUserSegmentRel . setNew ( true ) ; cpRuleUserSegmentRel . setPrimaryKey ( CPRuleUserSegmentRelId ) ; cpRuleUserSegmentRel . setCompanyId ( companyProvider . getCompanyId ( ) ) ; return cpRuleUserSegmentRel ;
public class PrettyTimeParser {
    /**
     * Provides a string representation for the number passed. This method works for a limited
     * set of numbers as parsing will only be done at maximum for 2400, which will be used in
     * military time format.
     *
     * @param number the value to spell out (0..2400 per the caller's contract)
     * @return the trimmed textual representation built from numNames/tensNames
     */
    private String provideRepresentation ( int number ) {
        // NOTE(review): the concatenations below assume numNames/tensNames entries
        // carry their own separating whitespace — confirm against the arrays.
        String key ;
        if ( number == 0 )
            key = "zero" ;
        else if ( number < 20 )
            key = numNames [ number ] ;
        else if ( number < 100 ) {
            // Tens + units, e.g. 42 -> tensNames[4] + numNames[2].
            int unit = number % 10 ;
            key = tensNames [ number / 10 ] + numNames [ unit ] ;
        } else {
            // Split into hundreds / tens / units.
            int unit = number % 10 ;
            int ten = number % 100 - unit ;
            int hundred = ( number - ten ) / 100 ;
            if ( hundred < 20 )
                key = numNames [ hundred ] + " hundred" ;
            else
                key = tensNames [ hundred / 10 ] + numNames [ hundred % 10 ] + " hundred" ;
            // Teens (11..19) use the dedicated numNames entry; everything else is tens + units.
            if ( ten + unit < 20 && ten + unit > 10 )
                key += numNames [ ten + unit ] ;
            else
                key += tensNames [ ten / 10 ] + numNames [ unit ] ;
        }
        return key . trim ( ) ;
    }
}
public class TableFactor {
    /**
     * Marginalizes out a variable by applying an associative join operation for each possible
     * assignment to the marginalized variable.
     *
     * @param variable the variable (by 'name', not offset into neighborIndices)
     * @param startingValue associativeJoin is basically a foldr over a table, and this is the initialization
     * @param curriedFoldr the associative function to use when applying the join operation, taking first the
     *        assignment to the value being marginalized, and then a foldr operation
     * @return a new TableFactor that doesn't contain 'variable', where values were gotten through
     *         associative marginalization.
     */
    private TableFactor marginalize ( int variable , double startingValue , BiFunction < Integer , int [ ] , BiFunction < Double , Double , Double > > curriedFoldr ) {
        // Can't marginalize the last variable
        assert ( getDimensions ( ) . length > 1 ) ;
        // Calculate the result domain: all neighbors except the marginalized variable.
        List < Integer > resultDomain = new ArrayList < > ( ) ;
        for ( int n : neighborIndices ) {
            if ( n != variable ) {
                resultDomain . add ( n ) ;
            }
        }
        // Create result TableFactor with the reduced neighbor set and matching dimensions.
        int [ ] resultNeighborIndices = new int [ resultDomain . size ( ) ] ;
        int [ ] resultDimensions = new int [ resultNeighborIndices . length ] ;
        for ( int i = 0 ; i < resultDomain . size ( ) ; i ++ ) {
            int var = resultDomain . get ( i ) ;
            resultNeighborIndices [ i ] = var ;
            resultDimensions [ i ] = getVariableSize ( var ) ;
        }
        TableFactor result = new TableFactor ( resultNeighborIndices , resultDimensions ) ;
        // Calculate forward-pointers from the old domain to new domain;
        // the marginalized variable maps to -1.
        int [ ] mapping = new int [ neighborIndices . length ] ;
        for ( int i = 0 ; i < neighborIndices . length ; i ++ ) {
            mapping [ i ] = resultDomain . indexOf ( neighborIndices [ i ] ) ;
        }
        // Initialize every cell of the result with the fold's starting value.
        for ( int [ ] assignment : result ) {
            result . setAssignmentLogValue ( assignment , startingValue ) ;
        }
        // Do the actual fold into the result
        int [ ] resultAssignment = new int [ result . neighborIndices . length ] ;
        int marginalizedVariableValue = 0 ;
        // OPTIMIZATION:
        // Rather than use the standard iterator, which creates lots of int[] arrays on the heap,
        // which need to be GC'd, we use the fast version that just mutates one array. Since this
        // is read once for us here, this is ideal.
        Iterator < int [ ] > fastPassByReferenceIterator = fastPassByReferenceIterator ( ) ;
        int [ ] assignment = fastPassByReferenceIterator . next ( ) ;
        while ( true ) {
            // Set the assignment arrays correctly: project each source index into the
            // result assignment, capturing the marginalized variable's value separately.
            for ( int i = 0 ; i < assignment . length ; i ++ ) {
                if ( mapping [ i ] != - 1 ) resultAssignment [ mapping [ i ] ] = assignment [ i ] ;
                else marginalizedVariableValue = assignment [ i ] ;
            }
            // Fold this table cell into the corresponding result cell.
            double value = curriedFoldr . apply ( marginalizedVariableValue , resultAssignment ) . apply ( result . getAssignmentLogValue ( resultAssignment ) , getAssignmentLogValue ( assignment ) ) ;
            assert ( ! Double . isNaN ( value ) ) ;
            result . setAssignmentLogValue ( resultAssignment , value ) ;
            // Advance the in-place iterator; next() mutates the same array instance.
            if ( fastPassByReferenceIterator . hasNext ( ) ) fastPassByReferenceIterator . next ( ) ;
            else break ;
        }
        return result ;
    }
}
public class Expressive {
    /**
     * Report whether a polled sample of the variable satisfies the criteria.
     *
     * @param variable the sampled variable to poll
     * @param ticker drives the polling schedule
     * @param criteria the condition the sampled value must satisfy
     * @return true if a polled sample satisfies the criteria
     */
    public < V > boolean the ( Sampler < V > variable , Ticker ticker , Matcher < ? super V > criteria ) {
        // Wrap the sampler and matcher into a single condition, then delegate to the
        // condition-based overload with the same ticker.
        return the ( sampleOf ( variable , criteria ) , ticker ) ;
    }
}
public class ComponentPrimitiveFactoryImpl { /** * ( non - Javadoc ) * @ see org . restcomm . protocols . ss7 . tcap . api . ComponentPrimitiveFactory # createParameter ( int , int , boolean ) */ public Parameter createParameter ( int tag , int tagClass , boolean isPrimitive ) { } }
Parameter p = TcapFactory . createParameter ( ) ; p . setTag ( tag ) ; p . setTagClass ( tagClass ) ; p . setPrimitive ( isPrimitive ) ; return p ;
public class CalendarRecordItem { /** * Delete this item . */ public boolean remove ( ) { } }
boolean bSuccess = false ; try { Record recGrid = this . getMainRecord ( ) ; if ( recGrid . getEditMode ( ) == Constants . EDIT_CURRENT ) recGrid . edit ( ) ; if ( recGrid . getEditMode ( ) == Constants . EDIT_IN_PROGRESS ) recGrid . remove ( ) ; bSuccess = true ; } catch ( DBException ex ) { ex . printStackTrace ( ) ; bSuccess = false ; } return bSuccess ;
public class ExecutorUtil { /** * 执行手动设置的 count 查询 , 该查询支持的参数必须和被分页的方法相同 * @ param executor * @ param countMs * @ param parameter * @ param boundSql * @ param resultHandler * @ return * @ throws SQLException */ public static Long executeManualCount ( Executor executor , MappedStatement countMs , Object parameter , BoundSql boundSql , ResultHandler resultHandler ) throws SQLException { } }
CacheKey countKey = executor . createCacheKey ( countMs , parameter , RowBounds . DEFAULT , boundSql ) ; BoundSql countBoundSql = countMs . getBoundSql ( parameter ) ; Object countResultList = executor . query ( countMs , parameter , RowBounds . DEFAULT , resultHandler , countKey , countBoundSql ) ; Long count = ( ( Number ) ( ( List ) countResultList ) . get ( 0 ) ) . longValue ( ) ; return count ;
public class I18nUtils {
    /**
     * Resolves the localized message for the given code using the current locale.
     *
     * @param code the message key to resolve
     * @param args optional arguments substituted into the message
     * @return the localized, formatted message
     */
    public static String getMessage ( String code , Object ... args ) {
        // Delegate to the locale-explicit overload with the ambient locale.
        return getMessage ( I18nUtils . getLocale ( ) , code , args ) ;
    }
}
public class ImapSessionFolder { /** * Adjust an actual mailbox msn for the expunged messages in this mailbox that have not * yet been notified . * TODO - need a test for this */ private int correctForExpungedMessages ( int absoluteMsn ) { } }
int correctedMsn = absoluteMsn ; // Loop through the expunged list backwards , adjusting the msn as we go . for ( int i = expungedMsns . size ( ) - 1 ; i >= 0 ; i -- ) { int expunged = expungedMsns . get ( i ) ; if ( expunged <= absoluteMsn ) { correctedMsn ++ ; } } return correctedMsn ;
public class Percentile {
    /**
     * Print a nice histogram of percentiles: one line per quantile showing the quantile,
     * a '#'-bar scaled to the largest estimate, and the estimate itself.
     *
     * @param out output stream
     * @param name data set name
     * @param p percentile data source; printed only if it is ready
     */
    public static void print ( final PrintStream out , final String name , final Percentile p ) {
        if ( p . isReady ( ) ) {
            try {
                final StringBuilder sb = new StringBuilder ( 512 ) ;
                final float [ ] q = p . getQuantiles ( ) ;
                final float [ ] e = p . getEstimates ( ) ;
                // Width (in characters) of the longest histogram bar.
                final int SCREENWIDTH = 80 ;
                // Header: name plus observed min/max.
                sb . append ( name ) ;
                sb . append ( ", min(" ) ;
                sb . append ( p . getMin ( ) ) ;
                sb . append ( "), max(" ) ;
                sb . append ( p . getMax ( ) ) ;
                sb . append ( ')' ) ;
                sb . append ( "\n" ) ;
                // Scale bars to the largest estimate (last entry; estimates are
                // presumably non-decreasing — confirm against getEstimates).
                final float max = e [ e . length - 1 ] ;
                for ( int i = 0 ; i < q . length ; i ++ ) {
                    sb . append ( String . format ( "%4.3f" , q [ i ] ) ) ;
                    sb . append ( ": " ) ;
                    // Bar length proportional to this estimate.
                    final int len = ( int ) ( e [ i ] / max * SCREENWIDTH ) ;
                    for ( int j = 0 ; j < len ; j ++ ) {
                        sb . append ( '#' ) ;
                    }
                    sb . append ( " " ) ;
                    sb . append ( String . format ( "%4.3f\n" , e [ i ] ) ) ;
                }
                out . println ( sb . toString ( ) ) ;
            } catch ( InsufficientSamplesException e ) {
                // this can never occur: isReady() was checked above
            }
        }
    }
}
public class Scale { /** * Sets the offset for the scaling . * @ param x the x * @ param y the y * @ param z the z * @ return the scale */ public Scale offset ( float x , float y , float z ) { } }
offsetX = x ; offsetY = y ; offsetZ = z ; return this ;
public class TasksInner { /** * Cancel a task . * The tasks resource is a nested , proxy - only resource representing work performed by a DMS instance . This method cancels a task if it ' s currently queued or running . * @ param groupName Name of the resource group * @ param serviceName Name of the service * @ param projectName Name of the project * @ param taskName Name of the Task * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws ApiErrorException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the ProjectTaskInner object if successful . */ public ProjectTaskInner cancel ( String groupName , String serviceName , String projectName , String taskName ) { } }
return cancelWithServiceResponseAsync ( groupName , serviceName , projectName , taskName ) . toBlocking ( ) . single ( ) . body ( ) ;
// NOTE(review): ANTLR-generated parser method for the Xtext grammar rule
// 'AbstractTerminal' with six alternatives (Keyword | RuleCall |
// ParenthesizedElement | PredicatedKeyword | PredicatedRuleCall |
// PredicatedGroup). Generated code -- do not hand-edit; regenerate from
// InternalXtext.g instead.
public class InternalXtextParser { /** * InternalXtext . g : 1455:1 : ruleAbstractTerminal returns [ EObject current = null ] : ( this _ Keyword _ 0 = ruleKeyword | this _ RuleCall _ 1 = ruleRuleCall | this _ ParenthesizedElement _ 2 = ruleParenthesizedElement | this _ PredicatedKeyword _ 3 = rulePredicatedKeyword | this _ PredicatedRuleCall _ 4 = rulePredicatedRuleCall | this _ PredicatedGroup _ 5 = rulePredicatedGroup ) ; */ public final EObject ruleAbstractTerminal ( ) throws RecognitionException { } }
// Lookahead dispatch: LA(1) picks the alternative directly for RULE_STRING
// (keyword), RULE_ID/39/40 (rule call) and 15 (parenthesized element); token
// ids 42 and 43 -- presumably the predicate operators, TODO confirm against
// the generated lexer -- require LA(2) to choose between the predicated
// keyword / rule-call / group variants. Unmatched input raises
// NoViableAltException; RecognitionException is recovered from in the catch
// below so parsing can continue with error nodes.
EObject current = null ; EObject this_Keyword_0 = null ; EObject this_RuleCall_1 = null ; EObject this_ParenthesizedElement_2 = null ; EObject this_PredicatedKeyword_3 = null ; EObject this_PredicatedRuleCall_4 = null ; EObject this_PredicatedGroup_5 = null ; enterRule ( ) ; try { // InternalXtext . g : 1461:2 : ( ( this _ Keyword _ 0 = ruleKeyword | this _ RuleCall _ 1 = ruleRuleCall | this _ ParenthesizedElement _ 2 = ruleParenthesizedElement | this _ PredicatedKeyword _ 3 = rulePredicatedKeyword | this _ PredicatedRuleCall _ 4 = rulePredicatedRuleCall | this _ PredicatedGroup _ 5 = rulePredicatedGroup ) ) // InternalXtext . g : 1462:2 : ( this _ Keyword _ 0 = ruleKeyword | this _ RuleCall _ 1 = ruleRuleCall | this _ ParenthesizedElement _ 2 = ruleParenthesizedElement | this _ PredicatedKeyword _ 3 = rulePredicatedKeyword | this _ PredicatedRuleCall _ 4 = rulePredicatedRuleCall | this _ PredicatedGroup _ 5 = rulePredicatedGroup ) { // InternalXtext . g : 1462:2 : ( this _ Keyword _ 0 = ruleKeyword | this _ RuleCall _ 1 = ruleRuleCall | this _ ParenthesizedElement _ 2 = ruleParenthesizedElement | this _ PredicatedKeyword _ 3 = rulePredicatedKeyword | this _ PredicatedRuleCall _ 4 = rulePredicatedRuleCall | this _ PredicatedGroup _ 5 = rulePredicatedGroup ) int alt39 = 6 ; switch ( input . LA ( 1 ) ) { case RULE_STRING : { alt39 = 1 ; } break ; case RULE_ID : case 39 : case 40 : { alt39 = 2 ; } break ; case 15 : { alt39 = 3 ; } break ; case 42 : { switch ( input . LA ( 2 ) ) { case 15 : { alt39 = 6 ; } break ; case RULE_ID : case 39 : case 40 : { alt39 = 5 ; } break ; case RULE_STRING : { alt39 = 4 ; } break ; default : NoViableAltException nvae = new NoViableAltException ( "" , 39 , 4 , input ) ; throw nvae ; } } break ; case 43 : { switch ( input . 
LA ( 2 ) ) { case RULE_STRING : { alt39 = 4 ; } break ; case 15 : { alt39 = 6 ; } break ; case RULE_ID : case 39 : case 40 : { alt39 = 5 ; } break ; default : NoViableAltException nvae = new NoViableAltException ( "" , 39 , 5 , input ) ; throw nvae ; } } break ; default : NoViableAltException nvae = new NoViableAltException ( "" , 39 , 0 , input ) ; throw nvae ; } switch ( alt39 ) { case 1 : // InternalXtext . g : 1463:3 : this _ Keyword _ 0 = ruleKeyword { newCompositeNode ( grammarAccess . getAbstractTerminalAccess ( ) . getKeywordParserRuleCall_0 ( ) ) ; pushFollow ( FollowSets000 . FOLLOW_2 ) ; this_Keyword_0 = ruleKeyword ( ) ; state . _fsp -- ; current = this_Keyword_0 ; afterParserOrEnumRuleCall ( ) ; } break ; case 2 : // InternalXtext . g : 1472:3 : this _ RuleCall _ 1 = ruleRuleCall { newCompositeNode ( grammarAccess . getAbstractTerminalAccess ( ) . getRuleCallParserRuleCall_1 ( ) ) ; pushFollow ( FollowSets000 . FOLLOW_2 ) ; this_RuleCall_1 = ruleRuleCall ( ) ; state . _fsp -- ; current = this_RuleCall_1 ; afterParserOrEnumRuleCall ( ) ; } break ; case 3 : // InternalXtext . g : 1481:3 : this _ ParenthesizedElement _ 2 = ruleParenthesizedElement { newCompositeNode ( grammarAccess . getAbstractTerminalAccess ( ) . getParenthesizedElementParserRuleCall_2 ( ) ) ; pushFollow ( FollowSets000 . FOLLOW_2 ) ; this_ParenthesizedElement_2 = ruleParenthesizedElement ( ) ; state . _fsp -- ; current = this_ParenthesizedElement_2 ; afterParserOrEnumRuleCall ( ) ; } break ; case 4 : // InternalXtext . g : 1490:3 : this _ PredicatedKeyword _ 3 = rulePredicatedKeyword { newCompositeNode ( grammarAccess . getAbstractTerminalAccess ( ) . getPredicatedKeywordParserRuleCall_3 ( ) ) ; pushFollow ( FollowSets000 . FOLLOW_2 ) ; this_PredicatedKeyword_3 = rulePredicatedKeyword ( ) ; state . _fsp -- ; current = this_PredicatedKeyword_3 ; afterParserOrEnumRuleCall ( ) ; } break ; case 5 : // InternalXtext . 
g : 1499:3 : this _ PredicatedRuleCall _ 4 = rulePredicatedRuleCall { newCompositeNode ( grammarAccess . getAbstractTerminalAccess ( ) . getPredicatedRuleCallParserRuleCall_4 ( ) ) ; pushFollow ( FollowSets000 . FOLLOW_2 ) ; this_PredicatedRuleCall_4 = rulePredicatedRuleCall ( ) ; state . _fsp -- ; current = this_PredicatedRuleCall_4 ; afterParserOrEnumRuleCall ( ) ; } break ; case 6 : // InternalXtext . g : 1508:3 : this _ PredicatedGroup _ 5 = rulePredicatedGroup { newCompositeNode ( grammarAccess . getAbstractTerminalAccess ( ) . getPredicatedGroupParserRuleCall_5 ( ) ) ; pushFollow ( FollowSets000 . FOLLOW_2 ) ; this_PredicatedGroup_5 = rulePredicatedGroup ( ) ; state . _fsp -- ; current = this_PredicatedGroup_5 ; afterParserOrEnumRuleCall ( ) ; } break ; } } leaveRule ( ) ; } catch ( RecognitionException re ) { recover ( input , re ) ; appendSkippedTokens ( ) ; } finally { } return current ;
public class AmazonRoute53DomainsClient { /** * This operation replaces the current set of name servers for the domain with the specified set of name servers . If * you use Amazon Route 53 as your DNS service , specify the four name servers in the delegation set for the hosted * zone for the domain . * If successful , this operation returns an operation ID that you can use to track the progress and completion of * the action . If the request is not completed successfully , the domain registrant will be notified by email . * @ param updateDomainNameserversRequest * Replaces the current set of name servers for the domain with the specified set of name servers . If you use * Amazon Route 53 as your DNS service , specify the four name servers in the delegation set for the hosted * zone for the domain . < / p > * If successful , this operation returns an operation ID that you can use to track the progress and * completion of the action . If the request is not completed successfully , the domain registrant will be * notified by email . * @ return Result of the UpdateDomainNameservers operation returned by the service . * @ throws InvalidInputException * The requested item is not acceptable . For example , for an OperationId it might refer to the ID of an * operation that is already completed . For a domain name , it might not be a valid domain name or belong to * the requester account . * @ throws DuplicateRequestException * The request is already in progress for the domain . * @ throws TLDRulesViolationException * The top - level domain does not support this operation . * @ throws OperationLimitExceededException * The number of operations or jobs running exceeded the allowed threshold for the account . * @ throws UnsupportedTLDException * Amazon Route 53 does not support this top - level domain ( TLD ) . * @ sample AmazonRoute53Domains . UpdateDomainNameservers * @ see < a href = " http : / / docs . aws . amazon . 
com / goto / WebAPI / route53domains - 2014-05-15 / UpdateDomainNameservers " * target = " _ top " > AWS API Documentation < / a > */ @ Override public UpdateDomainNameserversResult updateDomainNameservers ( UpdateDomainNameserversRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeUpdateDomainNameservers ( request ) ;
public class Snackbar { /** * Set a Background Drawable using the appropriate Android version api call * @ param view * @ param drawable */ public static void setBackgroundDrawable ( View view , Drawable drawable ) { } }
if ( android . os . Build . VERSION . SDK_INT < android . os . Build . VERSION_CODES . JELLY_BEAN ) { view . setBackgroundDrawable ( drawable ) ; } else { view . setBackground ( drawable ) ; }
public class PNMImageReader {
    /**
     * Fills the given array with shorts read from the input.
     *
     * TODO: Candidate util method
     *
     * @param input  the source to read from
     * @param shorts the destination array, filled completely
     * @throws IOException if reading fails or the stream ends early
     */
    private static void readFully (final DataInput input, final short[] shorts) throws IOException {
        if (input instanceof ImageInputStream) {
            // Fast path: ImageInputStream can bulk-read the whole array.
            ((ImageInputStream) input).readFully (shorts, 0, shorts.length);
            return;
        }
        // Generic DataInput: read one value at a time.
        for (int i = 0; i < shorts.length; i++) {
            shorts[i] = input.readShort ();
        }
    }
}
public class AbstractJaxRsResourceProvider { /** * Execute the method described by the requestBuilder and methodKey * @ param theRequestBuilder the requestBuilder that contains the information about the request * @ param methodKey the key determining the method to be executed * @ return the response */ private Response execute ( final Builder theRequestBuilder , final String methodKey ) throws IOException { } }
final JaxRsRequest theRequest = theRequestBuilder . build ( ) ; final BaseMethodBinding < ? > method = getBinding ( theRequest . getRestOperationType ( ) , methodKey ) ; try { return ( Response ) method . invokeServer ( this , theRequest ) ; } catch ( final Throwable theException ) { return handleException ( theRequest , theException ) ; }
public class PlanAssembler { /** * Generate a unique and correct plan for the current SQL statement context . * This method gets called repeatedly until it returns null , meaning there * are no more plans . * @ return A not - previously returned query plan or null if no more * computable plans . */ private CompiledPlan getNextPlan ( ) { } }
CompiledPlan retval ; AbstractParsedStmt nextStmt = null ; if ( m_parsedSelect != null ) { nextStmt = m_parsedSelect ; retval = getNextSelectPlan ( ) ; } else if ( m_parsedInsert != null ) { nextStmt = m_parsedInsert ; retval = getNextInsertPlan ( ) ; } else if ( m_parsedDelete != null ) { nextStmt = m_parsedDelete ; retval = getNextDeletePlan ( ) ; // note that for replicated tables , multi - fragment plans // need to divide the result by the number of partitions } else if ( m_parsedUpdate != null ) { nextStmt = m_parsedUpdate ; retval = getNextUpdatePlan ( ) ; } else if ( m_parsedUnion != null ) { nextStmt = m_parsedUnion ; retval = getNextUnionPlan ( ) ; } else if ( m_parsedSwap != null ) { nextStmt = m_parsedSwap ; retval = getNextSwapPlan ( ) ; } else if ( m_parsedMigrate != null ) { nextStmt = m_parsedMigrate ; retval = getNextMigratePlan ( ) ; } else { throw new RuntimeException ( "setupForNewPlans encountered unsupported statement type." ) ; } if ( retval == null || retval . rootPlanGraph == null ) { return null ; } assert ( nextStmt != null ) ; retval . setParameters ( nextStmt . getParameters ( ) ) ; return retval ;
public class AmazonApiGatewayClient { /** * Deletes a usage plan key and remove the underlying API key from the associated usage plan . * @ param deleteUsagePlanKeyRequest * The DELETE request to delete a usage plan key and remove the underlying API key from the associated usage * plan . * @ return Result of the DeleteUsagePlanKey operation returned by the service . * @ throws BadRequestException * The submitted request is not valid , for example , the input is incomplete or incorrect . See the * accompanying error message for details . * @ throws ConflictException * The request configuration has conflicts . For details , see the accompanying error message . * @ throws UnauthorizedException * The request is denied because the caller has insufficient permissions . * @ throws NotFoundException * The requested resource is not found . Make sure that the request URI is correct . * @ throws TooManyRequestsException * The request has reached its throttling limit . Retry after the specified time period . * @ sample AmazonApiGateway . DeleteUsagePlanKey */ @ Override public DeleteUsagePlanKeyResult deleteUsagePlanKey ( DeleteUsagePlanKeyRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDeleteUsagePlanKey ( request ) ;