signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ResourceHandler { /** * Creates a resource handler that serves from the file system if the directory exists ; otherwise from the class path . * A common use case is for when you want to serve from the file path at development time ( so you can update * files without restarting ) but at deploy time resources are packaged in an uber jar . * @ param fileRootIfExists A path to a directory holding static content , which may not exist , e . g . < code > src / main / resources / web < / code > * @ param classpathRoot A classpath path to a directory holding static content , e . g . < code > / web < / code > * @ return Returns a file - based resource handler builder or a classpath - based one . * @ deprecated Use { @ link ResourceHandlerBuilder # fileOrClasspath ( String , String ) } */ @ Deprecated public static ResourceHandler . Builder fileOrClasspath ( String fileRootIfExists , String classpathRoot ) { } }
Path path = Paths . get ( fileRootIfExists ) ; if ( Files . isDirectory ( path ) ) { return fileHandler ( path ) ; } else { return classpathHandler ( classpathRoot ) ; }
public class ChannelOutboundBuffer { /** * Add a flush to this { @ link ChannelOutboundBuffer } . This means all previous added messages are marked as flushed * and so you will be able to handle them . */ public void addFlush ( ) { } }
// There is no need to process all entries if there was already a flush before and no new messages // where added in the meantime . // See https : / / github . com / netty / netty / issues / 2577 Entry entry = unflushedEntry ; if ( entry != null ) { if ( flushedEntry == null ) { // there is no flushedEntry yet , so start with the entry flushedEntry = entry ; } do { flushed ++ ; if ( ! entry . promise . setUncancellable ( ) ) { // Was cancelled so make sure we free up memory and notify about the freed bytes int pending = entry . cancel ( ) ; decrementPendingOutboundBytes ( pending , false , true ) ; } entry = entry . next ; } while ( entry != null ) ; // All flushed so reset unflushedEntry unflushedEntry = null ; }
public class URepeated { /** * Gets the binding of the underlying identifier in the unifier . */ public JCExpression getUnderlyingBinding ( Unifier unifier ) { } }
return ( unifier == null ) ? null : unifier . getBinding ( new UFreeIdent . Key ( identifier ( ) ) ) ;
public class IOUtil {
    /**
     * Open a {@link javax.swing.JFileChooser} and return the selected file, or null if closed
     * or cancelled.
     *
     * @param title      the title of the file-chooser window
     * @param currentDir the root directory; if null, {@code new File(".")} will be used
     * @return the chosen file, or null if none was chosen
     */
    public static File chooseFile(String title, File currentDir) {
        JFileChooser chooser = new JFileChooser();
        chooser.setCurrentDirectory(currentDir == null ? new File(".") : currentDir);
        chooser.setDialogTitle(title);
        chooser.setMultiSelectionEnabled(false);
        // Modal dialog; blocks until the user picks a file or dismisses the dialog.
        if (chooser.showOpenDialog(null) == JFileChooser.APPROVE_OPTION) {
            return chooser.getSelectedFile();
        }
        return null;
    }
}
public class InternalXbaseWithAnnotationsParser {
    /**
     * Delegated rules.
     * ANTLR-generated syntactic predicate: speculatively parses the predicate fragment with
     * backtracking enabled and reports whether it matched, restoring the input position afterwards.
     */
    public final boolean synpred149_InternalXbaseWithAnnotations() {
        state.backtracking++;
        // Remember the current input position so we can rewind after the speculative parse.
        int start = input.mark();
        try {
            synpred149_InternalXbaseWithAnnotations_fragment(); // can never throw exception
        } catch (RecognitionException re) {
            System.err.println("impossible: " + re);
        }
        // While backtracking, a failed match sets state.failed instead of throwing.
        boolean success = !state.failed;
        input.rewind(start);
        state.backtracking--;
        state.failed = false;
        return success;
    }
}
public class WebhookResourcesImpl { /** * Gets the list of all Webhooks that the user owns ( if a user generated token was used to make the request ) * or the list of all Webhooks associated with the third - party app ( if a third - party app made the request ) . Items * in the response are ordered by API Client name , then Webhook name , then creation date . * It mirrors to the following Smartsheet REST API method : GET / webhooks * @ param paging the object containing the pagination parameters * @ return IndexResult object containing an array of Webhook objects . * @ throws IllegalArgumentException if any argument is null or empty string * @ throws InvalidRequestException if there is any problem with the REST API request * @ throws AuthorizationException if there is any problem with the REST API authorization ( access token ) * @ throws ResourceNotFoundException if the resource cannot be found * @ throws ServiceUnavailableException if the REST API service is not available ( possibly due to rate limiting ) * @ throws SmartsheetException if there is any other error during the operation */ public PagedResult < Webhook > listWebhooks ( PaginationParameters paging ) throws SmartsheetException { } }
String path = "webhooks" ; HashMap < String , Object > parameters = new HashMap < String , Object > ( ) ; if ( paging != null ) { parameters = paging . toHashMap ( ) ; } path += QueryUtil . generateUrl ( null , parameters ) ; return this . listResourcesWithWrapper ( path , Webhook . class ) ;
public class InHttp {
    /**
     * priority
     * Reads an HTTP/2 PRIORITY frame payload and discards it.
     *
     * @param length total payload length in bytes; everything past the first 5 bytes is skipped
     * @return always true (frame consumed)
     */
    private boolean readPriority(ReadStream is, int length, int streamId) throws IOException {
        // 4-byte stream dependency followed by a 1-byte weight; values are read to advance the
        // stream but are otherwise unused here — TODO confirm priority data is intentionally ignored.
        int streamRef = BitsUtil.readInt(is);
        int weight = is.read();
        // Skip any remaining payload bytes beyond the 5 consumed above.
        is.skip(length - 5);
        return true;
    }
}
public class ConfigurationUtils { /** * Returns a unified message for cases when the master hostname cannot be determined . * @ param serviceName the name of the service that couldn ' t run . i . e . Alluxio worker , fsadmin * shell , etc . * @ return a string with the message */ public static String getMasterHostNotConfiguredMessage ( String serviceName ) { } }
return getHostNotConfiguredMessage ( serviceName , "master" , PropertyKey . MASTER_HOSTNAME , PropertyKey . MASTER_EMBEDDED_JOURNAL_ADDRESSES ) ;
public class JTSCurveExpression { /** * The start Point of this Curve . * @ return start point */ public JTSPointExpression < Point > startPoint ( ) { } }
if ( startPoint == null ) { startPoint = JTSGeometryExpressions . pointOperation ( SpatialOps . START_POINT , mixin ) ; } return startPoint ;
public class XMLChecker {
    /**
     * Determines if the specified character matches the <em>Extender</em> production.
     * See: <a href="http://www.w3.org/TR/REC-xml#NT-Extender">Definition of Extender</a>.
     *
     * @param c the character to check
     * @return <code>true</code> if the character matches the production, or <code>false</code>
     *         if it does not
     */
    private static final boolean isExtender(char c) {
        // Individual code points first, then the three contiguous ranges.
        switch (c) {
            case 0x00B7:
            case 0x02D0:
            case 0x02D1:
            case 0x0387:
            case 0x0640:
            case 0x0E46:
            case 0x0EC6:
            case 0x3005:
                return true;
            default:
                return (c >= 0x3031 && c <= 0x3035)
                        || (c >= 0x309D && c <= 0x309E)
                        || (c >= 0x30FC && c <= 0x30FE);
        }
    }
}
public class CliClient {
    /**
     * DROP INDEX ON &lt;CF&gt;.&lt;COLUMN&gt;
     * Removes the secondary index from the named column, pushes the updated column family
     * definition to the cluster, and refreshes the cached keyspace definition.
     */
    private void executeDropIndex(Tree statement) throws TException, SchemaDisagreementException, InvalidRequestException, NotFoundException {
        if (!CliMain.isConnected() || !hasKeySpace())
            return;
        // getColumnFamily will check if CF exists for us
        String columnFamily = CliCompiler.getColumnFamily(statement, currentCfDefs());
        String rawColumName = CliUtils.unescapeSQLString(statement.getChild(1).getText());
        CfDef cfDef = getCfDef(columnFamily);
        ByteBuffer columnName = columnNameAsBytes(rawColumName, cfDef);
        boolean foundColumn = false;
        for (ColumnDef column : cfDef.getColumn_metadata()) {
            if (column.name.equals(columnName)) {
                foundColumn = true;
                // Dropping an index that doesn't exist is a user error, not a no-op.
                if (column.getIndex_type() == null)
                    throw new RuntimeException(String.format("Column '%s' does not have an index.", rawColumName));
                // Clearing both name and type removes the index from the definition.
                column.setIndex_name(null);
                column.setIndex_type(null);
            }
        }
        if (!foundColumn)
            throw new RuntimeException(String.format("Column '%s' definition was not found in ColumnFamily '%s'.", rawColumName, columnFamily));
        // Push the schema change and echo the resulting schema version to the user.
        String mySchemaVersion = thriftClient.system_update_column_family(cfDef);
        sessionState.out.println(mySchemaVersion);
        // Refresh the locally cached keyspace definition so later commands see the change.
        keyspacesMap.put(keySpace, thriftClient.describe_keyspace(keySpace));
    }
}
public class Handler { /** * { @ inheritDoc } */ public void start ( ) { } }
String existingProtocolPathPkgs = PrivilegedSystemHelper . getProperty ( "java.protocol.handler.pkgs" ) ; if ( existingProtocolPathPkgs == null ) PrivilegedSystemHelper . setProperty ( "java.protocol.handler.pkgs" , protocolPathPkg ) ; else if ( existingProtocolPathPkgs . indexOf ( protocolPathPkg ) == - 1 ) PrivilegedSystemHelper . setProperty ( "java.protocol.handler.pkgs" , existingProtocolPathPkgs + "|" + protocolPathPkg ) ;
public class AmazonConfigClient { /** * Returns a list of compliant and noncompliant rules with the number of resources for compliant and noncompliant * rules . * < note > * The results can return an empty result page , but if you have a nextToken , the results are displayed on the next * page . * < / note > * @ param describeAggregateComplianceByConfigRulesRequest * @ return Result of the DescribeAggregateComplianceByConfigRules operation returned by the service . * @ throws ValidationException * The requested action is not valid . * @ throws InvalidLimitException * The specified limit is outside the allowable range . * @ throws InvalidNextTokenException * The specified next token is invalid . Specify the < code > nextToken < / code > string that was returned in the * previous response to get the next page of results . * @ throws NoSuchConfigurationAggregatorException * You have specified a configuration aggregator that does not exist . * @ sample AmazonConfig . DescribeAggregateComplianceByConfigRules * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / config - 2014-11-12 / DescribeAggregateComplianceByConfigRules " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeAggregateComplianceByConfigRulesResult describeAggregateComplianceByConfigRules ( DescribeAggregateComplianceByConfigRulesRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeAggregateComplianceByConfigRules ( request ) ;
public class DBAccessFactory { /** * create an IDBAccess ( an accessor ) for a specific database . * @ param dbType the type of database to access . Can be * < br / > DBType . REMOTE or DBType . EMBEDDED or DBType . IN _ MEMORY * @ param properties to configure the database connection . * < br / > The appropriate database access class will pick the properties it needs . * < br / > See also : DBProperties interface for required and optional properties . * @ return an instance of IDBAccess */ public static IDBAccess createDBAccess ( DBType dbType , Properties properties ) { } }
return createDBAccess ( dbType , properties , null , null , null ) ;
public class HandleMetaClass {
    /**
     * this method mimics EMC behavior
     * Routes ExpandoMetaClass-style property access (static qualifier, constructor, or
     * not-yet-known properties) through the replaced delegate; everything else goes to the
     * wrapped meta class.
     */
    public Object getProperty(String property) {
        if (ExpandoMetaClass.isValidExpandoProperty(property)) {
            // Special EMC pseudo-properties, or properties unknown to the current meta class,
            // are resolved against the replacement delegate.
            if (property.equals(ExpandoMetaClass.STATIC_QUALIFIER) ||
                    property.equals(ExpandoMetaClass.CONSTRUCTOR) ||
                    myMetaClass.hasProperty(this, property) == null) {
                return replaceDelegate().getProperty(property);
            }
        }
        return myMetaClass.getProperty(this, property);
    }
}
public class GenericDraweeHierarchy {
    /**
     * Sets a new failure drawable with scale type.
     *
     * @param drawable  the drawable to show on failure
     * @param scaleType how the failure drawable should be scaled
     */
    public void setFailureImage(Drawable drawable, ScalingUtils.ScaleType scaleType) {
        // Install the drawable at the failure slot, then configure scaling on its wrapper.
        setChildDrawableAtIndex(FAILURE_IMAGE_INDEX, drawable);
        getScaleTypeDrawableAtIndex(FAILURE_IMAGE_INDEX).setScaleType(scaleType);
    }
}
public class TransactionWriteRequest { /** * Adds put operation ( to be executed on object ) to the list of transaction write operations . * transactionWriteExpression is used to conditionally put object . * returnValuesOnConditionCheckFailure specifies which attributes values ( of existing item ) should be returned if condition check fails . */ public TransactionWriteRequest addPut ( Object object , DynamoDBTransactionWriteExpression transactionWriteExpression , ReturnValuesOnConditionCheckFailure returnValuesOnConditionCheckFailure ) { } }
transactionWriteOperations . add ( new TransactionWriteOperation ( object , TransactionWriteOperationType . Put , transactionWriteExpression , returnValuesOnConditionCheckFailure ) ) ; return this ;
public class JavaSerializationTranscoder { /** * { @ inheritDoc } */ @ Override public byte [ ] serializeAttributes ( final MemcachedBackupSession session , final ConcurrentMap < String , Object > attributes ) { } }
if ( attributes == null ) { throw new NullPointerException ( "Can't serialize null" ) ; } ByteArrayOutputStream bos = null ; ObjectOutputStream oos = null ; try { bos = new ByteArrayOutputStream ( ) ; oos = new ObjectOutputStream ( bos ) ; writeAttributes ( session , attributes , oos ) ; return bos . toByteArray ( ) ; } catch ( final IOException e ) { throw new IllegalArgumentException ( "Non-serializable object" , e ) ; } finally { closeSilently ( bos ) ; closeSilently ( oos ) ; }
public class DialogPreference { /** * Obtains the maximum height of the preference ' s dialog from a specific typed array . * @ param typedArray * The typed array , the maximum height should be obtained from , as an instance of the * class { @ link TypedArray } . The typed array may not be null */ private void obtainDialogMaxHeight ( @ NonNull final TypedArray typedArray ) { } }
try { int maxHeight = typedArray . getDimensionPixelSize ( R . styleable . DialogPreference_dialogMaxHeight , - 1 ) ; if ( maxHeight != - 1 ) { setDialogMaxHeight ( maxHeight ) ; } } catch ( Resources . NotFoundException | UnsupportedOperationException e ) { // No need to handle }
public class ApiOvhDedicatedserver { /** * Alter this object properties * REST : PUT / dedicated / server / { serviceName } / spla / { id } * @ param body [ required ] New object properties * @ param serviceName [ required ] The internal name of your dedicated server * @ param id [ required ] License id */ public void serviceName_spla_id_PUT ( String serviceName , Long id , OvhSpla body ) throws IOException { } }
String qPath = "/dedicated/server/{serviceName}/spla/{id}" ; StringBuilder sb = path ( qPath , serviceName , id ) ; exec ( qPath , "PUT" , sb . toString ( ) , body ) ;
public class RestServerEndpoint { /** * Creates the upload dir if needed . */ @ VisibleForTesting static void createUploadDir ( final Path uploadDir , final Logger log , final boolean initialCreation ) throws IOException { } }
if ( ! Files . exists ( uploadDir ) ) { if ( initialCreation ) { log . info ( "Upload directory {} does not exist. " + uploadDir ) ; } else { log . warn ( "Upload directory {} has been deleted externally. " + "Previously uploaded files are no longer available." , uploadDir ) ; } checkAndCreateUploadDir ( uploadDir , log ) ; }
public class druidGLexer {
    /**
     * $ANTLR start "VALUES"
     * ANTLR-generated lexer rule matching the VALUES keyword in either all-uppercase or
     * all-lowercase form.
     */
    public final void mVALUES() throws RecognitionException {
        try {
            int _type = VALUES;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // druidG.g:585:17: ( ( 'VALUES' | 'values' ) )
            // druidG.g:585:18: ( 'VALUES' | 'values' )
            {
                // druidG.g:585:18: ( 'VALUES' | 'values' )
                int alt5 = 2;
                // Decide between the two alternatives on the first character only.
                int LA5_0 = input.LA(1);
                if ((LA5_0 == 'V')) {
                    alt5 = 1;
                } else if ((LA5_0 == 'v')) {
                    alt5 = 2;
                } else {
                    NoViableAltException nvae = new NoViableAltException("", 5, 0, input);
                    throw nvae;
                }
                switch (alt5) {
                    case 1:
                        // druidG.g:585:19: 'VALUES'
                        {
                            match("VALUES");
                        }
                        break;
                    case 2:
                        // druidG.g:585:28: 'values'
                        {
                            match("values");
                        }
                        break;
                }
            }
            state.type = _type;
            state.channel = _channel;
        } finally {
            // do for sure before leaving
        }
    }
}
public class Element { /** * Find elements that have attributes whose values match the supplied regular expression . * @ param key name of the attribute * @ param pattern compiled regular expression to match against attribute values * @ return elements that have attributes matching this regular expression */ public Elements getElementsByAttributeValueMatching ( String key , Pattern pattern ) { } }
return Collector . collect ( new Evaluator . AttributeWithValueMatching ( key , pattern ) , this ) ;
public class EventReaderFactory {
    /**
     * Validate input parameters.
     * Fails fast with a descriptive message if any required collaborator has not been supplied.
     */
    private void validate() {
        LibraryUtils.checkArgumentNotNull(config, "Configuration is null.");
        LibraryUtils.checkArgumentNotNull(eventsProcessor, "Events Processor is null.");
        LibraryUtils.checkArgumentNotNull(sourceFilter, "Source Filter is null.");
        LibraryUtils.checkArgumentNotNull(eventFilter, "Event Filter is null.");
        LibraryUtils.checkArgumentNotNull(progressReporter, "Progress Reporter is null.");
        LibraryUtils.checkArgumentNotNull(exceptionHandler, "Exception Handler is null.");
        LibraryUtils.checkArgumentNotNull(s3Manager, "S3 Manager is null.");
        LibraryUtils.checkArgumentNotNull(sqsManager, "SQS Manager is null.");
    }
}
public class HtmlReport { /** * returns null if no failure */ private RunFailure getRunFailure ( RootMethodRunResult runResult ) { } }
if ( runResult == null || runResult . getRunFailures ( ) . size ( ) == 0 ) { return null ; // no failure } // multiple run failures in one test method are not supported yet return runResult . getRunFailures ( ) . get ( 0 ) ;
public class HashUtilities { /** * Generates a MD5 Hash for a specific byte [ ] * @ param input The byte [ ] to be converted into an MD5 hash . * @ return The MD5 Hash string of the input string . */ public static String generateMD5 ( final byte [ ] input ) { } }
try { final MessageDigest messageDigest = MessageDigest . getInstance ( "MD5" ) ; messageDigest . reset ( ) ; byte [ ] digest = messageDigest . digest ( input ) ; return new String ( Hex . encodeHex ( digest ) ) ; } catch ( Exception e ) { LOG . debug ( "An error occurred generating the MD5 Hash of the input string" , e ) ; return null ; }
public class MpscRelaxedArrayQueue {
    /**
     * Validate a producer claim to find out if it is an overclaim (beyond the producer limit).
     *
     * @return {@code true} if the claim is valid, {@code false} otherwise.
     */
    private boolean validateProducerClaim(final int activeCycleIndex, final long producerCycleClaim, final long cycleId, final int positionOnCycle, final int cycleLengthLog2, final boolean slowProducer) {
        // Translate the (cycle, position) claim into an absolute producer position.
        final long producerPosition = producerPosition(positionOnCycle, cycleId, cycleLengthLog2);
        final long claimLimit = lvProducerLimit();
        if (producerPosition >= claimLimit) {
            // Past the cached limit — it is only an overclaim if the queue is really full;
            // the limit may just be stale.
            if (isFull(producerPosition)) {
                // Over-claimed: roll the claim back / repair producer state.
                return fixProducerOverClaim(activeCycleIndex, producerCycleClaim, slowProducer);
            }
        }
        return true;
    }
}
public class PrimaryBackupServerContext {
    /**
     * Registers message listeners.
     * Wires each protocol message type to its handler method on this context.
     */
    private void registerListeners() {
        protocol.registerExecuteHandler(this::execute);
        protocol.registerBackupHandler(this::backup);
        protocol.registerRestoreHandler(this::restore);
        protocol.registerCloseHandler(this::close);
        protocol.registerMetadataHandler(this::metadata);
    }
}
public class URI { /** * Set the raw - escaped fragment . * @ param escapedFragment the raw - escaped fragment * @ throws URIException escaped fragment not valid */ public void setRawFragment ( char [ ] escapedFragment ) throws URIException { } }
if ( escapedFragment == null || escapedFragment . length == 0 ) { _fragment = escapedFragment ; hash = 0 ; return ; } if ( ! validate ( escapedFragment , fragment ) ) { throw new URIException ( URIException . ESCAPING , "escaped fragment not valid" ) ; } _fragment = escapedFragment ; hash = 0 ;
public class MapPropertyGroup { /** * ~ - - - set methods - - - - - */ @ Override public void setGroupedProperties ( SecurityContext securityContext , PropertyMap source , GraphObject destination ) throws FrameworkException { } }
if ( source == null ) { for ( PropertyKey key : propertyKeys ) { destination . setProperty ( key , null ) ; } return ; } for ( Entry < PropertyKey , Object > entry : source . entrySet ( ) ) { destination . setProperty ( entry . getKey ( ) , entry . getValue ( ) ) ; }
public class DTMDefaultBase {
    /**
     * Given a node identity for an attribute, advance to the next attribute.
     *
     * @param identity int identity of the attribute node. This <strong>must</strong> be an
     *                 attribute node.
     * @return int DTM node-identity of the resolved attr, or DTM.NULL to indicate none exists.
     */
    protected int getNextAttributeIdentity(int identity) {
        // Assume that attributes and namespace nodes immediately follow the element.
        while (DTM.NULL != (identity = getNextNodeIdentity(identity))) {
            int type = _type(identity);
            if (type == DTM.ATTRIBUTE_NODE) {
                return identity;
            } else if (type != DTM.NAMESPACE_NODE) {
                // Any node other than attribute/namespace ends the attribute run.
                break;
            }
            // Namespace nodes are skipped; keep scanning for the next attribute.
        }
        return DTM.NULL;
    }
}
public class QueryRunner {
    /**
     * Throws a new exception with a more informative error message.
     *
     * @param cause  The original exception that will be chained to the new exception when it's
     *               rethrown.
     * @param sql    The query that was executing when the exception happened.
     * @param params The query replacement parameters; <code>null</code> is a valid value to
     *               pass in.
     * @throws java.sql.SQLException if a database access error occurs
     */
    protected void rethrow(SQLException cause, String sql, Object[] params) throws SQLException {
        // StringBuilder instead of StringBuffer: purely local, no synchronization needed.
        StringBuilder msg = new StringBuilder(cause.getMessage());
        msg.append(" Query: ");
        msg.append(sql);
        msg.append(" Parameters: ");
        if (params == null) {
            msg.append("[]");
        } else {
            msg.append(Arrays.asList(params));
        }
        // Preserve the original SQL state, error code, and exception chain.
        SQLException e = new SQLException(msg.toString(), cause.getSQLState(), cause.getErrorCode());
        e.setNextException(cause);
        throw e;
    }
}
public class ApiClient { /** * Execute HTTP call and deserialize the HTTP response body into the given return type . * @ param returnType The return type used to deserialize HTTP response body * @ param < T > The return type corresponding to ( same with ) returnType * @ param call Call * @ return ApiResponse object containing response status , headers and * data , which is a Java object deserialized from response body and would be null * when returnType is null . * @ throws ApiException If fail to execute the call */ public < T > ApiResponse < T > execute ( Call call , Type returnType ) throws ApiException { } }
try { Response response = call . execute ( ) ; T data = handleResponse ( response , returnType ) ; return new ApiResponse < T > ( response . code ( ) , response . headers ( ) . toMultimap ( ) , data ) ; } catch ( IOException e ) { throw new ApiException ( e ) ; }
public class DatabaseAccountsInner { /** * Offline the specified region for the specified Azure Cosmos DB database account . * @ param resourceGroupName Name of an Azure resource group . * @ param accountName Cosmos DB database account name . * @ param region Cosmos DB region , with spaces between words and each word capitalized . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < Void > offlineRegionAsync ( String resourceGroupName , String accountName , String region , final ServiceCallback < Void > serviceCallback ) { } }
return ServiceFuture . fromResponse ( offlineRegionWithServiceResponseAsync ( resourceGroupName , accountName , region ) , serviceCallback ) ;
public class StaticTypeCheckingVisitor {
    /**
     * Given a field node, checks if we are calling a private field from an inner class.
     * When a private field is accessed from outside its declaring class (or from within a
     * closure) but within the same module, the access is recorded so a bridge/accessor can
     * be generated.
     */
    private void checkOrMarkPrivateAccess(Expression source, FieldNode fn) {
        // Conditions: field exists and is private, the access site is either a different class
        // or inside a closure, and both classes live in the same module (so marking helps).
        if (fn != null && Modifier.isPrivate(fn.getModifiers()) &&
                (fn.getDeclaringClass() != typeCheckingContext.getEnclosingClassNode() ||
                        typeCheckingContext.getEnclosingClosure() != null) &&
                fn.getDeclaringClass().getModule() == typeCheckingContext.getEnclosingClassNode().getModule()) {
            addPrivateFieldOrMethodAccess(source, fn.getDeclaringClass(), StaticTypesMarker.PV_FIELDS_ACCESS, fn);
        }
    }
}
public class CommerceSubscriptionEntryLocalServiceWrapper { /** * Returns a range of all the commerce subscription entries . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link com . liferay . portal . kernel . dao . orm . QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link com . liferay . portal . kernel . dao . orm . QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link com . liferay . commerce . model . impl . CommerceSubscriptionEntryModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param start the lower bound of the range of commerce subscription entries * @ param end the upper bound of the range of commerce subscription entries ( not inclusive ) * @ return the range of commerce subscription entries */ @ Override public java . util . List < com . liferay . commerce . model . CommerceSubscriptionEntry > getCommerceSubscriptionEntries ( int start , int end ) { } }
return _commerceSubscriptionEntryLocalService . getCommerceSubscriptionEntries ( start , end ) ;
public class ServletContextAccess { /** * / * ( non - Javadoc ) * @ see com . att . cadi . PropAccess # log ( com . att . cadi . Access . Level , java . lang . Object [ ] ) */ @ Override public void log ( Level level , Object ... elements ) { } }
if ( willLog ( level ) ) { StringBuilder sb = buildMsg ( level , elements ) ; context . log ( sb . toString ( ) ) ; }
public class SearchQuery { /** * Exports the whole query as a { @ link JsonObject } . * @ see # injectParams ( JsonObject ) for the part that deals with global parameters * @ see AbstractFtsQuery # injectParamsAndBoost ( JsonObject ) for the part that deals with the " query " entry */ public JsonObject export ( ) { } }
JsonObject result = JsonObject . create ( ) ; injectParams ( result ) ; JsonObject queryJson = JsonObject . create ( ) ; queryPart . injectParamsAndBoost ( queryJson ) ; return result . put ( "query" , queryJson ) ;
public class QueryParserKraken {
    /**
     * Parses an AND expression: a sequence of NOT-expressions joined by AND tokens.
     * The first non-AND token encountered is pushed back for the caller.
     */
    private ExprKraken parseAndExpr() {
        // Removed commented-out remnants of an old "_andExpr" field-based implementation;
        // the expression is now built purely from locals.
        AndExpr andExpr = new AndExpr();
        andExpr.add(parseNotExpr());
        while (true) {
            Token token = scanToken();
            switch (token) {
                case AND:
                    andExpr.add(parseNotExpr());
                    break;
                default:
                    // Push back the lookahead token and collapse a single-term AND.
                    _token = token;
                    return andExpr.getSingleExpr();
            }
        }
    }
}
public class LinkedTransferQueue {
    /**
     * Version of xfer for poll() and tryTransfer, which simplifies control paths both here and
     * in xfer.
     * Attempts to fulfill a waiting node of the opposite mode without blocking; returns null
     * when no complementary node is available.
     */
    private Object fulfill(Object e) {
        // Data mode: non-null e offers an item; null e requests one.
        boolean isData = e != null;
        final PaddedAtomicReference<QNode> head = this.head;
        final PaddedAtomicReference<QNode> tail = this.tail;
        for (;;) {
            QNode t = tail.get();
            QNode h = head.get();
            if (t != null && (t == h || t.isData == isData)) {
                // Queue empty or holds same-mode nodes: nothing to fulfill.
                QNode last = t.next;
                if (t == tail.get()) { // re-read to guard against a concurrent tail move
                    if (last != null) {
                        // Tail is lagging; help advance it and retry.
                        tail.compareAndSet(t, last);
                    } else {
                        return null;
                    }
                }
            } else if (h != null) {
                // Opposite-mode node at the head: try to match it.
                QNode first = h.next;
                if (t == tail.get() && first != null && advanceHead(h, first)) {
                    Object x = first.get();
                    // CAS the node's item to complete the exchange; x != first means not yet cancelled.
                    if (x != first && first.compareAndSet(x, e)) {
                        LockSupport.unpark(first.waiter);
                        return isData ? e : x;
                    }
                }
            }
        }
    }
}
public class Collections {
    /**
     * Reverses the order of the elements in the specified list.<p>
     * This method runs in linear time.
     *
     * @param list the list whose elements are to be reversed.
     * @throws UnsupportedOperationException if the specified list or its list-iterator does
     *         not support the <tt>set</tt> operation.
     */
    @SuppressWarnings({"rawtypes", "unchecked"})
    public static void reverse(List<?> list) {
        int size = list.size();
        // Small or random-access lists: index-based swap from both ends toward the middle.
        if (size < REVERSE_THRESHOLD || list instanceof RandomAccess) {
            for (int i = 0, mid = size >> 1, j = size - 1; i < mid; i++, j--)
                swap(list, i, j);
        } else {
            // Sequential-access lists: two iterators converging to avoid O(n^2) indexed access.
            // instead of using a raw type here, it's possible to capture
            // the wildcard but it will require a call to a supplementary
            // private method
            ListIterator fwd = list.listIterator();
            ListIterator rev = list.listIterator(size);
            for (int i = 0, mid = list.size() >> 1; i < mid; i++) {
                Object tmp = fwd.next();
                fwd.set(rev.previous());
                rev.set(tmp);
            }
        }
    }
}
public class GetReservedInstancesExchangeQuoteRequest { /** * The configuration of the target Convertible Reserved Instance to exchange for your current Convertible Reserved * Instances . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setTargetConfigurations ( java . util . Collection ) } or { @ link # withTargetConfigurations ( java . util . Collection ) } * if you want to override the existing values . * @ param targetConfigurations * The configuration of the target Convertible Reserved Instance to exchange for your current Convertible * Reserved Instances . * @ return Returns a reference to this object so that method calls can be chained together . */ public GetReservedInstancesExchangeQuoteRequest withTargetConfigurations ( TargetConfigurationRequest ... targetConfigurations ) { } }
if ( this . targetConfigurations == null ) { setTargetConfigurations ( new com . amazonaws . internal . SdkInternalList < TargetConfigurationRequest > ( targetConfigurations . length ) ) ; } for ( TargetConfigurationRequest ele : targetConfigurations ) { this . targetConfigurations . add ( ele ) ; } return this ;
public class StartContainersInner { /** * Starts the exec command for a specific container instance . * Starts the exec command for a specified container instance in a specified resource group and container group . * @ param resourceGroupName The name of the resource group . * @ param containerGroupName The name of the container group . * @ param containerName The name of the container instance . * @ param containerExecRequest The request for the exec command . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the ContainerExecResponseInner object */ public Observable < ContainerExecResponseInner > launchExecAsync ( String resourceGroupName , String containerGroupName , String containerName , ContainerExecRequest containerExecRequest ) { } }
return launchExecWithServiceResponseAsync ( resourceGroupName , containerGroupName , containerName , containerExecRequest ) . map ( new Func1 < ServiceResponse < ContainerExecResponseInner > , ContainerExecResponseInner > ( ) { @ Override public ContainerExecResponseInner call ( ServiceResponse < ContainerExecResponseInner > response ) { return response . body ( ) ; } } ) ;
public class CFFFontSubset { /** * The function builds the new output stream according to the subset process * @ param Font the font * @ return the subsetted font stream */ protected byte [ ] BuildNewFile ( int Font ) { } }
// Prepare linked list for new font components OutputList = new LinkedList ( ) ; // copy the header of the font CopyHeader ( ) ; // create a name index BuildIndexHeader ( 1 , 1 , 1 ) ; OutputList . addLast ( new UInt8Item ( ( char ) ( 1 + fonts [ Font ] . name . length ( ) ) ) ) ; OutputList . addLast ( new StringItem ( fonts [ Font ] . name ) ) ; // create the topdict Index BuildIndexHeader ( 1 , 2 , 1 ) ; OffsetItem topdictIndex1Ref = new IndexOffsetItem ( 2 ) ; OutputList . addLast ( topdictIndex1Ref ) ; IndexBaseItem topdictBase = new IndexBaseItem ( ) ; OutputList . addLast ( topdictBase ) ; // Initialize the Dict Items for later use OffsetItem charsetRef = new DictOffsetItem ( ) ; OffsetItem charstringsRef = new DictOffsetItem ( ) ; OffsetItem fdarrayRef = new DictOffsetItem ( ) ; OffsetItem fdselectRef = new DictOffsetItem ( ) ; OffsetItem privateRef = new DictOffsetItem ( ) ; // If the font is not CID create the following keys if ( ! fonts [ Font ] . isCID ) { // create a ROS key OutputList . addLast ( new DictNumberItem ( fonts [ Font ] . nstrings ) ) ; OutputList . addLast ( new DictNumberItem ( fonts [ Font ] . nstrings + 1 ) ) ; OutputList . addLast ( new DictNumberItem ( 0 ) ) ; OutputList . addLast ( new UInt8Item ( ( char ) 12 ) ) ; OutputList . addLast ( new UInt8Item ( ( char ) 30 ) ) ; // create a CIDCount key OutputList . addLast ( new DictNumberItem ( fonts [ Font ] . nglyphs ) ) ; OutputList . addLast ( new UInt8Item ( ( char ) 12 ) ) ; OutputList . addLast ( new UInt8Item ( ( char ) 34 ) ) ; // Sivan ' s comments // What about UIDBase ( 12,35 ) ? Don ' t know what is it . // I don ' t think we need FontName ; the font I looked at didn ' t have it . 
} // Go to the TopDict of the font being processed seek ( topdictOffsets [ Font ] ) ; // Run until the end of the TopDict while ( getPosition ( ) < topdictOffsets [ Font + 1 ] ) { int p1 = getPosition ( ) ; getDictItem ( ) ; int p2 = getPosition ( ) ; // The encoding key is disregarded since CID has no encoding if ( key == "Encoding" // These keys will be added manually by the process . || key == "Private" || key == "FDSelect" || key == "FDArray" || key == "charset" || key == "CharStrings" ) { } else { // OtherWise copy key " as is " to the output list OutputList . add ( new RangeItem ( buf , p1 , p2 - p1 ) ) ; } } // Create the FDArray , FDSelect , Charset and CharStrings Keys CreateKeys ( fdarrayRef , fdselectRef , charsetRef , charstringsRef ) ; // Mark the end of the top dict area OutputList . addLast ( new IndexMarkerItem ( topdictIndex1Ref , topdictBase ) ) ; // Copy the string index if ( fonts [ Font ] . isCID ) OutputList . addLast ( getEntireIndexRange ( stringIndexOffset ) ) ; // If the font is not CID we need to append new strings . // We need 3 more strings : Registry , Ordering , and a FontName for one FD . // The total length is at most " Adobe " + " Identity " + 63 = 76 else CreateNewStringIndex ( Font ) ; // copy the new subsetted global subroutine index OutputList . addLast ( new RangeItem ( new RandomAccessFileOrArray ( NewGSubrsIndex ) , 0 , NewGSubrsIndex . length ) ) ; // deal with fdarray , fdselect , and the font descriptors // If the font is CID : if ( fonts [ Font ] . isCID ) { // copy the FDArray , FDSelect , charset // Copy FDSelect // Mark the beginning OutputList . addLast ( new MarkerItem ( fdselectRef ) ) ; // If an FDSelect exists copy it if ( fonts [ Font ] . fdselectOffset >= 0 ) OutputList . addLast ( new RangeItem ( buf , fonts [ Font ] . fdselectOffset , fonts [ Font ] . FDSelectLength ) ) ; // Else create a new one else CreateFDSelect ( fdselectRef , fonts [ Font ] . 
nglyphs ) ; // Copy the Charset // Mark the beginning and copy entirely OutputList . addLast ( new MarkerItem ( charsetRef ) ) ; OutputList . addLast ( new RangeItem ( buf , fonts [ Font ] . charsetOffset , fonts [ Font ] . CharsetLength ) ) ; // Copy the FDArray // If an FDArray exists if ( fonts [ Font ] . fdarrayOffset >= 0 ) { // Mark the beginning OutputList . addLast ( new MarkerItem ( fdarrayRef ) ) ; // Build a new FDArray with its private dicts and their LSubrs Reconstruct ( Font ) ; } else // Else create a new one CreateFDArray ( fdarrayRef , privateRef , Font ) ; } // If the font is not CID else { // create FDSelect CreateFDSelect ( fdselectRef , fonts [ Font ] . nglyphs ) ; // recreate a new charset CreateCharset ( charsetRef , fonts [ Font ] . nglyphs ) ; // create a font dict index ( fdarray ) CreateFDArray ( fdarrayRef , privateRef , Font ) ; } // if a private dict exists insert its subsetted version if ( fonts [ Font ] . privateOffset >= 0 ) { // Mark the beginning of the private dict IndexBaseItem PrivateBase = new IndexBaseItem ( ) ; OutputList . addLast ( PrivateBase ) ; OutputList . addLast ( new MarkerItem ( privateRef ) ) ; OffsetItem Subr = new DictOffsetItem ( ) ; // Build and copy the new private dict CreateNonCIDPrivate ( Font , Subr ) ; // Copy the new LSubrs index CreateNonCIDSubrs ( Font , PrivateBase , Subr ) ; } // copy the charstring index OutputList . addLast ( new MarkerItem ( charstringsRef ) ) ; // Add the subsetted charstring OutputList . addLast ( new RangeItem ( new RandomAccessFileOrArray ( NewCharStringsIndex ) , 0 , NewCharStringsIndex . length ) ) ; // now create the new CFF font int [ ] currentOffset = new int [ 1 ] ; currentOffset [ 0 ] = 0 ; // Count and save the offset for each item Iterator listIter = OutputList . iterator ( ) ; while ( listIter . hasNext ( ) ) { Item item = ( Item ) listIter . next ( ) ; item . increment ( currentOffset ) ; } // Compute the Xref for each of the offset items listIter = OutputList . 
iterator ( ) ; while ( listIter . hasNext ( ) ) { Item item = ( Item ) listIter . next ( ) ; item . xref ( ) ; } int size = currentOffset [ 0 ] ; byte [ ] b = new byte [ size ] ; // Emit all the items into the new byte array listIter = OutputList . iterator ( ) ; while ( listIter . hasNext ( ) ) { Item item = ( Item ) listIter . next ( ) ; item . emit ( b ) ; } // Return the new stream return b ;
public class DeviceProxy { public List < PipeInfo > getPipeConfig ( String [ ] pipeNames ) throws DevFailed { } }
ArrayList < String > list = new ArrayList < String > ( pipeNames . length ) ; Collections . addAll ( list , pipeNames ) ; return getPipeConfig ( list ) ;
public class BaseRichMediaStudioCreative {
    /**
     * Sets the billingAttribute value for this BaseRichMediaStudioCreative.
     *
     * @param billingAttribute
     *            The billing attribute associated with this creative. This attribute
     *            is read only (server-populated; a value set here is not expected to
     *            be persisted by the ad server).
     */
    public void setBillingAttribute(com.google.api.ads.admanager.axis.v201805.RichMediaStudioCreativeBillingAttribute billingAttribute) {
        // Plain field assignment; no validation or defensive copy is performed.
        this.billingAttribute = billingAttribute;
    }
}
public class SetTopBoxCreative {
    /**
     * Sets the licenseWindowEndDateTime value for this SetTopBoxCreative.
     *
     * @param licenseWindowEndDateTime
     *            The date and time that this creative can no longer be served
     *            from a local cable video-on-demand server. This attribute is
     *            optional (may be {@code null}).
     */
    public void setLicenseWindowEndDateTime(com.google.api.ads.admanager.axis.v201808.DateTime licenseWindowEndDateTime) {
        // Plain field assignment; no validation or defensive copy is performed.
        this.licenseWindowEndDateTime = licenseWindowEndDateTime;
    }
}
public class BaiduChannelResponseMarshaller {
    /**
     * Marshall the given parameter object.
     * <p>
     * Emits each field of the {@link BaiduChannelResponse} through the protocol
     * marshaller using its pre-built binding. The field order below matches the
     * generated wire format and must not be changed.
     *
     * @param baiduChannelResponse the object to marshall; must not be {@code null}
     * @param protocolMarshaller the marshaller that serializes each field
     * @throws SdkClientException if the argument is {@code null} or any field fails to marshall
     */
    public void marshall(BaiduChannelResponse baiduChannelResponse, ProtocolMarshaller protocolMarshaller) {
        if (baiduChannelResponse == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // One marshall call per field, in generated binding order.
            protocolMarshaller.marshall(baiduChannelResponse.getApplicationId(), APPLICATIONID_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getCreationDate(), CREATIONDATE_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getCredential(), CREDENTIAL_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getEnabled(), ENABLED_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getHasCredential(), HASCREDENTIAL_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getId(), ID_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getIsArchived(), ISARCHIVED_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getLastModifiedBy(), LASTMODIFIEDBY_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getLastModifiedDate(), LASTMODIFIEDDATE_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getPlatform(), PLATFORM_BINDING);
            protocolMarshaller.marshall(baiduChannelResponse.getVersion(), VERSION_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class FastList { /** * Adds specified object to the list and increments size * @ param object * the object to add to the list * @ throws NullPointerException * if a null object is supplied */ public synchronized int add ( E object ) { } }
if ( object == null ) throw new NullPointerException ( "FastList add called with null" ) ; if ( ( count + 1 ) >= maxCount ) resize ( capacity * 2 ) ; int initialAddIndex = addIndex ; // find right spot to add to - start with addIndex and look // for first open spot . give up if we get back to initialAddIndex while ( listElements [ addIndex ] != null ) { addIndex ++ ; if ( addIndex == capacity ) addIndex = 0 ; if ( addIndex == initialAddIndex ) { // should not happen - we should have resized if we needed more // capacity throw new RuntimeException ( "FastList out of space" ) ; } } count ++ ; listElements [ addIndex ] = object ; return addIndex ;
public class DeploymentGroupInfoMarshaller {
    /**
     * Marshall the given parameter object.
     * <p>
     * Emits each field of the {@link DeploymentGroupInfo} through the protocol
     * marshaller using its pre-built binding. The field order below matches the
     * generated wire format and must not be changed.
     *
     * @param deploymentGroupInfo the object to marshall; must not be {@code null}
     * @param protocolMarshaller the marshaller that serializes each field
     * @throws SdkClientException if the argument is {@code null} or any field fails to marshall
     */
    public void marshall(DeploymentGroupInfo deploymentGroupInfo, ProtocolMarshaller protocolMarshaller) {
        if (deploymentGroupInfo == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // One marshall call per field, in generated binding order.
            protocolMarshaller.marshall(deploymentGroupInfo.getApplicationName(), APPLICATIONNAME_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getDeploymentGroupId(), DEPLOYMENTGROUPID_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getDeploymentGroupName(), DEPLOYMENTGROUPNAME_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getDeploymentConfigName(), DEPLOYMENTCONFIGNAME_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getEc2TagFilters(), EC2TAGFILTERS_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getOnPremisesInstanceTagFilters(), ONPREMISESINSTANCETAGFILTERS_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getAutoScalingGroups(), AUTOSCALINGGROUPS_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getServiceRoleArn(), SERVICEROLEARN_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getTargetRevision(), TARGETREVISION_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getTriggerConfigurations(), TRIGGERCONFIGURATIONS_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getAlarmConfiguration(), ALARMCONFIGURATION_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getAutoRollbackConfiguration(), AUTOROLLBACKCONFIGURATION_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getDeploymentStyle(), DEPLOYMENTSTYLE_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getBlueGreenDeploymentConfiguration(), BLUEGREENDEPLOYMENTCONFIGURATION_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getLoadBalancerInfo(), LOADBALANCERINFO_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getLastSuccessfulDeployment(), LASTSUCCESSFULDEPLOYMENT_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getLastAttemptedDeployment(), LASTATTEMPTEDDEPLOYMENT_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getEc2TagSet(), EC2TAGSET_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getOnPremisesTagSet(), ONPREMISESTAGSET_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getComputePlatform(), COMPUTEPLATFORM_BINDING);
            protocolMarshaller.marshall(deploymentGroupInfo.getEcsServices(), ECSSERVICES_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class LookupTable { /** * Remove the object from the LookupTable which is referenced by the * supplied Index value . * @ param theIndex * an int containing the index value of the element to remove * from the LookupTable * @ return the Object which was removed from the LookupTable - null if no * object was removed . */ public synchronized Object removeElement ( int theIndex ) { } }
if ( theIndex < 0 || theIndex > currentCapacity - 1 ) { throw new IllegalArgumentException ( "Index is out of range." ) ; } Object theElement = theElementArray [ theIndex ] ; if ( theElement != null ) { theElementArray [ theIndex ] = null ; elementCount -- ; occupiedSlots . clear ( theIndex ) ; } return theElement ;
public class AssimilateForeignLogging { /** * Assimilate a small set of logging frameworks . */ public static synchronized void assimilate ( ) { } }
if ( assimilated ) { return ; } assimilated = true ; // Assimilate java . util . logging final Logger rootLogger = LogManager . getLogManager ( ) . getLogger ( "" ) ; final Handler [ ] handlers = rootLogger . getHandlers ( ) ; if ( handlers != null ) { for ( Handler handler : handlers ) { rootLogger . removeHandler ( handler ) ; } } SLF4JBridgeHandler . install ( ) ; LoggerContext lc = ( LoggerContext ) LoggerFactory . getILoggerFactory ( ) ; lc . addListener ( new LevelChangePropagator ( ) ) ; try { lc . addListener ( new JMXConfigurator ( lc , ManagementFactory . getPlatformMBeanServer ( ) , new ObjectName ( "com.opentable.logging:name=LogbackConfig" ) ) ) ; } catch ( MalformedObjectNameException e ) { throw new RuntimeException ( e ) ; } LoggerFactory . getLogger ( AssimilateForeignLogging . class ) . info ( "java.util.logging was assimilated." ) ;
public class FSEditLog { /** * Retrieve the implementation class for a Journal scheme . * @ param conf The configuration to retrieve the information from * @ param uriScheme The uri scheme to look up . * @ return the class of the journal implementation * @ throws IllegalArgumentException if no class is configured for uri */ static Class < ? extends JournalManager > getJournalClass ( Configuration conf , String uriScheme ) { } }
String key = "dfs.name.edits.journal-plugin" + "." + uriScheme ; Class < ? extends JournalManager > clazz = null ; try { clazz = conf . getClass ( key , null , JournalManager . class ) ; } catch ( RuntimeException re ) { throw new IllegalArgumentException ( "Invalid class specified for " + uriScheme , re ) ; } if ( clazz == null ) { LOG . warn ( "No class configured for " + uriScheme + ", " + key + " is empty" ) ; throw new IllegalArgumentException ( "No class configured for " + uriScheme ) ; } return clazz ;
public class ChunkMapReader { /** * Before combining topics in a branch , ensure any descendant topicref with @ chunk and no @ href has a stub */ private void createChildTopicrefStubs ( final List < Element > topicrefs ) { } }
if ( ! topicrefs . isEmpty ( ) ) { for ( final Element currentElem : topicrefs ) { final String href = getValue ( currentElem , ATTRIBUTE_NAME_HREF ) ; final String chunk = getValue ( currentElem , ATTRIBUTE_NAME_CHUNK ) ; if ( href == null && chunk != null ) { generateStumpTopic ( currentElem ) ; } createChildTopicrefStubs ( getChildElements ( currentElem , MAP_TOPICREF ) ) ; } }
public class UpdateAppProfileRequest { /** * Builds a new update request using an existing AppProfile . * < p > This variant is recommended over { @ link # of ( String , String ) } because it provides optimistic * concurrency control using etags . */ public static UpdateAppProfileRequest of ( @ Nonnull AppProfile appProfile ) { } }
return new UpdateAppProfileRequest ( appProfile . getInstanceId ( ) , appProfile . getId ( ) , com . google . bigtable . admin . v2 . UpdateAppProfileRequest . newBuilder ( ) . setAppProfile ( appProfile . toProto ( ) ) ) ;
public class SegmentedJournal { /** * Opens a new Raft log reader with the given reader mode . * @ param index The index from which to begin reading entries . * @ param mode The mode in which to read entries . * @ return The Raft log reader . */ public SegmentedJournalReader < E > openReader ( long index , SegmentedJournalReader . Mode mode ) { } }
SegmentedJournalReader < E > reader = new SegmentedJournalReader < > ( this , index , mode ) ; readers . add ( reader ) ; return reader ;
public class R2RMLManager { /** * Get OBDA mapping body terms from R2RML TriplesMap * @ param tm * @ return * @ throws Exception */ private ImmutableList < TargetAtom > getMappingTripleAtoms ( TriplesMap tm ) throws Exception { } }
// the body to return ImmutableList . Builder < TargetAtom > bodyBuilder = ImmutableList . builder ( ) ; // get subject ImmutableTerm subjectAtom = r2rmlParser . getSubjectAtom ( tm ) ; // get any class predicates , construct atom Class ( subject ) , add to body List < ImmutableFunctionalTerm > classPredicates = r2rmlParser . getClassPredicates ( ) ; for ( ImmutableFunctionalTerm classPred : classPredicates ) { ImmutableTerm predFunction = termFactory . getImmutableUriTemplate ( termFactory . getConstantLiteral ( RDF . TYPE . toString ( ) ) ) ; ; bodyBuilder . add ( targetAtomFactory . getTripleTargetAtom ( subjectAtom , predFunction , classPred ) ) ; // objectAtom } for ( PredicateObjectMap pom : tm . getPredicateObjectMaps ( ) ) { // for each predicate object map // predicates that contain a variable are separately treated List < ImmutableFunctionalTerm > bodyURIPredicates = r2rmlParser . getBodyURIPredicates ( pom ) ; // get object atom ImmutableTerm objectAtom = r2rmlParser . getObjectAtom ( pom ) ; if ( objectAtom == null ) { // skip , object is a join continue ; } // treat predicates for ( ImmutableFunctionalTerm predFunction : bodyURIPredicates ) { bodyBuilder . add ( targetAtomFactory . getTripleTargetAtom ( subjectAtom , predFunction , objectAtom ) ) ; // objectAtom } } return bodyBuilder . build ( ) ;
public class SivMode { /** * First bit 1 , following bits 0. */ private static byte [ ] pad ( byte [ ] in ) { } }
final byte [ ] result = Arrays . copyOf ( in , 16 ) ; new ISO7816d4Padding ( ) . addPadding ( result , in . length ) ; return result ;
public class Scs_sqr {
    /**
     * Symbolic QR or LU ordering and analysis.
     * <p>
     * Computes a fill-reducing ordering and, for QR, the elimination tree,
     * postordering, column counts and V/R nonzero counts of C'*C where
     * C = A(:,q). For LU, only coarse nnz estimates are produced.
     * (Port of CSparse cs_sqr; statement order mirrors the C original.)
     *
     * @param order ordering method to use (0 to 3)
     * @param A column-compressed matrix
     * @param qr analyze for QR if true or LU if false
     * @return symbolic analysis for QR or LU, null on error
     */
    public static Scss cs_sqr(int order, Scs A, boolean qr) {
        int n, k, post[];
        Scss S;
        boolean ok = true;
        if (!Scs_util.CS_CSC(A))
            return (null); /* check inputs */
        n = A.n;
        S = new Scss(); /* allocate result S */
        S.q = Scs_amd.cs_amd(order, A); /* fill-reducing ordering */
        if (order > 0 && S.q == null)
            return (null);
        if (qr) /* QR symbolic analysis */
        {
            /* Permute columns only when an ordering was computed. */
            Scs C = order > 0 ? Scs_permute.cs_permute(A, null, S.q, false) : A;
            S.parent = Scs_etree.cs_etree(C, true); /* etree of C'*C, where C=A(:,q) */
            post = Scs_post.cs_post(S.parent, n);
            S.cp = Scs_counts.cs_counts(C, S.parent, post, true); /* col counts chol(C'*C) */
            ok = C != null && S.parent != null && S.cp != null && cs_vcount(C, S);
            /* S.unz = total column counts = nnz estimate for R. */
            if (ok)
                for (S.unz = 0, k = 0; k < n; k++)
                    S.unz += S.cp[k];
            ok = ok && S.lnz >= 0 && S.unz >= 0; /* int overflow guard */
        }
        else
        {
            S.unz = 4 * (A.p[n]) + n; /* for LU factorization only, */
            S.lnz = S.unz; /* guess nnz(L) and nnz(U) */
        }
        return (ok ? S : null); /* return result S */
    }
}
public class WebDriverHelper { /** * Waits until an element will be displayed into the page . * @ param by * the method of identifying the element * @ param maximumSeconds * maximum number of methods to wait for the element to be * present */ public void waitForElementPresent ( final By by , final int maximumSeconds ) { } }
WebDriverWait wait = new WebDriverWait ( driver , maximumSeconds ) ; wait . until ( ExpectedConditions . presenceOfElementLocated ( ( by ) ) ) ;
public class AmazonRoute53Client { /** * Authorizes the AWS account that created a specified VPC to submit an < code > AssociateVPCWithHostedZone < / code > * request to associate the VPC with a specified hosted zone that was created by a different account . To submit a * < code > CreateVPCAssociationAuthorization < / code > request , you must use the account that created the hosted zone . * After you authorize the association , use the account that created the VPC to submit an * < code > AssociateVPCWithHostedZone < / code > request . * < note > * If you want to associate multiple VPCs that you created by using one account with a hosted zone that you created * by using a different account , you must submit one authorization request for each VPC . * < / note > * @ param createVPCAssociationAuthorizationRequest * A complex type that contains information about the request to authorize associating a VPC with your * private hosted zone . Authorization is only required when a private hosted zone and a VPC were created by * using different accounts . * @ return Result of the CreateVPCAssociationAuthorization operation returned by the service . * @ throws ConcurrentModificationException * Another user submitted a request to create , update , or delete the object at the same time that you did . * Retry the request . * @ throws TooManyVPCAssociationAuthorizationsException * You ' ve created the maximum number of authorizations that can be created for the specified hosted zone . To * authorize another VPC to be associated with the hosted zone , submit a * < code > DeleteVPCAssociationAuthorization < / code > request to remove an existing authorization . To get a list * of existing authorizations , submit a < code > ListVPCAssociationAuthorizations < / code > request . * @ throws NoSuchHostedZoneException * No hosted zone exists with the ID that you specified . 
* @ throws InvalidVPCIdException * The VPC ID that you specified either isn ' t a valid ID or the current account is not authorized to access * this VPC . * @ throws InvalidInputException * The input is not valid . * @ sample AmazonRoute53 . CreateVPCAssociationAuthorization * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / route53-2013-04-01 / CreateVPCAssociationAuthorization " * target = " _ top " > AWS API Documentation < / a > */ @ Override public CreateVPCAssociationAuthorizationResult createVPCAssociationAuthorization ( CreateVPCAssociationAuthorizationRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateVPCAssociationAuthorization ( request ) ;
public class Error { /** * Compose error message by inserting the strings in the add variables * in placeholders within the error message . The message string contains * $ $ markers for each context variable . Context variables are supplied in * the add parameter . ( by Loic Lefevre ) * @ param message message string * @ param add optional parameters * @ return an < code > HsqlException < / code > */ private static String insertStrings ( String message , Object [ ] add ) { } }
StringBuffer sb = new StringBuffer ( message . length ( ) + 32 ) ; int lastIndex = 0 ; int escIndex = message . length ( ) ; // removed test : i < add . length // because if mainErrorMessage is equal to " blabla $ $ " // then the statement escIndex = mainErrorMessage . length ( ) ; // is never reached ! ? ? ? for ( int i = 0 ; i < add . length ; i ++ ) { escIndex = message . indexOf ( MESSAGE_TAG , lastIndex ) ; if ( escIndex == - 1 ) { break ; } sb . append ( message . substring ( lastIndex , escIndex ) ) ; sb . append ( add [ i ] == null ? "null exception message" : add [ i ] . toString ( ) ) ; lastIndex = escIndex + MESSAGE_TAG . length ( ) ; } escIndex = message . length ( ) ; sb . append ( message . substring ( lastIndex , escIndex ) ) ; return sb . toString ( ) ;
public class ChunkStepControllerImpl {
    /**
     * Reads an item from the reader, notifying read listeners before and after,
     * and applying the configured retry/skip policies on failure.
     * <p>
     * A {@code null} result means the reader's "resultset" is exhausted.
     * On a retryable, non-rollback exception the read is retried in place by
     * recursing; on a rollback-requiring exception the chunk is marked for
     * rollback-with-retry instead.
     *
     * @return the item read, or {@code null} at end of input
     */
    private Object readItem() {
        Object itemRead = null;
        try {
            currentChunkStatus.incrementItemsTouchedInCurrentChunk();
            // call read listeners before and after the actual read
            for (ItemReadListenerProxy readListenerProxy : itemReadListeners) {
                readListenerProxy.beforeRead();
            }
            itemRead = readerProxy.readItem();
            for (ItemReadListenerProxy readListenerProxy : itemReadListeners) {
                readListenerProxy.afterRead(itemRead);
            }
            // itemRead == null means we reached the end of
            // the readerProxy "resultset"; undo the touched-count increment.
            if (itemRead == null) {
                currentChunkStatus.markReadNull();
                currentChunkStatus.decrementItemsTouchedInCurrentChunk();
            }
        } catch (Exception e) {
            runtimeStepExecution.setException(e);
            // Notify listeners of the read error before applying retry/skip policy.
            for (ItemReadListenerProxy readListenerProxy : itemReadListeners) {
                readListenerProxy.onReadError(e);
            }
            if (!currentChunkStatus.isRetryingAfterRollback()) {
                // Normal (non-rollback-retry) path: retry takes precedence over skip.
                if (retryReadException(e)) {
                    if (!retryHandler.isRollbackException(e)) {
                        // retry without rollback: re-attempt the read in place
                        itemRead = readItem();
                    } else {
                        // retry with rollback: defer to chunk-level rollback handling
                        currentChunkStatus.markForRollbackWithRetry(e);
                    }
                } else if (skipReadException(e)) {
                    currentItemStatus.setSkipped(true);
                    runtimeStepExecution.getMetric(MetricImpl.MetricType.READ_SKIP_COUNT).incValue();
                } else {
                    throw new BatchContainerRuntimeException(e);
                }
            } else {
                // Coming from a rollback retry: skip takes precedence over retry.
                if (skipReadException(e)) {
                    currentItemStatus.setSkipped(true);
                    runtimeStepExecution.getMetric(MetricImpl.MetricType.READ_SKIP_COUNT).incValue();
                } else if (retryReadException(e)) {
                    if (!retryHandler.isRollbackException(e)) {
                        // retry without rollback
                        itemRead = readItem();
                    } else {
                        // retry with rollback
                        currentChunkStatus.markForRollbackWithRetry(e);
                    }
                } else {
                    throw new BatchContainerRuntimeException(e);
                }
            }
        } catch (Throwable e) {
            throw new BatchContainerRuntimeException(e);
        }
        logger.exiting(sourceClass, "readItem", itemRead == null ? "<null>" : itemRead);
        return itemRead;
    }
}
public class RxFile {
    /**
     * Create a copy of the files found under the provided ArrayList of Uris, in the Library's cache folder.
     * The mime type of the resource will be determined by URLConnection.guessContentTypeFromName() method.
     *
     * @param context the Android context used to resolve the Uris and locate the cache folder
     * @param uris the Uris of the resources to copy
     * @return an Observable emitting the list of created files
     */
    public static Observable<List<File>> createFileFromUri(final Context context, final ArrayList<Uri> uris) {
        // Delegates to the overload that also takes a mime-type resolution strategy,
        // selecting the URLConnection-based strategy.
        return createFileFromUri(context, uris, MimeMap.UrlConnection);
    }
}
public class HashSet { /** * Save the state of this < tt > HashSet < / tt > instance to a stream ( that is , * serialize it ) . * @ serialData The capacity of the backing < tt > HashMap < / tt > instance * ( int ) , and its load factor ( float ) are emitted , followed by * the size of the set ( the number of elements it contains ) * ( int ) , followed by all of its elements ( each an Object ) in * no particular order . */ private void writeObject ( java . io . ObjectOutputStream s ) throws java . io . IOException { } }
// Write out any hidden serialization magic s . defaultWriteObject ( ) ; // Write out HashMap capacity and load factor s . writeInt ( map . capacity ( ) ) ; s . writeFloat ( map . loadFactor ( ) ) ; // Write out size s . writeInt ( map . size ( ) ) ; // Write out all elements in the proper order . for ( E e : map . keySet ( ) ) s . writeObject ( e ) ;
public class ProcessFaxClientSpi {
    /**
     * This function formats the provided template using values from the fax job.
     *
     * @param template
     *            The template
     * @param faxJob
     *            The fax job object
     * @return The formatted template
     */
    protected String formatTemplate(String template, FaxJob faxJob) {
        // Delegates to SpiUtil.formatTemplate(template, faxJob, null, false, true).
        // NOTE(review): the meaning of the null/false/true arguments is not visible
        // from here - presumably (formatter, url-encode, trim or similar); confirm
        // against SpiUtil.formatTemplate's signature before relying on it.
        return SpiUtil.formatTemplate(template, faxJob, null, false, true);
    }
}
public class MarketApi { /** * List historical orders by a character List cancelled and expired market * orders placed by a character up to 90 days in the past . - - - This route is * cached for up to 3600 seconds SSO Scope : * esi - markets . read _ character _ orders . v1 * @ param characterId * An EVE character ID ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param page * Which page of results to return ( optional , default to 1) * @ param token * Access token to use if unable to set a header ( optional ) * @ return ApiResponse & lt ; List & lt ; CharacterOrdersHistoryResponse & gt ; & gt ; * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public ApiResponse < List < CharacterOrdersHistoryResponse > > getCharactersCharacterIdOrdersHistoryWithHttpInfo ( Integer characterId , String datasource , String ifNoneMatch , Integer page , String token ) throws ApiException { } }
com . squareup . okhttp . Call call = getCharactersCharacterIdOrdersHistoryValidateBeforeCall ( characterId , datasource , ifNoneMatch , page , token , null ) ; Type localVarReturnType = new TypeToken < List < CharacterOrdersHistoryResponse > > ( ) { } . getType ( ) ; return apiClient . execute ( call , localVarReturnType ) ;
public class UpdateCenter { /** * Checks if the restart operation is scheduled * ( which means in near future Jenkins will restart by itself ) * @ see # isRestartRequiredForCompletion ( ) */ public boolean isRestartScheduled ( ) { } }
for ( UpdateCenterJob job : getJobs ( ) ) { if ( job instanceof RestartJenkinsJob ) { RestartJenkinsJob . RestartJenkinsJobStatus status = ( ( RestartJenkinsJob ) job ) . status ; if ( status instanceof RestartJenkinsJob . Pending || status instanceof RestartJenkinsJob . Running ) { return true ; } } } return false ;
public class TCPMemcachedNodeImpl { /** * ( non - Javadoc ) * @ see net . spy . memcached . MemcachedNode # setupResend ( ) */ public final void setupResend ( ) { } }
// First , reset the current write op , or cancel it if we should // be authenticating Operation op = getCurrentWriteOp ( ) ; if ( shouldAuth && op != null ) { op . cancel ( ) ; } else if ( op != null ) { ByteBuffer buf = op . getBuffer ( ) ; if ( buf != null ) { buf . reset ( ) ; } else { getLogger ( ) . info ( "No buffer for current write op, removing" ) ; removeCurrentWriteOp ( ) ; } } // Now cancel all the pending read operations . Might be better to // to requeue them . while ( hasReadOp ( ) ) { op = removeCurrentReadOp ( ) ; if ( op != getCurrentWriteOp ( ) ) { getLogger ( ) . warn ( "Discarding partially completed op: %s" , op ) ; op . cancel ( ) ; } } while ( shouldAuth && hasWriteOp ( ) ) { op = removeCurrentWriteOp ( ) ; getLogger ( ) . warn ( "Discarding partially completed op: %s" , op ) ; op . cancel ( ) ; } getWbuf ( ) . clear ( ) ; getRbuf ( ) . clear ( ) ; toWrite = 0 ;
public class AppEngineTaskQueue { /** * VisibleForTesting */ List < TaskHandle > addToQueue ( final Collection < Task > tasks ) { } }
List < TaskHandle > handles = new ArrayList < > ( ) ; Map < String , List < TaskOptions > > queueNameToTaskOptions = new HashMap < > ( ) ; for ( Task task : tasks ) { logger . finest ( "Enqueueing: " + task ) ; String queueName = task . getQueueSettings ( ) . getOnQueue ( ) ; TaskOptions taskOptions = toTaskOptions ( task ) ; List < TaskOptions > taskOptionsList = queueNameToTaskOptions . get ( queueName ) ; if ( taskOptionsList == null ) { taskOptionsList = new ArrayList < > ( ) ; queueNameToTaskOptions . put ( queueName , taskOptionsList ) ; } taskOptionsList . add ( taskOptions ) ; } for ( Map . Entry < String , List < TaskOptions > > entry : queueNameToTaskOptions . entrySet ( ) ) { Queue queue = getQueue ( entry . getKey ( ) ) ; handles . addAll ( addToQueue ( queue , entry . getValue ( ) ) ) ; } return handles ;
public class Validate { /** * Checks if the given object is < code > null < / code > * @ param o The object to validate . * @ throws ParameterException if the given object is < code > null < / code > . */ public static < O extends Object > void notNull ( O o ) { } }
if ( ! validation ) return ; if ( o == null ) throw new ParameterException ( ErrorCode . NULLPOINTER ) ;
public class LinkerDeclarationsManager { /** * Return a set of all the Declaration matching the DeclarationFilter of the . * Linker . * @ return a set of all the Declaration matching the DeclarationFilter of the * Linker . */ public Set < D > getMatchedDeclaration ( ) { } }
Set < D > bindedSet = new HashSet < D > ( ) ; for ( Map . Entry < ServiceReference < D > , Boolean > e : declarations . entrySet ( ) ) { if ( e . getValue ( ) ) { bindedSet . add ( getDeclaration ( e . getKey ( ) ) ) ; } } return bindedSet ;
public class CreateHITTypeRequest { /** * Conditions that a Worker ' s Qualifications must meet in order to accept the HIT . A HIT can have between zero and * ten Qualification requirements . All requirements must be met in order for a Worker to accept the HIT . * Additionally , other actions can be restricted using the < code > ActionsGuarded < / code > field on each * < code > QualificationRequirement < / code > structure . * @ param qualificationRequirements * Conditions that a Worker ' s Qualifications must meet in order to accept the HIT . A HIT can have between * zero and ten Qualification requirements . All requirements must be met in order for a Worker to accept the * HIT . Additionally , other actions can be restricted using the < code > ActionsGuarded < / code > field on each * < code > QualificationRequirement < / code > structure . */ public void setQualificationRequirements ( java . util . Collection < QualificationRequirement > qualificationRequirements ) { } }
if ( qualificationRequirements == null ) { this . qualificationRequirements = null ; return ; } this . qualificationRequirements = new java . util . ArrayList < QualificationRequirement > ( qualificationRequirements ) ;
public class UpdateApnsVoipChannelRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( UpdateApnsVoipChannelRequest updateApnsVoipChannelRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( updateApnsVoipChannelRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( updateApnsVoipChannelRequest . getAPNSVoipChannelRequest ( ) , APNSVOIPCHANNELREQUEST_BINDING ) ; protocolMarshaller . marshall ( updateApnsVoipChannelRequest . getApplicationId ( ) , APPLICATIONID_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class AloneBoltClientConnectionManager {
    /**
     * Closes the long-lived connection for the given transport/url.
     *
     * @param rpcClient       the bolt client
     * @param transportConfig the transport-layer configuration
     * @param url             the transport-layer address
     */
    public void closeConnection(RpcClient rpcClient, ClientTransportConfig transportConfig, Url url) {
        if (rpcClient == null || transportConfig == null || url == null) {
            return;
        }
        // TODO do not close
        // NOTE(review): beyond the null guard this method is intentionally a
        // no-op per the TODO above — the connection is deliberately kept open.
    }
}
public class MetricReducerOrMappingTransform { /** * Mapping a list of metric , only massage its datapoints . * @ param metrics The list of metrics to be mapped . constants The list of constants used for mapping * @ param constants constants input * @ return A list of metrics after mapping . */ protected List < Metric > mapping ( List < Metric > metrics , List < String > constants ) { } }
SystemAssert . requireArgument ( metrics != null , "Cannot transform null metrics" ) ; if ( metrics . isEmpty ( ) ) { return metrics ; } List < Metric > newMetricsList = new ArrayList < Metric > ( ) ; for ( Metric metric : metrics ) { metric . setDatapoints ( this . valueReducerOrMapping . mapping ( metric . getDatapoints ( ) , constants ) ) ; newMetricsList . add ( metric ) ; } return newMetricsList ;
public class JPAEMPool {
    /**
     * Prohibits access to factory properties via the pool.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public Map<String, Object> getProperties() {
        // Trace the rejected call before failing, for diagnostics.
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
            Tr.debug(tc, "getProperties : " + this);
        throw new UnsupportedOperationException("This operation is not supported on a pooling EntityManagerFactory.");
    }
}
public class AfplibPackageImpl {
    /**
     * Lazily resolves and caches the PGPRGPgFlgs enum from the registered
     * AFP package. Generated EMF accessor; the classifier index (127) is
     * fixed by the code generator.
     *
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EEnum getPGPRGPgFlgs() {
        if (pgprgPgFlgsEEnum == null) {
            pgprgPgFlgsEEnum = (EEnum) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(127);
        }
        return pgprgPgFlgsEEnum;
    }
}
public class AbstractStaticSourceImporter { /** * This Method extracts the UUID from the source . * @ return UUID of the source */ @ Override protected UUID evalUUID ( ) { } }
UUID uuid = null ; final Pattern uuidPattern = Pattern . compile ( "@eFapsUUID[\\s]*[0-9a-z\\-]*" ) ; final Matcher uuidMatcher = uuidPattern . matcher ( getCode ( ) ) ; if ( uuidMatcher . find ( ) ) { final String uuidStr = uuidMatcher . group ( ) . replaceFirst ( "^@eFapsUUID" , "" ) ; uuid = UUID . fromString ( uuidStr . trim ( ) ) ; } return uuid ;
public class ListSkillsStoreSkillsByCategoryResult { /** * The skill store skills . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setSkillsStoreSkills ( java . util . Collection ) } or { @ link # withSkillsStoreSkills ( java . util . Collection ) } if * you want to override the existing values . * @ param skillsStoreSkills * The skill store skills . * @ return Returns a reference to this object so that method calls can be chained together . */ public ListSkillsStoreSkillsByCategoryResult withSkillsStoreSkills ( SkillsStoreSkill ... skillsStoreSkills ) { } }
if ( this . skillsStoreSkills == null ) { setSkillsStoreSkills ( new java . util . ArrayList < SkillsStoreSkill > ( skillsStoreSkills . length ) ) ; } for ( SkillsStoreSkill ele : skillsStoreSkills ) { this . skillsStoreSkills . add ( ele ) ; } return this ;
public class CmsEncoder {
    /**
     * Creates a String out of a byte array with the specified encoding,
     * falling back to the system default in case the encoding name is not
     * valid.<p>
     *
     * Use this method as a replacement for <code>new String(byte[], encoding)</code>
     * to avoid possible encoding problems.<p>
     *
     * @param bytes the bytes to decode
     * @param encoding the encoding scheme to use for decoding the bytes
     * @return the bytes decoded to a String
     */
    public static String createString(byte[] bytes, String encoding) {
        // The identity comparison (!=) below is intentional: "enc" is interned
        // here, so it can be reference-compared against the (presumably also
        // interned) default-encoding string to skip the lookup on the fast path.
        String enc = encoding.intern();
        if (enc != OpenCms.getSystemInfo().getDefaultEncoding()) {
            enc = lookupEncoding(enc, null);
        }
        if (enc != null) {
            try {
                return new String(bytes, enc);
            } catch (UnsupportedEncodingException e) {
                // this can _never_ happen since the charset was looked up first
            }
        } else {
            // Unknown encoding: warn, then fall back to the VM default.
            if (LOG.isWarnEnabled()) {
                LOG.warn(Messages.get().getBundle().key(Messages.ERR_UNSUPPORTED_VM_ENCODING_1, encoding));
            }
            enc = OpenCms.getSystemInfo().getDefaultEncoding();
            try {
                return new String(bytes, enc);
            } catch (UnsupportedEncodingException e) {
                // this can also _never_ happen since the default encoding is always valid
            }
        }
        // this code is unreachable in practice
        LOG.error(Messages.get().getBundle().key(Messages.ERR_ENCODING_ISSUES_1, encoding));
        return null;
    }
}
public class CmsSearchIndexTable {
    /**
     * Handles the table item clicks, including clicks on images inside of a
     * table item.<p>
     *
     * @param event the click event
     * @param itemId of the clicked row
     * @param propertyId column id
     */
    void onItemClick(MouseEvents.ClickEvent event, Object itemId, Object propertyId) {
        if (!event.isCtrlKey() && !event.isShiftKey()) {
            // don't interfere with multi-selection using control key
            changeValueIfNotMultiSelect(itemId);
            if (event.getButton().equals(MouseButton.RIGHT) || (propertyId == null)) {
                // Right click (or click outside a column): open the context menu.
                m_menu.setEntries(getMenuEntries(), getSearchIndexNames());
                m_menu.openForTable(event, itemId, propertyId, this);
            } else if (event.getButton().equals(MouseButton.LEFT) && TableProperty.Name.equals(propertyId)) {
                // Left click on the name column: show sources for the (single)
                // selected index — getValue() is the current selection set.
                showSourcesWindow(((I_CmsSearchIndex) ((Set<?>) getValue()).iterator().next()).getName());
            }
        }
    }
}
public class BeanMethodActionRule { /** * Adds a new argument rule with the specified name and returns it . * @ param argumentName the argument name * @ return the argument item rule */ public ItemRule newArgumentItemRule ( String argumentName ) { } }
ItemRule itemRule = new ItemRule ( ) ; itemRule . setName ( argumentName ) ; addArgumentItemRule ( itemRule ) ; return itemRule ;
public class DefaultHistoryReferencesTableModel { /** * Returns the history reference with the given ID . If the history reference is not found { @ code null } is returned . * @ param historyReferenceId the ID of the history reference that will be searched * @ return the history reference , or { @ code null } if not found */ public HistoryReference getHistoryReference ( int historyReferenceId ) { } }
DefaultHistoryReferencesTableEntry entry = getEntryWithHistoryId ( historyReferenceId ) ; if ( entry != null ) { return entry . getHistoryReference ( ) ; } return null ;
public class Try {
    /**
     * Convenience overload of
     * {@link Try#withResources(CheckedSupplier, CheckedFn1) withResources}
     * that cascades dependent resource creation via nested calls: the first
     * resource is acquired, the second is derived from it, and both are closed
     * in reverse order by the underlying two-argument overload.
     *
     * @param aSupplier the first resource supplier
     * @param bFn       the dependent resource function
     * @param fn        the function body
     * @param <A>       the first resource type
     * @param <B>       the second resource type
     * @param <C>       the function return type
     * @return a {@link Try} representing the result of the function's
     *         application to the dependent resource
     */
    public static <A extends AutoCloseable, B extends AutoCloseable, C> Try<Exception, C> withResources(
            CheckedSupplier<? extends Exception, ? extends A> aSupplier,
            CheckedFn1<? extends Exception, ? super A, ? extends B> bFn,
            CheckedFn1<? extends Exception, ? super B, ? extends Try<? extends Exception, ? extends C>> fn) {
        // Delegate: the inner withResources manages B's lifetime inside A's scope.
        return withResources(aSupplier, a -> withResources(() -> bFn.apply(a), fn::apply));
    }
}
public class Types {
    /**
     * Is t assignable to s?<br>
     * Equivalent to subtype except for constant values and raw types.<br>
     * (not defined for Method and ForAll types)
     *
     * Constant expressions of integral type get the special narrowing
     * treatment of JLS assignment conversion: an int-range constant may be
     * assigned to byte/char/short (or their boxes) when its value fits.
     */
    public boolean isAssignable(Type t, Type s, Warner warn) {
        // Error types are assignable to anything to avoid cascading diagnostics.
        if (t.hasTag(ERROR))
            return true;
        // Constant narrowing: only for compile-time constants of int range.
        if (t.getTag().isSubRangeOf(INT) && t.constValue() != null) {
            int value = ((Number) t.constValue()).intValue();
            switch (s.getTag()) {
            case BYTE:
                if (Byte.MIN_VALUE <= value && value <= Byte.MAX_VALUE)
                    return true;
                break;
            case CHAR:
                if (Character.MIN_VALUE <= value && value <= Character.MAX_VALUE)
                    return true;
                break;
            case SHORT:
                if (Short.MIN_VALUE <= value && value <= Short.MAX_VALUE)
                    return true;
                break;
            case INT:
                return true;
            case CLASS:
                // Boxed byte/char/short target: retry against the unboxed type.
                switch (unboxedType(s).getTag()) {
                case BYTE:
                case CHAR:
                case SHORT:
                    return isAssignable(t, unboxedType(s), warn);
                }
                break;
            }
        }
        // Fall back to ordinary method-invocation conversion.
        return isConvertible(t, s, warn);
    }
}
public class ESInnerHitSerialThreadLocal { /** * 设置父子查询 , 子的类型信息和orm class对象 * @ param type * @ param refs */ public static void setESInnerTypeReferences ( String type , Class < ? > refs ) { } }
Map < String , ESClass > typeRefs = innerHitTypeLocalsByType . get ( ) ; if ( typeRefs == null ) { typeRefs = new HashMap < String , ESClass > ( ) ; innerHitTypeLocalsByType . set ( typeRefs ) ; } typeRefs . put ( type , new ESClassType ( refs ) ) ;
public class CommercePriceListAccountRelUtil {
    /**
     * Returns the commerce price list account rel where commerceAccountId = &#63;
     * and commercePriceListId = &#63; or throws a
     * {@link NoSuchPriceListAccountRelException} if it could not be found.
     * Thin static facade that delegates to the persistence layer.
     *
     * @param commerceAccountId the commerce account ID
     * @param commercePriceListId the commerce price list ID
     * @return the matching commerce price list account rel
     * @throws NoSuchPriceListAccountRelException if a matching commerce price
     *         list account rel could not be found
     */
    public static CommercePriceListAccountRel findByC_C(long commerceAccountId, long commercePriceListId)
            throws com.liferay.commerce.price.list.exception.NoSuchPriceListAccountRelException {
        return getPersistence().findByC_C(commerceAccountId, commercePriceListId);
    }
}
public class SessionDialog { /** * Reset the UI shared Context copies . The effect is that previous copies are discarded and new * copies are created . * @ param session the session */ public void recreateUISharedContexts ( Session session ) { } }
uiContexts . clear ( ) ; for ( Context context : session . getContexts ( ) ) { Context uiContext = context . duplicate ( ) ; uiContexts . put ( context . getIndex ( ) , uiContext ) ; }
public class SubnetsInner {
    /**
     * Gets all subnets in a virtual network (next page). Unwraps the service
     * response envelope to yield just the page body.
     *
     * @param nextPageLink The NextLink from the previous successful call to
     *        List operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the PagedList&lt;SubnetInner&gt; object
     */
    public Observable<Page<SubnetInner>> listNextAsync(final String nextPageLink) {
        return listNextWithServiceResponseAsync(nextPageLink).map(new Func1<ServiceResponse<Page<SubnetInner>>, Page<SubnetInner>>() {
            @Override
            public Page<SubnetInner> call(ServiceResponse<Page<SubnetInner>> response) {
                // Strip the ServiceResponse wrapper, keeping only the page.
                return response.body();
            }
        });
    }
}
public class TagResourceRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( TagResourceRequest tagResourceRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( tagResourceRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( tagResourceRequest . getArn ( ) , ARN_BINDING ) ; protocolMarshaller . marshall ( tagResourceRequest . getTags ( ) , TAGS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class InitialMatching0 {
    /**
     * Sets a {@link Supplier} to execute if this matches. Delegates to the
     * two-argument overload, seeding a fresh FluentMatchingR over the current
     * value.
     */
    public <R> FluentMatchingR<T, R> get(Supplier<R> supplier) {
        return get(new FluentMatchingR<>(value), supplier);
    }
}
public class QueryFormat {
    /**
     * Renders a QueryRootNode as a JCR-SQL statement into the StringBuilder
     * passed as {@code data}: SELECT list, FROM (node types collected while
     * pre-rendering the predicates), WHERE clause, optional path constraint,
     * and ORDER BY.
     *
     * -----&lt; QueryNodeVisitor interface &gt;-----
     */
    public Object visit(QueryRootNode node, Object data) throws RepositoryException {
        StringBuilder sb = (StringBuilder) data;
        try {
            sb.append("SELECT");
            InternalQName[] selectProps = node.getSelectProperties();
            if (selectProps.length == 0) {
                sb.append(" *");
            } else {
                // Comma-separated select list; first iteration appends no comma.
                String comma = "";
                for (int i = 0; i < selectProps.length; i++) {
                    sb.append(comma).append(" ");
                    appendName(selectProps[i], resolver, sb);
                    comma = ",";
                }
            }
            sb.append(" FROM");
            // node type restrictions are within predicates of location nodes
            // therefore we write the where clause first to a temp string to
            // collect the node types.
            StringBuilder tmp = new StringBuilder();
            LocationStepQueryNode[] steps = node.getLocationNode().getPathSteps();
            QueryNode[] predicates = steps[steps.length - 1].getPredicates();
            // are there any relevant predicates?
            // NOTE(review): this loop appends " WHERE " once per non-nodetype
            // predicate rather than once overall — looks like a missing break;
            // confirm against the upstream implementation before changing.
            for (int i = 0; i < predicates.length; i++) {
                if (predicates[i].getType() != QueryNode.TYPE_NODETYPE) {
                    tmp.append(" WHERE ");
                }
            }
            // Render each predicate; node-type predicates only register their
            // type (collected into nodeTypes via accept), the rest join with AND.
            String and = "";
            for (int i = 0; i < predicates.length; i++) {
                if (predicates[i].getType() != QueryNode.TYPE_NODETYPE) {
                    tmp.append(and);
                    and = " AND ";
                }
                predicates[i].accept(this, tmp);
            }
            // node types have been collected by now
            String comma = "";
            int ntCount = 0;
            for (Iterator<InternalQName> it = nodeTypes.iterator(); it.hasNext(); ntCount++) {
                InternalQName nt = it.next();
                sb.append(comma).append(" ");
                appendName(nt, resolver, sb);
                comma = ",";
            }
            if (ntCount == 0) {
                // No explicit node type restriction: fall back to nt:base.
                sb.append(" ");
                sb.append(resolver.createJCRName(Constants.NT_BASE).getAsString());
            }
            // append WHERE clause
            sb.append(tmp.toString());
            if (steps.length == 2 && steps[1].getIncludeDescendants() && steps[1].getNameTest() == null) {
                // then this query selects all paths
            } else if (steps.length == 1 && steps[0].getIncludeDescendants() && steps[0].getNameTest() == null) {
                // then this query selects all paths
            } else {
                // A real path constraint is needed: join with AND if a WHERE
                // clause already exists, otherwise start one.
                if (predicates.length > 0) {
                    sb.append(" AND ");
                } else {
                    sb.append(" WHERE ");
                }
                node.getLocationNode().accept(this, sb);
            }
        } catch (NamespaceException e) {
            // Name resolution failures are collected rather than rethrown.
            exceptions.add(e);
        }
        if (node.getOrderNode() != null) {
            node.getOrderNode().accept(this, sb);
        }
        return sb;
    }
}
public class MongoDBClient {
    /**
     * Executes on list of entities to be persisted. For updates the documents
     * are saved immediately; for inserts they are only accumulated into the
     * per-collection batch map for a later bulk write.
     *
     * @param collections     collection containing list of db objects.
     * @param entity          entity in question.
     * @param id              entity id.
     * @param metadata        entity metadata
     * @param relationHolders relation holders.
     * @param isUpdate        if it is an update
     * @return collection of DB objects.
     */
    private Map<String, List<DBObject>> onPersist(Map<String, List<DBObject>> collections, Object entity, Object id,
            EntityMetadata metadata, List<RelationHolder> relationHolders, boolean isUpdate) {
        persistenceUnit = metadata.getPersistenceUnit();
        // One document per target collection for this entity.
        Map<String, DBObject> documents = handler.getDocumentFromEntity(metadata, entity, relationHolders, kunderaMetadata);
        if (isUpdate) {
            for (String documentName : documents.keySet()) {
                // NOTE(review): "query" is built (by id, compound or simple) but
                // never passed to the save() call below — it appears vestigial,
                // possibly from an earlier update-by-query; confirm intent.
                BasicDBObject query = new BasicDBObject();
                MetamodelImpl metaModel = (MetamodelImpl) kunderaMetadata.getApplicationMetadata().getMetamodel(metadata.getPersistenceUnit());
                if (metaModel.isEmbeddable(metadata.getIdAttribute().getBindableJavaType())) {
                    MongoDBUtils.populateCompoundKey(query, metadata, metaModel, id);
                } else {
                    query.put("_id", MongoDBUtils.populateValue(id, id.getClass()));
                }
                DBCollection dbCollection = mongoDb.getCollection(documentName);
                KunderaCoreUtils.printQuery("Persist collection:" + documentName, showQuery);
                // save() upserts the document by its _id.
                dbCollection.save(documents.get(documentName), getWriteConcern());
            }
        } else {
            for (String documentName : documents.keySet()) {
                // a db collection can have multiple records..
                // and we can have a collection of records as well.
                List<DBObject> dbStatements = null;
                if (collections.containsKey(documentName)) {
                    dbStatements = collections.get(documentName);
                    dbStatements.add(documents.get(documentName));
                } else {
                    dbStatements = new ArrayList<DBObject>();
                    dbStatements.add(documents.get(documentName));
                    collections.put(documentName, dbStatements);
                }
            }
        }
        return collections;
    }
}
public class ServicesInner {
    /**
     * Delete DMS Service Instance.
     * The services resource is the top-level resource that represents the Data
     * Migration Service. The DELETE method deletes a service. Any running
     * tasks will be canceled. Blocks until the long-running delete completes.
     *
     * @param groupName Name of the resource group
     * @param serviceName Name of the service
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws ApiErrorException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the
     *         request fails to be sent
     */
    public void delete(String groupName, String serviceName) {
        // last() waits for the final event of the long-running operation.
        deleteWithServiceResponseAsync(groupName, serviceName).toBlocking().last().body();
    }
}
public class DateUtils { /** * 不考虑时间求天数差 * @ param startDate 开始时间 * @ param endDate 结束时间 */ public static long getDaysBetweenIgnoreTime ( Date startDate , Date endDate ) { } }
startDate = getRoundedDay ( startDate ) ; endDate = getNextDay ( endDate ) ; return getBetweenDate ( startDate , endDate ) ;
public class SS { /** * Returns an < code > AddAction < / code > for building expression that would * append the specified values to this string set ; or if the attribute does * not already exist , add the new attribute and the value ( s ) to the item . * In general , DynamoDB recommends using SET rather than ADD . */ public AddAction append ( String ... values ) { } }
return new AddAction ( this , new LiteralOperand ( new LinkedHashSet < String > ( Arrays . asList ( values ) ) ) ) ;
public class SgUtils { /** * Checks if the modifiers are valid for a class . If any of the modifiers is * not valid an < code > IllegalArgumentException < / code > is thrown . * @ param modifiers * Modifiers . * @ param isInterface * Are the modifiers from an interface ? * @ param isInnerClass * Is it an inner class ? */ public static void checkClassModifiers ( final int modifiers , final boolean isInterface , final boolean isInnerClass ) { } }
// Basic checks final int type ; if ( isInterface ) { if ( isInnerClass ) { type = INNER_INTERFACE ; } else { type = OUTER_INTERFACE ; } } else { if ( isInnerClass ) { type = INNER_CLASS ; } else { type = OUTER_CLASS ; } } checkModifiers ( type , modifiers ) ; // Abstract and final check if ( Modifier . isAbstract ( modifiers ) && Modifier . isFinal ( modifiers ) ) { throw new IllegalArgumentException ( CLASS_ABSTRACT_AND_FINAL_ERROR + " [" + Modifier . toString ( modifiers ) + "]" ) ; }
public class RequestSignerRegistry { /** * Register an requestSigner * @ param requestSigner * an instance of an RequestSigner to use for a given type * @ param signerType * the type of requestSigner that this instance will be used for */ public RequestSignerRegistry register ( RequestSigner requestSigner , Class < ? extends RequestSigner > signerType ) { } }
Map < Class < ? extends RequestSigner > , RequestSigner > registeredSigners = new HashMap < > ( ) ; registeredSigners . putAll ( signerForType ) ; registeredSigners . put ( signerType , requestSigner ) ; return new RequestSignerRegistry ( registeredSigners ) ;
public class AmazonHttpClient { /** * Ensures the response handler is not null . If it is this method returns a dummy response * handler . * @ return Either original response handler or dummy response handler . */ private < T > HttpResponseHandler < T > getNonNullResponseHandler ( HttpResponseHandler < T > responseHandler ) { } }
if ( responseHandler != null ) { return responseHandler ; } else { // Return a Dummy , No - Op handler return new HttpResponseHandler < T > ( ) { @ Override public T handle ( HttpResponse response ) throws Exception { return null ; } @ Override public boolean needsConnectionLeftOpen ( ) { return false ; } } ; }