signature
stringlengths 43
39.1k
| implementation
stringlengths 0
450k
|
|---|---|
public class Record { /** * Write this record and re - read if ( if it has been modified ) .
* @ return the bookmark . */
public Object writeAndRefresh ( boolean bRefreshDataIfNoMods ) throws DBException { } }
|
if ( ! this . isModified ( ) ) { if ( ( this . getEditMode ( ) == DBConstants . EDIT_IN_PROGRESS ) || ( this . getEditMode ( ) == DBConstants . EDIT_CURRENT ) ) { boolean bLocked = ( this . getEditMode ( ) == DBConstants . EDIT_IN_PROGRESS ) ; Object bookmark = this . getHandle ( DBConstants . BOOKMARK_HANDLE ) ; if ( bRefreshDataIfNoMods ) { this . setHandle ( bookmark , DBConstants . BOOKMARK_HANDLE ) ; if ( bLocked ) this . edit ( ) ; } return bookmark ; } else return null ; } Object bookmark = null ; if ( ( this . getEditMode ( ) == DBConstants . EDIT_IN_PROGRESS ) || ( this . getEditMode ( ) == DBConstants . EDIT_CURRENT ) ) { if ( this . getEditMode ( ) == DBConstants . EDIT_CURRENT ) { // HACK - It should have been locked already . . . especially if there were any changes
BaseBuffer buffer = null ; if ( this . isModified ( ) ) { buffer = new VectorBuffer ( null , BaseBuffer . ALL_FIELDS | BaseBuffer . MODIFIED_ONLY ) ; buffer . fieldsToBuffer ( this ) ; } this . edit ( ) ; if ( buffer != null ) { boolean [ ] rgListeners = this . setEnableListeners ( false ) ; Object [ ] rgFieldListeners = this . setEnableFieldListeners ( false ) ; buffer . bufferToFields ( this , DBConstants . DONT_DISPLAY , DBConstants . READ_MOVE ) ; buffer . free ( ) ; this . setEnableFieldListeners ( rgFieldListeners ) ; this . setEnableListeners ( rgListeners ) ; } } bookmark = this . getHandle ( DBConstants . BOOKMARK_HANDLE ) ; int iOldOpenMode = this . getOpenMode ( ) ; this . setOpenMode ( iOldOpenMode | DBConstants . OPEN_DONT_CHANGE_CURRENT_LOCK_TYPE ) ; // Don ' t relinquish my lock
this . set ( ) ; this . setHandle ( bookmark , DBConstants . BOOKMARK_HANDLE ) ; this . setOpenMode ( iOldOpenMode ) ; this . edit ( ) ; } else if ( this . getEditMode ( ) == DBConstants . EDIT_ADD ) { this . add ( ) ; bookmark = this . getLastModified ( DBConstants . BOOKMARK_HANDLE ) ; this . setHandle ( bookmark , DBConstants . BOOKMARK_HANDLE ) ; this . edit ( ) ; } return bookmark ;
|
public class Bugsnag { /** * Set the endpoint to deliver Bugsnag errors report to . This is a convenient
* shorthand for bugsnag . getDelivery ( ) . setEndpoint ( ) ;
* @ param endpoint the endpoint to send reports to
* @ see # setDelivery
* @ deprecated use { @ link Configuration # setEndpoints ( String , String ) } instead */
@ Deprecated public void setEndpoint ( String endpoint ) { } }
|
if ( config . delivery instanceof HttpDelivery ) { ( ( HttpDelivery ) config . delivery ) . setEndpoint ( endpoint ) ; }
|
public class PeerJournalStream { /** * write the contents of a journal message . */
@ Override public void write ( byte [ ] buffer , int offset , int length ) { } }
|
try { OutputStream os = _os ; if ( os != null ) { os . write ( buffer , offset , length ) ; } } catch ( Exception e ) { log . log ( Level . FINER , e . toString ( ) , e ) ; }
|
public class TypeChecker { /** * Creates a recursing type checker for a { @ link java . util . Map } .
* @ param keyChecker The typechecker to check the keys with
* @ param valueChecker The typechecker to check the values with
* @ return a typechecker for a Map containing keys and values passing the specified type checkers */
public static < K , V > TypeChecker < Map < ? extends K , ? extends V > > tMap ( TypeChecker < ? extends K > keyChecker , TypeChecker < ? extends V > valueChecker ) { } }
|
return new MapTypeChecker < > ( Map . class , keyChecker , valueChecker ) ;
|
public class ItemsUnion { /** * Serialize this union to a byte array . Result is an ItemsSketch , serialized in an
* unordered , non - compact form . The resulting byte [ ] can be passed to getInstance for either a
* sketch or union .
* @ param serDe an instance of ArrayOfItemsSerDe
* @ return byte array of this union */
public byte [ ] toByteArray ( final ArrayOfItemsSerDe < T > serDe ) { } }
|
if ( gadget_ == null ) { final ItemsSketch < T > sketch = ItemsSketch . getInstance ( maxK_ , comparator_ ) ; return sketch . toByteArray ( serDe ) ; } return gadget_ . toByteArray ( serDe ) ;
|
public class DiscordianDate { /** * Obtains the current { @ code DiscordianDate } from the specified clock .
* This will query the specified clock to obtain the current date - today .
* Using this method allows the use of an alternate clock for testing .
* The alternate clock may be introduced using { @ linkplain Clock dependency injection } .
* @ param clock the clock to use , not null
* @ return the current date , not null
* @ throws DateTimeException if the current date cannot be obtained */
public static DiscordianDate now ( Clock clock ) { } }
|
LocalDate now = LocalDate . now ( clock ) ; return DiscordianDate . ofEpochDay ( now . toEpochDay ( ) ) ;
|
public class AsyncHbaseSchemaService { /** * Check if we can perform a faster scan . We can only perform a faster scan when we are trying to discover scopes or metrics
* without having information on any other fields . */
private boolean _canSkipWhileScanning ( MetricSchemaRecordQuery query , RecordType type ) { } }
|
if ( ( RecordType . METRIC . equals ( type ) || RecordType . SCOPE . equals ( type ) ) && ! SchemaService . containsFilter ( query . getTagKey ( ) ) && ! SchemaService . containsFilter ( query . getTagValue ( ) ) && ! SchemaService . containsFilter ( query . getNamespace ( ) ) ) { if ( RecordType . METRIC . equals ( type ) && ! SchemaService . containsFilter ( query . getMetric ( ) ) ) { return false ; } if ( RecordType . SCOPE . equals ( type ) && ! SchemaService . containsFilter ( query . getScope ( ) ) ) { return false ; } return true ; } return false ;
|
public class Sha1 { /** * Reset athen initialize the digest context .
* Overrides the protected abstract method of
* < code > java . security . MessageDigestSpi < / code > . */
protected void engineReset ( ) { } }
|
int i = 60 ; do { pad [ i ] = ( byte ) 0x00 ; pad [ i + 1 ] = ( byte ) 0x00 ; pad [ i + 2 ] = ( byte ) 0x00 ; pad [ i + 3 ] = ( byte ) 0x00 ; } while ( ( i -= 4 ) >= 0 ) ; padding = 0 ; bytes = 0 ; init ( ) ;
|
public class DefaultQueryAction { /** * Add from and size to the ES query based on the ' LIMIT ' clause
* @ param from
* starts from document at position from
* @ param size
* number of documents to return . */
private void setLimit ( int from , int size ) { } }
|
request . setFrom ( from ) ; if ( size > - 1 ) { request . setSize ( size ) ; }
|
public class CommerceAccountUserRelPersistenceImpl { /** * Returns a range of all the commerce account user rels where commerceAccountId = & # 63 ; .
* Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CommerceAccountUserRelModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order .
* @ param commerceAccountId the commerce account ID
* @ param start the lower bound of the range of commerce account user rels
* @ param end the upper bound of the range of commerce account user rels ( not inclusive )
* @ return the range of matching commerce account user rels */
@ Override public List < CommerceAccountUserRel > findByCommerceAccountId ( long commerceAccountId , int start , int end ) { } }
|
return findByCommerceAccountId ( commerceAccountId , start , end , null ) ;
|
public class ArgP { /** * Returns a usage string . */
public String usage ( ) { } }
|
final StringBuilder buf = new StringBuilder ( 16 * options . size ( ) ) ; addUsageTo ( buf ) ; return buf . toString ( ) ;
|
public class DisplayUtil { /** * Returns the width of the device ' s display .
* @ param context
* The context , which should be used , as an instance of the class { @ link Context } . The
* context may not be null
* @ return The width of the device ' s display in pixels as an { @ link Integer } value */
public static int getDisplayWidth ( @ NonNull final Context context ) { } }
|
Condition . INSTANCE . ensureNotNull ( context , "The context may not be null" ) ; return context . getResources ( ) . getDisplayMetrics ( ) . widthPixels ;
|
public class SeimiCrawlerBootstrapListener { /** * Handle an application event .
* @ param event the event to respond to */
@ Override public void onApplicationEvent ( ContextRefreshedEvent event ) { } }
|
ApplicationContext context = event . getApplicationContext ( ) ; if ( isSpringBoot ) { CrawlerProperties crawlerProperties = context . getBean ( CrawlerProperties . class ) ; if ( ! crawlerProperties . isEnabled ( ) ) { logger . warn ( "{} is not enabled" , Constants . SEIMI_CRAWLER_BOOTSTRAP_ENABLED ) ; return ; } } if ( context != null ) { if ( CollectionUtils . isEmpty ( CrawlerCache . getCrawlers ( ) ) ) { logger . info ( "Not find any crawler,may be you need to check." ) ; return ; } workersPool = Executors . newFixedThreadPool ( Constants . BASE_THREAD_NUM * Runtime . getRuntime ( ) . availableProcessors ( ) * CrawlerCache . getCrawlers ( ) . size ( ) ) ; for ( Class < ? extends BaseSeimiCrawler > a : CrawlerCache . getCrawlers ( ) ) { CrawlerModel crawlerModel = new CrawlerModel ( a , context ) ; if ( CrawlerCache . isExist ( crawlerModel . getCrawlerName ( ) ) ) { logger . error ( "Crawler:{} is repeated,please check" , crawlerModel . getCrawlerName ( ) ) ; throw new SeimiInitExcepiton ( StrFormatUtil . info ( "Crawler:{} is repeated,please check" , crawlerModel . getCrawlerName ( ) ) ) ; } CrawlerCache . putCrawlerModel ( crawlerModel . getCrawlerName ( ) , crawlerModel ) ; } for ( Map . Entry < String , CrawlerModel > crawlerEntry : CrawlerCache . getCrawlerModelContext ( ) . entrySet ( ) ) { for ( int i = 0 ; i < Constants . BASE_THREAD_NUM * Runtime . getRuntime ( ) . availableProcessors ( ) ; i ++ ) { workersPool . execute ( new SeimiProcessor ( CrawlerCache . getInterceptors ( ) , crawlerEntry . getValue ( ) ) ) ; } } if ( isSpringBoot ) { CrawlerProperties crawlerProperties = context . getBean ( CrawlerProperties . class ) ; String crawlerNames = crawlerProperties . getNames ( ) ; if ( StringUtils . isBlank ( crawlerNames ) ) { logger . info ( "Spring boot start [{}] as worker." , StringUtils . join ( CrawlerCache . getCrawlerModelContext ( ) . keySet ( ) , "," ) ) ; } else { String [ ] crawlers = crawlerNames . 
split ( "," ) ; for ( String cn : crawlers ) { CrawlerModel crawlerModel = CrawlerCache . getCrawlerModel ( cn ) ; if ( crawlerModel == null ) { logger . warn ( "Crawler name = {} is not existent." , cn ) ; continue ; } crawlerModel . startRequest ( ) ; } } // 统一通用配置信息至 seimiConfig
SeimiConfig config = new SeimiConfig ( ) ; config . setBloomFilterExpectedInsertions ( crawlerProperties . getBloomFilterExpectedInsertions ( ) ) ; config . setBloomFilterFalseProbability ( crawlerProperties . getBloomFilterFalseProbability ( ) ) ; config . setSeimiAgentHost ( crawlerProperties . getSeimiAgentHost ( ) ) ; config . setSeimiAgentPort ( crawlerProperties . getSeimiAgentPort ( ) ) ; CrawlerCache . setConfig ( config ) ; } }
|
public class CPDImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
@ Override public void eUnset ( int featureID ) { } }
|
switch ( featureID ) { case AfplibPackage . CPD__CP_DESC : setCPDesc ( CP_DESC_EDEFAULT ) ; return ; case AfplibPackage . CPD__GCGID_LEN : setGCGIDLen ( GCGID_LEN_EDEFAULT ) ; return ; case AfplibPackage . CPD__NUM_CD_PTS : setNumCdPts ( NUM_CD_PTS_EDEFAULT ) ; return ; case AfplibPackage . CPD__GCSGID : setGCSGID ( GCSGID_EDEFAULT ) ; return ; case AfplibPackage . CPD__CPGID : setCPGID ( CPGID_EDEFAULT ) ; return ; case AfplibPackage . CPD__ENC_SCHEME : setEncScheme ( ENC_SCHEME_EDEFAULT ) ; return ; } super . eUnset ( featureID ) ;
|
public class JavaUtils { /** * Converts a JVM external name to a JVM signature name . An external name is
* that which is returned from { @ link Class # getName ( ) } A signature name is
* the name in class file format .
* For example :
* [ java . lang . Object
* becomes :
* [ Ljava / lang / Object ;
* @ param externalName
* @ return */
public static String toSignature ( String externalName ) { } }
|
if ( externalName == null ) return null ; String ret = primitiveNameDescriptors . get ( externalName ) ; if ( ret != null ) return ret ; ret = externalName . replace ( '.' , '/' ) ; return ( ret . charAt ( 0 ) == '[' ) ? ret : "L" + ret + ";" ;
|
public class WSManUtils { /** * Checks if a string is a valid UUID or not .
* @ param string The UUID value .
* @ return true if input is a valid UUID or false if input is empty or an invalid UUID format . */
public static boolean isUUID ( String string ) { } }
|
try { if ( string != null ) { UUID . fromString ( string ) ; return true ; } else { return false ; } } catch ( IllegalArgumentException ex ) { return false ; }
|
public class X509CertImpl { /** * Return an enumeration of names of attributes existing within this
* attribute . */
public Enumeration < String > getElements ( ) { } }
|
AttributeNameEnumeration elements = new AttributeNameEnumeration ( ) ; elements . addElement ( NAME + DOT + INFO ) ; elements . addElement ( NAME + DOT + ALG_ID ) ; elements . addElement ( NAME + DOT + SIGNATURE ) ; elements . addElement ( NAME + DOT + SIGNED_CERT ) ; return elements . elements ( ) ;
|
public class BoundingBox { /** * Transform the bounding box using the provided projection transform
* @ param transform
* projection transform
* @ return transformed bounding box
* @ since 3.0.0 */
public BoundingBox transform ( ProjectionTransform transform ) { } }
|
BoundingBox transformed = this ; if ( transform . isSameProjection ( ) ) { transformed = new BoundingBox ( transformed ) ; } else { if ( transform . getFromProjection ( ) . isUnit ( Units . DEGREES ) ) { transformed = TileBoundingBoxUtils . boundDegreesBoundingBoxWithWebMercatorLimits ( transformed ) ; } GeometryEnvelope envelope = buildEnvelope ( transformed ) ; GeometryEnvelope transformedEnvelope = transform . transform ( envelope ) ; transformed = new BoundingBox ( transformedEnvelope ) ; } return transformed ;
|
public class ExtendedByteBuf { /** * Reads optional range of bytes . Negative lengths are translated to None , 0 length represents empty Array */
public static Optional < byte [ ] > readOptRangedBytes ( ByteBuf bf ) { } }
|
int length = SignedNumeric . decode ( readUnsignedInt ( bf ) ) ; return length < 0 ? Optional . empty ( ) : Optional . of ( readRangedBytes ( bf , length ) ) ;
|
public class ReviewsImpl { /** * Use this method to add frames for a video review . Timescale : This parameter is a factor which is used to convert the timestamp on a frame into milliseconds . Timescale is provided in the output of the Content Moderator video media processor on the Azure Media Services platform . Timescale in the Video Moderation output is Ticks / Second .
* @ param teamName Your team name .
* @ param reviewId Id of the review .
* @ param contentType The content type .
* @ param videoFrameBody Body for add video frames API
* @ param addVideoFrameUrlOptionalParameter the object representing the optional parameters to be set before calling this API
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the { @ link ServiceResponse } object if successful . */
public Observable < Void > addVideoFrameUrlAsync ( String teamName , String reviewId , String contentType , List < VideoFrameBodyItem > videoFrameBody , AddVideoFrameUrlOptionalParameter addVideoFrameUrlOptionalParameter ) { } }
|
return addVideoFrameUrlWithServiceResponseAsync ( teamName , reviewId , contentType , videoFrameBody , addVideoFrameUrlOptionalParameter ) . map ( new Func1 < ServiceResponse < Void > , Void > ( ) { @ Override public Void call ( ServiceResponse < Void > response ) { return response . body ( ) ; } } ) ;
|
public class DiskCacheSizeInfo { /** * Call this method to reset the disk cache size info after disk clear */
public synchronized void reset ( ) { } }
|
final String methodName = "reset()" ; this . currentDataGB = 1 ; if ( this . currentDependencyIdGB > 0 ) { this . currentDependencyIdGB = 1 ; } if ( this . currentTemplateGB > 0 ) { this . currentTemplateGB = 1 ; } if ( this . diskCacheSizeInGBLimit > 0 ) { this . diskCacheSizeInBytesLimit = ( diskCacheSizeInGBLimit - this . currentDependencyIdGB - this . currentTemplateGB ) * GB_SIZE ; this . diskCacheSizeInBytesHighLimit = ( this . diskCacheSizeInBytesLimit * ( long ) this . highThreshold ) / 100l ; this . diskCacheSizeInBytesLowLimit = ( this . diskCacheSizeInBytesLimit * ( long ) this . lowThreshold ) / 100l ; traceDebug ( methodName , "cacheName=" + this . cacheName + " diskCacheSizeInBytesLimit=" + this . diskCacheSizeInBytesLimit + " diskCacheSizeInBytesHighLimit=" + this . diskCacheSizeInBytesHighLimit + " diskCacheSizeInBytesLowLimit=" + this . diskCacheSizeInBytesLowLimit + " currentDataGB=" + this . currentDataGB + " currentDependencyIdGB=" + this . currentDependencyIdGB + " currentTemplateGB=" + this . currentTemplateGB ) ; }
|
public class RecoveryMgr { /** * Rolls back the transaction . The method iterates through the log records ,
* calling { @ link LogRecord # undo ( Transaction ) } for each log record it finds
* for the transaction , until it finds the transaction ' s START record . */
void rollback ( Transaction tx ) { } }
|
ReversibleIterator < LogRecord > iter = new LogRecordIterator ( ) ; LogSeqNum txUnDoNextLSN = null ; while ( iter . hasNext ( ) ) { LogRecord rec = iter . next ( ) ; if ( rec . txNumber ( ) == txNum ) { if ( txUnDoNextLSN != null ) { if ( txUnDoNextLSN . compareTo ( rec . getLSN ( ) ) != 1 ) continue ; } if ( rec . op ( ) == OP_START ) return ; else if ( rec instanceof LogicalEndRecord ) { // Undo this Logical operation ;
rec . undo ( tx ) ; /* * Extract the logicalStartLSN form rec by casting it as a
* LogicalEndRecord */
LogSeqNum logicalStartLSN = ( ( LogicalEndRecord ) rec ) . getlogicalStartLSN ( ) ; /* * Save the Logical Start LSN to skip the log records
* between the end record and the start record */
txUnDoNextLSN = logicalStartLSN ; } else rec . undo ( tx ) ; } }
|
public class Person { /** * Reset a person . Meaning ti will be removed from all Caches .
* @ param _ key for the Person to be cleaned from Cache . UUID or Name .
* @ throws EFapsException the e faps exception */
public static void reset ( final String _key ) throws EFapsException { } }
|
final Person person ; if ( UUIDUtil . isUUID ( _key ) ) { person = Person . get ( UUID . fromString ( _key ) ) ; } else { person = Person . get ( _key ) ; } if ( person != null ) { InfinispanCache . get ( ) . < Long , Person > getCache ( Person . IDCACHE ) . remove ( person . getId ( ) ) ; InfinispanCache . get ( ) . < String , Person > getCache ( Person . NAMECACHE ) . remove ( person . getName ( ) ) ; if ( person . getUUID ( ) != null ) { InfinispanCache . get ( ) . < UUID , Person > getCache ( Person . UUIDCACHE ) . remove ( person . getUUID ( ) ) ; } }
|
public class GreedyEnsembleExperiment { /** * Build a single - element " ensemble " .
* @ param ensemble
* @ param vec */
protected void singleEnsemble ( final double [ ] ensemble , final NumberVector vec ) { } }
|
double [ ] buf = new double [ 1 ] ; for ( int i = 0 ; i < ensemble . length ; i ++ ) { buf [ 0 ] = vec . doubleValue ( i ) ; ensemble [ i ] = voting . combine ( buf , 1 ) ; if ( Double . isNaN ( ensemble [ i ] ) ) { LOG . warning ( "NaN after combining: " + FormatUtil . format ( buf ) + " " + voting . toString ( ) ) ; } } applyScaling ( ensemble , scaling ) ;
|
public class PendingWriteQueue { /** * Add the given { @ code msg } and { @ link ChannelPromise } . */
public void add ( Object msg , ChannelPromise promise ) { } }
|
assert ctx . executor ( ) . inEventLoop ( ) ; if ( msg == null ) { throw new NullPointerException ( "msg" ) ; } if ( promise == null ) { throw new NullPointerException ( "promise" ) ; } // It is possible for writes to be triggered from removeAndFailAll ( ) . To preserve ordering ,
// we should add them to the queue and let removeAndFailAll ( ) fail them later .
int messageSize = size ( msg ) ; PendingWrite write = PendingWrite . newInstance ( msg , messageSize , promise ) ; PendingWrite currentTail = tail ; if ( currentTail == null ) { tail = head = write ; } else { currentTail . next = write ; tail = write ; } size ++ ; bytes += messageSize ; tracker . incrementPendingOutboundBytes ( write . size ) ;
|
public class JobManagerRunner { public void start ( ) throws Exception { } }
|
try { leaderElectionService . start ( this ) ; } catch ( Exception e ) { log . error ( "Could not start the JobManager because the leader election service did not start." , e ) ; throw new Exception ( "Could not start the leader election service." , e ) ; }
|
public class PoolStatisticsImpl { /** * { @ inheritDoc } */
public void clear ( ) { } }
|
this . createdCount . set ( 0 ) ; this . destroyedCount . set ( 0 ) ; this . maxCreationTime . set ( Long . MIN_VALUE ) ; this . maxGetTime . set ( Long . MIN_VALUE ) ; this . maxPoolTime . set ( Long . MIN_VALUE ) ; this . maxUsageTime . set ( Long . MIN_VALUE ) ; this . maxUsedCount . set ( Integer . MIN_VALUE ) ; this . maxWaitTime . set ( Long . MIN_VALUE ) ; this . timedOut . set ( 0 ) ; this . totalBlockingTime . set ( 0L ) ; this . totalBlockingTimeInvocations . set ( 0L ) ; this . totalCreationTime . set ( 0L ) ; this . totalGetTime . set ( 0L ) ; this . totalGetTimeInvocations . set ( 0L ) ; this . totalPoolTime . set ( 0L ) ; this . totalPoolTimeInvocations . set ( 0L ) ; this . totalUsageTime . set ( 0L ) ; this . totalUsageTimeInvocations . set ( 0L ) ; this . inUseCount . set ( 0 ) ; this . blockingFailureCount . set ( 0 ) ; this . waitCount . set ( 0 ) ; this . commitCount = new AtomicLong ( 0L ) ; this . commitTotalTime = new AtomicLong ( 0L ) ; this . commitMaxTime = new AtomicLong ( 0L ) ; this . endCount = new AtomicLong ( 0L ) ; this . endTotalTime = new AtomicLong ( 0L ) ; this . endMaxTime = new AtomicLong ( 0L ) ; this . forgetCount = new AtomicLong ( 0L ) ; this . forgetTotalTime = new AtomicLong ( 0L ) ; this . forgetMaxTime = new AtomicLong ( 0L ) ; this . prepareCount = new AtomicLong ( 0L ) ; this . prepareTotalTime = new AtomicLong ( 0L ) ; this . prepareMaxTime = new AtomicLong ( 0L ) ; this . recoverCount = new AtomicLong ( 0L ) ; this . recoverTotalTime = new AtomicLong ( 0L ) ; this . recoverMaxTime = new AtomicLong ( 0L ) ; this . rollbackCount = new AtomicLong ( 0L ) ; this . rollbackTotalTime = new AtomicLong ( 0L ) ; this . rollbackMaxTime = new AtomicLong ( 0L ) ; this . startCount = new AtomicLong ( 0L ) ; this . startTotalTime = new AtomicLong ( 0L ) ; this . startMaxTime = new AtomicLong ( 0L ) ;
|
public class WebSockets { /** * Sends a complete ping message , invoking the callback when complete
* @ param data The data to send
* @ param wsChannel The web socket channel
* @ param callback The callback to invoke on completion */
public static void sendPing ( final ByteBuffer [ ] data , final WebSocketChannel wsChannel , final WebSocketCallback < Void > callback ) { } }
|
sendInternal ( mergeBuffers ( data ) , WebSocketFrameType . PING , wsChannel , callback , null , - 1 ) ;
|
public class CreateLayerRequest { /** * An array of < code > Package < / code > objects that describes the layer packages .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setPackages ( java . util . Collection ) } or { @ link # withPackages ( java . util . Collection ) } if you want to override
* the existing values .
* @ param packages
* An array of < code > Package < / code > objects that describes the layer packages .
* @ return Returns a reference to this object so that method calls can be chained together . */
public CreateLayerRequest withPackages ( String ... packages ) { } }
|
if ( this . packages == null ) { setPackages ( new com . amazonaws . internal . SdkInternalList < String > ( packages . length ) ) ; } for ( String ele : packages ) { this . packages . add ( ele ) ; } return this ;
|
public class IntervalExtensions { /** * Checks if the given time range is between the given time range to check
* @ param timeRange
* the time range
* @ param timeRangeToCheck
* the time range to check
* @ return true , if the given time range is between the given time range to check otherwise
* false */
public static boolean isBetween ( final Interval timeRange , final Interval timeRangeToCheck ) { } }
|
return ( ( timeRange . getStart ( ) != null && timeRange . getStart ( ) . isBefore ( timeRangeToCheck . getStart ( ) ) ) && ( timeRange . getEnd ( ) != null && timeRange . getEnd ( ) . isAfter ( timeRangeToCheck . getEnd ( ) ) ) ) ;
|
public class Statement { /** * Creates a code chunk that assigns and prints jsDoc above the assignment . */
public static Statement assign ( Expression lhs , Expression rhs , JsDoc jsDoc ) { } }
|
return Assignment . create ( lhs , rhs , jsDoc ) ;
|
public class AbstractRouter { /** * Parse the hash and divides it into shellCreator , route and parameters
* @ param route ths hash to parse
* @ return parse result
* @ throws com . github . nalukit . nalu . client . internal . route . RouterException in case no controller is found for the routing */
public RouteResult parse ( String route ) throws RouterException { } }
|
RouteResult routeResult = new RouteResult ( ) ; String routeValue = route ; // only the part after the first # is intresting :
if ( routeValue . contains ( "#" ) ) { routeValue = routeValue . substring ( routeValue . indexOf ( "#" ) + 1 ) ; } // extract shellCreator first :
if ( routeValue . startsWith ( "/" ) ) { routeValue = routeValue . substring ( 1 ) ; } // check , if there are more " / "
if ( routeValue . contains ( "/" ) ) { routeResult . setShell ( "/" + routeValue . substring ( 0 , routeValue . indexOf ( "/" ) ) ) ; } else { routeResult . setShell ( "/" + routeValue ) ; } // check , if the shellCreator exists . . . .
Optional < String > optional = this . shellConfiguration . getShells ( ) . stream ( ) . map ( ShellConfig :: getRoute ) . filter ( f -> f . equals ( routeResult . getShell ( ) ) ) . findAny ( ) ; if ( optional . isPresent ( ) ) { routeResult . setShell ( optional . get ( ) ) ; } else { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "no matching shellCreator found for route >>" ) . append ( route ) . append ( "<< --> Routing aborted!" ) ; RouterLogger . logSimple ( sb . toString ( ) , 1 ) ; throw new RouterException ( sb . toString ( ) ) ; } // extract route first :
routeValue = route ; if ( routeValue . startsWith ( "/" ) ) { routeValue = routeValue . substring ( 1 ) ; } if ( routeValue . contains ( "/" ) ) { String searchRoute = routeValue ; Optional < RouteConfig > optionalRouterConfig = this . routerConfiguration . getRouters ( ) . stream ( ) . filter ( rc -> Nalu . match ( searchRoute , rc . getRoute ( ) ) ) . findFirst ( ) ; if ( optionalRouterConfig . isPresent ( ) ) { routeResult . setRoute ( optionalRouterConfig . get ( ) . getRoute ( ) ) ; if ( routeResult . getRoute ( ) . contains ( "*" ) ) { String [ ] partsOfRoute = routeValue . split ( "/" ) ; String compareRoute = optionalRouterConfig . get ( ) . getRoute ( ) ; if ( compareRoute . startsWith ( "/" ) ) { compareRoute = compareRoute . substring ( 1 ) ; } String [ ] partsOfRouteFromConfiguration = compareRoute . split ( "/" ) ; for ( int i = 0 ; i < partsOfRouteFromConfiguration . length ; i ++ ) { if ( partsOfRouteFromConfiguration [ i ] . equals ( "*" ) ) { if ( partsOfRoute . length - 1 >= i ) { String parameterValue = partsOfRoute [ i ] . replace ( AbstractRouter . NALU_SLASH_REPLACEMENT , "/" ) ; if ( Nalu . isUsingColonForParametersInUrl ( ) ) { if ( parameterValue . length ( ) > 0 ) { if ( parameterValue . startsWith ( ":" ) ) { parameterValue = parameterValue . substring ( 1 ) ; } } } routeResult . getParameterValues ( ) . add ( parameterValue ) ; } else { routeResult . getParameterValues ( ) . add ( "" ) ; } } } } } else { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "no matching route found for route >>" ) . append ( route ) . append ( "<< --> Routing aborted!" ) ; RouterLogger . logSimple ( sb . toString ( ) , 1 ) ; throw new RouterException ( sb . toString ( ) ) ; } } else { String finalSearchPart = "/" + routeValue ; if ( this . routerConfiguration . getRouters ( ) . stream ( ) . anyMatch ( f -> f . match ( finalSearchPart ) ) ) { routeResult . setRoute ( "/" + routeValue ) ; } else { throw new RouterException ( RouterLogger . 
logNoMatchingRoute ( route ) ) ; } } return routeResult ;
|
public class StreamValueData { /** * { @ inheritDoc } */
@ Override public InputStream getAsStream ( ) throws IOException { } }
|
if ( isByteArrayAfterSpool ( ) ) { return new ByteArrayInputStream ( data ) ; // from bytes
} else { if ( spoolFile != null ) { return PrivilegedFileHelper . fileInputStream ( spoolFile ) ; // from spool file
} else { throw new IllegalArgumentException ( "Stream already consumed" ) ; } }
|
public class GuidedDTDRLPersistence { /** * ActionSetField and ActionUpdateField need to be grouped in this manner . */
private LabelledAction findByLabelledAction ( List < LabelledAction > actions , String boundName , boolean isUpdate ) { } }
|
for ( LabelledAction labelledAction : actions ) { IAction action = labelledAction . action ; if ( action instanceof ActionFieldList ) { if ( labelledAction . boundName . equals ( boundName ) && labelledAction . isUpdate == isUpdate ) { return labelledAction ; } } } return null ;
|
public class Distributions { /** * Creates an { @ code Distribution } with { @ code ExponentialBuckets } .
* @ param numFiniteBuckets initializes the number of finite buckets
* @ param growthFactor initializes the growth factor
* @ param scale initializes the scale
* @ return a { @ code Distribution } with { @ code ExponentialBuckets }
* @ throws IllegalArgumentException if a bad input prevents creation . */
public static Distribution createExponential ( int numFiniteBuckets , double growthFactor , double scale ) { } }
|
if ( numFiniteBuckets <= 0 ) { throw new IllegalArgumentException ( MSG_BAD_NUM_FINITE_BUCKETS ) ; } if ( growthFactor <= 1.0 ) { throw new IllegalArgumentException ( String . format ( MSG_DOUBLE_TOO_LOW , "growth factor" , 1.0 ) ) ; } if ( scale <= 0.0 ) { throw new IllegalArgumentException ( String . format ( MSG_DOUBLE_TOO_LOW , "scale" , 0.0 ) ) ; } ExponentialBuckets buckets = ExponentialBuckets . newBuilder ( ) . setGrowthFactor ( growthFactor ) . setNumFiniteBuckets ( numFiniteBuckets ) . setScale ( scale ) . build ( ) ; Builder builder = Distribution . newBuilder ( ) . setExponentialBuckets ( buckets ) ; for ( int i = 0 ; i < numFiniteBuckets + 2 ; i ++ ) { builder . addBucketCounts ( 0L ) ; } return builder . build ( ) ;
|
public class tmglobal_tmtrafficpolicy_binding { /** * Use this API to fetch a tmglobal _ tmtrafficpolicy _ binding resources . */
public static tmglobal_tmtrafficpolicy_binding [ ] get ( nitro_service service ) throws Exception { } }
|
tmglobal_tmtrafficpolicy_binding obj = new tmglobal_tmtrafficpolicy_binding ( ) ; tmglobal_tmtrafficpolicy_binding response [ ] = ( tmglobal_tmtrafficpolicy_binding [ ] ) obj . get_resources ( service ) ; return response ;
|
public class ArcaBroadcastManager { private static void addToReceivers ( final BroadcastReceiver receiver , final String action ) { } }
|
Set < String > actions = RECEIVERS . get ( receiver ) ; if ( actions == null ) { actions = new HashSet < String > ( 1 ) ; RECEIVERS . put ( receiver , actions ) ; } actions . add ( action ) ;
|
public class DeleteHlsTaskRunner { /** * Wait for the distribution to be disabled
* Note that this can take up to 15 min */
private void waitForDisabled ( String distId ) { } }
|
long maxTime = 1800000 ; // 30 min
long start = System . currentTimeMillis ( ) ; boolean deployed = isDeployed ( distId ) ; while ( ! deployed ) { if ( System . currentTimeMillis ( ) < start + maxTime ) { sleep ( 10000 ) ; deployed = isDeployed ( distId ) ; } else { String error = "Timeout Reached waiting for distribution to " + "be disabled. Please wait a few minutes and try again." ; throw new RuntimeException ( error ) ; } }
|
public class StringGroovyMethods { /** * Overloads the left shift operator to provide an easy way to append multiple
* objects as string representations to a String .
* @ param self a String
* @ param value an Object
* @ return a StringBuffer built from this string
* @ since 1.0 */
public static StringBuffer leftShift ( String self , Object value ) { } }
|
return new StringBuffer ( self ) . append ( value ) ;
|
public class ElasticsearchClusterRunner { /** * Wait for green state of a cluster .
* @ param indices indices to check status
* @ return cluster health status */
public ClusterHealthStatus ensureGreen ( final String ... indices ) { } }
|
final ClusterHealthResponse actionGet = client ( ) . admin ( ) . cluster ( ) . health ( Requests . clusterHealthRequest ( indices ) . waitForGreenStatus ( ) . waitForEvents ( Priority . LANGUID ) . waitForNoRelocatingShards ( true ) ) . actionGet ( ) ; if ( actionGet . isTimedOut ( ) ) { onFailure ( "ensureGreen timed out, cluster state:\n" + client ( ) . admin ( ) . cluster ( ) . prepareState ( ) . get ( ) . getState ( ) + "\n" + client ( ) . admin ( ) . cluster ( ) . preparePendingClusterTasks ( ) . get ( ) , actionGet ) ; } return actionGet . getStatus ( ) ;
|
public class CacheServletWrapper40 { /** * { @ inheritDoc } */
@ Override public void handleRequest ( ServletRequest req , ServletResponse res ) throws Exception { } }
|
String methodName = "handleRequest" ; // set the MappingMatch here as when the CacheServletWrapper is being used we will not go
// through the path of URIMatcher .
// reqData . setMappingMatch ( this . mapping . getMappingMatch ( ) ) ;
WebAppDispatcherContext40 dispatchContext = ( WebAppDispatcherContext40 ) ( ( SRTServletRequest40 ) req ) . getWebAppDispatcherContext ( ) ; dispatchContext . setMappingMatch ( mapping . getMappingMatch ( ) ) ; if ( com . ibm . ejs . ras . TraceComponent . isAnyTracingEnabled ( ) && logger . isLoggable ( Level . FINE ) ) { logger . logp ( Level . FINE , CLASS_NAME , methodName , "MappingMatch: " + mapping . getMappingMatch ( ) ) ; } super . handleRequest ( req , res ) ;
|
public class Console { /** * Formats and logs a message . If we are running in HostedMode the log message will be reported
* to its console . If we ' re running in Firefox , the log message will be sent to Firebug if it
* is enabled . */
public static void log ( String message , Object ... args ) { } }
|
StringBuilder sb = new StringBuilder ( ) ; sb . append ( message ) ; if ( args . length > 1 ) { sb . append ( " [" ) ; for ( int ii = 0 , ll = args . length / 2 ; ii < ll ; ii ++ ) { if ( ii > 0 ) { sb . append ( ", " ) ; } sb . append ( args [ 2 * ii ] ) . append ( "=" ) . append ( args [ 2 * ii + 1 ] ) ; } sb . append ( "]" ) ; } Object error = ( args . length % 2 == 1 ) ? args [ args . length - 1 ] : null ; if ( GWT . isScript ( ) ) { if ( error != null ) { sb . append ( ": " ) . append ( error ) ; } firebugLog ( sb . toString ( ) , error ) ; } else { GWT . log ( sb . toString ( ) , ( Throwable ) error ) ; }
|
public class ApiTokenClient { /** * Retrieves all user scenarios .
* @ return list of { @ link com . loadimpact . resource . UserScenario } */
public List < UserScenario > getUserScenarios ( ) { } }
|
return invoke ( USER_SCENARIOS , new RequestClosure < JsonArray > ( ) { @ Override public JsonArray call ( Invocation . Builder request ) { return request . get ( JsonArray . class ) ; } } , new ResponseClosure < JsonArray , List < UserScenario > > ( ) { @ Override public List < UserScenario > call ( JsonArray json ) { List < UserScenario > ds = new ArrayList < UserScenario > ( json . size ( ) ) ; for ( int k = 0 ; k < json . size ( ) ; ++ k ) { ds . add ( new UserScenario ( json . getJsonObject ( k ) ) ) ; } return ds ; } } ) ;
|
public class CalendarCodeGenerator { /** * Generates the date - time classes into the given output directory . */
public Map < LocaleID , ClassName > generate ( Path outputDir , DataReader reader ) throws IOException { } }
|
Map < LocaleID , ClassName > dateClasses = new TreeMap < > ( ) ; List < DateTimeData > dateTimeDataList = new ArrayList < > ( ) ; for ( Map . Entry < LocaleID , DateTimeData > entry : reader . calendars ( ) . entrySet ( ) ) { DateTimeData dateTimeData = entry . getValue ( ) ; LocaleID localeId = entry . getKey ( ) ; String className = "_CalendarFormatter_" + localeId . safe ; TimeZoneData timeZoneData = reader . timezones ( ) . get ( localeId ) ; TypeSpec type = createFormatter ( dateTimeData , timeZoneData , className ) ; CodeGenerator . saveClass ( outputDir , Types . PACKAGE_CLDR_DATES , className , type ) ; ClassName cls = ClassName . get ( Types . PACKAGE_CLDR_DATES , className ) ; dateClasses . put ( localeId , cls ) ; dateTimeDataList . add ( dateTimeData ) ; } String className = "_CalendarUtils" ; TypeSpec . Builder utilsType = TypeSpec . classBuilder ( className ) . addModifiers ( PUBLIC ) ; addSkeletonClassifierMethod ( utilsType , dateTimeDataList ) ; addMetaZones ( utilsType , reader . metazones ( ) ) ; buildTimeZoneAliases ( utilsType , reader . timezoneAliases ( ) ) ; CodeGenerator . saveClass ( outputDir , Types . PACKAGE_CLDR_DATES , "_CalendarUtils" , utilsType . build ( ) ) ; return dateClasses ;
|
public class CheckBoxFormComponentInterceptor { /** * Returns the keys used to fetch the extra text from the
* < code > MessageSource < / code > .
* The keys returned are
* < code > & lt ; formModelId & gt ; . & lt ; propertyName & gt ; . & lt ; textKey & gt ; , & lt ; propertyName & gt ; . & lt ; textKey & gt ; , & lt ; textKey & gt ; < / code >
* Can safely be overridden to add extra keys
* @ param propertyName
* the property name
* @ return the keys */
protected String [ ] getExtraTextKeys ( String propertyName ) { } }
|
return new String [ ] { getFormModel ( ) . getId ( ) + "." + propertyName + "." + textKey , propertyName + "." + textKey , textKey } ;
|
public class WhiteboxImpl { /** * Set the value of a field using reflection . Use this method when you need
* to specify in which class the field is declared . This is useful if you
* have two fields in a class hierarchy that has the same name but you like
* to modify the latter .
* @ param object the object to modify
* @ param fieldName the name of the field
* @ param value the new value of the field
* @ param where which class the field is defined */
public static void setInternalState ( Object object , String fieldName , Object value , Class < ? > where ) { } }
|
if ( object == null || fieldName == null || fieldName . equals ( "" ) || fieldName . startsWith ( " " ) ) { throw new IllegalArgumentException ( "object, field name, and \"where\" must not be empty or null." ) ; } final Field field = getField ( fieldName , where ) ; try { field . set ( object , value ) ; } catch ( Exception e ) { throw new RuntimeException ( "Internal Error: Failed to set field in method setInternalState." , e ) ; }
|
public class ListHsmsResult {
    /**
     * The list of ARNs that identify the HSMs.
     *
     * @return The list of ARNs that identify the HSMs; never {@code null} —
     *         an empty list is lazily created on first access.
     */
    public java.util.List<String> getHsmList() {
        // Lazily initialise the backing list so callers never observe null.
        if (hsmList == null) {
            hsmList = new com.amazonaws.internal.SdkInternalList<String>();
        }
        return hsmList;
    }
}
|
public class PeerManager { /** * Invokes the supplied function on < em > all < / em > node objects ( except the local node ) . A caller
* that needs to call an invocation service method on a remote node should use this mechanism
* to locate the appropriate node ( or nodes ) and call the desired method .
* @ return the number of times the invoked function returned true . */
public int invokeOnNodes ( Function < Tuple < Client , NodeObject > , Boolean > func ) { } }
|
int invoked = 0 ; for ( PeerNode peer : _peers . values ( ) ) { if ( peer . nodeobj != null ) { if ( func . apply ( Tuple . newTuple ( peer . getClient ( ) , peer . nodeobj ) ) ) { invoked ++ ; } } } return invoked ;
|
public class DateUtilExtensions {
    /**
     * <p>Return a string representation of the time portion of this date
     * according to the locale-specific {@link java.text.DateFormat#MEDIUM}
     * default format. For an "en_UK" system locale, this would be
     * <code>HH:MM:ss</code>.
     * <p>Note that a new DateFormat instance is created for every invocation
     * of this method (for thread safety).
     *
     * @param self a Date
     * @return a string representing the time portion of this date
     * @see java.text.DateFormat#getTimeInstance(int)
     * @see java.text.DateFormat#MEDIUM
     * @since 1.5.7
     */
    public static String getTimeString(Date self) {
        final DateFormat timeFormat = DateFormat.getTimeInstance(DateFormat.MEDIUM);
        return timeFormat.format(self);
    }
}
|
public class DimensionGroup { /** * A list of specific dimensions from a dimension group . If this parameter is not present , then it signifies that
* all of the dimensions in the group were requested , or are present in the response .
* Valid values for elements in the < code > Dimensions < / code > array are :
* < ul >
* < li >
* db . user . id
* < / li >
* < li >
* db . user . name
* < / li >
* < li >
* db . host . id
* < / li >
* < li >
* db . host . name
* < / li >
* < li >
* db . sql . id
* < / li >
* < li >
* db . sql . db _ id
* < / li >
* < li >
* db . sql . statement
* < / li >
* < li >
* db . sql . tokenized _ id
* < / li >
* < li >
* db . sql _ tokenized . id
* < / li >
* < li >
* db . sql _ tokenized . db _ id
* < / li >
* < li >
* db . sql _ tokenized . statement
* < / li >
* < li >
* db . wait _ event . name
* < / li >
* < li >
* db . wait _ event . type
* < / li >
* < li >
* db . wait _ event _ type . name
* < / li >
* < / ul >
* @ param dimensions
* A list of specific dimensions from a dimension group . If this parameter is not present , then it signifies
* that all of the dimensions in the group were requested , or are present in the response . < / p >
* Valid values for elements in the < code > Dimensions < / code > array are :
* < ul >
* < li >
* db . user . id
* < / li >
* < li >
* db . user . name
* < / li >
* < li >
* db . host . id
* < / li >
* < li >
* db . host . name
* < / li >
* < li >
* db . sql . id
* < / li >
* < li >
* db . sql . db _ id
* < / li >
* < li >
* db . sql . statement
* < / li >
* < li >
* db . sql . tokenized _ id
* < / li >
* < li >
* db . sql _ tokenized . id
* < / li >
* < li >
* db . sql _ tokenized . db _ id
* < / li >
* < li >
* db . sql _ tokenized . statement
* < / li >
* < li >
* db . wait _ event . name
* < / li >
* < li >
* db . wait _ event . type
* < / li >
* < li >
* db . wait _ event _ type . name
* < / li > */
public void setDimensions ( java . util . Collection < String > dimensions ) { } }
|
if ( dimensions == null ) { this . dimensions = null ; return ; } this . dimensions = new java . util . ArrayList < String > ( dimensions ) ;
|
public class Ports { /** * Returns a free port in the defined range , returns null if none is available . */
public static Integer findFree ( int lowIncluse , int highInclusive ) { } }
|
int low = Math . max ( 1 , Math . min ( lowIncluse , highInclusive ) ) ; int high = Math . min ( 65535 , Math . max ( lowIncluse , highInclusive ) ) ; Integer result = null ; int split = RandomUtils . nextInt ( low , high + 1 ) ; for ( int port = split ; port <= high ; port ++ ) { if ( isFree ( port ) ) { result = port ; break ; } } if ( result == null ) { for ( int port = low ; port < split ; port ++ ) { if ( isFree ( port ) ) { result = port ; break ; } } } return result ;
|
public class Mail {
    /**
     * Sets a mail param: either attaches a file (when {@code file} is given)
     * or dispatches a named header/field value onto this mail.
     *
     * @param type             mime type of the attachment
     * @param file             path of the file to attach; when non-null the
     *                         name/value branch is skipped entirely
     * @param fileName         display name for the attachment
     * @param name             the param name (e.g. "bcc", "subject"); unknown
     *                         names become raw SMTP headers
     * @param value            the param value
     * @param disposition      attachment disposition
     * @param contentID        content id for inline attachments
     * @param oRemoveAfterSend whether to delete the file after sending;
     *                         {@code null} falls back to the instance default
     * @throws PageException
     */
    public void setParam(String type, String file, String fileName, String name, String value, String disposition, String contentID, Boolean oRemoveAfterSend) throws PageException {
        if (file != null) {
            // Attachment branch: null oRemoveAfterSend defers to the field default.
            boolean removeAfterSend = (oRemoveAfterSend == null) ? remove : oRemoveAfterSend.booleanValue();
            setMimeattach(file, fileName, type, disposition, contentID, removeAfterSend);
        } else {
            // Name/value branch: dispatch known param names to their setters;
            // anything unrecognised is passed through as a raw SMTP header.
            if (name.equalsIgnoreCase("bcc")) setBcc(value);
            else if (name.equalsIgnoreCase("cc")) setCc(value);
            else if (name.equalsIgnoreCase("charset")) setCharset(CharsetUtil.toCharset(value, null));
            else if (name.equalsIgnoreCase("failto")) setFailto(value);
            else if (name.equalsIgnoreCase("from")) setFrom(value);
            else if (name.equalsIgnoreCase("mailerid")) setMailerid(value);
            else if (name.equalsIgnoreCase("mimeattach")) setMimeattach(value);
            else if (name.equalsIgnoreCase("priority")) setPriority(value);
            else if (name.equalsIgnoreCase("replyto")) setReplyto(value);
            else if (name.equalsIgnoreCase("subject")) setSubject(value);
            else if (name.equalsIgnoreCase("to")) setTo(value);
            else smtp.addHeader(name, value);
        }
    }
}
|
public class Targeting {
    /**
     * Sets the mobileApplicationTargeting value for this Targeting.
     *
     * @param mobileApplicationTargeting specifies targeting against mobile
     *        applications; the reference is stored as-is (no copy is made)
     */
    public void setMobileApplicationTargeting(com.google.api.ads.admanager.axis.v201811.MobileApplicationTargeting mobileApplicationTargeting) {
        this.mobileApplicationTargeting = mobileApplicationTargeting;
    }
}
|
public class MigrateToExtensionSettings { /** * Gets the set of feed item IDs from the function if it is of the form :
* < code > IN ( FEED _ ITEM _ ID , { xxx , xxx } ) < / code > . Otherwise , returns an empty set . */
private static Set < Long > getFeedItemIdsFromArgument ( Function function ) { } }
|
if ( function . getLhsOperand ( ) . length == 1 && function . getLhsOperand ( 0 ) instanceof RequestContextOperand ) { RequestContextOperand requestContextOperand = ( RequestContextOperand ) function . getLhsOperand ( 0 ) ; if ( RequestContextOperandContextType . FEED_ITEM_ID . equals ( requestContextOperand . getContextType ( ) ) && FunctionOperator . IN . equals ( function . getOperator ( ) ) ) { return Arrays . stream ( function . getRhsOperand ( ) ) . filter ( ConstantOperand . class :: isInstance ) . map ( argument -> ( ( ConstantOperand ) argument ) . getLongValue ( ) ) . collect ( Collectors . toSet ( ) ) ; } } return new HashSet < > ( ) ;
|
public class EditManager {
    /**
     * Create and append an edit directive to the edit set if not there.
     *
     * @param plfNode       the PLF node whose edit set is updated
     * @param attributeName the attribute being marked as edited
     * @param type          the directive element name to create/match
     * @param person        the person whose PLF document is modified
     * @throws PortalException if a new directive id cannot be generated
     */
    private static void addDirective(Element plfNode, String attributeName, String type, IPerson person) throws PortalException {
        Document plf = (Document) person.getAttribute(Constants.PLF);
        Element editSet = getEditSet(plfNode, plf, person, true);
        // See if the attribute has already been marked as being edited:
        // scan the edit set's children for a directive of the same type
        // that names this attribute.
        Element child = (Element) editSet.getFirstChild();
        Element edit = null;
        while (child != null && edit == null) {
            if (child.getNodeName().equals(type)
                    && child.getAttribute(Constants.ATT_NAME).equals(attributeName))
                edit = child;
            child = (Element) child.getNextSibling();
        }
        if (edit == null) // if not found then newly mark as edited
        {
            // A fresh directive needs a structure-directive id from the DLS.
            String ID = null;
            try {
                ID = getDLS().getNextStructDirectiveId(person);
            } catch (Exception e) {
                throw new PortalException("Exception encountered while "
                        + "generating new edit node "
                        + "Id for userId=" + person.getID(), e);
            }
            edit = plf.createElement(type);
            edit.setAttribute(Constants.ATT_TYPE, type);
            edit.setAttribute(Constants.ATT_ID, ID);
            edit.setAttribute(Constants.ATT_NAME, attributeName);
            editSet.appendChild(edit);
        }
    }
}
|
public class TransportTermsByQueryAction {
    /**
     * The operation that executes the query and generates a
     * {@link TermsByQueryShardResponse} for each shard.
     *
     * Builds a per-shard SearchContext, optionally parses the request's query
     * source, collects the terms of the requested field from the matching
     * documents, and returns them. The context is always closed (which also
     * releases the acquired searcher) and removed from the thread-local.
     */
    @Override
    protected TermsByQueryShardResponse shardOperation(TermsByQueryShardRequest shardRequest) throws ElasticsearchException {
        IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(shardRequest.shardId().id());
        TermsByQueryRequest request = shardRequest.request();
        OrderByShardOperation orderByOperation = OrderByShardOperation.get(request.getOrderBy(), request.maxTermsPerShard());
        SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), shardRequest.shardId().getIndex(), shardRequest.shardId().id());
        ShardSearchRequest shardSearchRequest = new ShardSearchLocalRequest(request.types(), request.nowInMillis(), shardRequest.filteringAliases());
        // Acquiring the searcher pins index resources; they are released when
        // the context is closed in the finally block below.
        SearchContext context = new DefaultSearchContext(0, shardSearchRequest, shardTarget,
                indexShard.acquireSearcher("termsByQuery"), indexService, indexShard, scriptService,
                pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
                SearchService.NO_TIMEOUT);
        SearchContext.setCurrent(context);
        try {
            // The terms field must resolve against the shard's mappings.
            MappedFieldType fieldType = context.smartNameFieldType(request.field());
            if (fieldType == null) {
                throw new SearchContextException(context, "[termsByQuery] field '" + request.field()
                        + "' not found for types " + Arrays.toString(request.types()));
            }
            IndexFieldData indexFieldData = context.fieldData().getForField(fieldType);
            BytesReference querySource = request.querySource();
            if (querySource != null && querySource.length() > 0) {
                // Parse the optional query; the type context is thread-local
                // and must be cleared even when parsing fails.
                XContentParser queryParser = null;
                try {
                    queryParser = XContentFactory.xContent(querySource).createParser(querySource);
                    QueryParseContext.setTypes(request.types());
                    ParsedQuery parsedQuery = orderByOperation.getParsedQuery(queryParser, indexService);
                    if (parsedQuery != null) {
                        context.parsedQuery(parsedQuery);
                    }
                } finally {
                    QueryParseContext.removeTypes();
                    if (queryParser != null) {
                        queryParser.close();
                    }
                }
            }
            context.preProcess();
            // Execute the search only gathering the hit count and bitset for each segment.
            logger.debug("{}: Executes search for collecting terms {}", Thread.currentThread().getName(), shardRequest.shardId());
            TermsCollector termsCollector = this.getTermsCollector(request.termsEncoding(), indexFieldData, context);
            if (request.expectedTerms() != null) termsCollector.setExpectedTerms(request.expectedTerms());
            if (request.maxTermsPerShard() != null) termsCollector.setMaxTerms(request.maxTermsPerShard());
            HitStream hitStream = orderByOperation.getHitStream(context);
            TermsSet terms = termsCollector.collect(hitStream);
            logger.debug("{}: Returns terms response with {} terms for shard {}", Thread.currentThread().getName(), terms.size(), shardRequest.shardId());
            return new TermsByQueryShardResponse(shardRequest.shardId(), terms);
        } catch (Throwable e) {
            logger.error("[termsByQuery] Error executing shard operation", e);
            throw new QueryPhaseExecutionException(context, "[termsByQuery] Failed to execute query", e);
        } finally {
            // This will also release the index searcher.
            context.close();
            SearchContext.removeCurrent();
        }
    }
}
|
public class ContentSpecProcessor {
    /**
     * Does a validation pass before processing any data.
     *
     * Runs three validation stages in increasing cost order, checking for a
     * shutdown request between stages so a cancelled run aborts promptly.
     *
     * @param processorData The data to be used during processing.
     * @return True if the content spec is valid, otherwise false.
     */
    protected boolean doValidationPass(final ProcessorData processorData) {
        // Validate the content specification before doing any rest calls
        if (!doFirstValidationPass(processorData)) {
            return false;
        }
        // Check if the app should be shutdown
        if (isShuttingDown.get()) {
            shutdown.set(true);
            return false;
        }
        // Validate the content specification bug links before doing the post
        // validation as it is a costly operation
        if (!doBugLinkValidationPass(processorData)) {
            log.error(ProcessorConstants.ERROR_INVALID_CS_MSG);
            return false;
        }
        // Check if the app should be shutdown
        if (isShuttingDown.get()) {
            shutdown.set(true);
            return false;
        }
        // Validate the content specification now that we have most of the
        // data from the REST API
        if (!doSecondValidationPass(processorData)) {
            log.error(ProcessorConstants.ERROR_INVALID_CS_MSG);
            return false;
        }
        // Log that the spec is valid
        log.info(ProcessorConstants.INFO_VALID_CS_MSG);
        return true;
    }
}
|
public class ObjectFactory {
    /**
     * Create an instance of
     * {@link Document.Projects.Project.Task.ResourceAssignments}.
     *
     * @return a new, empty ResourceAssignments container
     */
    public Document.Projects.Project.Task.ResourceAssignments createDocumentProjectsProjectTaskResourceAssignments() {
        return new Document.Projects.Project.Task.ResourceAssignments();
    }
}
|
public class RabbitMqQueueFactory {
    /**
     * Setter for {@link #defaultConnectionFactory}.
     *
     * Delegates to the two-argument overload with {@code false} for the
     * second flag (see that overload for its meaning).
     *
     * @param connectionFactory the connection factory to use as default
     * @return this factory, for chaining
     * @since 0.7.1
     */
    public RabbitMqQueueFactory<T, ID, DATA> setDefaultConnectionFactory(ConnectionFactory connectionFactory) {
        return setDefaultConnectionFactory(connectionFactory, false);
    }
}
|
public class TimeField { /** * Parses the given string into a corresponding time . The string must
* follow the standard format used by time fields , as defined by
* FORMAT and as would be produced by format ( ) .
* @ param timeString
* The time string to parse , which may be null .
* @ return
* The time corresponding to the given time string , or null if the
* provided time string was null or blank .
* @ throws ParseException
* If the given time string does not conform to the standard format
* used by time fields . */
public static Date parse ( String timeString ) throws ParseException { } }
|
// Return null if no time provided
if ( timeString == null || timeString . isEmpty ( ) ) return null ; // Parse time according to format
DateFormat timeFormat = new SimpleDateFormat ( TimeField . FORMAT ) ; return timeFormat . parse ( timeString ) ;
|
public class AmazonGameLiftClient { /** * Updates game session properties . This includes the session name , maximum player count , protection policy , which
* controls whether or not an active game session can be terminated during a scale - down event , and the player
* session creation policy , which controls whether or not new players can join the session . To update a game
* session , specify the game session ID and the values you want to change . If successful , an updated
* < a > GameSession < / a > object is returned .
* < ul >
* < li >
* < a > CreateGameSession < / a >
* < / li >
* < li >
* < a > DescribeGameSessions < / a >
* < / li >
* < li >
* < a > DescribeGameSessionDetails < / a >
* < / li >
* < li >
* < a > SearchGameSessions < / a >
* < / li >
* < li >
* < a > UpdateGameSession < / a >
* < / li >
* < li >
* < a > GetGameSessionLogUrl < / a >
* < / li >
* < li >
* Game session placements
* < ul >
* < li >
* < a > StartGameSessionPlacement < / a >
* < / li >
* < li >
* < a > DescribeGameSessionPlacement < / a >
* < / li >
* < li >
* < a > StopGameSessionPlacement < / a >
* < / li >
* < / ul >
* < / li >
* < / ul >
* @ param updateGameSessionRequest
* Represents the input for a request action .
* @ return Result of the UpdateGameSession operation returned by the service .
* @ throws NotFoundException
* A service resource associated with the request could not be found . Clients should not retry such
* requests .
* @ throws ConflictException
* The requested operation would cause a conflict with the current state of a service resource associated
* with the request . Resolve the conflict before retrying this request .
* @ throws InternalServiceException
* The service encountered an unrecoverable internal failure while processing the request . Clients can retry
* such requests immediately or after a waiting period .
* @ throws UnauthorizedException
* The client failed authentication . Clients should not retry such requests .
* @ throws InvalidGameSessionStatusException
* The requested operation would cause a conflict with the current state of a resource associated with the
* request and / or the game instance . Resolve the conflict before retrying .
* @ throws InvalidRequestException
* One or more parameter values in the request are invalid . Correct the invalid parameter values before
* retrying .
* @ sample AmazonGameLift . UpdateGameSession
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / gamelift - 2015-10-01 / UpdateGameSession " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public UpdateGameSessionResult updateGameSession ( UpdateGameSessionRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeUpdateGameSession ( request ) ;
|
public class VimGenerator2 {
    /**
     * Generate the Vim numeric constants.
     *
     * Emits match rules for floating-point, hexadecimal and integer literals,
     * groups them into the sarlNumber cluster, and highlights the cluster as
     * a Vim CONSTANT.
     *
     * @param it the receiver of the generated elements.
     */
    protected void generateNumericConstants(IStyleAppendable it) {
        appendComment(it, "numerical constants"); //$NON-NLS-1$
        // Floating point: digits, a dot, digits, optional exponent, optional f/F/d/D suffix.
        appendMatch(it, "sarlNumber", "[0-9][0-9]*\\.[0-9]\\+([eE][0-9]\\+)\\?[fFdD]\\?"); //$NON-NLS-1$ //$NON-NLS-2$
        // Hexadecimal: 0x/0X prefix followed by hex digits.
        appendMatch(it, "sarlNumber", "0[xX][0-9a-fA-F]\\+"); //$NON-NLS-1$ //$NON-NLS-2$
        // Decimal integer with optional l/L suffix.
        appendMatch(it, "sarlNumber", "[0-9]\\+[lL]\\?"); //$NON-NLS-1$ //$NON-NLS-2$
        appendCluster(it, "sarlNumber"); //$NON-NLS-1$
        hilight("sarlNumber", VimSyntaxGroup.CONSTANT); //$NON-NLS-1$
        it.newLine();
    }
}
|
public class RealRxPermission {
    /**
     * Map emitted items from the source observable into {@link Permission}
     * objects for each permission in parameters.
     * If one or several permissions have never been requested, invoke the
     * related framework method to ask the user if he allows the permissions.
     *
     * @param permissions the permissions to request; validated before use
     * @return a transformer that triggers the permission request on each
     *         upstream emission
     */
    @NonNull
    @CheckReturnValue
    private <T> ObservableTransformer<T, Permission> ensureEach(@NonNull final String... permissions) {
        // Fail fast on invalid/empty permission input before building the transformer.
        checkPermissions(permissions);
        return new ObservableTransformer<T, Permission>() {
            @Override
            @NonNull
            @CheckReturnValue
            public ObservableSource<Permission> apply(final Observable<T> o) {
                // Each upstream emission triggers (or re-triggers) the request flow.
                return request(o, permissions);
            }
        };
    }
}
|
public class EVCacheLatchImpl {
    /**
     * (non-Javadoc)
     *
     * @see com.netflix.evcache.operation.EVCacheLatchI#addFuture(net.spy.memcached.internal.ListenableFuture)
     *
     * Registers this latch as the future's completion listener, counts down
     * immediately if the future already completed, and tracks the future.
     * NOTE(review): if the future completes between addListener and the
     * isDone check, this may count down in addition to the listener callback
     * — confirm countDown is idempotent/guarded in this class.
     */
    public void addFuture(ListenableFuture<Boolean, OperationCompletionListener> future) {
        future.addListener(this);
        // Cover futures that were already done before the listener attached.
        if (future.isDone()) countDown();
        this.futures.add(future);
    }
}
|
public class CodecCollector {
    /**
     * Compute termvector number full.
     *
     * Walks the term's postings in document order, recording the term
     * frequency (and the per-document position count, when supplied) for
     * every document in the doc set that contains the term.
     *
     * @param docSet        the doc set (global doc ids, ascending order assumed
     *                      — TODO confirm, advance() requires non-decreasing targets)
     * @param termDocId     the postings-enum's current term doc id
     * @param termsEnum     the terms enum positioned on the term
     * @param lrc           the leaf reader context (supplies docBase)
     * @param postingsEnum  the postings enum (reused/reset here)
     * @param positionsData per-document position counts, or null
     * @return the termvector number full
     * @throws IOException Signals that an I/O exception has occurred.
     */
    private static TermvectorNumberFull computeTermvectorNumberFull(List<Integer> docSet, int termDocId, TermsEnum termsEnum, LeafReaderContext lrc, PostingsEnum postingsEnum, Map<Integer, Integer> positionsData) throws IOException {
        TermvectorNumberFull result = new TermvectorNumberFull(docSet.size());
        Iterator<Integer> docIterator = docSet.iterator();
        int localTermDocId = termDocId;
        // Re-acquire the postings with frequencies for the current term.
        postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.FREQS);
        while (docIterator.hasNext()) {
            // Convert the global doc id to a segment-local one.
            int docId = docIterator.next() - lrc.docBase;
            // Match when the enum is already on this doc, or advances exactly
            // to it; advance() is only legal for targets >= the current doc.
            if (docId >= localTermDocId && ((docId == localTermDocId) || ((localTermDocId = postingsEnum.advance(docId)) == docId))) {
                result.args[result.docNumber] = postingsEnum.freq();
                result.positions[result.docNumber] = (positionsData == null) ? 0 : positionsData.get(docId + lrc.docBase);
                result.docNumber++;
            }
        }
        return result;
    }
}
|
public class CommerceTaxMethodLocalServiceWrapper {
    /**
     * Returns the commerce tax method with the primary key.
     *
     * @param commerceTaxMethodId the primary key of the commerce tax method
     * @return the commerce tax method
     * @throws PortalException if a commerce tax method with the primary key could not be found
     */
    @Override
    public com.liferay.commerce.tax.model.CommerceTaxMethod getCommerceTaxMethod(long commerceTaxMethodId) throws com.liferay.portal.kernel.exception.PortalException {
        // Pure delegation to the wrapped local service.
        return _commerceTaxMethodLocalService.getCommerceTaxMethod(commerceTaxMethodId);
    }
}
|
public class DrlParser { /** * This will expand the DRL using the given expander resolver . useful for
* debugging .
* @ param source -
* the source which use a DSL
* @ param resolver -
* the DSL expander resolver itself .
* @ throws DroolsParserException
* If unable to expand in any way . */
public String getExpandedDRL ( final String source , final DefaultExpanderResolver resolver ) throws DroolsParserException { } }
|
final Expander expander = resolver . get ( "*" , null ) ; final String expanded = expander . expand ( source ) ; if ( expander . hasErrors ( ) ) { String err = "" ; for ( ExpanderException ex : expander . getErrors ( ) ) { err = err + "\n Line:[" + ex . getLine ( ) + "] " + ex . getMessage ( ) ; } throw new DroolsParserException ( err ) ; } return expanded ;
|
public class DisqueClient {
    /**
     * Create a new client that connects to the supplied uri with default
     * {@link ClientResources}. You can connect to different Redis servers but
     * you must supply a {@link RedisURI} on connecting.
     *
     * @param uri the Redis URI, must not be {@literal null}
     * @return a new instance of {@link DisqueClient}
     */
    public static DisqueClient create(String uri) {
        LettuceAssert.notNull(uri, "uri must not be null");
        // null client resources selects the defaults.
        return new DisqueClient(null, DisqueURI.create(uri));
    }
}
|
public class TRMFacade {
    /**
     * Method chooseLink
     *
     * Asks the link manager to select a messaging engine for the given link,
     * wrapping any link failure in an SIResourceException after FFDC/trace.
     *
     * @param linkUuid the uuid of the link to select over
     * @return the selected link, or {@code null} if none was chosen
     * @throws SIResourceException if the link manager reports a link error
     */
    public LinkSelection chooseLink(SIBUuid12 linkUuid) throws SIResourceException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "chooseLink", new Object[] { linkUuid });
        LinkSelection s = null;
        // Pick an ME to send the message to.
        try {
            s = _linkManager.select(linkUuid);
        } catch (LinkException e) {
            // Error during create of the link. Trace an FFST. If we cant
            // advertise the link, other ME's wont be able to send to it
            FFDCFilter.processException(e, "com.ibm.ws.sib.processor.impl.destination.TRMFacade.chooseLink", "1:530:1.7", this);
            SibTr.exception(tc, e);
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(tc, "chooseLink", e);
            throw new SIResourceException(e);
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "chooseLink", s);
        return s;
    }
}
|
public class CDKMCSHandler { /** * { @ inheritDoc }
* @ param shouldMatchBonds */
@ Override public void searchMCS ( boolean shouldMatchBonds ) { } }
|
CDKRMapHandler rmap = new CDKRMapHandler ( ) ; try { if ( ( source . getAtomCount ( ) == target . getAtomCount ( ) ) && source . getBondCount ( ) == target . getBondCount ( ) ) { rOnPFlag = true ; rmap . calculateOverlapsAndReduceExactMatch ( source , target , shouldMatchBonds ) ; } else if ( source . getAtomCount ( ) > target . getAtomCount ( ) && source . getBondCount ( ) != target . getBondCount ( ) ) { rOnPFlag = true ; rmap . calculateOverlapsAndReduce ( source , target , shouldMatchBonds ) ; } else { rOnPFlag = false ; rmap . calculateOverlapsAndReduce ( target , source , shouldMatchBonds ) ; } setAllMapping ( ) ; setAllAtomMapping ( ) ; setFirstMapping ( ) ; setFirstAtomMapping ( ) ; } catch ( CDKException e ) { rmap = null ; // System . err . println ( " WARNING : graphContainer : most probably time out error " ) ;
}
|
public class PersistentUserManagedEhcache { /** * { @ inheritDoc } */
@ Override public V replace ( K key , V value ) throws CacheLoadingException , CacheWritingException { } }
|
return cache . replace ( key , value ) ;
|
public class BigtableInstanceAdminClient {
  /**
   * Asynchronously lists all app profiles of the specified instance.
   *
   * <p>Sample code:
   *
   * <pre>{@code
   * ApiFuture<List<AppProfile>> appProfilesFuture = client.listAppProfilesAsync("my-instance");
   * List<AppProfile> appProfiles = appProfileFuture.get();
   * }</pre>
   *
   * @param instanceId the id of the instance whose app profiles are listed
   * @return a future resolving to every app profile of the instance, across all pages
   * @see AppProfile
   */
  @SuppressWarnings("WeakerAccess")
  public ApiFuture<List<AppProfile>> listAppProfilesAsync(String instanceId) {
    String instanceName = NameUtil.formatInstanceName(projectId, instanceId);
    ListAppProfilesRequest request = ListAppProfilesRequest.newBuilder().setParent(instanceName).build();
    // TODO(igorbernstein2): try to upstream pagination spooling or figure out a way to expose the
    // paginated responses while maintaining the wrapper facade.
    // Fetch the first page.
    ApiFuture<ListAppProfilesPage> firstPageFuture =
        ApiFutures.transform(
            stub.listAppProfilesPagedCallable().futureCall(request),
            new ApiFunction<ListAppProfilesPagedResponse, ListAppProfilesPage>() {
              @Override
              public ListAppProfilesPage apply(ListAppProfilesPagedResponse response) {
                return response.getPage();
              }
            },
            MoreExecutors.directExecutor());
    // Fetch the rest of the pages by chaining the futures.
    ApiFuture<List<com.google.bigtable.admin.v2.AppProfile>> allProtos =
        ApiFutures.transformAsync(
            firstPageFuture,
            new ApiAsyncFunction<ListAppProfilesPage, List<com.google.bigtable.admin.v2.AppProfile>>() {
              // Accumulates entries across pages; the same function instance is re-used
              // for every page, so the list persists across the whole chain.
              List<com.google.bigtable.admin.v2.AppProfile> responseAccumulator = Lists.newArrayList();

              @Override
              public ApiFuture<List<com.google.bigtable.admin.v2.AppProfile>> apply(ListAppProfilesPage page) {
                // Add all entries from the page
                responseAccumulator.addAll(Lists.newArrayList(page.getValues()));
                // If this is the last page, just return the accumulated responses.
                if (!page.hasNextPage()) {
                  return ApiFutures.immediateFuture(responseAccumulator);
                }
                // Otherwise fetch the next page.
                return ApiFutures.transformAsync(page.getNextPageAsync(), this, MoreExecutors.directExecutor());
              }
            },
            MoreExecutors.directExecutor());
    // Wrap all of the accumulated protos.
    return ApiFutures.transform(
        allProtos,
        new ApiFunction<List<com.google.bigtable.admin.v2.AppProfile>, List<AppProfile>>() {
          @Override
          public List<AppProfile> apply(List<com.google.bigtable.admin.v2.AppProfile> input) {
            List<AppProfile> results = Lists.newArrayListWithCapacity(input.size());
            for (com.google.bigtable.admin.v2.AppProfile appProfile : input) {
              results.add(AppProfile.fromProto(appProfile));
            }
            return results;
          }
        },
        MoreExecutors.directExecutor());
  }
}
|
public class LikeRule { /** * Deserialize the state of the object .
* @ param in object input stream
* @ throws IOException if IOException during deserialization
* @ throws ClassNotFoundException if class not found . */
private void readObject ( final java . io . ObjectInputStream in ) throws IOException , ClassNotFoundException { } }
|
try { field = ( String ) in . readObject ( ) ; String patternString = ( String ) in . readObject ( ) ; pattern = Pattern . compile ( patternString , Pattern . CASE_INSENSITIVE ) ; } catch ( PatternSyntaxException e ) { throw new IOException ( "Invalid LIKE rule - " + e . getMessage ( ) ) ; }
|
public class CollectionHelper { /** * Returns the first item in the given collection */
public static < T > T first ( Iterable < T > objects ) { } }
|
if ( objects != null ) { for ( T object : objects ) { return object ; } } return null ;
|
public class SecurityServiceImpl { /** * Retrieve the AuthenticationService for the specified id .
* @ param id AuthenticationService id to retrieve
* @ return A non - null AuthenticationService instance . */
private AuthenticationService getAuthenticationService ( String id ) { } }
|
AuthenticationService service = authentication . getService ( id ) ; if ( service == null ) { throwIllegalArgumentExceptionInvalidAttributeValue ( SecurityConfiguration . CFG_KEY_AUTHENTICATION_REF , id ) ; } return service ;
|
public class StreamSummaryContainer { /** * Adds the lock information about the key , namely if the key suffer some contention and if the keys was locked or
* not .
* @ param contention { @ code true } if the key was contented .
* @ param failLock { @ code true } if the key was not locked . */
public void addLockInformation ( Object key , boolean contention , boolean failLock ) { } }
|
if ( ! isEnabled ( ) ) { return ; } syncOffer ( Stat . MOST_LOCKED_KEYS , key ) ; if ( contention ) { syncOffer ( Stat . MOST_CONTENDED_KEYS , key ) ; } if ( failLock ) { syncOffer ( Stat . MOST_FAILED_KEYS , key ) ; }
|
public class AppContextJdon { /** * ApplicationContextAware ' s method
* at first run , startup Jdon Framework * */
public void setApplicationContext ( ApplicationContext applicationContext ) throws BeansException { } }
|
this . applicationContext = applicationContext ; if ( servletContext == null ) if ( applicationContext instanceof WebApplicationContext ) { servletContext = ( ( WebApplicationContext ) applicationContext ) . getServletContext ( ) ; if ( servletContext == null ) { System . err . print ( "this class only fit for Spring Web Application" ) ; return ; } } // start up jdon
AppContextWrapper acw = new ServletContextWrapper ( servletContext ) ; ContainerFinder containerFinder = new ContainerFinderImp ( ) ; containerWrapper = containerFinder . findContainer ( acw ) ;
|
public class LoggingFilter { /** * { @ inheritDoc } */
@ Override public void filter ( final ClientRequestContext context ) throws IOException { } }
|
final long id = _id . incrementAndGet ( ) ; context . setProperty ( LOGGING_ID_PROPERTY , id ) ; final StringBuilder b = new StringBuilder ( ) ; printRequestLine ( b , "Sending client request" , id , context . getMethod ( ) , context . getUri ( ) ) ; printPrefixedHeaders ( b , id , REQUEST_PREFIX , context . getStringHeaders ( ) ) ; if ( printEntity && context . hasEntity ( ) && isSupportPrintType ( context . getMediaType ( ) ) ) { final OutputStream stream = new LoggingStream ( b , context . getEntityStream ( ) ) ; context . setEntityStream ( stream ) ; context . setProperty ( ENTITY_LOGGER_PROPERTY , stream ) ; // not calling log ( b ) here - it will be called by the interceptor
} else { log ( b ) ; }
|
public class CmsDriverManager { /** * Changes the " expire " date of a resource . < p >
* @ param dbc the current database context
* @ param resource the resource to touch
* @ param dateExpired the new expire date of the resource
* @ throws CmsDataAccessException if something goes wrong
* @ see CmsObject # setDateExpired ( String , long , boolean )
* @ see I _ CmsResourceType # setDateExpired ( CmsObject , CmsSecurityManager , CmsResource , long , boolean ) */
public void setDateExpired ( CmsDbContext dbc , CmsResource resource , long dateExpired ) throws CmsDataAccessException { } }
|
resource . setDateExpired ( dateExpired ) ; if ( resource . getState ( ) . isUnchanged ( ) ) { resource . setState ( CmsResource . STATE_CHANGED ) ; } getVfsDriver ( dbc ) . writeResourceState ( dbc , dbc . currentProject ( ) , resource , UPDATE_STRUCTURE , false ) ; // modify the last modified project reference
getVfsDriver ( dbc ) . writeResourceState ( dbc , dbc . currentProject ( ) , resource , UPDATE_RESOURCE_PROJECT , false ) ; // log
log ( dbc , new CmsLogEntry ( dbc , resource . getStructureId ( ) , CmsLogEntryType . RESOURCE_DATE_EXPIRED , new String [ ] { resource . getRootPath ( ) } ) , false ) ; // clear the cache
m_monitor . clearResourceCache ( ) ; // fire the event
Map < String , Object > data = new HashMap < String , Object > ( 2 ) ; data . put ( I_CmsEventListener . KEY_RESOURCE , resource ) ; data . put ( I_CmsEventListener . KEY_CHANGE , new Integer ( CHANGED_TIMEFRAME ) ) ; OpenCms . fireCmsEvent ( new CmsEvent ( I_CmsEventListener . EVENT_RESOURCE_MODIFIED , data ) ) ;
|
public class JarConfigurationProvider { /** * Return a list of all relation entity classes filtered by relationship
* type .
* @ param relType
* @ return classes */
private List < Class < ? extends RelationshipInterface > > getRelationClassCandidatesForRelType ( final String relType ) { } }
|
List < Class < ? extends RelationshipInterface > > candidates = new ArrayList ( ) ; for ( final Class < ? extends RelationshipInterface > candidate : getRelationshipEntities ( ) . values ( ) ) { Relation rel = instantiate ( candidate ) ; if ( rel == null ) { continue ; } if ( rel . name ( ) . equals ( relType ) ) { candidates . add ( candidate ) ; } } return candidates ;
|
public class SyncRemoteTable { /** * Move the current position and read the record ( optionally read several records ) .
* @ param iRelPosition relative Position to read the next record .
* @ param iRecordCount Records to read .
* @ return If I read 1 record , this is the record ' s data .
* @ return If I read several records , this is a vector of the returned records .
* @ return If at EOF , or error , returns the error code as a Integer .
* @ exception DBException File exception . */
public Object doMove ( int iRelPosition , int iRecordCount ) throws DBException , RemoteException { } }
|
synchronized ( m_objSync ) { return m_tableRemote . doMove ( iRelPosition , iRecordCount ) ; }
|
public class AmazonElasticTranscoderClient { /** * The CreatePreset operation creates a preset with settings that you specify .
* < important >
* Elastic Transcoder checks the CreatePreset settings to ensure that they meet Elastic Transcoder requirements and
* to determine whether they comply with H . 264 standards . If your settings are not valid for Elastic Transcoder ,
* Elastic Transcoder returns an HTTP 400 response ( < code > ValidationException < / code > ) and does not create the
* preset . If the settings are valid for Elastic Transcoder but aren ' t strictly compliant with the H . 264 standard ,
* Elastic Transcoder creates the preset and returns a warning message in the response . This helps you determine
* whether your settings comply with the H . 264 standard while giving you greater flexibility with respect to the
* video that Elastic Transcoder produces .
* < / important >
* Elastic Transcoder uses the H . 264 video - compression format . For more information , see the International
* Telecommunication Union publication < i > Recommendation ITU - T H . 264 : Advanced video coding for generic audiovisual
* services < / i > .
* @ param createPresetRequest
* The < code > CreatePresetRequest < / code > structure .
* @ return Result of the CreatePreset operation returned by the service .
* @ throws ValidationException
* One or more required parameter values were not provided in the request .
* @ throws IncompatibleVersionException
* @ throws AccessDeniedException
* General authentication failure . The request was not signed correctly .
* @ throws LimitExceededException
* Too many operations for a given AWS account . For example , the number of pipelines exceeds the maximum
* allowed .
* @ throws InternalServiceException
* Elastic Transcoder encountered an unexpected exception while trying to fulfill the request .
* @ sample AmazonElasticTranscoder . CreatePreset */
@ Override public CreatePresetResult createPreset ( CreatePresetRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeCreatePreset ( request ) ;
|
public class DebuggerReader { /** * This first prints out the current breakpoint source location before calling the superclass ' run method . The
* synchronization prevents more than one debugging session on the console .
* @ return the exit status */
public ExitStatus run ( ) { } }
|
synchronized ( DebuggerReader . class ) // Only one console !
{ println ( "Stopped " + breakpoint ) ; println ( interpreter . getSourceLine ( breakpoint . location ) ) ; try { interpreter . setDefaultName ( breakpoint . location . getModule ( ) ) ; } catch ( Exception e ) { throw new InternalException ( 52 , "Cannot set default name at breakpoint" ) ; } ExitStatus status = super . run ( new Vector < File > ( ) ) ; return status ; }
|
public class JFeatureSpec { /** * Java wrapper for { @ link FeatureSpec # cross ( Tuple2 , Function2 ) } */
public JFeatureSpec < T > cross ( final String x1 , final String x2 , final BiFunction < Double , Double , Double > f ) { } }
|
Function2 < Object , Object , Object > g = JavaOps . crossFn ( f ) ; return wrap ( self . cross ( Tuple2 . apply ( x1 , x2 ) , g ) ) ;
|
public class CheckSumUtils { /** * Returns the MD5 Checksum of the string passed in parameter
* @ param str
* the string content
* @ return the Checksum of the input stream
* @ throws IOException
* if an IO exception occurs */
public static String getMD5Checksum ( String str ) throws IOException { } }
|
InputStream is = new ByteArrayInputStream ( str . getBytes ( ) ) ; return getMD5Checksum ( is ) ;
|
public class BitSet {
  /**
   * Sets all bits to true within the given range.
   *
   * @param array the packed word array holding the bits
   * @param fromIndex The lower bit index.
   * @param toIndex The upper bit index.
   */
  private static void setInternal(int[] array, int fromIndex, int toIndex) {
    // Locate the words containing the first and last bits of the range.
    int first = wordIndex(fromIndex);
    int last = wordIndex(toIndex);
    maybeGrowArrayToIndex(array, last);
    // Bit offsets of the endpoints within their respective words.
    int startBit = bitOffset(fromIndex);
    int endBit = bitOffset(toIndex);
    if (first == last) {
      // Set the bits in between first and last.
      maskInWord(array, first, startBit, endBit);
    } else {
      // Set the bits from fromIndex to the next 31 bit boundary.
      maskInWord(array, first, startBit, BITS_PER_WORD);
      // Set the bits from the last 31 bit boundary to toIndex.
      maskInWord(array, last, 0, endBit);
      // Set everything in between.
      for (int i = first + 1; i < last; i++) {
        array[i] = WORD_MASK;
      }
    }
  }
}
|
public class ConnectionManagerImpl { /** * Execute batch ( if the batch mode where used ) . */
public void executeBatch ( ) throws OJBBatchUpdateException { } }
|
if ( batchCon != null ) { try { batchCon . executeBatch ( ) ; } catch ( Throwable th ) { throw new OJBBatchUpdateException ( th ) ; } }
|
public class JDBCClobClient { /** * Retrieves a copy of the specified substring in the < code > CLOB < / code >
* value designated by this < code > Clob < / code > object .
* @ param pos the first character of the substring to be extracted . The
* first character is at position 1.
* @ param length the number of consecutive characters to be copied
* @ return a < code > String < / code > that is the specified substring in the
* < code > CLOB < / code > value designated by this < code > Clob < / code > object
* @ throws SQLException if there is an error accessing the
* < code > CLOB < / code > value */
public synchronized String getSubString ( long pos , int length ) throws SQLException { } }
|
if ( ! isInLimits ( Long . MAX_VALUE , pos - 1 , length ) ) { throw Util . outOfRangeArgument ( ) ; } try { return clob . getSubString ( session , pos - 1 , length ) ; } catch ( HsqlException e ) { throw Util . sqlException ( e ) ; }
|
public class OrientationHistogramSift { /** * Compute the angle . The angle for each neighbor bin is found using the weighted sum
* of the derivative . Then the peak index is found by 2nd order polygon interpolation . These two bits of
* information are combined and used to return the final angle output .
* @ param index1 Histogram index of the peak
* @ return angle of the peak . - pi to pi */
double computeAngle ( int index1 ) { } }
|
int index0 = CircularIndex . addOffset ( index1 , - 1 , histogramMag . length ) ; int index2 = CircularIndex . addOffset ( index1 , 1 , histogramMag . length ) ; // compute the peak location using a second order polygon
double v0 = histogramMag [ index0 ] ; double v1 = histogramMag [ index1 ] ; double v2 = histogramMag [ index2 ] ; double offset = FastHessianFeatureDetector . polyPeak ( v0 , v1 , v2 ) ; // interpolate using the index offset and angle of its neighbor
return interpolateAngle ( index0 , index1 , index2 , offset ) ;
|
public class TapClient { /** * Gets the next tap message from the queue of received tap messages .
* @ param time the amount of time to wait for a message .
* @ param timeunit the unit of time to use .
* @ return The tap message at the head of the queue or null if the queue is
* empty for the given amount of time . */
public ResponseMessage getNextMessage ( long time , TimeUnit timeunit ) { } }
|
try { Object m = rqueue . poll ( time , timeunit ) ; if ( m == null ) { return null ; } else if ( m instanceof ResponseMessage ) { return ( ResponseMessage ) m ; } else if ( m instanceof TapAck ) { TapAck ack = ( TapAck ) m ; tapAck ( ack . getConn ( ) , ack . getNode ( ) , ack . getOpcode ( ) , ack . getOpaque ( ) , ack . getCallback ( ) ) ; return null ; } else { throw new RuntimeException ( "Unexpected tap message type" ) ; } } catch ( InterruptedException e ) { shutdown ( ) ; return null ; }
|
public class BitVector { /** * Factory method for creating a < tt > BitVector < / tt > instance
* wrapping the given byte data .
* @ param data a byte [ ] containing packed bits .
* @ param size Size to set the bit vector to
* @ return the newly created < tt > BitVector < / tt > instance . */
public static BitVector createBitVector ( byte [ ] data , int size ) { } }
|
BitVector bv = new BitVector ( data . length * 8 ) ; bv . setBytes ( data ) ; bv . size = size ; return bv ;
|
public class RuleProviderRegistry { /** * Gets the current instance of { @ link RuleProviderRegistry } . */
public static RuleProviderRegistry instance ( GraphRewrite event ) { } }
|
RuleProviderRegistry instance = ( RuleProviderRegistry ) event . getRewriteContext ( ) . get ( RuleProviderRegistry . class ) ; return instance ;
|
public class CommerceShippingFixedOptionLocalServiceUtil { /** * Creates a new commerce shipping fixed option with the primary key . Does not add the commerce shipping fixed option to the database .
* @ param commerceShippingFixedOptionId the primary key for the new commerce shipping fixed option
* @ return the new commerce shipping fixed option */
public static com . liferay . commerce . shipping . engine . fixed . model . CommerceShippingFixedOption createCommerceShippingFixedOption ( long commerceShippingFixedOptionId ) { } }
|
return getService ( ) . createCommerceShippingFixedOption ( commerceShippingFixedOptionId ) ;
|
public class CodepointHelper { /** * Verifies a sequence of codepoints using the specified profile
* @ param sStr
* String
* @ param eProfile
* profile to use */
public static void verify ( @ Nullable final String sStr , @ Nonnull final ECodepointProfile eProfile ) { } }
|
if ( sStr != null ) verify ( new CodepointIteratorCharSequence ( sStr ) , eProfile ) ;
|
public class TextAdapterActivity { /** * Typically you should not override this method . ConnectionPoolAdapter
* does override this with internal MDW logic .
* @ param errorCode
* @ throws ActivityException */
protected void handleConnectionException ( int errorCode , Throwable originalCause ) throws ActivityException { } }
|
InternalEvent message = InternalEvent . createActivityStartMessage ( getActivityId ( ) , getProcessInstanceId ( ) , getWorkTransitionInstanceId ( ) , getMasterRequestId ( ) , COMPCODE_AUTO_RETRY ) ; ScheduledEventQueue eventQueue = ScheduledEventQueue . getSingleton ( ) ; int retry_interval = this . getRetryInterval ( ) ; Date scheduledTime = new Date ( DatabaseAccess . getCurrentTime ( ) + retry_interval * 1000 ) ; super . loginfo ( "The activity failed, set to retry at " + StringHelper . dateToString ( scheduledTime ) ) ; eventQueue . scheduleInternalEvent ( ScheduledEvent . INTERNAL_EVENT_PREFIX + this . getActivityInstanceId ( ) , scheduledTime , message . toString ( ) , "procinst:" + this . getProcessInstanceId ( ) . toString ( ) ) ; this . setReturnCode ( COMPCODE_AUTO_RETRY ) ; // the above is to prevent engine from making transitions ( typically to exception handler )
throw new ActivityException ( errorCode , originalCause . getMessage ( ) , originalCause ) ;
|
public class WordDataWriter { /** * / * ( non - Javadoc )
* @ see jvntextpro . data . DataWriter # writeFile ( java . util . List , java . lang . String ) */
@ Override public void writeFile ( List lblSeqs , String filename ) { } }
|
String ret = writeString ( lblSeqs ) ; try { BufferedWriter out = new BufferedWriter ( new OutputStreamWriter ( new FileOutputStream ( filename ) , "UTF-8" ) ) ; out . write ( ret ) ; out . close ( ) ; } catch ( Exception e ) { }
|
public class SecurityUtil { /** * Encrypt a string using SHA
* @ param plaintext the original text
* @ return resultant hash */
public synchronized String encrypt ( String plaintext ) { } }
|
MessageDigest md = null ; try { md = MessageDigest . getInstance ( "SHA" ) ; md . update ( plaintext . getBytes ( UTF8 ) ) ; } catch ( Exception e ) { LOG . log ( Level . SEVERE , "Should not happen!" , e ) ; } byte raw [ ] = md . digest ( ) ; return encode ( raw ) ;
|
public class TopLevelDocumentToApiModelVisitor { /** * Recurse into the child documents converting and adding to the list .
* Several document types require special processing because they have
* split lists .
* @ param document the current document */
protected void addSplitAttributes ( final GedDocument < ? > document ) { } }
|
for ( final GedDocument < ? extends GedObject > attribute : document . getAttributes ( ) ) { final DocumentToApiModelVisitor v = createVisitor ( ) ; attribute . accept ( v ) ; ( ( ApiHasImages ) baseObject ) . addAttribute ( convertToAttribute ( v . getBaseObject ( ) ) ) ; }
|
public class Encoder { /** * Append length info . On success , store the result in " bits " . */
static void appendLengthInfo ( int numLetters , Version version , Mode mode , BitArray bits ) throws WriterException { } }
|
int numBits = mode . getCharacterCountBits ( version ) ; if ( numLetters >= ( 1 << numBits ) ) { throw new WriterException ( numLetters + " is bigger than " + ( ( 1 << numBits ) - 1 ) ) ; } bits . appendBits ( numLetters , numBits ) ;
|
public class Ranges { /** * Returns the value normalized within the range . It performs a linear
* transformation where the minimum value of the range becomes 0 while
* the maximum becomes 1.
* @ param range a range
* @ param value a value
* @ return the value transformed based on the range */
public static double normalize ( Range range , double value ) { } }
|
return normalize ( value , range . getMinimum ( ) . doubleValue ( ) , range . getMaximum ( ) . doubleValue ( ) ) ;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.