signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AllInvocationsFinder { /** * Gets all stubbings from mocks . Invocations are ordered earlier first . * @ param mocks mocks * @ return stubbings */ public static Set < Stubbing > findStubbings ( Iterable < ? > mocks ) { } }
Set < Stubbing > stubbings = new TreeSet < Stubbing > ( new StubbingComparator ( ) ) ; for ( Object mock : mocks ) { Collection < ? extends Stubbing > fromSingleMock = new DefaultMockingDetails ( mock ) . getStubbings ( ) ; stubbings . addAll ( fromSingleMock ) ; } return stubbings ;
public class PersonImpl { /** * Determines whether or not this person is a " guest " user . * < p > This person is a " guest " if both of the following are true : * < ol > * < li > This person ' s user name is listed as a guest user account . * < li > This person does not have a live instance ISecurityContext that states he / she has been * successfully authenticated . ( It can be either null or unauthenticated . ) * < / ol > * @ return < code > true < / code > If person is a guest , otherwise < code > false < / code > */ @ Override public boolean isGuest ( ) { } }
String userName = ( String ) getAttribute ( IPerson . USERNAME ) ; boolean isGuestUsername = PersonFactory . getGuestUsernames ( ) . contains ( userName ) ; boolean isAuthenticated = m_securityContext != null && m_securityContext . isAuthenticated ( ) ; return isGuestUsername && ! isAuthenticated ;
public class AbstractSessionManager { /** * Find sessions that have timed out and invalidate them . * This runs in the SessionScavenger thread . */ private void scavenge ( ) { } }
Thread thread = Thread . currentThread ( ) ; ClassLoader old_loader = thread . getContextClassLoader ( ) ; try { if ( _handler == null ) return ; ClassLoader loader = _handler . getClassLoader ( ) ; if ( loader != null ) thread . setContextClassLoader ( loader ) ; long now = System . currentTimeMillis ( ) ; // Since Hashtable enumeration is not safe over deletes , // we build a list of stale sessions , then go back and invalidate them Object stale = null ; synchronized ( AbstractSessionManager . this ) { // For each session for ( Iterator i = _sessions . values ( ) . iterator ( ) ; i . hasNext ( ) ; ) { Session session = ( Session ) i . next ( ) ; long idleTime = session . _maxIdleMs ; if ( idleTime > 0 && session . _accessed + idleTime < now ) { // Found a stale session , add it to the list stale = LazyList . add ( stale , session ) ; } } } // Remove the stale sessions for ( int i = LazyList . size ( stale ) ; i -- > 0 ; ) { // check it has not been accessed in the meantime Session session = ( Session ) LazyList . get ( stale , i ) ; long idleTime = session . _maxIdleMs ; if ( idleTime > 0 && session . _accessed + idleTime < System . currentTimeMillis ( ) ) { session . invalidate ( ) ; int nbsess = this . _sessions . size ( ) ; if ( nbsess < this . _minSessions ) this . _minSessions = nbsess ; } } } finally { thread . setContextClassLoader ( old_loader ) ; }
public class V1InstanceCreator { /** * Creates a new Regression Plan with title and project . * @ param name Title of the plan . * @ param project Project to assign . * @ param attributes Additional attributes for initialization Regression Plan . * @ return A newly minted Regression Plan that exists in the VersionOne system . */ public RegressionPlan regressionPlan ( String name , Project project , Map < String , Object > attributes ) { } }
RegressionPlan regressionPlan = new RegressionPlan ( instance ) ; regressionPlan . setName ( name ) ; regressionPlan . setProject ( project ) ; addAttributes ( regressionPlan , attributes ) ; regressionPlan . save ( ) ; return regressionPlan ;
public class ScanJob { /** * Returns false if cycle thread cannot be allocated */ private boolean initialzeScanHelper ( ) { } }
mScanHelper = new ScanHelper ( this ) ; mScanState = ScanState . restore ( ScanJob . this ) ; mScanState . setLastScanStartTimeMillis ( System . currentTimeMillis ( ) ) ; mScanHelper . setMonitoringStatus ( mScanState . getMonitoringStatus ( ) ) ; mScanHelper . setRangedRegionState ( mScanState . getRangedRegionState ( ) ) ; mScanHelper . setBeaconParsers ( mScanState . getBeaconParsers ( ) ) ; mScanHelper . setExtraDataBeaconTracker ( mScanState . getExtraBeaconDataTracker ( ) ) ; if ( mScanHelper . getCycledScanner ( ) == null ) { try { mScanHelper . createCycledLeScanner ( mScanState . getBackgroundMode ( ) , null ) ; } catch ( OutOfMemoryError e ) { LogManager . w ( TAG , "Failed to create CycledLeScanner thread." ) ; return false ; } } return true ;
public class ArraySuffix { /** * Gets the value of the index */ Object evaluateIndex ( Object pContext , VariableResolver pResolver , Map functions , String defaultPrefix , Logger pLogger ) throws ELException { } }
return mIndex . evaluate ( pContext , pResolver , functions , defaultPrefix , pLogger ) ;
public class Item { /** * Sets this item to a float item . * @ param floatVal * the value of this item . */ void set ( final float floatVal ) { } }
this . type = ClassWriter . FLOAT ; this . intVal = Float . floatToRawIntBits ( floatVal ) ; this . hashCode = 0x7FFFFFFF & ( type + ( int ) floatVal ) ;
public class JDBCResultSet { /** * < ! - - start generic documentation - - > * Updates the designated column with a < code > byte < / code > array value . * The updater methods are used to update column values in the * current row or the insert row . The updater methods do not * update the underlying database ; instead the < code > updateRow < / code > or * < code > insertRow < / code > methods are called to update the database . * < ! - - end generic documentation - - > * < ! - - start release - specific documentation - - > * < div class = " ReleaseSpecificDocumentation " > * < h3 > HSQLDB - Specific Information : < / h3 > < p > * HSQLDB supports this feature . < p > * < / div > * < ! - - end release - specific documentation - - > * @ param columnIndex the first column is 1 , the second is 2 , . . . * @ param x the new column value * @ exception SQLException if a database access error occurs , * the result set concurrency is < code > CONCUR _ READ _ ONLY < / code > * or this method is called on a closed result set * @ exception SQLFeatureNotSupportedException if the JDBC driver does not support * this method * @ since JDK 1.2 ( JDK 1.1 . x developers : read the overview for * JDBCResultSet ) */ public void updateBytes ( int columnIndex , byte [ ] x ) throws SQLException { } }
startUpdate ( columnIndex ) ; preparedStatement . setParameter ( columnIndex , x ) ;
public class TransitionSystem { /** * 转换动作为动作id * @ param act 动作 * @ return 动作类型的依存关系id */ int transform ( Action act ) { } }
int deprel = 0 ; int [ ] deprel_inference = new int [ ] { deprel } ; if ( ActionUtils . is_shift ( act ) ) { return 0 ; } else if ( ActionUtils . is_left_arc ( act , deprel_inference ) ) { deprel = deprel_inference [ 0 ] ; return 1 + deprel ; } else if ( ActionUtils . is_right_arc ( act , deprel_inference ) ) { deprel = deprel_inference [ 0 ] ; return L + 1 + deprel ; } else { System . err . printf ( "unknown transition in transform(Action): %d-%d" , act . name ( ) , act . rel ( ) ) ; } return - 1 ;
public class GitLabApiClient { /** * Perform an HTTP GET call with the specified query parameters and URL , returning * a ClientResponse instance with the data returned from the endpoint . * @ param queryParams multivalue map of request parameters * @ param url the fully formed path to the GitLab API endpoint * @ return a ClientResponse instance with the data returned from the endpoint */ protected Response get ( MultivaluedMap < String , String > queryParams , URL url ) { } }
return ( invocation ( url , queryParams ) . get ( ) ) ;
public class JobsInner { /** * Gets information about the jobs associated with the subscription . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the PagedList & lt ; JobInner & gt ; object */ public Observable < Page < JobInner > > listAsync ( ) { } }
return listWithServiceResponseAsync ( ) . map ( new Func1 < ServiceResponse < Page < JobInner > > , Page < JobInner > > ( ) { @ Override public Page < JobInner > call ( ServiceResponse < Page < JobInner > > response ) { return response . body ( ) ; } } ) ;
public class SimpleDateFormat { /** * / * Initialize defaultCenturyStart and defaultCenturyStartYear by base time . * The default start time is 80 years before the creation time of this object . */ private void initializeDefaultCenturyStart ( long baseTime ) { } }
defaultCenturyBase = baseTime ; // clone to avoid messing up date stored in calendar object // when this method is called while parsing Calendar tmpCal = ( Calendar ) calendar . clone ( ) ; tmpCal . setTimeInMillis ( baseTime ) ; tmpCal . add ( Calendar . YEAR , - 80 ) ; defaultCenturyStart = tmpCal . getTime ( ) ; defaultCenturyStartYear = tmpCal . get ( Calendar . YEAR ) ;
public class StoredPaymentChannelServerStates { /** * < p > Closes the given channel using { @ link ServerConnectionEventHandler # closeChannel ( ) } and * { @ link PaymentChannelV1ServerState # close ( ) } to notify any connected client of channel closure and to complete and * broadcast the latest payment transaction . < / p > * < p > Removes the given channel from this set of { @ link StoredServerChannel } s and notifies the wallet of a change to * this wallet extension . < / p > */ public void closeChannel ( StoredServerChannel channel ) { } }
lock . lock ( ) ; try { if ( mapChannels . remove ( channel . contract . getTxId ( ) ) == null ) return ; } finally { lock . unlock ( ) ; } synchronized ( channel ) { channel . closeConnectedHandler ( ) ; try { TransactionBroadcaster broadcaster = getBroadcaster ( ) ; channel . getOrCreateState ( wallet , broadcaster ) . close ( ) ; } catch ( InsufficientMoneyException e ) { log . error ( "Exception when closing channel" , e ) ; } catch ( VerificationException e ) { log . error ( "Exception when closing channel" , e ) ; } channel . state = null ; } updatedChannel ( channel ) ;
public class SuggestedAdUnit { /** * Sets the targetWindow value for this SuggestedAdUnit . * @ param targetWindow * The { @ code target } attribute of the underlying ad tag , as defined * in the { @ link AdUnit } . This * attribute is read - only and is populated by Google . */ public void setTargetWindow ( com . google . api . ads . admanager . axis . v201811 . AdUnitTargetWindow targetWindow ) { } }
this . targetWindow = targetWindow ;
public class Karyon { /** * Creates a new { @ link KaryonServer } which combines lifecycle of the passed WebSockets { @ link RxServer } with * it ' s own lifecycle . * @ param server WebSocket server * @ param modules Additional bootstrapModules if any . * @ return { @ link KaryonServer } which is to be used to start the created server . */ public static KaryonServer forWebSocketServer ( RxServer < ? extends WebSocketFrame , ? extends WebSocketFrame > server , Module ... modules ) { } }
return forWebSocketServer ( server , toBootstrapModule ( modules ) ) ;
public class TaskLockbox { /** * Attempt to acquire a lock for a task , without removing it from the queue . Can safely be called multiple times on * the same task until the lock is preempted . * @ param lockType type of lock to be acquired * @ param task task that wants a lock * @ param interval interval to lock * @ return { @ link LockResult } containing a new or an existing lock if succeeded . Otherwise , { @ link LockResult } with a * { @ link LockResult # revoked } flag . * @ throws IllegalStateException if the task is not a valid active task */ public LockResult tryLock ( final TaskLockType lockType , final Task task , final Interval interval ) { } }
giant . lock ( ) ; try { if ( ! activeTasks . contains ( task . getId ( ) ) ) { throw new ISE ( "Unable to grant lock to inactive Task [%s]" , task . getId ( ) ) ; } Preconditions . checkArgument ( interval . toDurationMillis ( ) > 0 , "interval empty" ) ; final TaskLockPosse posseToUse = createOrFindLockPosse ( task , interval , lockType ) ; if ( posseToUse != null && ! posseToUse . getTaskLock ( ) . isRevoked ( ) ) { // Add to existing TaskLockPosse , if necessary if ( posseToUse . addTask ( task ) ) { log . info ( "Added task[%s] to TaskLock[%s]" , task . getId ( ) , posseToUse . getTaskLock ( ) . getGroupId ( ) ) ; // Update task storage facility . If it fails , revoke the lock . try { taskStorage . addLock ( task . getId ( ) , posseToUse . getTaskLock ( ) ) ; return LockResult . ok ( posseToUse . getTaskLock ( ) ) ; } catch ( Exception e ) { log . makeAlert ( "Failed to persist lock in storage" ) . addData ( "task" , task . getId ( ) ) . addData ( "dataSource" , posseToUse . getTaskLock ( ) . getDataSource ( ) ) . addData ( "interval" , posseToUse . getTaskLock ( ) . getInterval ( ) ) . addData ( "version" , posseToUse . getTaskLock ( ) . getVersion ( ) ) . emit ( ) ; unlock ( task , interval ) ; return LockResult . fail ( false ) ; } } else { log . info ( "Task[%s] already present in TaskLock[%s]" , task . getId ( ) , posseToUse . getTaskLock ( ) . getGroupId ( ) ) ; return LockResult . ok ( posseToUse . getTaskLock ( ) ) ; } } else { final boolean lockRevoked = posseToUse != null && posseToUse . getTaskLock ( ) . isRevoked ( ) ; return LockResult . fail ( lockRevoked ) ; } } finally { giant . unlock ( ) ; }
public class DdosProtectionPlansInner { /** * Creates or updates a DDoS protection plan . * @ param resourceGroupName The name of the resource group . * @ param ddosProtectionPlanName The name of the DDoS protection plan . * @ param parameters Parameters supplied to the create or update operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable for the request */ public Observable < DdosProtectionPlanInner > createOrUpdateAsync ( String resourceGroupName , String ddosProtectionPlanName , DdosProtectionPlanInner parameters ) { } }
return createOrUpdateWithServiceResponseAsync ( resourceGroupName , ddosProtectionPlanName , parameters ) . map ( new Func1 < ServiceResponse < DdosProtectionPlanInner > , DdosProtectionPlanInner > ( ) { @ Override public DdosProtectionPlanInner call ( ServiceResponse < DdosProtectionPlanInner > response ) { return response . body ( ) ; } } ) ;
public class DenialOfServiceTaf { /** * Return of " True " means IP has been added . * Return of " False " means IP already added . * @ param ip * @ return */ public static synchronized boolean denyIP ( String ip ) { } }
boolean rv = false ; if ( deniedIP == null ) { deniedIP = new HashMap < String , Counter > ( ) ; deniedIP . put ( ip , new Counter ( ip ) ) ; // Noted duplicated for minimum time spent rv = true ; } else if ( deniedIP . get ( ip ) == null ) { deniedIP . put ( ip , new Counter ( ip ) ) ; rv = true ; } if ( rv ) { writeIP ( ) ; } return rv ;
public class Try { /** * { @ inheritDoc } */ @ Override public < U extends Throwable > Try < U , A > biMapL ( Function < ? super T , ? extends U > fn ) { } }
return ( Try < U , A > ) BoundedBifunctor . super . < U > biMapL ( fn ) ;
public class DeleteVpcEndpointConnectionNotificationsRequest { /** * One or more notification IDs . * @ param connectionNotificationIds * One or more notification IDs . */ public void setConnectionNotificationIds ( java . util . Collection < String > connectionNotificationIds ) { } }
if ( connectionNotificationIds == null ) { this . connectionNotificationIds = null ; return ; } this . connectionNotificationIds = new com . amazonaws . internal . SdkInternalList < String > ( connectionNotificationIds ) ;
public class PipeConnectionEvent { /** * Setter for event parameters map * @ param paramMap * Event parameters as Map */ public void setParamMap ( Map < String , Object > paramMap ) { } }
if ( paramMap != null && ! paramMap . isEmpty ( ) ) { this . paramMap . putAll ( paramMap ) ; }
public class CollUtil { /** * 其中一个集合在另一个集合中是否至少包含一个元素 , 既是两个集合是否至少有一个共同的元素 * @ param coll1 集合1 * @ param coll2 集合2 * @ return 其中一个集合在另一个集合中是否至少包含一个元素 * @ since 2.1 * @ see # intersection */ public static boolean containsAny ( Collection < ? > coll1 , Collection < ? > coll2 ) { } }
if ( isEmpty ( coll1 ) || isEmpty ( coll2 ) ) { return false ; } if ( coll1 . size ( ) < coll2 . size ( ) ) { for ( Object object : coll1 ) { if ( coll2 . contains ( object ) ) { return true ; } } } else { for ( Object object : coll2 ) { if ( coll1 . contains ( object ) ) { return true ; } } } return false ;
public class MemberConfig { /** * Sets the member identifier . * @ param id the member identifier * @ return the member configuration */ public MemberConfig setId ( MemberId id ) { } }
this . id = id != null ? id : MemberId . anonymous ( ) ; return this ;
public class SinkQuerySegmentWalker { /** * Decorates a Sink ' s query runner to emit query / segmentAndCache / time , query / segment / time , query / wait / time once * each for the whole Sink . Also adds CPU time to cpuTimeAccumulator . */ private < T > QueryRunner < T > withPerSinkMetrics ( final QueryRunner < T > sinkRunner , final QueryToolChest < T , ? extends Query < T > > queryToolChest , final SegmentId sinkSegmentId , final AtomicLong cpuTimeAccumulator ) { } }
// Note : reportSegmentAndCacheTime and reportSegmentTime are effectively the same here . They don ' t split apart // cache vs . non - cache due to the fact that Sinks may be partially cached and partially uncached . Making this // better would need to involve another accumulator like the cpuTimeAccumulator that we could share with the // sinkRunner . String sinkSegmentIdString = sinkSegmentId . toString ( ) ; return CPUTimeMetricQueryRunner . safeBuild ( new MetricsEmittingQueryRunner < > ( emitter , queryToolChest , new MetricsEmittingQueryRunner < > ( emitter , queryToolChest , sinkRunner , QueryMetrics :: reportSegmentTime , queryMetrics -> queryMetrics . segment ( sinkSegmentIdString ) ) , QueryMetrics :: reportSegmentAndCacheTime , queryMetrics -> queryMetrics . segment ( sinkSegmentIdString ) ) . withWaitMeasuredFromNow ( ) , queryToolChest , emitter , cpuTimeAccumulator , false ) ;
public class TextResponseParser { /** * { @ inheritDoc } */ @ Override protected final void deserializeBytes1to3 ( final int line ) throws InternetSCSIException { } }
continueFlag = Utils . isBitSet ( line & Constants . CONTINUE_FLAG_MASK ) ; // all bits are reserved , except the continue flag bit Utils . isReserved ( line & ( Constants . LAST_THREE_BYTES_MASK ^ Constants . CONTINUE_FLAG_MASK ) ) ;
public class EnumParameterMapper { /** * { @ inheritDoc } * @ see jp . co . future . uroborosql . parameter . mapper . BindParameterMapper # toJdbc ( java . lang . Object , java . sql . Connection , jp . co . future . uroborosql . parameter . mapper . BindParameterMapperManager ) */ @ Override public Object toJdbc ( final Enum < ? > original , final Connection connection , final BindParameterMapperManager parameterMapperManager ) { } }
return original . toString ( ) ;
public class AvroEntityComposer { /** * Initialize the AvroRecordBuilderFactories for all keyAsColumn mapped fields * that are record types . We need to be able to get record builders for these * since the records are broken across many columns , and need to be * constructed by the composer . */ private void initRecordBuilderFactories ( ) { } }
for ( FieldMapping fieldMapping : avroSchema . getColumnMappingDescriptor ( ) . getFieldMappings ( ) ) { if ( fieldMapping . getMappingType ( ) == MappingType . KEY_AS_COLUMN ) { String fieldName = fieldMapping . getFieldName ( ) ; Schema fieldSchema = avroSchema . getAvroSchema ( ) . getField ( fieldName ) . schema ( ) ; Schema . Type fieldSchemaType = fieldSchema . getType ( ) ; if ( fieldSchemaType == Schema . Type . RECORD ) { AvroRecordBuilderFactory < E > factory = buildAvroRecordBuilderFactory ( fieldSchema ) ; kacRecordBuilderFactories . put ( fieldName , factory ) ; } } }
public class ProcUrl { /** * Prepare URL ' s prefixes . * @ return list of schema prefixes and local prefixes if local URL are allowed . */ private List < String > preparePrefixes ( ) { } }
// Prepare prefixes for all schemas List < String > prefixes = new ArrayList < String > ( Schema . values ( ) . length + ( local ? 3 : 0 ) ) ; for ( Schema schema : Schema . values ( ) ) { prefixes . add ( schema . getPrefix ( ) ) ; } // For local URls prefixes are " . / " , " . . / " , " / " if ( local ) { Collections . addAll ( prefixes , LOCAL_PREFIXES ) ; } return prefixes ;
public class EmbeddedHandler { /** * / * ( non - Javadoc ) * @ see net . roboconf . target . api . AbstractThreadedTargetHandler * # machineConfigurator ( net . roboconf . target . api . TargetHandlerParameters , java . lang . String ) */ @ Override public MachineConfigurator machineConfigurator ( TargetHandlerParameters parameters , String machineId ) throws TargetException { } }
// Configure the machine only if there is an IP address MachineConfigurator configurator = null ; // Retrieve the IP address String ip = this . machineIdToIp . get ( machineId ) ; if ( ! Utils . isEmptyOrWhitespaces ( ip ) ) { configurator = new ConfiguratorOnCreation ( parameters , ip , machineId , this ) ; } return configurator ;
public class RawClientAlert { /** * set up alert rules */ public static void configure ( DatabaseClient client ) throws IOException { } }
// create a manager for configuring rules RuleManager ruleMgr = client . newRuleManager ( ) ; // specify a rule in raw XML ( raw JSON is also supported // as well as a POJO rule definition ) String rawRule = "<rapi:rule xmlns:rapi='http://marklogic.com/rest-api'>" + "<rapi:name>" + RULE_NAME + "</rapi:name>" + "<rapi:description>industry of Real Estate</rapi:description>" + "<search:search " + "xmlns:search='http://marklogic.com/appservices/search'>" + "<search:query>" + "<search:value-constraint-query>" + "<search:constraint-name>industry</search:constraint-name>" + "<search:text>Real Estate</search:text>" + "</search:value-constraint-query>" + "</search:query>" + "<search:options>" + "<search:constraint name='industry'>" + "<search:value>" + "<search:element name='industry' ns=''/>" + "</search:value>" + "</search:constraint>" + "</search:options>" + "</search:search>" + "<rapi:rule-metadata>" + "<correlate-with>/demographic-statistics?zipcode=</correlate-with>" + "</rapi:rule-metadata>" + "</rapi:rule>" ; // create a handle for writing the rule StringHandle writeHandle = new StringHandle ( rawRule ) ; // write the rule to the database ruleMgr . writeRule ( RULE_NAME , writeHandle ) ;
public class ByteArraySerializer { /** * ( non - Javadoc ) * @ see Serializer # canRead ( java . lang . String ) */ @ Override public final boolean canRead ( MediaType mimeType , Class < ? > resultType ) { } }
return mimeType . is ( MediaType . ANY_TYPE ) && byte [ ] . class . equals ( resultType ) ;
public class DVWCWordsiMain { /** * { @ inheritDoc } */ protected void handleExtraOptions ( ) { } }
// If the - L option is given , load the basis mapping from disk . if ( argOptions . hasOption ( 'L' ) ) { basis = loadObject ( openLoadFile ( ) ) ; basis . setReadOnly ( true ) ; } else basis = new StringBasisMapping ( ) ;
public class SortedSetSubject { /** * Fails if the subject does not have the given first element . */ public void hasFirstElement ( @ NullableDecl Object element ) { } }
if ( actualAsNavigableSet ( ) . isEmpty ( ) ) { failWithActual ( "expected to have first element" , element ) ; return ; } if ( ! Objects . equal ( actualAsNavigableSet ( ) . first ( ) , element ) ) { if ( actualAsNavigableSet ( ) . contains ( element ) ) { failWithoutActual ( simpleFact ( lenientFormat ( "Not true that %s has first element <%s>. " + "It does contain this element, but the first element is <%s>" , actualAsString ( ) , element , actualAsNavigableSet ( ) . first ( ) ) ) ) ; return ; } failWithoutActual ( simpleFact ( lenientFormat ( "Not true that %s has first element <%s>. " + "It does not contain this element, and the first element is <%s>" , actualAsString ( ) , element , actualAsNavigableSet ( ) . first ( ) ) ) ) ; }
public class DocumentPermission { /** * Create a DocumentPermissionDeleter to execute delete . * @ param pathServiceSid Sync Service Instance SID or unique name . * @ param pathDocumentSid Sync Document SID or unique name . * @ param pathIdentity Identity of the user to whom the Sync Document * Permission applies . * @ return DocumentPermissionDeleter capable of executing the delete */ public static DocumentPermissionDeleter deleter ( final String pathServiceSid , final String pathDocumentSid , final String pathIdentity ) { } }
return new DocumentPermissionDeleter ( pathServiceSid , pathDocumentSid , pathIdentity ) ;
public class Http2Channel { /** * Creates a response stream to respond to the initial HTTP upgrade * @ return */ public Http2HeadersStreamSinkChannel createInitialUpgradeResponseStream ( ) { } }
if ( lastGoodStreamId != 0 ) { throw new IllegalStateException ( ) ; } lastGoodStreamId = 1 ; Http2HeadersStreamSinkChannel stream = new Http2HeadersStreamSinkChannel ( this , 1 ) ; StreamHolder streamHolder = new StreamHolder ( stream ) ; streamHolder . sourceClosed = true ; currentStreams . put ( 1 , streamHolder ) ; receiveConcurrentStreamsAtomicUpdater . getAndIncrement ( this ) ; return stream ;
public class WildcardMatch { /** * Matches a string against a pattern with wildcards . Two wildcard types * are supported : single character match ( defaults to ' ? ' ) and ANY * character match ( ' * ' ) , matching any count of characters including 0. * Wildcard characters may be escaped by an escape character , which * defaults to ' \ ' . * @ param s The string , in which the search should be performed . * @ param pattern The search pattern string including wildcards . * @ return true , if string ' s ' matches ' pattern ' . */ public boolean match ( String s , String pattern ) { } }
preceededByMultipleChar = false ; isEscaped = false ; if ( ! caseSensitive ) { pattern = pattern . toLowerCase ( ) ; s = s . toLowerCase ( ) ; } int offset = 0 ; while ( true ) { String ps = getNextSubString ( pattern ) ; int len = ps . length ( ) ; pattern = pattern . substring ( len + escCnt ) ; if ( len > 0 && isWildcard ( ps . charAt ( 0 ) ) && escCnt == 0 ) { offset = getWildcardOffset ( ps . charAt ( 0 ) ) ; if ( isSingleWildcardChar ( ps . charAt ( 0 ) ) ) { s = s . substring ( 1 ) ; // This is not yet enough : If a ' * ' precedes ' ? ' , ' s ' might be SHORTER // than seen here , for this we need preceededByMultipleChar variable . . . } if ( pattern . length ( ) == 0 ) { return s . length ( ) <= offset || preceededByMultipleChar ; } } else { int idx = s . indexOf ( ps ) ; if ( idx < 0 || ( idx > offset && ! preceededByMultipleChar ) ) { return false ; } s = s . substring ( idx + len ) ; preceededByMultipleChar = false ; } if ( pattern . length ( ) == 0 ) { return ( s . length ( ) == 0 ) ; } }
public class UnpackedBytecodeCallback { /** * ( non - Javadoc ) * @ see * edu . umd . cs . findbugs . ba . BytecodeScanner . Callback # handleInstruction ( int , * int ) */ @ Override public void handleInstruction ( int opcode , int index ) { } }
bytecodeSet . set ( opcode ) ; offsetToOpcodeMap [ index ] = ( short ) opcode ;
public class MenuBuilder { /** * Called when an item is added or removed . * @ param structureChanged true if the menu structure changed , * false if only item properties changed . * ( Visibility is a structural property since it affects layout . ) */ void onItemsChanged ( boolean structureChanged ) { } }
if ( ! mPreventDispatchingItemsChanged ) { if ( structureChanged ) { mIsVisibleItemsStale = true ; mIsActionItemsStale = true ; } dispatchPresenterUpdate ( structureChanged ) ; } else { mItemsChangedWhileDispatchPrevented = true ; }
public class HashExtensions { /** * Hashes the given { @ link byte [ ] } object with the given parameters . * @ param hashIt * the hash it * @ param hashAlgorithm * the hash algorithm * @ return the generated { @ link String } object * @ throws NoSuchAlgorithmException * is thrown if instantiation of the MessageDigest object fails . */ public static byte [ ] hash ( final byte [ ] hashIt , final HashAlgorithm hashAlgorithm ) throws NoSuchAlgorithmException { } }
return hash ( hashIt , null , hashAlgorithm , null ) ;
public class StashReader { /** * Gets the metadata for a single table in this stash . This is similar to getting the splits for the table * except that it exposes lower level information about the underlying S3 files . For clients who will use * their own system for reading the files from S3 , such as source files for a map - reduce job , this method provides * the necessary information . For simply iterating over the stash contents using either { @ link # scan ( String ) } * or { @ link # getSplits ( String ) } in conjunction with { @ link # getSplit ( StashSplit ) } is preferred . */ public StashTableMetadata getTableMetadata ( String table ) throws StashNotAvailableException , TableNotStashedException { } }
ImmutableList . Builder < StashFileMetadata > filesBuilder = ImmutableList . builder ( ) ; Iterator < S3ObjectSummary > objectSummaries = getS3ObjectSummariesForTable ( table ) ; while ( objectSummaries . hasNext ( ) ) { S3ObjectSummary objectSummary = objectSummaries . next ( ) ; filesBuilder . add ( new StashFileMetadata ( _bucket , objectSummary . getKey ( ) , objectSummary . getSize ( ) ) ) ; } List < StashFileMetadata > files = filesBuilder . build ( ) ; // Get the prefix arbitrarily from the first file . String prefix = files . get ( 0 ) . getKey ( ) ; prefix = prefix . substring ( 0 , prefix . lastIndexOf ( '/' ) + 1 ) ; return new StashTableMetadata ( _bucket , prefix , table , files ) ;
public class ModelStoreConfig { /** * all the capacities will be used to initialize HashMaps so they should be powers of two */ private void ensurePowerOfTwo ( String parameter , int number ) { } }
if ( Integer . bitCount ( number ) > 1 ) { LOG . warn ( "Parameter {} should be power of two but was {}" , parameter , number ) ; }
public class BusItinerary { /** * Replies the list of the bus halts of the bus itinerary . * @ return a list of bus halts */ @ Pure public Iterable < BusItineraryHalt > busHalts ( ) { } }
final MultiCollection < BusItineraryHalt > halts = new MultiCollection < > ( ) ; halts . addCollection ( this . validHalts ) ; halts . addCollection ( this . invalidHalts ) ; return Collections . unmodifiableCollection ( halts ) ;
public class ST_Split { /** * Splits a MultilineString using a point . * @ param multiLineString * @ param pointToSplit * @ param tolerance * @ return */ private static MultiLineString splitMultiLineStringWithPoint ( MultiLineString multiLineString , Point pointToSplit , double tolerance ) { } }
ArrayList < LineString > linestrings = new ArrayList < LineString > ( ) ; boolean notChanged = true ; int nb = multiLineString . getNumGeometries ( ) ; for ( int i = 0 ; i < nb ; i ++ ) { LineString subGeom = ( LineString ) multiLineString . getGeometryN ( i ) ; LineString [ ] result = splitLineStringWithPoint ( subGeom , pointToSplit , tolerance ) ; if ( result != null ) { Collections . addAll ( linestrings , result ) ; notChanged = false ; } else { linestrings . add ( subGeom ) ; } } if ( ! notChanged ) { return FACTORY . createMultiLineString ( linestrings . toArray ( new LineString [ 0 ] ) ) ; } return null ;
public class PowerMockito {

    /**
     * Verifies certain behavior <b>happened once</b>.
     * Alias for <code>verifyNew(mockClass, times(1))</code>. E.g.:
     * <pre>
     * verifyNew(ClassWithStaticMethod.class);
     * </pre>
     * is equivalent to:
     * <pre>
     * verifyNew(ClassWithStaticMethod.class, times(1));
     * </pre>
     *
     * @param mock class mocked by PowerMock
     * @return a verification handle for asserting the constructor arguments
     */
    @SuppressWarnings("unchecked")
    public static synchronized <T> ConstructorArgumentsVerification verifyNew(Class<T> mock) {
        // Convenience overload: exactly one expected construction.
        return verifyNew(mock, times(1));
    }
}
public class Util { /** * Returns the proposition id for an extended proposition definition . * @ param extendedProposition an ExtendedProposition . * @ return a proposition id { @ link String } . */ private static String propositionId ( Instance extendedProposition ) { } }
Instance proposition = ( Instance ) extendedProposition . getOwnSlotValue ( extendedProposition . getKnowledgeBase ( ) . getSlot ( "proposition" ) ) ; if ( proposition . hasType ( proposition . getKnowledgeBase ( ) . getCls ( "ConstantParameter" ) ) ) { throw new IllegalStateException ( "Constant parameters are not yet supported as " + "components of a high level abstraction definition." ) ; } else { return proposition . getName ( ) ; }
public class XMLTextStruct { /** * used only with java 7 , do not set @ Override */ public Text replaceWholeText ( String content ) throws DOMException { } }
Text oldText = text ; Document doc = XMLUtil . getDocument ( text ) ; Text newText = doc . createTextNode ( content ) ; Node parent = oldText . getParentNode ( ) ; parent . replaceChild ( XMLCaster . toRawNode ( newText ) , XMLCaster . toRawNode ( oldText ) ) ; return oldText ;
public class RuleSessionImpl { /** * / * ( non - Javadoc ) * @ see nz . co . senanque . rules . RuleSession # getMessage ( java . lang . String , java . lang . Object [ ] ) */ public String getMessage ( String message , Object [ ] args ) { } }
for ( int i = 0 ; i < args . length ; i ++ ) { if ( args [ i ] instanceof List < ? > ) { @ SuppressWarnings ( "unchecked" ) List < ProxyField > list = ( List < ProxyField > ) args [ i ] ; StringBuilder sb = new StringBuilder ( ) ; for ( ProxyField pf : list ) { sb . append ( String . valueOf ( pf . getValue ( ) ) ) ; sb . append ( "," ) ; } if ( sb . length ( ) > 0 ) { args [ i ] = sb . substring ( 0 , sb . length ( ) - 1 ) ; } else { args [ i ] = "" ; } } } return m_messageSourceAccessor . getMessage ( message , args ) ;
public class AsyncSocketChannelHelper {

    /**
     * Requests an asynchronous scatter read from an AsyncSocketChannel into an
     * array of direct ByteBuffers and returns immediately with a future.
     * <p>
     * Buffers are filled in order, starting at each buffer's current position,
     * for at most {@code remaining()} bytes each; the position advances by the
     * bytes read and the limit is unchanged. Processing of the array stops at
     * the first {@code null} entry. The IO subsystem retains logical control of
     * the buffers until the future completes; only one read may be in progress
     * per channel at a time (a second attempt yields a completed future with an
     * IOPendingException). If the operation fails, the future reports the
     * exception and some buffers may already have been modified.
     *
     * @param bufs non-null array of direct ByteBuffers; the first entry must be
     *        non-null, entries after the first null entry are ignored
     * @param forceQueue whether to force queueing of the operation
     * @param bytesRequested the number of bytes requested
     * @param useJITBuffer whether a just-in-time buffer may be used
     * @param vci the virtual connection
     * @param asyncIO whether to perform the IO asynchronously
     * @return an {@code IAsyncFuture} acting as a placeholder for the operation
     * @throws IllegalArgumentException if the preconditions on bufs are not met
     * @see IAsyncFuture
     */
    public IAsyncFuture read(ByteBuffer[] bufs, boolean forceQueue, long bytesRequested,
            boolean useJITBuffer, VirtualConnection vci, boolean asyncIO) {
        // Delegate to the generic multi-buffer IO entry point:
        // offset 0, isRead = true.
        return this.schannel.multiIO(bufs, 0, true, forceQueue, bytesRequested, useJITBuffer, vci, asyncIO);
    }
}
public class JNRPEServer {

    /**
     * Prints the JNRPE server usage and, optionally, the error of the last
     * invocation, then terminates the JVM.
     *
     * @param e the last error; may be {@code null} when there is nothing to report
     */
    @SuppressWarnings("unchecked")
    private static void printUsage(final Exception e) {
        printVersion();
        if (e != null) {
            System.out.println(e.getMessage() + "\n");
        }
        HelpFormatter hf = new HelpFormatter();

        // Build a "====" divider exactly as wide as the formatter's page.
        StringBuilder sbDivider = new StringBuilder("=");
        while (sbDivider.length() < hf.getPageWidth()) {
            sbDivider.append("=");
        }

        // DISPLAY SETTING: show groups expanded with parent/children.
        hf.getDisplaySettings().clear();
        hf.getDisplaySettings().add(DisplaySetting.DISPLAY_GROUP_EXPANDED);
        hf.getDisplaySettings().add(DisplaySetting.DISPLAY_PARENT_CHILDREN);

        // USAGE SETTING: full usage line with bracketed arguments.
        hf.getFullUsageSettings().clear();
        hf.getFullUsageSettings().add(DisplaySetting.DISPLAY_PARENT_ARGUMENT);
        hf.getFullUsageSettings().add(DisplaySetting.DISPLAY_ARGUMENT_BRACKETED);
        hf.getFullUsageSettings().add(DisplaySetting.DISPLAY_PARENT_CHILDREN);
        hf.getFullUsageSettings().add(DisplaySetting.DISPLAY_GROUP_EXPANDED);

        hf.setDivider(sbDivider.toString());
        hf.setGroup(configureCommandLine());
        hf.print();

        // NOTE(review): exits with status 0 even on the error path — confirm
        // whether a non-zero exit code was intended when e != null.
        System.exit(0);
    }
}
public class GetIntegrationResponseRequestMarshaller {

    /**
     * Marshalls the given request object into the protocol representation.
     *
     * @param getIntegrationResponseRequest the request to marshall; must not be null
     * @param protocolMarshaller target marshaller receiving the bound fields
     * @throws SdkClientException when the request is null or marshalling fails
     */
    public void marshall(GetIntegrationResponseRequest getIntegrationResponseRequest, ProtocolMarshaller protocolMarshaller) {
        if (getIntegrationResponseRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Bind each request field to its protocol marshalling location.
            protocolMarshaller.marshall(getIntegrationResponseRequest.getRestApiId(), RESTAPIID_BINDING);
            protocolMarshaller.marshall(getIntegrationResponseRequest.getResourceId(), RESOURCEID_BINDING);
            protocolMarshaller.marshall(getIntegrationResponseRequest.getHttpMethod(), HTTPMETHOD_BINDING);
            protocolMarshaller.marshall(getIntegrationResponseRequest.getStatusCode(), STATUSCODE_BINDING);
        } catch (Exception e) {
            // Wrap any failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class BaseDfuImpl {

    /**
     * Waits until a Control Point notification arrives and returns its data.
     * Blocks the calling thread until the response is ready, the device
     * disconnects, an error is reported, or the operation is aborted; while
     * paused the thread keeps waiting.
     * <p>
     * Fix: the InterruptedException handler previously swallowed the thread's
     * interrupted status; it now re-asserts it so callers and executors can
     * still observe the interruption.
     *
     * @return the value returned by the Control Point notification
     * @throws DeviceDisconnectedException when the device disconnects in the
     *         middle of the transmission
     * @throws DfuException when a DFU error occurs
     * @throws UploadAbortedException when the DFU operation was aborted by the user
     */
    byte[] readNotificationResponse() throws DeviceDisconnectedException, DfuException, UploadAbortedException {
        // Do not clear mReceivedData here: the response might already have been
        // obtained. It is cleared in the write request instead.
        try {
            synchronized (mLock) {
                while ((mReceivedData == null && mConnected && mError == 0 && !mAborted) || mPaused)
                    mLock.wait();
            }
        } catch (final InterruptedException e) {
            loge("Sleeping interrupted", e);
            // Preserve the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
        }
        if (mAborted)
            throw new UploadAbortedException();
        if (!mConnected)
            throw new DeviceDisconnectedException("Unable to write Op Code: device disconnected");
        if (mError != 0)
            throw new DfuException("Unable to write Op Code", mError);
        return mReceivedData;
    }
}
public class StepStartedEvent { /** * Sets name , status , start time and title to specified step * @ param step which will be changed */ @ Override public void process ( Step step ) { } }
step . setName ( getName ( ) ) ; step . setStatus ( Status . PASSED ) ; step . setStart ( System . currentTimeMillis ( ) ) ; step . setTitle ( getTitle ( ) ) ;
public class FieldSet { /** * Useful for implementing * { @ link Message # getField ( Descriptors . FieldDescriptor ) } . This method * returns { @ code null } if the field is not set ; in this case it is up * to the caller to fetch the field ' s default value . */ public Object getField ( final FieldDescriptorType descriptor ) { } }
Object o = fields . get ( descriptor ) ; if ( o instanceof LazyField ) { return ( ( LazyField ) o ) . getValue ( ) ; } return o ;
public class EnforcementJobRestEntity {

    /**
     * Creates a new enforcement job in the SLA repository.
     * <pre>
     * POST /enforcements
     * Accept: application/xml or application/json
     * Response 201:
     * {@code <message code="201" message="The enforcementJob has been stored successfully in the SLA Repository Database"/>}
     * </pre>
     * Example: {@code curl -H "Content-type: application/xml" -X POST -d @enforcement.xml localhost:8080/sla-service/enforcements}
     *
     * @param uriInfo request URI context used to build the Location header
     * @param enforcementJob the enforcement job to store
     * @return a 201 Created response with a message body and resource location
     * @throws ConflictException when the job already exists in the repository
     * @throws InternalException on an internal repository error
     * @throws NotFoundException when a referenced entity is missing
     */
    @POST
    public Response createEnforcementJob(@Context UriInfo uriInfo, @RequestBody EnforcementJob enforcementJob)
            throws ConflictException, InternalException, NotFoundException {
        logger.debug("StartOf createEnforcementJob - REQUEST Insert /enforcement");
        EnforcementJobHelperE enforcementJobHelper = getHelper();
        String id, location = null;
        try {
            id = enforcementJobHelper.createEnforcementJob(enforcementJob);
            // Location header points at the newly created resource.
            location = buildResourceLocation(uriInfo.getAbsolutePath().toString(), id);
        } catch (DBExistsHelperException e) {
            // Already stored -> 409 Conflict.
            logger.info("createEnforcementJob ConflictException:" + e.getMessage());
            throw new ConflictException(e.getMessage());
        } catch (InternalHelperException e) {
            logger.info("createEnforcementJob InternalException:", e);
            throw new InternalException(e.getMessage());
        } catch (DBMissingHelperException e) {
            // Referenced entity missing -> 404 Not Found.
            logger.info("createEnforcementJob DBMissingHelperException:" + e.getMessage());
            throw new NotFoundException(e.getMessage());
        }
        logger.debug("EndOf createEnforcementJob");
        return buildResponsePOST(HttpStatus.CREATED,
                createMessage(HttpStatus.CREATED, id,
                        "The enforcementJob has been stored successfully in the SLA Repository Database"),
                location);
    }
}
public class ActivationELU {

    /**
     * Backprop through the ELU activation:
     * f'(x) = alpha * exp(x) for x &lt; 0; f'(x) = 1 for x &gt;= 0.
     *
     * @param in pre-activation input
     * @param epsilon gradient flowing back from the next layer
     * @return pair of (dL/dz, null) — this activation has no parameter gradients
     */
    @Override
    public Pair<INDArray, INDArray> backprop(INDArray in, INDArray epsilon) {
        assertShape(in, epsilon);
        // No support in the native ELU derivative op to override alpha,
        // so scale the derivative manually when alpha != 1.
        if (alpha != 1.00) {
            INDArray dLdz = Nd4j.getExecutioner().exec(new ELUDerivative(in.dup()));
            dLdz.muli(alpha);
            // After scaling, entries equal to alpha correspond to the x >= 0
            // branch whose derivative must be exactly 1 — restore them.
            BooleanIndexing.replaceWhere(dLdz, 1, Conditions.equals(alpha));
            dLdz.muli(epsilon);
            return new Pair<>(dLdz, null);
        } else {
            // NOTE(review): this branch passes `in` without dup() — presumably
            // the executioner may modify `in` in place; confirm callers do not
            // reuse the input afterwards.
            INDArray dLdz = Nd4j.getExecutioner().exec(new ELUDerivative(in));
            dLdz.muli(epsilon);
            return new Pair<>(dLdz, null);
        }
    }
}
public class IntEntity { /** * Implementation of abstract base methods */ @ Override public void writeEnc ( Writer w ) throws IOException { } }
w . write ( "<!ENTITY " ) ; w . write ( mName ) ; w . write ( " \"" ) ; TextEscaper . outputDTDText ( w , mRepl , 0 , mRepl . length ) ; w . write ( "\">" ) ;
public class MailUtil {

    /**
     * Sends an HTML mail to multiple recipients using the account configured
     * in the configuration file.
     *
     * @param tos recipient list
     * @param subject mail subject
     * @param content mail body (interpreted as HTML)
     * @param files optional attachments
     * @since 3.2.0
     */
    public static void sendHtml(Collection<String> tos, String subject, String content, File... files) {
        // isHtml = true selects the HTML variant of the generic send().
        send(tos, subject, content, true, files);
    }
}
public class CdnClient { /** * Get the description of certain IP address . * @ param ip IP address . * @ return Details of statistics */ public DescribeIpResponse describeIp ( String ip ) { } }
DescribeIpRequest request = new DescribeIpRequest ( ) . withIp ( ip ) ; return describeIp ( request ) ;
public class ValidationJob {

    /**
     * Validates that partitions are stored in the required file format.
     * <p>
     * Partitions to be processed are picked up from the config store, selected
     * by the tag passed through GOBBLIN_CONFIG_TAGS_WHITELIST: datasets tagged
     * with that key are resolved, their path name treated as the table name and
     * their parent path name as the database name (e.g. for
     * /data/hive/myDb/myTable, table = myTable, db = myDb). Failures are
     * collected in {@code this.throwables}; if any were recorded, the job fails
     * at the end with a RuntimeException.
     *
     * @throws IOException declared for the metastore client interaction
     */
    private void runFileFormatValidation() throws IOException {
        Preconditions.checkArgument(this.props.containsKey(VALIDATION_FILE_FORMAT_KEY));

        // Use the config store only when a URI is configured...
        this.configStoreUri =
                StringUtils.isNotBlank(this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI))
                        ? Optional.of(this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI))
                        : Optional.<String>absent();
        // ...and when the config management store is enabled.
        if (!Boolean.valueOf(this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_ENABLED,
                ConfigurationKeys.DEFAULT_CONFIG_MANAGEMENT_STORE_ENABLED))) {
            this.configStoreUri = Optional.<String>absent();
        }

        List<Partition> partitions = new ArrayList<>();
        if (this.configStoreUri.isPresent()) {
            Preconditions.checkArgument(this.props.containsKey(GOBBLIN_CONFIG_TAGS_WHITELIST),
                    "Missing required property " + GOBBLIN_CONFIG_TAGS_WHITELIST);
            String tag = this.props.getProperty(GOBBLIN_CONFIG_TAGS_WHITELIST);
            ConfigClient configClient = ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
            Path tagUri = PathUtils.mergePaths(new Path(this.configStoreUri.get()), new Path(tag));
            try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
                // Every dataset importing the tag contributes its partitions.
                Collection<URI> importedBy = configClient.getImportedBy(new URI(tagUri.toString()), true);
                for (URI uri : importedBy) {
                    // Path name = table name, parent path name = db name.
                    String dbName = new Path(uri).getParent().getName();
                    Table table = new Table(client.get().getTable(dbName, new Path(uri).getName()));
                    for (org.apache.hadoop.hive.metastore.api.Partition partition
                            : client.get().listPartitions(dbName, table.getTableName(), maxParts)) {
                        partitions.add(new Partition(table, partition));
                    }
                }
            } catch (Exception e) {
                // Collected, not rethrown: validation continues, job fails at the end.
                this.throwables.add(e);
            }
        }

        for (Partition partition : partitions) {
            if (!shouldValidate(partition)) {
                continue;
            }
            String fileFormat = this.props.getProperty(VALIDATION_FILE_FORMAT_KEY);
            // Map the configured format name onto a built-in Hive SerDe, if any.
            Optional<HiveSerDeWrapper.BuiltInHiveSerDe> hiveSerDe =
                    Enums.getIfPresent(HiveSerDeWrapper.BuiltInHiveSerDe.class, fileFormat.toUpperCase());
            if (!hiveSerDe.isPresent()) {
                throwables.add(new Throwable("Partition SerDe is either not supported or absent"));
                continue;
            }
            String serdeLib = partition.getTPartition().getSd().getSerdeInfo().getSerializationLib();
            if (!hiveSerDe.get().toString().equalsIgnoreCase(serdeLib)) {
                throwables.add(new Throwable("Partition " + partition.getCompleteName() + " SerDe " + serdeLib
                        + " doesn't match with the required SerDe " + hiveSerDe.get().toString()));
            }
        }

        if (!this.throwables.isEmpty()) {
            for (Throwable e : this.throwables) {
                log.error("Failed to validate due to " + e);
            }
            throw new RuntimeException("Validation Job Failed");
        }
    }
}
public class HttpConnectorUtil { /** * Extract sender configuration from transport configuration . * @ param transportsConfiguration { @ link TransportsConfiguration } which sender configurations should be extracted . * @ param scheme scheme of the transport . * @ return extracted { @ link SenderConfiguration } . */ public static SenderConfiguration getSenderConfiguration ( TransportsConfiguration transportsConfiguration , String scheme ) { } }
Map < String , SenderConfiguration > senderConfigurations = transportsConfiguration . getSenderConfigurations ( ) . stream ( ) . collect ( Collectors . toMap ( senderConf -> senderConf . getScheme ( ) . toLowerCase ( Locale . getDefault ( ) ) , config -> config ) ) ; return Constants . HTTPS_SCHEME . equals ( scheme ) ? senderConfigurations . get ( Constants . HTTPS_SCHEME ) : senderConfigurations . get ( Constants . HTTP_SCHEME ) ;
public class RDBMUserLayoutStore { /** * Cache used during import / export operations */ @ Override public void setProfileImportExportCache ( Cache < Tuple < String , String > , UserProfile > profileCache ) { } }
if ( profileCache == null ) { profileCacheHolder . remove ( ) ; } else { profileCacheHolder . set ( profileCache ) ; }
public class ResourceIndexImpl {

    /**
     * {@inheritDoc}
     * <p>
     * Delegates deletion of the given triples to the underlying writer,
     * optionally flushing afterwards.
     */
    public void delete(List<Triple> triples, boolean flush) throws IOException, TrippiException {
        _writer.delete(triples, flush);
    }
}
public class TypeImpl {

    /**
     * Deletes the concept as a type.
     * The played-roles cache is captured BEFORE the deletion so the linked
     * role caches can still be updated after this type is gone.
     */
    @Override
    public void delete() {
        // If the deletion is successful we will need to update the cache of
        // linked concepts. To do this, caches must be loaded first.
        Map<Role, Boolean> plays = cachedDirectPlays.get();
        super.delete();
        // Update the caches of the linked role types.
        plays.keySet().forEach(roleType -> ((RoleImpl) roleType).deleteCachedDirectPlaysByType(getThis()));
    }
}
public class ProtoParser {

    /**
     * Reads a reserved tags and names list like
     * {@code reserved 10, 12 to 14, 'foo';}.
     *
     * @param location source location of the statement
     * @param documentation leading documentation attached to the statement
     * @return the parsed ReservedElement with at least one tag/range/name
     */
    private ReservedElement readReserved(Location location, String documentation) {
        ImmutableList.Builder<Object> valuesBuilder = ImmutableList.builder();
        while (true) {
            char c = reader.peekChar();
            if (c == '"' || c == '\'') {
                // A quoted value is a reserved field name.
                valuesBuilder.add(reader.readQuotedString());
            } else {
                // Otherwise: a single reserved tag or a "start to end" range.
                int tagStart = reader.readInt();
                c = reader.peekChar();
                if (c != ',' && c != ';') {
                    if (!reader.readWord().equals("to")) {
                        throw reader.unexpected("expected ',', ';', or 'to'");
                    }
                    int tagEnd = reader.readInt();
                    valuesBuilder.add(Range.closed(tagStart, tagEnd));
                } else {
                    valuesBuilder.add(tagStart);
                }
            }
            // Each entry is followed by ',' (more entries) or ';' (end).
            c = reader.readChar();
            if (c == ';') break;
            if (c != ',') throw reader.unexpected("expected ',' or ';'");
        }
        ImmutableList<Object> values = valuesBuilder.build();
        if (values.isEmpty()) {
            throw reader.unexpected("'reserved' must have at least one field name or tag");
        }
        // NOTE(review): a range end of the keyword `max` (as in proto syntax
        // "reserved 5 to max") would fail in readInt above — confirm whether
        // `max` must be supported here.
        return new ReservedElement(location, documentation, values);
    }
}
public class EntityDesc { /** * 完全修飾名を返します 。 * @ return 完全修飾名 */ @ Override public String getQualifiedName ( ) { } }
String prefix = StringUtil . defaultString ( entityPrefix , "" ) ; String suffix = StringUtil . defaultString ( entitySuffix , "" ) ; if ( packageName == null || packageName . isEmpty ( ) ) { return prefix + simpleName + suffix ; } return packageName + "." + prefix + simpleName + suffix ;
public class TrainingsImpl {

    /**
     * Deletes a tag from the project.
     * Blocks until the underlying asynchronous service call completes.
     *
     * @param projectId the project id
     * @param tagId id of the tag to be deleted
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by the server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     */
    public void deleteTag(UUID projectId, UUID tagId) {
        deleteTagWithServiceResponseAsync(projectId, tagId).toBlocking().single().body();
    }
}
public class ListUserImportJobsRequestMarshaller {

    /**
     * Marshalls the given request object into the protocol representation.
     *
     * @param listUserImportJobsRequest the request to marshall; must not be null
     * @param protocolMarshaller target marshaller receiving the bound fields
     * @throws SdkClientException when the request is null or marshalling fails
     */
    public void marshall(ListUserImportJobsRequest listUserImportJobsRequest, ProtocolMarshaller protocolMarshaller) {
        if (listUserImportJobsRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Bind each request field to its protocol marshalling location.
            protocolMarshaller.marshall(listUserImportJobsRequest.getUserPoolId(), USERPOOLID_BINDING);
            protocolMarshaller.marshall(listUserImportJobsRequest.getMaxResults(), MAXRESULTS_BINDING);
            protocolMarshaller.marshall(listUserImportJobsRequest.getPaginationToken(), PAGINATIONTOKEN_BINDING);
        } catch (Exception e) {
            // Wrap any failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class Jetty8Helper { /** * Hook up Jetty handlers . Do this before start ( ) is called . */ ServletContextHandler createServletContextHandler ( ) { } }
// Both security and session handlers are already created ( Note : we don ' t want to create a new separate session // handler just for ServletContextHandler - we want to have just one SessionHandler & SessionManager ) final ServletContextHandler context = new ServletContextHandler ( ServletContextHandler . NO_SECURITY | ServletContextHandler . NO_SESSIONS ) ; if ( null != config . context_path && ! config . context_path . isEmpty ( ) ) { context . setContextPath ( config . context_path ) ; } else { context . setContextPath ( "/" ) ; } return context ;
public class GenericValueMetricGroup {

    /**
     * Adds this group's key/value pair to the scope variables map; the key is
     * formatted as a scope variable via {@code ScopeFormat.asVariable}.
     */
    @Override
    protected void putVariables(Map<String, String> variables) {
        variables.put(ScopeFormat.asVariable(this.key), value);
    }
}
public class UBiDiProps { /** * property access functions - - - - - * * * */ public final int getMaxValue ( int which ) { } }
int max ; max = indexes [ IX_MAX_VALUES ] ; switch ( which ) { case UProperty . BIDI_CLASS : return ( max & CLASS_MASK ) ; case UProperty . JOINING_GROUP : return ( max & MAX_JG_MASK ) >> MAX_JG_SHIFT ; case UProperty . JOINING_TYPE : return ( max & JT_MASK ) >> JT_SHIFT ; case UProperty . BIDI_PAIRED_BRACKET_TYPE : return ( max & BPT_MASK ) >> BPT_SHIFT ; default : return - 1 ; /* undefined */ }
public class TasksModel {

    /**
     * Deletes a Task document within the DocumentStore.
     *
     * @param task task to delete
     * @throws ConflictException if the task passed in has a rev which doesn't
     *         match the current rev in the DocumentStore
     * @throws DocumentNotFoundException if the rev for this task does not exist
     * @throws DocumentStoreException if there was an error deleting the rev for this task
     */
    public void deleteDocument(Task task)
            throws ConflictException, DocumentNotFoundException, DocumentStoreException {
        this.mDocumentStore.database().delete(task.getDocumentRevision());
    }
}
public class Configurations { /** * Get the Configuration . * @ return * the Apache Configuration . */ public static Configuration getConfiguration ( ) { } }
if ( configurations == null ) { configurations = ConfigurationFactory . getConfiguration ( ) ; LOGGER . info ( "Initialized the Configurations." ) ; } return configurations ;
public class KeyManager { /** * Loads the secret key . * @ return a SecretKey * @ throws IOException if the file cannot be read * @ throws ClassNotFoundException if deserialization of the SecretKey fails */ private SecretKey loadSecretKey ( ) throws IOException , ClassNotFoundException { } }
final File file = getKeyPath ( KeyType . SECRET ) ; SecretKey key ; try ( InputStream fis = Files . newInputStream ( file . toPath ( ) ) ; ObjectInputStream ois = new ObjectInputStream ( fis ) ) { key = ( SecretKey ) ois . readObject ( ) ; } return this . secretKey = key ;
public class Grid {

    /**
     * Creates the grid from the bounds enclosing all given atoms (iAtoms and
     * jAtoms) and places each atom in its corresponding grid cell.
     * <p>
     * Also checks whether the i and j grids overlap: if their enclosing bounds
     * are more than one cell size apart they cannot be in contact, so the
     * {@code noOverlap} flag is set and {@link #getIndicesContacts()} will do
     * no calculation at all.
     */
    private void fillGrid() {
        if (jbounds != null && !ibounds.overlaps(jbounds, cutoff)) {
            // The two atom sets are too far apart for any contact.
            noOverlap = true;
            return;
        }
        findFullGridIntBounds();
        // One cell per cellSize step in each dimension (+1 for the remainder).
        cells = new GridCell[1 + (bounds[3] - bounds[0]) / cellSize]
                [1 + (bounds[4] - bounds[1]) / cellSize]
                [1 + (bounds[5] - bounds[2]) / cellSize];
        // Place the i atoms, creating cells lazily on first use.
        int i = 0;
        for (Point3d atom : iAtoms) {
            int xind = xintgrid2xgridindex(getFloor(atom.x));
            int yind = yintgrid2ygridindex(getFloor(atom.y));
            int zind = zintgrid2zgridindex(getFloor(atom.z));
            if (cells[xind][yind][zind] == null) {
                cells[xind][yind][zind] = new GridCell(this);
            }
            cells[xind][yind][zind].addIindex(i);
            i++;
        }
        // A single-set grid (self contacts) has no j atoms to place.
        if (jAtoms == null)
            return;
        // Place the j atoms the same way.
        int j = 0;
        for (Point3d atom : jAtoms) {
            int xind = xintgrid2xgridindex(getFloor(atom.x));
            int yind = yintgrid2ygridindex(getFloor(atom.y));
            int zind = zintgrid2zgridindex(getFloor(atom.z));
            if (cells[xind][yind][zind] == null) {
                cells[xind][yind][zind] = new GridCell(this);
            }
            cells[xind][yind][zind].addJindex(j);
            j++;
        }
    }
}
public class MessageReceiverFilterList {

    /**
     * Removes the message filter with the given ID from this queue.
     * <p>
     * NOTE(review): the original javadoc described a {@code messageFilter}
     * parameter while the signature takes a filter ID — the doc should be
     * reconciled with the signature.
     *
     * @param intFilterID ID of the message filter to remove
     * @param bFreeFilter if true, free the removed filter
     * @return true if successful (the filter was found and removed)
     */
    public boolean removeMessageFilter(Integer intFilterID, boolean bFreeFilter) {
        BaseMessageFilter filter = (BaseMessageFilter) m_mapFilters.remove(intFilterID);
        if (filter == null) {
            // NOTE(review): error reported via stdout; a logger would be preferable.
            System.out.println("Error: BaseMessageReceiver.removeMessageFilter");
            return false;
        }
        // Detach first so free() doesn't try to remove the filter again.
        filter.setMessageReceiver(null, null);
        if (bFreeFilter)
            filter.free();
        return true; // Success.
    }
}
public class Parser {

    /**
     * Matches an optional label name following a jump statement, using the
     * token information (tokenBeg, tokenEnd, etc.) for the label identifier.
     *
     * @return the labeled statement the name refers to, or {@code null} when
     *         no label name follows on the same line
     * @throws IOException propagated from the underlying token stream
     */
    private LabeledStatement matchJumpLabelName() throws IOException {
        LabeledStatement label = null;
        // Only a NAME on the same line counts as a label (no EOL in between).
        if (peekTokenOrEOL() == Token.NAME) {
            consumeToken();
            if (labelSet != null) {
                label = labelSet.get(ts.getString());
            }
            if (label == null) {
                // A name is present but no such label is in scope.
                reportError("msg.undef.label");
            }
        }
        return label;
    }
}
public class AWSSecurityTokenServiceClient {

    /**
     * Returns details about the IAM identity whose credentials are used to call the API.
     *
     * @param request the GetCallerIdentity request
     * @return Result of the GetCallerIdentity operation returned by the service.
     * @sample AWSSecurityTokenService.GetCallerIdentity
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity" target="_top">AWS API
     *      Documentation</a>
     */
    @Override
    public GetCallerIdentityResult getCallerIdentity(GetCallerIdentityRequest request) {
        // Run client-side handlers/validation before dispatching the call.
        request = beforeClientExecution(request);
        return executeGetCallerIdentity(request);
    }
}
public class EventDrivenUpdatesMap { /** * Returns a space separated list of component IDs of components associated with ` events ` . * If ` events ` is a { @ link String } the following format is expected : ` event - a [ [ , ] event - b ] [ | default - value ] ` * Where * * ` default - value ` * * is returned if no matching event is found and defaults to ` @ none ` . * @ return the list of component IDs associated with ` events ` or a default value ( see above ) * @ param events either { @ link String } ( see above ) or an { @ link Iterable } of Strings * @ throws IllegalArgumentException if ` events ` is not of the possible types * @ see # get ( String , String ) * @ see # get ( Iterable , String ) */ @ Override @ SuppressWarnings ( "unchecked" ) public String get ( Object events ) throws IllegalArgumentException { } }
if ( events instanceof Iterable ) { return get ( ( Iterable < String > ) events , DEFAULT_VALUE ) ; } else if ( events instanceof String ) { final String [ ] iter = parseEventsAndDefault ( ( String ) events ) ; return get ( iter [ 0 ] , iter [ 1 ] ) ; } throw new IllegalArgumentException ( "expected Iterable<String> or String but was " + events ) ;
public class Input { /** * sequential属性名视图 ; * 注册服务时 , 不要序列化此属性 , 与inputObjs内的属性重复了 , 保留较全的inputObjs属性 . */ @ JSONField ( serialize = false ) public String [ ] getSequentialNames ( ) { } }
List < Obj > sequential = getSequential ( ) ; String [ ] sequentialNames = new String [ sequential . size ( ) ] ; for ( int i = 0 ; i < sequential . size ( ) ; i ++ ) { sequentialNames [ i ] = sequential . get ( i ) . getName ( ) ; } return sequentialNames ;
public class UnsafeHelper {

    /**
     * Returns the size of an instance of this class (in bytes):
     * header + all instance fields, padded to 8 bytes.
     * If this is an array, it does not include the size of the elements.
     *
     * @param clazz the class to measure
     * @return the padded instance size in bytes
     */
    public static long sizeOf(Class clazz) {
        long maxSize = headerSize(clazz);
        // Walk the class hierarchy and track the highest field offset seen.
        while (clazz != Object.class) {
            for (Field f : clazz.getDeclaredFields()) {
                if ((f.getModifiers() & Modifier.STATIC) == 0) {
                    long offset = unsafe.objectFieldOffset(f);
                    if (offset > maxSize) {
                        // Assume 1 byte of the field width. This is ok as it
                        // gets padded out at the end.
                        maxSize = offset + 1;
                    }
                }
            }
            clazz = clazz.getSuperclass();
        }
        // The whole class always pads to an 8-byte boundary, so round up to 8.
        return roundUpTo8(maxSize);
    }
}
public class JvmTypeParameterImpl {

    /**
     * Reports whether the given structural feature holds a non-default value.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public boolean eIsSet(int featureID) {
        switch (featureID) {
            case TypesPackage.JVM_TYPE_PARAMETER__CONSTRAINTS:
                return constraints != null && !constraints.isEmpty();
            case TypesPackage.JVM_TYPE_PARAMETER__NAME:
                // Null-safe comparison against the feature's default value.
                return NAME_EDEFAULT == null ? name != null : !NAME_EDEFAULT.equals(name);
            case TypesPackage.JVM_TYPE_PARAMETER__DECLARATOR:
                return getDeclarator() != null;
        }
        return super.eIsSet(featureID);
    }
}
public class EJBModuleMetaDataImpl { /** * Adds a new application even listener to be notified when an application * has fully started or will begin stopping . * @ param listener the listener */ public void addApplicationEventListener ( EJBApplicationEventListener listener ) // F743-26072 { } }
final boolean isTraceOn = TraceComponent . isAnyTracingEnabled ( ) ; if ( isTraceOn && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "addApplicationEventListener: " + listener ) ; if ( ivApplicationEventListeners == null ) { ivApplicationEventListeners = new ArrayList < EJBApplicationEventListener > ( ) ; } ivApplicationEventListeners . add ( listener ) ;
public class SSLConnector {

    /**
     * Attempts to get a new socket connection to the given host within the
     * given time limit.
     *
     * @param host the host name/IP
     * @param port the port on the host
     * @param localAddress the local host name/IP to bind the socket to
     * @param localPort the port on the local machine
     * @param params {@link HttpConnectionParams Http connection parameters}
     * @return Socket a new socket
     * @throws IOException if an I/O error occurs while creating the socket
     * @throws UnknownHostException if the IP address of the host cannot be determined
     * @throws ConnectTimeoutException
     */
    @Override
    public Socket createSocket(final String host, final int port, final InetAddress localAddress,
            final int localPort, final HttpConnectionParams params)
            throws IOException, UnknownHostException, ConnectTimeoutException {
        if (params == null) {
            throw new IllegalArgumentException("Parameters may not be null");
        }
        int timeout = params.getConnectionTimeout();
        if (timeout == 0) {
            // No timeout requested: connect directly through the SSL factory.
            // Hosts already known to reject their own SNI name are connected
            // by cached address, bypassing the handshake path that fails.
            InetAddress hostAddress = getCachedMisconfiguredHost(host, port);
            if (hostAddress != null) {
                return clientSSLSockFactory.createSocket(hostAddress, port, localAddress, localPort);
            }
            try {
                SSLSocket sslSocket = (SSLSocket) clientSSLSockFactory.createSocket(host, port, localAddress, localPort);
                // Force the handshake now so SNI problems surface here instead
                // of on first use of the socket.
                sslSocket.startHandshake();
                return sslSocket;
            } catch (SSLException e) {
                // Only the "unrecognized_name" SNI misconfiguration is
                // recoverable; everything else propagates.
                if (!e.getMessage().contains(CONTENTS_UNRECOGNIZED_NAME_EXCEPTION)) {
                    throw e;
                }
                // Remember this host as misconfigured and retry by address,
                // which avoids sending the rejected server name again.
                hostAddress = InetAddress.getByName(host);
                cacheMisconfiguredHost(host, port, hostAddress);
                return clientSSLSockFactory.createSocket(hostAddress, port, localAddress, localPort);
            }
        }
        // With a timeout, create an unconnected socket so connect() can
        // enforce the limit. NOTE(review): this path performs no handshake or
        // misconfigured-host fallback — presumably intentional; verify.
        Socket socket = clientSSLSockFactory.createSocket();
        SocketAddress localAddr = new InetSocketAddress(localAddress, localPort);
        socket.bind(localAddr);
        SocketAddress remoteAddr = new InetSocketAddress(host, port);
        socket.connect(remoteAddr, timeout);
        return socket;
    }
}
public class UIOutput { /** * An expression that specifies the Converter for this component . * The value can either be a static value ( ID ) or an EL expression . When a static id is * specified , an instance of the converter type registered with that id is used . When this * is an EL expression , the result of evaluating the expression must be an object that * implements the Converter interface . */ @ JSFProperty ( partialStateHolder = true ) public Converter getConverter ( ) { } }
if ( _converter != null ) { return _converter ; } ValueExpression expression = getValueExpression ( "converter" ) ; if ( expression != null ) { return ( Converter ) expression . getValue ( getFacesContext ( ) . getELContext ( ) ) ; } return null ;
public class ToString { /** * Returns a human - readable string representation of the given * { @ link TermedDocument } . * @ see java . lang . Object # toString ( ) * @ param o * the object to represent as string * @ return a string representation of the object */ protected static String toStringForTermedDocument ( TermedDocument o ) { } }
StringBuilder sb = new StringBuilder ( ) ; boolean first ; sb . append ( "\n* Labels: " ) ; first = true ; SortedSet < String > labelKeys = new TreeSet < > ( o . getLabels ( ) . keySet ( ) ) ; for ( String key : labelKeys ) { if ( first ) { first = false ; } else { sb . append ( "; " ) ; } sb . append ( toString ( o . getLabels ( ) . get ( key ) ) ) ; } sb . append ( "\n* Descriptions: " ) ; first = true ; SortedSet < String > descriptionKeys = new TreeSet < > ( o . getDescriptions ( ) . keySet ( ) ) ; for ( String key : descriptionKeys ) { if ( first ) { first = false ; } else { sb . append ( "; " ) ; } sb . append ( toString ( o . getDescriptions ( ) . get ( key ) ) ) ; } sb . append ( "\n* Aliases: " ) ; first = true ; SortedSet < String > aliasKeys = new TreeSet < > ( o . getAliases ( ) . keySet ( ) ) ; for ( String key : aliasKeys ) { for ( MonolingualTextValue mtv : o . getAliases ( ) . get ( key ) ) { if ( first ) { first = false ; } else { sb . append ( "; " ) ; } sb . append ( toString ( mtv ) ) ; } } return sb . toString ( ) ;
public class TxManager {

    /**
     * Adds the updates of a transaction to the history tree. To be called when
     * starting a new commit().
     *
     * @param txId id of the committing transaction
     * @param txContext context holding the transaction's updates and deletes
     * @param isTrialRun when true, only conflict detection runs; no history or
     *        summary state is modified
     * @return a list of conflicting object ids, or {@code null} if there are
     *         no conflicts
     */
    synchronized List<Long> addUpdates(long txId, TxContext txContext, boolean isTrialRun) {
        if (isSingleSession) {
            // A single session can never conflict with itself: no history needed.
            return null;
        }
        ArrayList<TxObjInfo> updatesAndDeletes = txContext.getUpdatesAndDeletes();
        // First pass: detect conflicts against the server-side update summary.
        ArrayList<Long> conflicts = null;
        for (TxObjInfo clientInfo : updatesAndDeletes) {
            long oid = clientInfo.getOid();
            long ots = clientInfo.getTS();
            // Do not skip objects that appear new: even then the OID may
            // already be in use on the server, which is itself a conflict.
            TxObjInfo serverInfo = updateSummary.get(oid);
            // Conflict test: the server's recorded tx id for this OID must
            // equal the timestamp the client saw. Note the new timestamp may
            // be LOWER than the cached one if the object was updated after
            // this TX started but before this TX first accessed the object.
            if (serverInfo != null && serverInfo.getTxId() != ots) {
                if (clientInfo.isDeleted() && serverInfo.isDeleted()) {
                    // Both sides deleted the object — arguably ignorable.
                    // TODO For now these are still reported as conflicts. To
                    // ignore them we would have to report back so no index
                    // updates are attempted; and ignoring may be wrong anyway,
                    // since the TX may semantically rely on having performed
                    // the delete itself (e.g. if deleted-object counts matter).
                }
                if (conflicts == null) {
                    conflicts = new ArrayList<>();
                }
                conflicts.add(oid);
            }
        }
        if (conflicts != null || isTrialRun) {
            // Either report the conflicts, or (trial run) stop before mutating.
            return conflicts;
        }
        // Second pass: record the updates in history and refresh the summary.
        updateHistory.put(txId, updatesAndDeletes);
        for (TxObjInfo info : updatesAndDeletes) {
            // NOTE(review): an old comment here said "+1 to ensure conflicts
            // even with latest transaction" but no +1 appears in the code —
            // verify whether plain txId is intended.
            info.setTxId(txId);
            updateSummary.put(info.getOid(), info);
        }
        // Not very clean: 'null' indicates no conflicts.
        return null;
    }
}
public class JsonConfig { /** * Finds a PropertyNameProcessor registered to the target class . < br > * Returns null if none is registered . < br > * [ JSON - & gt ; Java ] * @ param propertyType a class used for searching a PropertyNameProcessor . */ public PropertyNameProcessor findJavaPropertyNameProcessor ( Class beanClass ) { } }
if ( ! javaPropertyNameProcessorMap . isEmpty ( ) ) { Object key = javaPropertyNameProcessorMatcher . getMatch ( beanClass , javaPropertyNameProcessorMap . keySet ( ) ) ; return ( PropertyNameProcessor ) javaPropertyNameProcessorMap . get ( key ) ; } return null ;
public class AWSElasticBeanstalkAsyncClient { /** * Simplified method form for invoking the DescribeEnvironments operation with an AsyncHandler . * @ see # describeEnvironmentsAsync ( DescribeEnvironmentsRequest , com . amazonaws . handlers . AsyncHandler ) */ @ Override public java . util . concurrent . Future < DescribeEnvironmentsResult > describeEnvironmentsAsync ( com . amazonaws . handlers . AsyncHandler < DescribeEnvironmentsRequest , DescribeEnvironmentsResult > asyncHandler ) { } }
return describeEnvironmentsAsync ( new DescribeEnvironmentsRequest ( ) , asyncHandler ) ;
public class StorageDir { /** * Gets the { @ link BlockMeta } from this storage dir by its block id . * @ param blockId the block id * @ return { @ link BlockMeta } of the given block or null * @ throws BlockDoesNotExistException if no block is found */ public BlockMeta getBlockMeta ( long blockId ) throws BlockDoesNotExistException { } }
BlockMeta blockMeta = mBlockIdToBlockMap . get ( blockId ) ; if ( blockMeta == null ) { throw new BlockDoesNotExistException ( ExceptionMessage . BLOCK_META_NOT_FOUND , blockId ) ; } return blockMeta ;
public class MyZipUtils {

    /**
     * Checks if a zip is valid by navigating through all of its entries.
     *
     * @param file the zip file to validate
     * @throws IOException if the file cannot be read or is not a valid zip
     */
    public static void validateZip(File file) throws IOException {
        // try-with-resources guarantees the stream is closed even when
        // getNextEntry() throws on a corrupt archive; the previous version
        // leaked the stream in that case and silently swallowed close errors.
        try (ZipInputStream zipInput = new ZipInputStream(new FileInputStream(file))) {
            ZipEntry entry = zipInput.getNextEntry();
            while (entry != null) {
                entry = zipInput.getNextEntry();
            }
        }
    }
}
public class WebUtil { /** * find cookie value * @ param request current request * @ param name cookie name * @ return cookie value */ public static String findCookieValue ( HttpServletRequest request , String name ) { } }
Cookie cookie = findCookie ( request , name ) ; return cookie != null ? cookie . getValue ( ) : null ;
public class NoCompTreeMap {

    /**
     * Removes the mapping previously attached to <code>key</code>.
     * Returns the old mapping if any, or <code>null</code> otherwise.
     */
    public final V remove(Object key) {
        if (key == null)
            return null;
        int key_hash_code = key.hashCode();
        BinTreeNode<K, V> prev = null;
        // Which child of prev the current node is: 0 = left, 1 = right.
        // Passed to remove_node so it can relink the parent correctly.
        int son = 0;
        BinTreeNode<K, V> node = root;
        // Standard BST descent ordered by key hash code.
        while (node != null) {
            if (key_hash_code < node.keyHashCode) {
                prev = node;
                node = node.left;
                son = 0;
            } else {
                if ((key_hash_code > node.keyHashCode) || !node.key.equals(key)) {
                    // Larger hash — or equal hash but a different key (hash
                    // collision): colliding keys are kept in the right subtree.
                    prev = node;
                    node = node.right;
                    son = 1;
                } else {
                    // Exact match: update bookkeeping, then let remove_node
                    // unlink the node and return its old value.
                    size--;
                    cachedHashCode -= node.hashCode();
                    return remove_node(node, prev, son);
                }
            }
        }
        // Key not present.
        return null;
    }
}
public class PropNodeDegreeAtLeastIncr { private void checkAtLeast ( int i ) throws ContradictionException { } }
ISet pot = target . getPotSet ( g , i ) ; ISet ker = target . getMandSet ( g , i ) ; int potSize = pot . size ( ) ; if ( potSize < degrees [ i ] ) { g . removeNode ( i , this ) ; } else if ( potSize == degrees [ i ] && g . getMandatoryNodes ( ) . contains ( i ) && ker . size ( ) < potSize ) { for ( int s : pot ) { target . enforce ( g , i , s , this ) ; } }
public class EntityTypeUtils { /** * Returns attribute names for the given attributes * @ return attribute names */ public static Iterable < String > getAttributeNames ( Iterable < Attribute > attrs ) { } }
return ( ) -> stream ( attrs ) . map ( Attribute :: getName ) . iterator ( ) ;
public class QueueManager { /** * Called during initialization to populate the members of the queues . * @ param event the QueueMemberEvent received */ private void handleQueueMemberEvent ( QueueMemberEvent event ) { } }
final AsteriskQueueImpl queue = getInternalQueueByName ( event . getQueue ( ) ) ; if ( queue == null ) { logger . error ( "Ignored QueueEntryEvent for unknown queue " + event . getQueue ( ) ) ; return ; } AsteriskQueueMemberImpl member = queue . getMember ( event . getLocation ( ) ) ; if ( member == null ) { member = new AsteriskQueueMemberImpl ( server , queue , event . getLocation ( ) , QueueMemberState . valueOf ( event . getStatus ( ) ) , event . getPaused ( ) , event . getPenalty ( ) , event . getMembership ( ) , event . getCallsTaken ( ) , event . getLastCall ( ) ) ; queue . addMember ( member ) ; } else { manageQueueMemberChange ( queue , member , event ) ; }
public class Summarizer { /** * Associates the columns to be summarized with the functions that match their type . All valid combinations are used * @ param group A table slice group * @ return A table containing a row of summarized data for each group in the table slice group */ private Table summarize ( TableSliceGroup group ) { } }
List < Table > results = new ArrayList < > ( ) ; ArrayListMultimap < String , AggregateFunction < ? , ? > > reductionMultimap = getAggregateFunctionMultimap ( ) ; for ( String name : reductionMultimap . keys ( ) ) { List < AggregateFunction < ? , ? > > reductions = reductionMultimap . get ( name ) ; results . add ( group . aggregate ( name , reductions . toArray ( new AggregateFunction < ? , ? > [ 0 ] ) ) ) ; } return combineTables ( results ) ;
public class RawCursor { /** * Move the cursor to the previous available entry , decrementing by the * amount given . The actual amount decremented is returned . If the amount * is less then requested , the cursor must be positioned before the first * available entry . Subclasses may wish to override this method with a * faster implementation . * < p > Calling to toPrevious ( 1 ) is equivalent to calling toPrevious ( ) . * @ param amount positive amount to retreat * @ return actual amount retreated * @ throws IllegalStateException if cursor is not opened */ protected int toPrevious ( int amount ) throws FetchException { } }
if ( amount <= 1 ) { return ( amount <= 0 ) ? 0 : ( toPrevious ( ) ? 1 : 0 ) ; } int count = 0 ; disableKeyAndValue ( ) ; try { while ( amount > 0 ) { if ( toPrevious ( ) ) { count ++ ; amount -- ; } else { break ; } } } finally { enableKeyAndValue ( ) ; } return count ;
public class AbstractEndpointController { /** * Handle the incoming payload . * Upon receipt of a payload , the specialized payload * { @ link com . github . shredder121 . gh _ event _ api . handler handlers } are called that are registered for that particular event . * @ param payload the payload received */ @ RequestMapping ( method = POST ) public void handle ( @ Valid @ RequestBody P payload ) { } }
logger . debug ( "{} handlers" , handlers . size ( ) ) ; for ( H handler : handlers ) { executor . execute ( runnableHandler ( handler , payload ) ) ; }
public class ClassNode { /** * Finds a field matching the given name in this class or a parent class . * @ param name the name of the field of interest * @ return the method matching the given name and parameters or null */ public FieldNode getField ( String name ) { } }
ClassNode node = this ; while ( node != null ) { FieldNode fn = node . getDeclaredField ( name ) ; if ( fn != null ) return fn ; node = node . getSuperClass ( ) ; } return null ;
public class I18nSpecificInList { /** * < p > Setter for itsId . < / p > * @ param pItsId reference */ @ Override public final void setItsId ( final IdI18nSpecificInList pItsId ) { } }
this . itsId = pItsId ; if ( this . itsId == null ) { this . lang = null ; this . itemId = null ; this . itsType = null ; } else { this . lang = this . itsId . getLang ( ) ; this . itemId = this . itsId . getItemId ( ) ; this . itsType = this . itsId . getItsType ( ) ; }