signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class LTieSrtConsumerBuilder { /** * One of ways of creating builder . This might be the only way ( considering all _ functional _ builders ) that might be utilize to specify generic params only once . */ @ Nonnull public static < T > LTieSrtConsumerBuilder < T > tieSrtConsumer ( Consumer < LTieSrtConsumer < T > > consumer ) { } }
return new LTieSrtConsumerBuilder ( consumer ) ;
public class FileData { /** * Sets the file name including path and extension * @ param fullFileName */ public void setFullFileName ( String fullFileName ) { } }
fullFileName = StringSupport . replaceAll ( fullFileName , "\\" , "/" ) ; fullFileName = StringSupport . replaceAll ( fullFileName , "//" , "/" ) ; int lastFileSeparator = fullFileName . lastIndexOf ( '/' ) ; if ( lastFileSeparator != - 1 ) { path = fullFileName . substring ( 0 , lastFileSeparator ) ; } else { path = "" ; } setFileName ( fullFileName . substring ( lastFileSeparator + 1 , fullFileName . length ( ) ) ) ;
public class PHS398ChecklistV1_3Generator { /** * This method will set the values to setFormerInstitutionName * , setIsChangeOfInstitution based on condition */ private void setFormerInstitutionNameAndChangeOfInstitution ( PHS398Checklist13 phsChecklist ) { } }
String answer = getAnswer ( PROPOSAL_YNQ_QUESTION_116 , answerHeaders ) ; String explanation = getAnswer ( PROPOSAL_YNQ_QUESTION_117 , answerHeaders ) ; if ( YnqConstant . YES . code ( ) . equals ( answer ) ) { phsChecklist . setIsChangeOfInstitution ( YesNoDataType . Y_YES ) ; if ( explanation != null ) { phsChecklist . setFormerInstitutionName ( explanation ) ; } else { phsChecklist . setFormerInstitutionName ( null ) ; } } else { phsChecklist . setIsChangeOfInstitution ( YesNoDataType . N_NO ) ; }
public class DiscontinuousAnnotation { /** * getter for value - gets Annotations to be chained . * @ generated * @ return value of the feature */ public FSArray getValue ( ) { } }
if ( DiscontinuousAnnotation_Type . featOkTst && ( ( DiscontinuousAnnotation_Type ) jcasType ) . casFeat_value == null ) jcasType . jcas . throwFeatMissing ( "value" , "de.julielab.jules.types.DiscontinuousAnnotation" ) ; return ( FSArray ) ( jcasType . ll_cas . ll_getFSForRef ( jcasType . ll_cas . ll_getRefValue ( addr , ( ( DiscontinuousAnnotation_Type ) jcasType ) . casFeatCode_value ) ) ) ;
public class ProcessStarterArgs { /** * fill gaps by property lookup * @ param props */ public void underride ( Properties props ) { } }
if ( siblingHost == null ) { siblingHost = props . getProperty ( "starter.shost" ) ; } if ( siblingPort == 0 ) { siblingPort = Integer . parseInt ( props . getProperty ( "starter.sport" , "0" ) ) ; } if ( host == null ) { host = props . getProperty ( "starter.host" ) ; if ( host == null ) { try { host = InetAddress . getLocalHost ( ) . getHostName ( ) ; } catch ( UnknownHostException e ) { e . printStackTrace ( ) ; } } } if ( port == 0 ) { port = Integer . parseInt ( props . getProperty ( "starter.port" , "0" ) ) ; } if ( name == null ) { name = props . getProperty ( "starter.name" ) ; }
public class ArgoClientContext { /** * This gets a list of all the available network interface names . * @ param requiresMulticast return only NIs that are multicast capable * @ return the list of the currently available network interface names * @ throws SocketException if the * { @ linkplain NetworkInterface # getNetworkInterfaces ( ) } call fails */ public List < String > getAvailableNetworkInterfaces ( boolean requiresMulticast ) throws SocketException { } }
Enumeration < NetworkInterface > nis = NetworkInterface . getNetworkInterfaces ( ) ; List < String > multicastNIs = new ArrayList < String > ( ) ; // Console . info ( " Available Multicast - enabled Network Interfaces " ) ; while ( nis . hasMoreElements ( ) ) { NetworkInterface ni = nis . nextElement ( ) ; if ( ni . isUp ( ) && ! ni . isLoopback ( ) ) { if ( requiresMulticast ) { if ( ni . supportsMulticast ( ) ) multicastNIs . add ( ni . getName ( ) ) ; } else { multicastNIs . add ( ni . getName ( ) ) ; } } } return multicastNIs ;
public class CmsExportParameters { /** * Returns the file path , should be a zip file . < p > * @ return the file path */ public String getPath ( ) { } }
// ensure the export file name ends with " . zip " in case of ZIP file export if ( ( m_path != null ) && ! isExportAsFiles ( ) && ! m_path . toLowerCase ( ) . endsWith ( ".zip" ) ) { m_path += ".zip" ; } return m_path ;
public class ZoneTransferIn { /** * Instantiates a ZoneTransferIn object to do an IXFR ( incremental zone * transfer ) . * @ param zone The zone to transfer . * @ param serial The existing serial number . * @ param fallback If true , fall back to AXFR if IXFR is not supported . * @ param host The host from which to transfer the zone . * @ param key The TSIG key used to authenticate the transfer , or null . * @ return The ZoneTransferIn object . * @ throws UnknownHostException The host does not exist . */ public static ZoneTransferIn newIXFR ( Name zone , long serial , boolean fallback , String host , TSIG key ) throws UnknownHostException { } }
return newIXFR ( zone , serial , fallback , host , 0 , key ) ;
public class TokenStream { /** * Attempt to consume this current token as long as it matches the expected value , or throw an exception if the token does not * match . * The { @ link # ANY _ VALUE ANY _ VALUE } constant can be used in the expected values as a wildcard . * @ param expected the expected value of the current token * @ throws ParsingException if the current token doesn ' t match the supplied value * @ throws IllegalStateException if this method was called before the stream was { @ link # start ( ) started } */ public void consume ( String expected ) throws ParsingException , IllegalStateException { } }
if ( completed ) { String msg = CommonI18n . noMoreContentButWasExpectingToken . text ( expected ) ; throw new ParsingException ( tokens . get ( tokens . size ( ) - 1 ) . position ( ) , msg ) ; } // Get the value from the current token . . . if ( expected != ANY_VALUE && ! currentToken ( ) . matches ( expected ) ) { String found = currentToken ( ) . value ( ) ; Position pos = currentToken ( ) . position ( ) ; String fragment = generateFragment ( ) ; String msg = CommonI18n . unexpectedToken . text ( expected , found , pos . getLine ( ) , pos . getColumn ( ) , fragment ) ; throw new ParsingException ( pos , msg ) ; } moveToNextToken ( ) ;
public class TableSession { /** * Set a table property . * @ param strProperty The key to set . * @ param strValue The value to set it to . */ public void setProperty ( String strProperty , String strValue ) { } }
Record record = this . getMainRecord ( ) ; BaseTable table = record . getTable ( ) ; table . setProperty ( strProperty , strValue ) ;
public class SuffixTypeLoader { /** * { @ inheritDoc } */ public Type < T > loadType ( String name ) { } }
for ( String suffix : suffixes ) { Type < T > type = parent . loadType ( name + suffix ) ; if ( type != null ) return type ; } return null ;
public class F1 { /** * Returns a composed function that applies this function to it ' s input and * then applies the { @ code after } function to the result . If evaluation of either * function throws an exception , it is relayed to the caller of the composed * function . * @ param < T > * the type of return value of the new composed function * @ param after * the function applies after this function is applied * @ return the composed function * @ throws NullPointerException * if @ { code after } is null */ public < T > F1 < P1 , T > andThen ( final Function < ? super R , ? extends T > after ) { } }
E . NPE ( after ) ; final Function < P1 , R > me = this ; return new F1 < P1 , T > ( ) { @ Override public T apply ( P1 p1 ) { return after . apply ( me . apply ( p1 ) ) ; } } ;
public class Emitter { /** * Adds a LinkRef to this set of LinkRefs . * @ param key * The key / id . * @ param linkRef * The LinkRef . */ public void addLinkRef ( final String key , final LinkRef linkRef ) { } }
this . linkRefs . put ( key . toLowerCase ( ) , linkRef ) ;
public class Money { /** * Obtains an instance of { @ code Money } from a { @ code BigDecimal } . * This allows you to create an instance with a specific currency and amount . * No rounding is performed on the amount , so it must have a scale compatible * with the currency . * @ param currency the currency , not null * @ param amount the amount of money , not null * @ return the new instance , never null * @ throws ArithmeticException if the scale exceeds the currency scale */ public static Money of ( CurrencyUnit currency , BigDecimal amount ) { } }
MoneyUtils . checkNotNull ( currency , "Currency must not be null" ) ; MoneyUtils . checkNotNull ( amount , "Amount must not be null" ) ; if ( amount . scale ( ) > currency . getDecimalPlaces ( ) ) { throw new ArithmeticException ( "Scale of amount " + amount + " is greater than the scale of the currency " + currency ) ; } return Money . of ( currency , amount , RoundingMode . UNNECESSARY ) ;
public class SharedPool { /** * Return a share connection if it exists . * @ concurrency concurrent */ protected MCWrapper getSharedConnection ( Object affinity , Subject subject , ConnectionRequestInfo cri , boolean enforceSerialReuse , String pmiName , int commitPriority , int branchCoupling ) { } }
final boolean isTracingEnabled = TraceComponent . isAnyTracingEnabled ( ) ; if ( isTracingEnabled && tc . isEntryEnabled ( ) ) { Tr . entry ( this , tc , "getSharedConnection" ) ; } MCWrapper mcWrapper = null ; boolean affinityIsEqual = false ; boolean cmConfigDataIsCompatible = false ; boolean dumpLTCMessage = true ; // We want to only dump one of 86 trace message for every getSharedConnection /* * Dirty read the size of the mcWrapperList . There are no mcWrappers in the * list if mcWrapperListSize is zero and it is imposible for a matching * mcWrapper and affinity id to be added to this list since this thread has * to do the adding . */ if ( mcWrapperListSize > 0 ) { MCWrapper mcWrapperTemp = null ; /* * We have one or more mcWrapper ( s ) . We need to synchronize . */ synchronized ( sharedLockObject ) { if ( mcWrapperListSize > 0 ) { /* * Look for a matching affinity id and commit priority . */ mcWrapperTemp = mcWrapperList [ 0 ] ; if ( affinity != null && affinity . equals ( mcWrapperTemp . getSharedPoolCoordinator ( ) ) ) { affinityIsEqual = true ; ConnectionManager cm = ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapperTemp ) . getCm ( ) ; ResourceRefInfo resRefInfo = cm . getResourceRefInfo ( ) ; if ( resRefInfo . getCommitPriority ( ) == commitPriority ) { int tempBranchCoupling = resRefInfo . getBranchCoupling ( ) ; if ( branchCoupling == tempBranchCoupling ) { // Check if they match first for performance cmConfigDataIsCompatible = true ; } else { cmConfigDataIsCompatible = cm . matchBranchCoupling ( branchCoupling , tempBranchCoupling , ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapperTemp ) . get_managedConnectionFactory ( ) ) ; } } } } } // Lets be really optimistic and release the lock if ( affinityIsEqual && cmConfigDataIsCompatible ) { // When the new transaction code is drop , we will want to use // the following if test for comparing Coordinators // if ( mcWrapperTemp . 
getSharedPoolCoordinator ( ) = = affinity ) { /* * We have a matching affinity id , now we need a matching user data */ boolean subjectMatch = false ; Subject mcWrapperSubject = mcWrapperTemp . getSubject ( ) ; if ( ( subject == null ) && ( mcWrapperSubject == null ) ) { subjectMatch = true ; } else { if ( ( subject != null ) && ( mcWrapperSubject != null ) ) { Equals e = new Equals ( ) ; e . setSubjects ( subject , mcWrapperTemp . getSubject ( ) ) ; if ( AccessController . doPrivileged ( e ) ) { subjectMatch = true ; } } } ManagedConnection mc = mcWrapperTemp . getManagedConnection ( ) ; ConnectionRequestInfo mcWrapperCRI = mc instanceof WSManagedConnection ? ( ( WSManagedConnection ) mc ) . getConnectionRequestInfo ( ) : mcWrapperTemp . getCRI ( ) ; // The cri can be null , so we need to check for null . boolean criMatch = cri == mcWrapperCRI || cri != null && cri . equals ( mcWrapperCRI ) ; if ( criMatch && subjectMatch ) { /* * We have a matching affinity id and user data , but we have one more * test . The following if checks the serial reuse rule . */ if ( enforceSerialReuse && ( mcWrapperTemp . getHandleCount ( ) >= 1 ) ) { /* * We can not use this connections . We need to look for a shared * connection with the handle count of zero or we need to get a new * shareable connection . */ if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( this , tc , "allocateConnection_Common: HandleCount = " + mcWrapperTemp . getHandleCount ( ) ) ; } if ( _pm . logSerialReuseMessage ) { Tr . info ( tc , "ATTEMPT_TO_SHARE_LTC_CONNECTION_J2CA0086" , mcWrapperTemp , pmiName ) ; _pm . logSerialReuseMessage = false ; } if ( dumpLTCMessage ) { if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { dumpLTCMessage = false ; // only dump this info once for every getSharedConnection // request . Tr . debug ( this , tc , "Attempt to share connection within LTC (J2CA0086)" ) ; Tr . debug ( this , tc , "mcWrapper = " + mcWrapperTemp ) ; Tr . 
debug ( this , tc , "pmiName = " + pmiName ) ; Tr . debug ( this , tc , "LTC stack information = " + dumpLTCInformation ( mcWrapperTemp , pmiName , affinity ) ) ; } } } // end enforceSerialReuse & & ( mcWrapperTemp . getHandleCount ( ) > = 1) else { /* * We have a shareable connection to use */ if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { /* * This is used for tracking shared pool usage data */ ++ sop_gets ; } mcWrapper = mcWrapperTemp ; } } // end criMatch & & subjectMatch else { if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { /* * This is used for tracking shared pool usage data */ ++ snop_gets_notfound ; } } // end else ( cri , subj don ' t both match ) } // end affinityIsEqual & & commitPriorityIsEqual else { if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { /* * This is used for tracking shared pool usage data */ ++ snop_gets_notfound ; } } if ( mcWrapper == null ) { synchronized ( sharedLockObject ) { /* * If the mcWrapperListSize > 0 , we may have a connection that will * match . We most likely already checked one of the connections , but * we released the shared lock , so we need to check all of them again , * no more perf tricks . */ for ( int i = 0 ; i < mcWrapperListSize ; ++ i ) { mcWrapperTemp = mcWrapperList [ i ] ; /* * Look for a matching affinity id and user data . */ if ( affinity != null && affinity . equals ( mcWrapperTemp . getSharedPoolCoordinator ( ) ) ) { // When the new transaction code is drop , we will want to use // the following if test for comparing Coordinators // if ( mcWrapperTemp . getSharedPoolCoordinator ( ) = = affinity ) { /* * We have a matching affinity id , now we need a matching user * data */ boolean subjectMatch = false ; Subject mcWrapperSubject = mcWrapperTemp . getSubject ( ) ; if ( ( subject == null ) && ( mcWrapperSubject == null ) ) { subjectMatch = true ; } else { if ( ( subject != null ) && ( mcWrapperSubject != null ) ) { Equals e = new Equals ( ) ; e . 
setSubjects ( subject , mcWrapperTemp . getSubject ( ) ) ; if ( ( AccessController . doPrivileged ( e ) ) . booleanValue ( ) ) { subjectMatch = true ; } } } ManagedConnection mc = mcWrapperTemp . getManagedConnection ( ) ; ConnectionRequestInfo mcWrapperCRI = mc instanceof WSManagedConnection ? ( ( WSManagedConnection ) mc ) . getConnectionRequestInfo ( ) : mcWrapperTemp . getCRI ( ) ; // The cri can be null , so we need to check for null . boolean criMatch = cri == mcWrapperCRI || cri != null && cri . equals ( mcWrapperCRI ) ; if ( criMatch && subjectMatch ) { /* * We have a matching affinity id and user data , but we have one * more test . The following if checks the serial reuse rule . */ if ( enforceSerialReuse && ( mcWrapperTemp . getHandleCount ( ) >= 1 ) ) { /* * We can not use this connections . We need to look for a * shared connection with the handle count of zero or we need * to get a new shareable connection . */ if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( this , tc , "allocateConnection_Common: HandleCount = " + mcWrapperTemp . getHandleCount ( ) ) ; } if ( _pm . logSerialReuseMessage ) { Tr . info ( tc , "ATTEMPT_TO_SHARE_LTC_CONNECTION_J2CA0086" , mcWrapperTemp , pmiName ) ; _pm . logSerialReuseMessage = false ; } if ( dumpLTCMessage ) { if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { dumpLTCMessage = false ; // only dump this info once for every getSharedConnection // request . Tr . debug ( this , tc , "Attempt to share connection within LTC (J2CA0086)" ) ; Tr . debug ( this , tc , "mcWrapper = " + mcWrapperTemp ) ; Tr . debug ( this , tc , "pmiName = " + pmiName ) ; Tr . debug ( this , tc , "LTC stack information = " + dumpLTCInformation ( mcWrapperTemp , pmiName , affinity ) ) ; } } } // end enforceSerialReuse & & ( mcWrapperTemp . getHandleCount ( ) > = 1) else { /* * Look for a matching commitPriority / branchCoupling . */ ConnectionManager cm = ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapperTemp ) . 
getCm ( ) ; ResourceRefInfo resRefInfo = cm . getResourceRefInfo ( ) ; if ( resRefInfo . getCommitPriority ( ) == commitPriority ) { int tempBranchCoupling = resRefInfo . getBranchCoupling ( ) ; if ( branchCoupling == tempBranchCoupling ) { // Check if they match first for performance cmConfigDataIsCompatible = true ; } else { cmConfigDataIsCompatible = cm . matchBranchCoupling ( branchCoupling , tempBranchCoupling , ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapperTemp ) . get_managedConnectionFactory ( ) ) ; } } if ( cmConfigDataIsCompatible ) { /* * We have a shareable connection to use */ if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { /* * This is used for tracking shared pool usage data */ ++ snop_gets ; } mcWrapper = mcWrapperTemp ; break ; } } } // end criMatch & & subjectMatch } // end mcWrapperTemp . getSharedPoolCoordinator ( ) . equals ( affinity ) } // end for loop } // end synchronized if ( mcWrapper == null ) { if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { /* * This is used for tracking shared pool usage data */ ++ snop_gets_notfound ; } } } // end mcWrapper = = null } else { if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { ++ sop_gets_notfound ; } } if ( isTracingEnabled && tc . isEntryEnabled ( ) ) { Tr . exit ( this , tc , "getSharedConnection" , mcWrapper ) ; } return mcWrapper ;
public class ElementMatchers { /** * Creates a matcher that matches none of the given objects by the { @ link java . lang . Object # equals ( Object ) } method . * None of the values must be { @ code null } . * @ param value The input values to be compared against . * @ param < T > The type of the matched object . * @ return A matcher that checks for the equality with none of the given objects . */ public static < T > ElementMatcher . Junction < T > noneOf ( Object ... value ) { } }
return noneOf ( Arrays . asList ( value ) ) ;
public class TableStats { /** * Create a deep copy of " this " instance . * @ return a deep copy */ public TableStats copy ( ) { } }
TableStats copy = new TableStats ( this . rowCount ) ; for ( Map . Entry < String , ColumnStats > entry : this . colStats . entrySet ( ) ) { copy . colStats . put ( entry . getKey ( ) , entry . getValue ( ) . copy ( ) ) ; } return copy ;
public class Functions { /** * Fluent transform operation using primitive types * e . g . * < pre > * { @ code * import static cyclops . ReactiveSeq . mapDoubles ; * ReactiveSeq . ofDoubles ( 1d , 2d , 3d ) * . to ( mapDoubles ( i - > i * 2 ) ) ; * / / [ 2d , 4d , 6d ] * < / pre > */ public static Function < ? super ReactiveSeq < Double > , ? extends ReactiveSeq < Double > > mapDoubles ( DoubleUnaryOperator b ) { } }
return a -> a . doubles ( i -> i , s -> s . map ( b ) ) ;
public class VersionHistory { /** * Returns the timestamp at the first install of the current versionName of this app that Apptentive was aware of . */ public Apptentive . DateTime getTimeAtInstallForVersionName ( String versionName ) { } }
for ( VersionHistoryItem item : versionHistoryItems ) { Apptentive . Version entryVersionName = new Apptentive . Version ( ) ; Apptentive . Version currentVersionName = new Apptentive . Version ( ) ; entryVersionName . setVersion ( item . getVersionName ( ) ) ; currentVersionName . setVersion ( versionName ) ; if ( entryVersionName . equals ( currentVersionName ) ) { return new Apptentive . DateTime ( item . getTimestamp ( ) ) ; } } return new Apptentive . DateTime ( Util . currentTimeSeconds ( ) ) ;
public class ChainedAggregatorImpl { /** * return false if can ' t increment anymore */ private boolean increment ( TridentCollector [ ] lengths , int [ ] indices , int j ) { } }
if ( j == - 1 ) return false ; indices [ j ] ++ ; CaptureCollector capturer = ( CaptureCollector ) lengths [ j ] ; if ( indices [ j ] >= capturer . captured . size ( ) ) { indices [ j ] = 0 ; return increment ( lengths , indices , j - 1 ) ; } return true ;
public class VideoDisplayActivity { /** * Changes the CV algorithm running . Should only be called from a GUI thread . */ public void setProcessing ( VideoProcessing processing ) { } }
if ( this . processing != null ) { // kill the old process this . processing . stopProcessing ( ) ; } if ( Looper . getMainLooper ( ) . getThread ( ) != Thread . currentThread ( ) ) { throw new RuntimeException ( "Not called from a GUI thread. Bad stuff could happen" ) ; } this . processing = processing ; // if the camera is null then it will be initialized when the camera is initialized if ( processing != null && mCamera != null ) { processing . init ( mDraw , mCamera , mCameraInfo , previewRotation ) ; }
public class DependencyFinder { /** * Parses the exported API of the named class from the given archive and * returns all target locations the named class references . */ public Set < Location > parseExportedAPIs ( Archive archive , String name ) { } }
try { return parse ( archive , API_FINDER , name ) ; } catch ( IOException e ) { throw new UncheckedIOException ( e ) ; }
public class Subscription { /** * / * ( non - Javadoc ) * @ see Continueable # closeAll ( ) */ @ Override public void closeAll ( ) { } }
closed . set ( true ) ; queues . stream ( ) . forEach ( Queue :: closeAndClear ) ;
public class PersistentEntityStoreImpl { /** * Clears all properties of specified entity . * @ param entity to clear . */ @ SuppressWarnings ( { } }
"OverlyLongMethod" } ) public void clearProperties ( @ NotNull final PersistentStoreTransaction txn , @ NotNull final Entity entity ) { final Transaction envTxn = txn . getEnvironmentTransaction ( ) ; final PersistentEntityId id = ( PersistentEntityId ) entity . getId ( ) ; final int entityTypeId = id . getTypeId ( ) ; final long entityLocalId = id . getLocalId ( ) ; final PropertiesTable properties = getPropertiesTable ( txn , entityTypeId ) ; final PropertyKey propertyKey = new PropertyKey ( entityLocalId , 0 ) ; try ( Cursor cursor = getPrimaryPropertyIndexCursor ( txn , properties ) ) { for ( boolean success = cursor . getSearchKeyRange ( PropertyKey . propertyKeyToEntry ( propertyKey ) ) != null ; success ; success = cursor . getNext ( ) ) { ByteIterable keyEntry = cursor . getKey ( ) ; final PropertyKey key = PropertyKey . entryToPropertyKey ( keyEntry ) ; if ( key . getEntityLocalId ( ) != entityLocalId ) { break ; } final int propertyId = key . getPropertyId ( ) ; final ByteIterable value = cursor . getValue ( ) ; final PropertyValue propValue = propertyTypes . entryToPropertyValue ( value ) ; txn . propertyChanged ( id , propertyId , propValue . getData ( ) , null ) ; properties . deleteNoFail ( txn , entityLocalId , value , propertyId , propValue . getType ( ) ) ; } }
public class ProcessAdminServicesClientImpl { /** * helper methods */ protected void updateTimer ( String containerId , Long processInstanceId , long timerId , long delay , long period , int repeatLimit , boolean relative ) { } }
Map < String , Number > timerUpdate = new HashMap < > ( ) ; timerUpdate . put ( "delay" , delay ) ; timerUpdate . put ( "period" , period ) ; timerUpdate . put ( "repeatLimit" , repeatLimit ) ; if ( config . isRest ( ) ) { Map < String , Object > valuesMap = new HashMap < String , Object > ( ) ; valuesMap . put ( CONTAINER_ID , containerId ) ; valuesMap . put ( PROCESS_INST_ID , processInstanceId ) ; valuesMap . put ( TIMER_INSTANCE_ID , timerId ) ; Map < String , String > headers = new HashMap < String , String > ( ) ; makeHttpPutRequestAndCreateCustomResponse ( build ( loadBalancer . getUrl ( ) , ADMIN_PROCESS_URI + "/" + UPDATE_TIMER_PROCESS_INST_PUT_URI , valuesMap ) + "?relative=" + relative , timerUpdate , null , headers ) ; } else { CommandScript script = new CommandScript ( Collections . singletonList ( ( KieServerCommand ) new DescriptorCommand ( "ProcessAdminService" , "updateTimer" , serialize ( timerUpdate ) , marshaller . getFormat ( ) . getType ( ) , new Object [ ] { containerId , processInstanceId , timerId , relative } ) ) ) ; ServiceResponse < ? > response = ( ServiceResponse < ? > ) executeJmsCommand ( script , DescriptorCommand . class . getName ( ) , "BPM" , containerId ) . getResponses ( ) . get ( 0 ) ; throwExceptionOnFailure ( response ) ; }
public class File { /** * Returns the relative sort ordering of the paths for this file and the * file { @ code another } . The ordering is platform dependent . * @ param another * a file to compare this file to * @ return an int determined by comparing the two paths . Possible values are * described in the Comparable interface . * @ see Comparable */ public int compareTo ( File another ) { } }
if ( caseSensitive ) { return this . getPath ( ) . compareTo ( another . getPath ( ) ) ; } return this . getPath ( ) . compareTo ( another . getPath ( ) ) ;
public class KDTree { /** * Corrects the start and end indices of a * KDTreeNode after an instance is added to * the tree . The start and end indices for * the master index array ( m _ InstList ) * stored in the nodes need to be updated * for all nodes in the subtree on the * right of a node where the instance * was added . * NOTE : No outside class should call this * method . * @ param node KDTreeNode whose start and end indices * need to be updated . */ protected void afterAddInstance ( KDTreeNode node ) { } }
node . m_Start ++ ; node . m_End ++ ; if ( ! node . isALeaf ( ) ) { afterAddInstance ( node . m_Left ) ; afterAddInstance ( node . m_Right ) ; }
public class StringHelper { /** * Remove all occurrences of the passed character from the specified input * string * @ param sInputString * The input string where the character should be removed . If this * parameter is < code > null < / code > or empty , no removing is done . * @ param sRemoveString * The String to be removed . May be < code > null < / code > or empty in which * case nothing happens . * @ return The input string as is , if the input string is empty or if the remove * string is empty or not contained . */ @ Nullable public static String removeAll ( @ Nullable final String sInputString , @ Nullable final String sRemoveString ) { } }
// Is input string empty ? if ( hasNoText ( sInputString ) ) return sInputString ; final int nRemoveLength = getLength ( sRemoveString ) ; if ( nRemoveLength == 0 ) { // Nothing to be removed return sInputString ; } if ( nRemoveLength == 1 ) { // Shortcut to char version return removeAll ( sInputString , sRemoveString . charAt ( 0 ) ) ; } // Does the string occur anywhere ? int nIndex = sInputString . indexOf ( sRemoveString , 0 ) ; if ( nIndex == STRING_NOT_FOUND ) return sInputString ; // build output buffer final StringBuilder ret = new StringBuilder ( sInputString . length ( ) ) ; int nOldIndex = 0 ; do { ret . append ( sInputString , nOldIndex , nIndex ) ; nOldIndex = nIndex + nRemoveLength ; nIndex = sInputString . indexOf ( sRemoveString , nOldIndex ) ; } while ( nIndex != STRING_NOT_FOUND ) ; ret . append ( sInputString , nOldIndex , sInputString . length ( ) ) ; return ret . toString ( ) ;
public class Repository { /** * topological sort */ public List < Module > sequence ( List < Module > moduleList ) throws CyclicDependency { } }
List < Module > work ; Graph < Module > graph ; Module left ; Module right ; graph = new Graph < Module > ( ) ; for ( int i = 0 ; i < moduleList . size ( ) ; i ++ ) { right = moduleList . get ( i ) ; graph . addNode ( right ) ; if ( i > 0 ) { // inject dependency to previous modules left = moduleList . get ( i - 1 ) ; graph . addEdge ( left , right ) ; } } work = new ArrayList < Module > ( ) ; work . addAll ( moduleList ) ; for ( int i = 0 ; i < work . size ( ) ; i ++ ) { // list grows ! right = work . get ( i ) ; for ( Module l : right . dependencies ( ) ) { graph . addEdge ( l , right ) ; if ( ! work . contains ( l ) ) { work . add ( l ) ; } } } return graph . sort ( ) ;
public class GraphUtil { /** * Create a subgraph by specifying the vertices from the original { @ literal * graph } to { @ literal include } in the subgraph . The provided vertices also * provide the mapping between vertices in the subgraph and the original . * < blockquote > < pre > { @ code * int [ ] [ ] g = toAdjList ( naphthalene ) ; * int [ ] vs = new int [ ] { 0 , 1 , 2 , 3 , 4 , 5 } ; * int [ ] [ ] h = subgraph ( g , vs ) ; * / / for the vertices in h , the provided ' vs ' gives the original index * for ( int v = 0 ; v < h . length ; v + + ) { * / / vs [ v ] is ' v ' in ' g ' * } < / pre > < / blockquote > * @ param graph adjacency list graph * @ param include the vertices of he graph to include in the subgraph * @ return the subgraph */ public static int [ ] [ ] subgraph ( int [ ] [ ] graph , int [ ] include ) { } }
// number of vertices in the graph and the subgraph int n = graph . length ; int m = include . length ; // mapping from vertex in ' graph ' to ' subgraph ' int [ ] mapping = new int [ n ] ; for ( int i = 0 ; i < m ; i ++ ) { mapping [ include [ i ] ] = i + 1 ; } // initialise the subgraph int [ ] degree = new int [ m ] ; int [ ] [ ] subgraph = new int [ m ] [ DEFAULT_DEGREE ] ; // build the subgraph , in the subgraph we denote to adjacent // vertices p and q . If p or q is less then 0 then it is not // in the subgraph for ( int v = 0 ; v < n ; v ++ ) { int p = mapping [ v ] - 1 ; if ( p < 0 ) continue ; for ( int w : graph [ v ] ) { int q = mapping [ w ] - 1 ; if ( q < 0 ) continue ; if ( degree [ p ] == subgraph [ p ] . length ) subgraph [ p ] = copyOf ( subgraph [ p ] , 2 * subgraph [ p ] . length ) ; subgraph [ p ] [ degree [ p ] ++ ] = q ; } } // truncate excess storage for ( int p = 0 ; p < m ; p ++ ) { subgraph [ p ] = copyOf ( subgraph [ p ] , degree [ p ] ) ; } return subgraph ;
public class MSPDIReader { /** * When projectmanager . com exports schedules as MSPDI ( via Aspose tasks ) * they do not have finish dates , just a start date and a duration . * This method populates finish dates . * @ param task task to validate */ private void validateFinishDate ( Task task ) { } }
if ( task . getFinish ( ) == null ) { Date startDate = task . getStart ( ) ; if ( startDate != null ) { if ( task . getMilestone ( ) ) { task . setFinish ( startDate ) ; } else { Duration duration = task . getDuration ( ) ; if ( duration != null ) { ProjectCalendar calendar = task . getEffectiveCalendar ( ) ; task . setFinish ( calendar . getDate ( startDate , duration , false ) ) ; } } } }
public class HazelcastInstanceManager { /** * Shut down the Hazelcast instance and clear stores references . */ public void reset ( ) { } }
if ( null != hazelcastInstance ) { synchronized ( mutex ) { if ( null == hazelcastInstance ) { hazelcastInstance . shutdown ( ) ; hazelcastInstance = null ; } } } if ( ! stores . isEmpty ( ) ) { synchronized ( mutex ) { stores . clear ( ) ; } }
public class RichClientFramework {
    /**
     * Null-safe equality check: two nulls are equal, a null and a non-null are
     * not, and otherwise the result of {@code o1.equals(o2)} is returned.
     *
     * @param o1 first object, may be null
     * @param o2 second object, may be null
     * @return true if o1 and o2 are equal
     */
    private boolean isEqual(Object o1, Object o2) {
        if (o1 == null) {
            // Equal only when the other reference is also null.
            return o2 == null;
        }
        if (o2 == null) {
            return false;
        }
        return o1.equals(o2);
    }
}
public class AbstractAtomFeedParser {
    /**
     * Parse the next item in the feed and return a new parsed instance of the item type. If there is
     * no item to parse, it will return {@code null} and automatically close the parser (in which case
     * there is no need to call {@link #close()}).
     *
     * @throws IOException I/O exception
     * @throws XmlPullParserException XML pull parser exception
     */
    public Object parseNextEntry() throws IOException, XmlPullParserException {
        if (!feedParsed) {
            // Lazily parse the feed-level element exactly once, stopping at the first entry.
            feedParsed = true;
            Xml.parseElement(parser, null, namespaceDictionary, Atom.StopAtAtomEntry.INSTANCE);
        }
        // Close the parser unless an entry is successfully returned below.
        boolean close = true;
        try {
            if (parser.getEventType() == XmlPullParser.START_TAG) {
                Object result = parseEntryInternal();
                // Advance past the entry just parsed so the next call sees the next tag.
                parser.next();
                close = false;
                return result;
            }
        } finally {
            if (close) {
                close();
            }
        }
        // No more entries: the parser has been closed above.
        return null;
    }
}
public class UIData { /** * Visit each UIColumn and any facets it may have defined exactly once */ private boolean visitColumnsAndColumnFacets ( VisitContext context , VisitCallback callback , boolean visitRows ) { } }
if ( visitRows ) { setRowIndex ( - 1 ) ; } if ( getChildCount ( ) > 0 ) { for ( UIComponent column : getChildren ( ) ) { if ( column instanceof UIColumn ) { VisitResult result = context . invokeVisitCallback ( column , callback ) ; // visit the column directly if ( result == VisitResult . COMPLETE ) { return true ; } if ( column . getFacetCount ( ) > 0 ) { for ( UIComponent columnFacet : column . getFacets ( ) . values ( ) ) { if ( columnFacet . visitTree ( context , callback ) ) { return true ; } } } } } } return false ;
public class TableWriteItems { /** * Used to specify multiple hash - only primary keys to be deleted from the * current table . * @ param hashKeyName * hash - only key name * @ param hashKeyValues * a list of hash key values */ public TableWriteItems withHashOnlyKeysToDelete ( String hashKeyName , Object ... hashKeyValues ) { } }
if ( hashKeyName == null ) throw new IllegalArgumentException ( ) ; PrimaryKey [ ] primaryKeys = new PrimaryKey [ hashKeyValues . length ] ; for ( int i = 0 ; i < hashKeyValues . length ; i ++ ) primaryKeys [ i ] = new PrimaryKey ( hashKeyName , hashKeyValues [ i ] ) ; return withPrimaryKeysToDelete ( primaryKeys ) ;
public class FoundationFileRollingAppender { /** * Sets the { @ link java . util . Locale } to be used when processing date patterns . * Variants are not supported ; only language and ( optionally ) country may be * used , e . g . & nbsp ; & quot ; en & quot ; , & nbsp ; & quot ; en _ GB & quot ; or * & quot ; fr _ CA & quot ; are all valid . If no locale is supplied , * { @ link java . util . Locale # ENGLISH } will be used . * @ param datePatternLocale * @ see java . util . Locale * @ see # setDatePattern ( String ) */ public void setDatePatternLocale ( String datePatternLocale ) { } }
if ( datePatternLocale == null ) { LogLog . warn ( "Null date pattern locale supplied for appender [" + this . getName ( ) + "], defaulting to " + this . getProperties ( ) . getDatePatternLocale ( ) ) ; return ; } datePatternLocale = datePatternLocale . trim ( ) ; if ( "" . equals ( datePatternLocale ) ) { LogLog . warn ( "Empty date pattern locale supplied for appender [" + this . getName ( ) + "], defaulting to " + this . getProperties ( ) . getDatePatternLocale ( ) ) ; return ; } final String [ ] parts = datePatternLocale . split ( "_" ) ; switch ( parts . length ) { case 1 : this . getProperties ( ) . setDatePatternLocale ( new Locale ( parts [ 0 ] ) ) ; break ; case 2 : this . getProperties ( ) . setDatePatternLocale ( new Locale ( parts [ 0 ] , parts [ 1 ] ) ) ; break ; default : LogLog . warn ( "Unable to parse date pattern locale supplied for appender [" + this . getName ( ) + "], defaulting to " + this . getProperties ( ) . getDatePatternLocale ( ) ) ; }
public class SoyTypes { /** * Returns true if the input type is a primitive type . This includes bool , int , float , string and * all sanitized contents . Two special cases are proto enum and number : these are proto or * aggregate type in Soy ' s type system , but they should really be treated as primitive types . */ private static boolean isDefinitePrimitive ( SoyType type ) { } }
return type . getKind ( ) == SoyType . Kind . BOOL || isNumericPrimitive ( type ) || type . getKind ( ) . isKnownStringOrSanitizedContent ( ) ;
public class JCudaDriver { /** * Creates a memset node and adds it to a graph . < br > * < br > * Creates a new memset node and adds it to \ p hGraph with \ p numDependencies * dependencies specified via \ p dependencies . * It is possible for \ p numDependencies to be 0 , in which case the node will be placed * at the root of the graph . \ p dependencies may not have any duplicate entries . * A handle to the new node will be returned in \ p phGraphNode . < br > * < br > * The element size must be 1 , 2 , or 4 bytes . * When the graph is launched , the node will perform the memset described by \ p memsetParams . * @ param phGraphNode - Returns newly created node * @ param hGraph - Graph to which to add the node * @ param dependencies - Dependencies of the node * @ param numDependencies - Number of dependencies * @ param memsetParams - Parameters for the memory set * @ param ctx - Context on which to run the node * @ return * CUDA _ SUCCESS , * CUDA _ ERROR _ DEINITIALIZED , * CUDA _ ERROR _ NOT _ INITIALIZED , * CUDA _ ERROR _ INVALID _ VALUE , * CUDA _ ERROR _ INVALID _ CONTEXT * @ see * JCudaDriver # cuMemsetD2D32 * JCudaDriver # cuGraphMemsetNodeGetParams * JCudaDriver # cuGraphMemsetNodeSetParams * JCudaDriver # cuGraphCreate * JCudaDriver # cuGraphDestroyNode * JCudaDriver # cuGraphAddChildGraphNode * JCudaDriver # cuGraphAddEmptyNode * JCudaDriver # cuGraphAddKernelNode * JCudaDriver # cuGraphAddHostNode * JCudaDriver # cuGraphAddMemcpyNode */ public static int cuGraphAddMemsetNode ( CUgraphNode phGraphNode , CUgraph hGraph , CUgraphNode dependencies [ ] , long numDependencies , CUDA_MEMSET_NODE_PARAMS memsetParams , CUcontext ctx ) { } }
return checkResult ( cuGraphAddMemsetNodeNative ( phGraphNode , hGraph , dependencies , numDependencies , memsetParams , ctx ) ) ;
public class FctBnAccEntitiesProcessors { /** * < p > Get PrcWageLineDelete ( create and put into map ) . < / p > * @ param pAddParam additional param * @ return requested PrcWageLineDelete * @ throws Exception - an exception */ protected final PrcWageLineDelete < RS > lazyGetPrcWageLineDelete ( final Map < String , Object > pAddParam ) throws Exception { } }
@ SuppressWarnings ( "unchecked" ) PrcWageLineDelete < RS > proc = ( PrcWageLineDelete < RS > ) this . processorsMap . get ( PrcWageLineDelete . class . getSimpleName ( ) ) ; if ( proc == null ) { proc = new PrcWageLineDelete < RS > ( ) ; proc . setSrvAccSettings ( getSrvAccSettings ( ) ) ; proc . setSrvOrm ( getSrvOrm ( ) ) ; proc . setSrvDatabase ( getSrvDatabase ( ) ) ; // assigning fully initialized object : this . processorsMap . put ( PrcWageLineDelete . class . getSimpleName ( ) , proc ) ; } return proc ;
public class DatePickerSettings { /** * zApplyGapBeforeButtonPixels , This applies the named setting to the parent component . */ void zApplyGapBeforeButtonPixels ( ) { } }
int gapPixels = ( gapBeforeButtonPixels == null ) ? 3 : gapBeforeButtonPixels ; ConstantSize gapSizeObject = new ConstantSize ( gapPixels , ConstantSize . PIXEL ) ; ColumnSpec columnSpec = ColumnSpec . createGap ( gapSizeObject ) ; FormLayout layout = ( ( FormLayout ) parentDatePicker . getLayout ( ) ) ; layout . setColumnSpec ( 2 , columnSpec ) ;
public class StandardDirectoryAgentServer {
    /**
     * Handles a unicast TCP AttrRqst message arrived to this directory agent,
     * replying with an AttrRply carrying the matched attributes (or a
     * SCOPE_NOT_SUPPORTED error when the requested scopes do not match).
     *
     * @param attrRqst the AttrRqst message to handle
     * @param socket the socket connected to the client where to write the reply
     */
    protected void handleTCPAttrRqst(AttrRqst attrRqst, Socket socket) {
        // Scope matching per RFC 2608, 11.1: reject requests outside our scopes.
        if (!scopes.weakMatch(attrRqst.getScopes())) {
            tcpAttrRply.perform(socket, attrRqst, SLPError.SCOPE_NOT_SUPPORTED);
            return;
        }
        final Attributes attributes = matchAttributes(attrRqst);
        if (logger.isLoggable(Level.FINE)) {
            logger.fine("DirectoryAgent " + this + " returning attributes for service "
                    + attrRqst.getURL() + ": " + attributes.asString());
        }
        tcpAttrRply.perform(socket, attrRqst, attributes);
    }
}
public class PartitionedStepControllerImpl {
    /**
     * Spawn the partitions and wait for them to complete.
     *
     * <p>Exits early if the job is already stopping/stopped/failed. Otherwise
     * repeatedly starts partitions and waits for them to finish until all have
     * completed, or until a stop is requested (signalled by
     * {@code JobStoppingException}). If the analyzer reported any exception,
     * the step is rolled back and the first analyzer exception is rethrown.</p>
     */
    @FFDCIgnore(JobStoppingException.class)
    private void executeAndWaitForCompletion(PartitionPlanDescriptor currentPlan) throws JobRestartException {
        if (isStoppingStoppedOrFailed()) {
            logger.fine("Job already in " + runtimeWorkUnitExecution.getWorkUnitJobContext().getBatchStatus().toString()
                    + " state, exiting from executeAndWaitForCompletion() before beginning execution");
            return;
        }
        List<Integer> partitionsToExecute = getPartitionNumbersToExecute(currentPlan);
        logger.fine("Partitions to execute in this run: " + partitionsToExecute
                + ". Total number of partitions in step: " + currentPlan.getNumPartitionsInPlan());
        List<Integer> startedPartitions = new ArrayList<Integer>();
        List<Integer> finishedPartitions = new ArrayList<Integer>();
        // Exceptions reported by the partition analyzer; non-empty triggers rollback below.
        List<Throwable> analyzerExceptions = new ArrayList<Throwable>();
        // Keep looping until all partitions have finished.
        while (finishedPartitions.size() < partitionsToExecute.size()) {
            startPartitions(partitionsToExecute, startedPartitions, finishedPartitions, currentPlan);
            // Check that there are still un-finished partitions running.
            // If not, break out of the loop.
            if (finishedPartitions.size() >= partitionsToExecute.size()) {
                break;
            }
            // Break this loop when nextPartitionFinished is -1
            try {
                waitForNextPartitionToFinish(analyzerExceptions, finishedPartitions);
            } catch (JobStoppingException e) {
                // A stop was requested while waiting; break the loop.
                break;
            }
        }
        if (!analyzerExceptions.isEmpty()) {
            rollbackPartitionedStep();
            throw new BatchContainerRuntimeException("Exception previously thrown by Analyzer, rolling back step.", analyzerExceptions.get(0));
        }
    }
}
public class KvStateSerializer { /** * Deserializes the key and namespace into a { @ link Tuple2 } . * @ param serializedKeyAndNamespace Serialized key and namespace * @ param keySerializer Serializer for the key * @ param namespaceSerializer Serializer for the namespace * @ param < K > Key type * @ param < N > Namespace * @ return Tuple2 holding deserialized key and namespace * @ throws IOException if the deserialization fails for any reason */ public static < K , N > Tuple2 < K , N > deserializeKeyAndNamespace ( byte [ ] serializedKeyAndNamespace , TypeSerializer < K > keySerializer , TypeSerializer < N > namespaceSerializer ) throws IOException { } }
DataInputDeserializer dis = new DataInputDeserializer ( serializedKeyAndNamespace , 0 , serializedKeyAndNamespace . length ) ; try { K key = keySerializer . deserialize ( dis ) ; byte magicNumber = dis . readByte ( ) ; if ( magicNumber != MAGIC_NUMBER ) { throw new IOException ( "Unexpected magic number " + magicNumber + "." ) ; } N namespace = namespaceSerializer . deserialize ( dis ) ; if ( dis . available ( ) > 0 ) { throw new IOException ( "Unconsumed bytes in the serialized key and namespace." ) ; } return new Tuple2 < > ( key , namespace ) ; } catch ( IOException e ) { throw new IOException ( "Unable to deserialize key " + "and namespace. This indicates a mismatch in the key/namespace " + "serializers used by the KvState instance and this access." , e ) ; }
public class Gram { /** * This function unregisters the job from callback * listener . The job status will not be updated . * @ throws GramException if an error occurs during unregistering * @ param job the job */ public static void unregisterListener ( GramJob job ) throws GramException , GSSException { } }
CallbackHandler handler ; GSSCredential cred = getJobCredentials ( job ) ; handler = initCallbackHandler ( cred ) ; unregisterListener ( job , handler ) ;
public class CorrelationAnalysisSolution { /** * Text output of the equation system */ @ Override public void writeToText ( TextWriterStream out , String label ) { } }
if ( label != null ) { out . commentPrintLn ( label ) ; } out . commentPrintLn ( "Model class: " + this . getClass ( ) . getName ( ) ) ; try { if ( getNormalizedLinearEquationSystem ( null ) != null ) { // TODO : more elegant way of doing normalization here ? /* * if ( out instanceof TextWriterStreamNormalizing ) { * TextWriterStreamNormalizing < V > nout = * ( TextWriterStreamNormalizing < V > ) out ; LinearEquationSystem lq = * getNormalizedLinearEquationSystem ( nout . getNormalization ( ) ) ; * out . commentPrint ( " Linear Equation System : " ) ; * out . commentPrintLn ( lq . equationsToString ( nf ) ) ; } else { */ LinearEquationSystem lq = getNormalizedLinearEquationSystem ( null ) ; out . commentPrint ( "Linear Equation System: " ) ; out . commentPrintLn ( lq . equationsToString ( nf ) ) ; } } catch ( NonNumericFeaturesException e ) { LoggingUtil . exception ( e ) ; }
public class SqlUtils { /** * Retrieve a JDBC column value from a ResultSet , using the most appropriate * value type . The returned value should be a detached value object , not * having any ties to the active ResultSet : in particular , it should not be * a Blob or Clob object but rather a byte array respectively String * representation . * Uses the < code > getObject ( index ) < / code > method , but includes additional * " hacks " to get around Oracle 10g returning a non - standard object for its * TIMESTAMP datatype and a < code > java . sql . Date < / code > for DATE columns * leaving out the time portion : These columns will explicitly be extracted * as standard < code > java . sql . Timestamp < / code > object . * @ param rs is the ResultSet holding the data * @ param index is the column index * @ return the value object * @ throws SQLException if thrown by the JDBC API * @ see java . sql . Blob * @ see java . sql . Clob * @ see java . sql . Timestamp */ private static String getResultSetValue ( ResultSet rs , int index ) throws SQLException { } }
Object obj = rs . getObject ( index ) ; return ( obj == null ) ? null : convertUtilsBean . convert ( obj ) ;
public class LineParser { /** * Returns an array of all tokens being parsed . * @ return token array */ public String getToken ( ) { } }
if ( this . tokenPosition > 0 ) { String [ ] ar = StringUtils . split ( this . line , null , this . tokenPosition + 1 ) ; if ( ar != null && ar . length > ( this . tokenPosition - 1 ) ) { return StringUtils . trim ( ar [ this . tokenPosition - 1 ] ) ; } } return null ;
public class StopWatch { /** * 记录当前时间 * @ return StopWatch */ public StopWatch tick ( ) { } }
long old = ticker ; ticker = System . currentTimeMillis ( ) ; lastElapsed = ( int ) ( ticker - old ) ; return this ;
public class JSONCompareResult { /** * Identify that the comparison failed * @ param field Which field failed * @ param expected Expected result * @ param actual Actual result * @ return result of comparision */ public JSONCompareResult fail ( String field , Object expected , Object actual ) { } }
_fieldFailures . add ( new FieldComparisonFailure ( field , expected , actual ) ) ; this . _field = field ; this . _expected = expected ; this . _actual = actual ; fail ( formatFailureMessage ( field , expected , actual ) ) ; return this ;
public class EnvironmentPropertyUpdates { /** * Describes updates to the execution property groups . * @ param propertyGroups * Describes updates to the execution property groups . */ public void setPropertyGroups ( java . util . Collection < PropertyGroup > propertyGroups ) { } }
if ( propertyGroups == null ) { this . propertyGroups = null ; return ; } this . propertyGroups = new java . util . ArrayList < PropertyGroup > ( propertyGroups ) ;
public class RunbookDraftsInner { /** * Publish runbook draft . * @ param resourceGroupName Name of an Azure Resource group . * @ param automationAccountName The name of the automation account . * @ param runbookName The parameters supplied to the publish runbook operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceResponseWithHeaders } object if successful . */ public Observable < Void > beginPublishAsync ( String resourceGroupName , String automationAccountName , String runbookName ) { } }
return beginPublishWithServiceResponseAsync ( resourceGroupName , automationAccountName , runbookName ) . map ( new Func1 < ServiceResponseWithHeaders < Void , RunbookDraftPublishHeaders > , Void > ( ) { @ Override public Void call ( ServiceResponseWithHeaders < Void , RunbookDraftPublishHeaders > response ) { return response . body ( ) ; } } ) ;
public class SafeDatasetCommit { /** * Check if it is OK to commit the output data of a dataset . * A dataset can be committed if and only if any of the following conditions is satisfied : * < ul > * < li > The { @ link JobCommitPolicy # COMMIT _ ON _ PARTIAL _ SUCCESS } policy is used . < / li > * < li > The { @ link JobCommitPolicy # COMMIT _ SUCCESSFUL _ TASKS } policy is used . < / li > * < li > The { @ link JobCommitPolicy # COMMIT _ ON _ FULL _ SUCCESS } policy is used and all of the tasks succeed . < / li > * < / ul > * This method is thread - safe . */ private boolean canCommitDataset ( JobState . DatasetState datasetState ) { } }
// Only commit a dataset if 1 ) COMMIT _ ON _ PARTIAL _ SUCCESS is used , or 2) // COMMIT _ ON _ FULL _ SUCCESS is used and all of the tasks of the dataset have succeeded . return this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_ON_PARTIAL_SUCCESS || this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_SUCCESSFUL_TASKS || ( this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_ON_FULL_SUCCESS && datasetState . getState ( ) == JobState . RunningState . SUCCESSFUL ) ;
public class ExpressRouteCrossConnectionPeeringsInner { /** * Gets the specified peering for the ExpressRouteCrossConnection . * @ param resourceGroupName The name of the resource group . * @ param crossConnectionName The name of the ExpressRouteCrossConnection . * @ param peeringName The name of the peering . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the ExpressRouteCrossConnectionPeeringInner object */ public Observable < ExpressRouteCrossConnectionPeeringInner > getAsync ( String resourceGroupName , String crossConnectionName , String peeringName ) { } }
return getWithServiceResponseAsync ( resourceGroupName , crossConnectionName , peeringName ) . map ( new Func1 < ServiceResponse < ExpressRouteCrossConnectionPeeringInner > , ExpressRouteCrossConnectionPeeringInner > ( ) { @ Override public ExpressRouteCrossConnectionPeeringInner call ( ServiceResponse < ExpressRouteCrossConnectionPeeringInner > response ) { return response . body ( ) ; } } ) ;
public class JcNumber { /** * < div color = ' red ' style = " font - size : 24px ; color : red " > < b > < i > < u > JCYPHER < / u > < / i > < / b > < / div > * < div color = ' red ' style = " font - size : 18px ; color : red " > < i > return the result of dividing a number by another number , return a < b > JcNumber < / b > < / i > < / div > * < br / > */ public JcNumber div ( Number val ) { } }
JcNumber ret = new JcNumber ( val , this , OPERATOR . Number . DIV ) ; QueryRecorder . recordInvocationConditional ( this , "div" , ret , QueryRecorder . literal ( val ) ) ; return ret ;
public class BytesUtils {
    /**
     * Search for a big-endian 4-byte integer in a array of bytes.
     *
     * @param a array of containing only big-endian 4-byte integers.
     * @param key the value to seach for.
     * @return the index found.
     */
    public static int binarySearch(byte[] a, int key) {
        // low/high are byte offsets into 'a'; each entry is a 4-byte big-endian int.
        int low = 0;
        int high = a.length;
        while (low < high) {
            int mid = (low + high) >>> 1;
            // NOTE(review): mid may not be 4-byte aligned; this snaps it to 'low'
            // or 'high' rather than rounding down to a multiple of 4. Presumably
            // relies on a.length being a multiple of 4 — TODO confirm with callers.
            if (mid % 4 != 0) {
                if (high == a.length) {
                    mid = low;
                } else {
                    mid = high;
                }
            }
            int midVal = getInt(a, mid);
            if (midVal < key)
                low = mid + 4;
            else if (midVal > key)
                // NOTE(review): excludes 'mid' entirely (classic variant uses
                // high = mid); verify termination/correctness with tests.
                high = mid - 4;
            else
                return mid; // key found
        }
        if (low == a.length) {
            return low;
        }
        // Key not found: return the offset after 'low' when key sorts above a[low].
        return key > getInt(a, low) ? low + 4 : low;
    }
}
public class FailoverProxy { /** * Launch reconnect implementation . * @ throws SQLException exception */ public void reconnect ( ) throws SQLException { } }
try { listener . reconnect ( ) ; } catch ( SQLException e ) { ExceptionMapper . throwException ( e , null , null ) ; }
public class ResourceReaderImpl { /** * / * ( non - Javadoc ) * @ see net . crowmagnumb . util . ResourceReader # getObject ( java . lang . String , java . lang . String ) */ @ Override public Object getObject ( final String key , final String defaultClassName ) throws UtilException { } }
return formatObject ( key , getString ( key , defaultClassName ) ) ;
public class ResourceInformation { /** * Hide with implementation / interface */ @ Deprecated public void initNesting ( ) { } }
boolean nested = setupManyNesting ( ) ; if ( ! nested && shouldBeNested ( ) ) { setupOneNesting ( ) ; } if ( isNested ( ) ) { PreconditionUtil . verify ( parentField . getOppositeName ( ) != null , "relationship between parent pointing to a nested resource must specify @JsonApiRelation.mappedBy or @JsonApiRelation.opposite, not " + "found for '%s' of %s" , parentField . getUnderlyingName ( ) , implementationClass ) ; }
public class TraceServiceClient { /** * Sends new spans to new or existing traces . You cannot update existing spans . * < p > Sample code : * < pre > < code > * try ( TraceServiceClient traceServiceClient = TraceServiceClient . create ( ) ) { * ProjectName name = ProjectName . of ( " [ PROJECT ] " ) ; * List & lt ; Span & gt ; spans = new ArrayList & lt ; & gt ; ( ) ; * traceServiceClient . batchWriteSpans ( name . toString ( ) , spans ) ; * < / code > < / pre > * @ param name Required . The name of the project where the spans belong . The format is * ` projects / [ PROJECT _ ID ] ` . * @ param spans A list of new spans . The span names must not match existing spans , or the results * are undefined . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ public final void batchWriteSpans ( String name , List < Span > spans ) { } }
BatchWriteSpansRequest request = BatchWriteSpansRequest . newBuilder ( ) . setName ( name ) . addAllSpans ( spans ) . build ( ) ; batchWriteSpans ( request ) ;
public class FileInputFormat { /** * Set the array of { @ link Path } s as the list of inputs * for the map - reduce job . * @ param conf Configuration of the job . * @ param inputPaths the { @ link Path } s of the input directories / files * for the map - reduce job . */ public static void setInputPaths ( JobConf conf , Path ... inputPaths ) { } }
if ( ! inputPaths [ 0 ] . isAbsolute ( ) ) { FileSystem . LogForCollect . info ( "set relative path to non absolute path: " + inputPaths [ 0 ] + " working directory: " + conf . getWorkingDirectory ( ) ) ; } Path path = new Path ( conf . getWorkingDirectory ( ) , inputPaths [ 0 ] ) ; StringBuffer str = new StringBuffer ( StringUtils . escapeString ( path . toString ( ) ) ) ; for ( int i = 1 ; i < inputPaths . length ; i ++ ) { str . append ( StringUtils . COMMA_STR ) ; if ( ! inputPaths [ i ] . isAbsolute ( ) ) { FileSystem . LogForCollect . info ( "set input path to non absolute path: " + inputPaths [ i ] + " working directory: " + conf . getWorkingDirectory ( ) ) ; } path = new Path ( conf . getWorkingDirectory ( ) , inputPaths [ i ] ) ; str . append ( StringUtils . escapeString ( path . toString ( ) ) ) ; } conf . set ( "mapred.input.dir" , str . toString ( ) ) ;
public class SPX { /** * serialization of a single boundary object */ private static Object readBoundary ( ObjectInput in , byte header ) throws IOException , ClassNotFoundException { } }
int past = ( header & 0x1 ) ; if ( past == 1 ) { return Boundary . infinitePast ( ) ; } int future = ( header & 0x2 ) ; if ( future == 2 ) { return Boundary . infiniteFuture ( ) ; } int openClosed = in . readByte ( ) ; IntervalEdge edge ; switch ( openClosed ) { case 0 : edge = IntervalEdge . CLOSED ; break ; case 1 : edge = IntervalEdge . OPEN ; break ; default : throw new StreamCorruptedException ( "Invalid edge state." ) ; } Object t = in . readObject ( ) ; return Boundary . of ( edge , t ) ;
public class Hours { /** * Creates a new < code > Hours < / code > by parsing a string in the ISO8601 format ' PTnH ' . * The parse will accept the full ISO syntax of PnYnMnWnDTnHnMnS however only the * hours component may be non - zero . If any other component is non - zero , an exception * will be thrown . * @ param periodStr the period string , null returns zero * @ return the period in hours * @ throws IllegalArgumentException if the string format is invalid */ @ FromString public static Hours parseHours ( String periodStr ) { } }
if ( periodStr == null ) { return Hours . ZERO ; } Period p = PARSER . parsePeriod ( periodStr ) ; return Hours . hours ( p . getHours ( ) ) ;
public class PartialResponseChangesTypeImpl { /** * Returns all < code > insert < / code > elements * @ return list of < code > insert < / code > */ public List < PartialResponseInsertType < PartialResponseChangesType < T > > > getAllInsert ( ) { } }
List < PartialResponseInsertType < PartialResponseChangesType < T > > > list = new ArrayList < PartialResponseInsertType < PartialResponseChangesType < T > > > ( ) ; List < Node > nodeList = childNode . get ( "insert" ) ; for ( Node node : nodeList ) { PartialResponseInsertType < PartialResponseChangesType < T > > type = new PartialResponseInsertTypeImpl < PartialResponseChangesType < T > > ( this , "insert" , childNode , node ) ; list . add ( type ) ; } return list ;
public class TimestampInterval { /** * / * [ deutsch ] * < p > Kombiniert dieses lokale Zeitstempelintervall mit dem angegebenen * Zeitzonen - Offset zu einem globalen UTC - Intervall . < / p > * @ param offset timezone offset * @ return global timestamp interval interpreted at given offset * @ since 2.0 * @ see # atUTC ( ) * @ see # inTimezone ( TZID ) */ public MomentInterval at ( ZonalOffset offset ) { } }
Boundary < Moment > b1 ; Boundary < Moment > b2 ; if ( this . getStart ( ) . isInfinite ( ) ) { b1 = Boundary . infinitePast ( ) ; } else { Moment m1 = this . getStart ( ) . getTemporal ( ) . at ( offset ) ; b1 = Boundary . of ( this . getStart ( ) . getEdge ( ) , m1 ) ; } if ( this . getEnd ( ) . isInfinite ( ) ) { b2 = Boundary . infiniteFuture ( ) ; } else { Moment m2 = this . getEnd ( ) . getTemporal ( ) . at ( offset ) ; b2 = Boundary . of ( this . getEnd ( ) . getEdge ( ) , m2 ) ; } return new MomentInterval ( b1 , b2 ) ;
public class MapHelper { /** * Return the least power of two greater than or equal to the specified value . * Note that this function will return 1 when the argument is 0. * @ param nValue * a long integer smaller than or equal to 2 < sup > 62 < / sup > . * @ return the least power of two greater than or equal to the specified * value . */ public static long nextPowerOfTwo ( final long nValue ) { } }
if ( nValue == 0 ) return 1 ; long x = nValue - 1 ; x |= x >> 1 ; x |= x >> 2 ; x |= x >> 4 ; x |= x >> 8 ; x |= x >> 16 ; return ( x | x >> 32 ) + 1 ;
public class Pager { /** * Sets the " page " query parameter . * @ param page the value for the " page " query parameter */ private void setPageParam ( int page ) { } }
pageParam . set ( 0 , Integer . toString ( page ) ) ; queryParams . put ( PAGE_PARAM , pageParam ) ;
public class NameConstraintsExtension { /** * Perform the RFC 822 special case check . We have a certificate * that does not contain any subject alternative names . Check that * any EMAILADDRESS attributes in its subject name conform to these * NameConstraints . * @ param subject the certificate ' s subject name * @ returns true if certificate verifies successfully * @ throws IOException on error */ public boolean verifyRFC822SpecialCase ( X500Name subject ) throws IOException { } }
for ( AVA ava : subject . allAvas ( ) ) { ObjectIdentifier attrOID = ava . getObjectIdentifier ( ) ; if ( attrOID . equals ( ( Object ) PKCS9Attribute . EMAIL_ADDRESS_OID ) ) { String attrValue = ava . getValueString ( ) ; if ( attrValue != null ) { RFC822Name emailName ; try { emailName = new RFC822Name ( attrValue ) ; } catch ( IOException ioe ) { continue ; } if ( ! verify ( emailName ) ) { return ( false ) ; } } } } return true ;
public class DefaultPlexusCipher { public String unDecorate ( final String str ) throws PlexusCipherException { } }
Matcher matcher = ENCRYPTED_STRING_PATTERN . matcher ( str ) ; if ( matcher . matches ( ) || matcher . find ( ) ) { return matcher . group ( 1 ) ; } else { throw new PlexusCipherException ( "default.plexus.cipher.badEncryptedPassword" ) ; }
public class RDFBlueprintsHandler { /** * Simplifies the lexical representation of a value , in particular by taking the fragment identifier of URIs . * This is a lossy operation ; many distinct URIs may map to the same fragment . * Conflicts with reserved tokens are avoided . * @ param resource the Value to map * @ return the simplified fragment */ private String createFragment ( final Value resource ) { } }
if ( resource instanceof URI ) { String frag = ( ( URI ) resource ) . getLocalName ( ) ; return RESERVED_FRAGMENTS . contains ( frag ) ? frag + "_" : frag ; } else { return resource . stringValue ( ) ; }
public class RevisionDecoder {
    /**
     * Decodes the header information and returns the Diff.
     *
     * <p>Reads the 3-bit action code (must be DECODER_DATA), then four 5-bit
     * block-size fields (S, E, B, L) plus one padding bit, and delegates to the
     * block-size-aware decode overload. The C block size is fixed at 3.</p>
     *
     * @return Diff
     * @throws UnsupportedEncodingException
     *             if the character encoding is unsupported
     * @throws DecodingException
     *             if the decoding failed
     */
    public Diff decode() throws UnsupportedEncodingException, DecodingException {
        int header = r.read(3);
        if (DiffAction.parse(header) != DiffAction.DECODER_DATA) {
            throw new DecodingException("Invalid codecData code: " + header);
        }
        // NOTE(review): blockSize_C is fixed rather than read from the stream.
        int blockSize_C = 3;
        int blockSize_S = r.read(5);
        int blockSize_E = r.read(5);
        int blockSize_B = r.read(5);
        int blockSize_L = r.read(5);
        // Consume one padding bit.
        r.read(1);
        // Range checks: 5-bit reads presumably already yield 0..31; these guard
        // against a misbehaving reader — TODO confirm the reader's contract.
        if (blockSize_S < 0 || blockSize_S > 31) {
            throw new DecodingException("blockSize_S out of range: " + blockSize_S);
        }
        if (blockSize_E < 0 || blockSize_E > 31) {
            throw new DecodingException("blockSize_E out of range: " + blockSize_E);
        }
        if (blockSize_B < 0 || blockSize_B > 31) {
            throw new DecodingException("blockSize_B out of range: " + blockSize_B);
        }
        if (blockSize_L < 0 || blockSize_L > 31) {
            throw new DecodingException("blockSize_L out of range: " + blockSize_L);
        }
        return decode(blockSize_C, blockSize_S, blockSize_E, blockSize_B, blockSize_L);
    }
}
public class ConnectionValidator {
    /**
     * Remove a listener from the validation list. When the last listener is
     * removed, the scheduled validation frequency is reset so no further
     * validation runs are scheduled.
     *
     * @param listener listener
     */
    public void removeListener(Listener listener) {
        queue.remove(listener);
        if (queue.isEmpty()) {
            synchronized (queue) {
                // Re-check under the lock: another thread may have re-added a
                // listener between the unsynchronized check and acquiring the lock.
                if (currentScheduledFrequency.get() > 0 && queue.isEmpty()) {
                    // Sentinel -1 marks "no validation currently scheduled".
                    currentScheduledFrequency.set(-1);
                }
            }
        }
    }
}
public class ApplicationLauncherApp { /** * Called after a new process is added to the process list * @ param e */ @ Override public void intervalAdded ( ListDataEvent e ) { } }
// retrieve the most recently added process and display it DefaultListModel listModel = ( DefaultListModel ) e . getSource ( ) ; ActiveProcess process = ( ActiveProcess ) listModel . get ( listModel . getSize ( ) - 1 ) ; addProcessTab ( process , outputPanel ) ;
public class StringFunctionLoader { /** * Tokenizes the string into siblings . * @ param inString string to be parsed , in the form arg , arg , . . where arg can be fn ( arg , arg , . . ) * @ return vector of siblings */ static List < String > getCommaTokens ( String inString ) { } }
String input = inString . trim ( ) ; List < String > tokens = new ArrayList < String > ( ) ; String token ; while ( 0 < input . length ( ) ) { if ( COMMA == input . charAt ( 0 ) ) { input = input . substring ( 1 ) ; } token = getNextCommaToken ( input ) ; input = input . substring ( token . length ( ) ) ; tokens . add ( token . trim ( ) ) ; } return tokens ;
public class DefaultXPathBinder { /** * Evaluate the XPath as a list of the given type . * @ param componentType * Possible values : primitive types ( e . g . Short . Type ) , Projection interfaces , any * class with a String constructor or a String factory method , and org . w3c . Node * @ return List of return type that reflects the evaluation result . */ @ SuppressWarnings ( "unchecked" ) @ Override public < T > CloseableList < T > asListOf ( final Class < T > componentType ) { } }
Class < ? > callerClass = ReflectionHelper . getDirectCallerClass ( ) ; return ( CloseableList < T > ) bindMultiValues ( componentType , callerClass , CollectionType . LIST ) ;
public class JDK14Logger { /** * This is legacy way of enable logging in JDBC ( through TRACING parameter ) * Only effective when java . util . logging . config . file is not specified */ @ Deprecated public static synchronized void honorTracingParameter ( Level level ) { } }
if ( ! isLegacyLoggerInit && System . getProperty ( "java.util.logging.config.file" ) == null && System . getProperty ( "java.util.logging.config.class" ) == null ) { legacyLoggerInit ( level ) ; isLegacyLoggerInit = true ; }
public class XCARespondingGatewayAuditor { /** * Audits an ITI - 43 Retrieve Document Set event for XCA Responding Gateway actors . * @ param eventOutcome The event outcome indicator * @ param repositoryEndpointUri The Web service endpoint URI for the document repository * @ param repositoryUniqueIds The XDS . b RepositoryUniqueId value for the repository * @ param documentUniqueIds The list of Document Entry UniqueId ( s ) for the document ( s ) retrieved * @ param homeCommunityIds The list of home community ids used in the transaction * @ param purposesOfUse purpose of use codes ( may be taken from XUA token ) * @ param userRoles roles of the human user ( may be taken from XUA token ) */ public void auditRetrieveDocumentSetEvent ( RFC3881EventOutcomeCodes eventOutcome , String repositoryEndpointUri , String userName , String [ ] documentUniqueIds , String [ ] repositoryUniqueIds , String homeCommunityIds [ ] , List < CodedValueType > purposesOfUse , List < CodedValueType > userRoles ) { } }
if ( ! isAuditorEnabled ( ) ) { return ; } XDSConsumerAuditor . getAuditor ( ) . auditRetrieveDocumentSetEvent ( eventOutcome , repositoryEndpointUri , userName , documentUniqueIds , repositoryUniqueIds , homeCommunityIds , null , purposesOfUse , userRoles ) ;
public class AbsoluteGraphQLError { /** * Creating absolute paths follows the following logic : * Relative path is null - > Absolute path null * Relative path is empty - > Absolute paths is path up to the field . * Relative path is not empty - > Absolute paths [ base Path , relative Path ] * @ return List of paths from the root . */ private List < Object > createAbsolutePath ( ExecutionPath executionPath , GraphQLError relativeError ) { } }
return Optional . ofNullable ( relativeError . getPath ( ) ) . map ( originalPath -> { List < Object > path = new ArrayList < > ( ) ; path . addAll ( executionPath . toList ( ) ) ; path . addAll ( relativeError . getPath ( ) ) ; return path ; } ) . map ( Collections :: unmodifiableList ) . orElse ( null ) ;
public class SimpleDBUtils { /** * Decodes date value from the string representation created using encodeDate ( . . ) function . * @ param value * string representation of the date value * @ return original date value */ public static Date decodeDate ( String value ) throws ParseException { } }
String javaValue = value . substring ( 0 , value . length ( ) - 3 ) + value . substring ( value . length ( ) - 2 ) ; SimpleDateFormat dateFormatter = new SimpleDateFormat ( dateFormat ) ; return dateFormatter . parse ( javaValue ) ;
public class Assert {
    /**
     * Asserts that an object is not {@code null}.
     * <pre class="code">
     * Assert.notNull(clazz, "The class must not be null");
     * </pre>
     *
     * @param object the object to check (may be null, in which case the assertion fails)
     * @param message the exception message to use if the assertion fails
     * @throws IllegalArgumentException if the object is {@code null}
     */
    public static void notNull(final Object object, final String message) {
        if (null == object) {
            throw new IllegalArgumentException(message);
        }
    }
}
public class ClassInfo { /** * " Visit " a method . * @ param access field access modifiers , etc . * @ param name field name * @ param description field description * @ param signature field signature * @ param exceptions list of exception names the method throws * @ return null . */ @ Override public MethodVisitor visitMethod ( int access , String name , String description , String signature , String [ ] exceptions ) { } }
methods . add ( new MethodInfo ( access , name , description , signature , exceptions ) ) ; return null ;
public class ConnectionFactoryService {
    /**
     * {@inheritDoc}
     *
     * Verifies that the application currently on the thread is allowed to use
     * this connection factory when the backing resource adapter is embedded in
     * an application.
     *
     * @throws ResourceException if the current application may not access the adapter
     */
    @Override
    protected void checkAccess() throws ResourceException {
        BootstrapContextImpl bootstrapContext = bootstrapContextRef.getServiceWithException();
        ResourceAdapterMetaData metadata = bootstrapContext.getResourceAdapterMetaData();
        if (metadata != null && metadata.isEmbedded()) { // metadata is null for SIB/MQ
            // Determine which application (if any) is currently on the thread.
            ComponentMetaData cData = ComponentMetaDataAccessorImpl.getComponentMetaDataAccessor().getComponentMetaData();
            String currentApp = null;
            if (cData != null)
                currentApp = cData.getJ2EEName().getApplication();
            String adapterName = bootstrapContext.getResourceAdapterName();
            // The application that embeds the adapter is the only one allowed to use it.
            String embeddedApp = metadata.getJ2EEName().getApplication();
            Utils.checkAccessibility(jndiName, adapterName, embeddedApp, currentApp, false);
        }
    }
}
public class LIBORMarketModelStandard {
    /**
     * Returns the complete drift vector for the given time index, given that the current
     * state is realizationAtTimeIndex. Note: the returned random variables are defensive
     * copies and may be modified. The drift is zero for rates that are already fixed.
     *
     * @see net.finmath.montecarlo.interestrate.models.LIBORMarketModelStandard#getNumeraire(double)
     *      The calculation of the drift is consistent with the calculation of the numeraire
     *      in <code>getNumeraire</code>.
     * @param timeIndex time index <i>i</i> for which the drift should be returned
     * @param realizationAtTimeIndex current forward rate vector at time index <i>i</i>
     * @param realizationPredictor predictor realization (unused here; kept for the interface)
     * @return the drift vector &mu;(t<sub>i</sub>) as a <code>RandomVariable[]</code>
     */
    @Override
    public RandomVariable[] getDrift(int timeIndex, RandomVariable[] realizationAtTimeIndex, RandomVariable[] realizationPredictor) {
        double time = getTime(timeIndex);
        // First LIBOR that is not yet fixed at 'time'; negative index means 'time'
        // falls between period starts, so map the insertion point accordingly.
        int firstLiborIndex = this.getLiborPeriodIndex(time) + 1;
        if (firstLiborIndex < 0) {
            firstLiborIndex = -firstLiborIndex - 1 + 1;
        }
        // Allocate drift vector and initialize to zero (will be used to sum up drift components).
        // Entries below firstLiborIndex stay null: those rates are already fixed.
        RandomVariable[] drift = new RandomVariable[getNumberOfComponents()];
        RandomVariable[][] covarianceFactorSums = new RandomVariable[getNumberOfComponents()][getNumberOfFactors()];
        for (int componentIndex = firstLiborIndex; componentIndex < getNumberOfComponents(); componentIndex++) {
            drift[componentIndex] = new RandomVariableFromDoubleArray(0.0);
        }
        // Calculate drift for each componentIndex (starting at firstLiborIndex, others are zero).
        for (int componentIndex = firstLiborIndex; componentIndex < getNumberOfComponents(); componentIndex++) {
            double periodLength = liborPeriodDiscretization.getTimeStep(componentIndex);
            RandomVariable libor = realizationAtTimeIndex[componentIndex];
            // Measure transform weight: periodLength * L / (1 + L * periodLength) — presumably
            // the factor for log-coordinates under the spot measure; confirm against getNumeraire.
            RandomVariable oneStepMeasureTransform = libor.discount(libor, periodLength).mult(periodLength);
            // oneStepMeasureTransform = oneStepMeasureTransform.mult(libor);
            RandomVariable[] factorLoading = getFactorLoading(timeIndex, componentIndex, realizationAtTimeIndex);
            RandomVariable[] covarianceFactors = new RandomVariable[getNumberOfFactors()];
            for (int factorIndex = 0; factorIndex < getNumberOfFactors(); factorIndex++) {
                covarianceFactors[factorIndex] = factorLoading[factorIndex].mult(oneStepMeasureTransform);
                covarianceFactorSums[componentIndex][factorIndex] = covarianceFactors[factorIndex];
                // Running prefix sum over components: each component's drift accumulates the
                // transformed factor loadings of all components up to and including itself.
                if (componentIndex > firstLiborIndex) {
                    covarianceFactorSums[componentIndex][factorIndex] = covarianceFactorSums[componentIndex][factorIndex].
                            add(covarianceFactorSums[componentIndex - 1][factorIndex]);
                }
            }
            // Drift contribution: inner product of the accumulated factors with this
            // component's own factor loading.
            for (int factorIndex = 0; factorIndex < getNumberOfFactors(); factorIndex++) {
                drift[componentIndex] = drift[componentIndex].addProduct(covarianceFactorSums[componentIndex][factorIndex], factorLoading[factorIndex]);
            }
        }
        // Above is the drift for the spot measure: a simple conversion makes it the drift of the
        // terminal measure (subtract the drift of the last component from every component).
        if (measure == Measure.TERMINAL) {
            for (int componentIndex = firstLiborIndex; componentIndex < getNumberOfComponents(); componentIndex++) {
                drift[componentIndex] = drift[componentIndex].sub(drift[getNumberOfComponents() - 1]);
            }
        }
        // Drift adjustment for log-coordinate in each component: subtract half the
        // instantaneous variance (Ito correction).
        for (int componentIndex = firstLiborIndex; componentIndex < getNumberOfComponents(); componentIndex++) {
            RandomVariable variance = covarianceModel.getCovariance(timeIndex, componentIndex, componentIndex, realizationAtTimeIndex);
            drift[componentIndex] = drift[componentIndex].addProduct(variance, -0.5);
        }
        return drift;
    }
}
public class AbsoluteGraphQLError { /** * Creating absolute locations follows the following logic : * Relative locations is null - > Absolute locations null * Relative locations is empty - > Absolute locations base locations of the field . * Relative locations is not empty - > Absolute locations [ base line + relative line location ] * @ param relativeError relative error * @ param fields fields on the current field . * @ return List of locations from the root . */ private List < SourceLocation > createAbsoluteLocations ( GraphQLError relativeError , MergedField fields ) { } }
Optional < SourceLocation > baseLocation = Optional . ofNullable ( fields . getSingleField ( ) . getSourceLocation ( ) ) ; // if ( ! fields . isEmpty ( ) ) { // baseLocation = Optional . ofNullable ( fields . get ( 0 ) . getSourceLocation ( ) ) ; // } else { // baseLocation = Optional . empty ( ) ; // relative error empty path should yield an absolute error with the base path if ( relativeError . getLocations ( ) != null && relativeError . getLocations ( ) . isEmpty ( ) ) { return baseLocation . map ( Collections :: singletonList ) . orElse ( null ) ; } return Optional . ofNullable ( relativeError . getLocations ( ) ) . map ( locations -> locations . stream ( ) . map ( l -> baseLocation . map ( base -> new SourceLocation ( base . getLine ( ) + l . getLine ( ) , base . getColumn ( ) + l . getColumn ( ) ) ) . orElse ( null ) ) . collect ( Collectors . toList ( ) ) ) . map ( Collections :: unmodifiableList ) . orElse ( null ) ;
public class JenkinsServer { /** * Create a view on the server using the provided xml and in the provided * folder . * @ param folder the folder . * @ param viewName the view name . * @ param viewXml the view xml . * @ param crumbFlag < code > true < / code > to add < b > crumbIssuer < / b > * < code > false < / code > otherwise . * @ throws IOException in case of an error . */ public JenkinsServer createView ( FolderJob folder , String viewName , String viewXml , Boolean crumbFlag ) throws IOException { } }
client . post_xml ( UrlUtils . toBaseUrl ( folder ) + "createView?name=" + EncodingUtils . formParameter ( viewName ) , viewXml , crumbFlag ) ; return this ;
public class DataLabelingServiceClient { /** * Starts a labeling task for image . The type of image labeling task is configured by feature in * the request . * < p > Sample code : * < pre > < code > * try ( DataLabelingServiceClient dataLabelingServiceClient = DataLabelingServiceClient . create ( ) ) { * String formattedParent = DataLabelingServiceClient . formatDatasetName ( " [ PROJECT ] " , " [ DATASET ] " ) ; * HumanAnnotationConfig basicConfig = HumanAnnotationConfig . newBuilder ( ) . build ( ) ; * LabelImageRequest . Feature feature = LabelImageRequest . Feature . FEATURE _ UNSPECIFIED ; * AnnotatedDataset response = dataLabelingServiceClient . labelImageAsync ( formattedParent , basicConfig , feature ) . get ( ) ; * < / code > < / pre > * @ param parent Required . Name of the dataset to request labeling task , format : * projects / { project _ id } / datasets / { dataset _ id } * @ param basicConfig Required . Basic human annotation config . * @ param feature Required . The type of image labeling task . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi ( "The surface for long-running operations is not stable yet and may change in the future." ) public final OperationFuture < AnnotatedDataset , LabelOperationMetadata > labelImageAsync ( String parent , HumanAnnotationConfig basicConfig , LabelImageRequest . Feature feature ) { } }
DATASET_PATH_TEMPLATE . validate ( parent , "labelImage" ) ; LabelImageRequest request = LabelImageRequest . newBuilder ( ) . setParent ( parent ) . setBasicConfig ( basicConfig ) . setFeature ( feature ) . build ( ) ; return labelImageAsync ( request ) ;
public class StringReducer { /** * Get the output schema , given the input schema */ @ Override public Schema transform ( Schema schema ) { } }
int nCols = schema . numColumns ( ) ; List < ColumnMetaData > meta = schema . getColumnMetaData ( ) ; List < ColumnMetaData > newMeta = new ArrayList < > ( nCols ) ; newMeta . addAll ( meta ) ; newMeta . add ( new StringMetaData ( outputColumnName ) ) ; return schema . newSchema ( newMeta ) ;
public class SinkExecutor { /** * Add TimerTask to invoke flush ( ) in IMetricsSink */ private void flushSinkAtInterval ( ) { } }
Object flushIntervalObj = sinkConfig . get ( MetricsSinksConfig . CONFIG_KEY_FLUSH_FREQUENCY_MS ) ; // If the config is not set , we consider the flush ( ) would never be invoked if ( flushIntervalObj != null ) { final Duration flushInterval = TypeUtils . getDuration ( flushIntervalObj , ChronoUnit . MILLIS ) ; Runnable flushSink = new Runnable ( ) { @ Override public void run ( ) { metricsSink . flush ( ) ; // Plan itself in future slaveLooper . registerTimerEvent ( flushInterval , this ) ; } } ; // Plan the runnable explicitly at the first time slaveLooper . registerTimerEvent ( flushInterval , flushSink ) ; }
public class RequestProbeService { /** * Iterate through all the probe extensions and process the counter methods * of interested probe extensions * @ param event * : Event for which the probe extensions to be processed . */ public static void processAllCounterProbeExtensions ( Event event ) { } }
List < ProbeExtension > probeExtnList = RequestProbeService . getProbeExtensions ( ) ; for ( int i = 0 ; i < probeExtnList . size ( ) ; i ++ ) { ProbeExtension probeExtension = probeExtnList . get ( i ) ; try { // Check if this probe extension is interested in // counter events if ( probeExtension . invokeForCounter ( ) ) { probeExtension . processCounter ( event ) ; } } catch ( Exception e ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "----------------Probe extension invocation failure---------------" ) ; Tr . debug ( tc , probeExtension . getClass ( ) . getName ( ) + ".processCounterEvent failed because of the following reason:" ) ; Tr . debug ( tc , e . getMessage ( ) ) ; } FFDCFilter . processException ( e , RequestProbeService . class . getName ( ) + ".processAllCounterProbeExtensions" , "215" ) ; } }
public class JmxMetricPoller { /** * There are issues loading some JMX attributes on some systems . This protects us from a * single bad attribute stopping us reading any metrics ( or just a random sampling ) out of * the system . */ private static List < Attribute > safelyLoadAttributes ( MBeanServerConnection server , ObjectName objectName , List < String > matchingNames ) { } }
try { // first try batch loading all attributes as this is faster return batchLoadAttributes ( server , objectName , matchingNames ) ; } catch ( Exception e ) { // JBOSS ticket : https : / / issues . jboss . org / browse / AS7-4404 LOGGER . info ( "Error batch loading attributes for {} : {}" , objectName , e . getMessage ( ) ) ; // some containers ( jboss I am looking at you ) fail the entire getAttributes request // if one is broken we can get the working attributes if we ask for them individually return individuallyLoadAttributes ( server , objectName , matchingNames ) ; }
public class ResponseAttachmentInputStreamSupport {
    /**
     * Gets a handler for requests to close an input stream. Both callbacks are
     * deliberate no-ops: the superclass closes the stream entry after
     * {@code handleRequest} returns, and a missing stream needs no action.
     *
     * @return the handler
     */
    ManagementRequestHandler<Void, Void> getCloseHandler() {
        return new AbstractAttachmentHandler() {
            @Override
            void handleRequest(TimedStreamEntry entry, FlushableDataOutput output) throws IOException {
                // no-op as AbstractAttachmentHandler will close the entry after calling this
            }

            @Override
            void handleMissingStream(int requestId, int index, FlushableDataOutput output) throws IOException {
                // no-op as there's nothing to do
            }
        };
    }
}
public class Daemon { /** * 0program is running or service is OK * 1program is dead and / var / run pid file exists * 2program is dead and / var / lock lock file exists * 3program is not running * 4program or service status is unknown * 5-99reserved for future LSB use * 100-149reserved for distribution use * 150-199reserved for application use * 200-254reserved */ public DaemonStatus checkStatus ( ) { } }
if ( this . pidfile == null ) { throw new IllegalStateException ( "No pidfile specified, cannot check status!" ) ; } if ( ! pidfile . exists ( ) ) { return DaemonStatus . STATUS_NOT_RUNNING ; } final int pid ; try { byte [ ] content = Files . readAllBytes ( pidfile . toPath ( ) ) ; String s = new String ( content , StandardCharsets . UTF_8 ) . trim ( ) ; pid = Integer . parseInt ( s ) ; } catch ( Exception e ) { System . err . println ( e . getMessage ( ) ) ; return DaemonStatus . STATUS_UNKNOWN ; } int rs = posix . kill ( pid , 0 ) ; if ( rs == 0 ) { return DaemonStatus . STATUS_RUNNING ; } else { return DaemonStatus . STATUS_DEAD ; }
public class TSDB { /** * Verifies that the data and UID tables exist in HBase and optionally the * tree and meta data tables if the user has enabled meta tracking or tree * building * @ return An ArrayList of objects to wait for * @ throws TableNotFoundException * @ since 2.0 */ public Deferred < ArrayList < Object > > checkNecessaryTablesExist ( ) { } }
final ArrayList < Deferred < Object > > checks = new ArrayList < Deferred < Object > > ( 2 ) ; checks . add ( client . ensureTableExists ( config . getString ( "tsd.storage.hbase.data_table" ) ) ) ; checks . add ( client . ensureTableExists ( config . getString ( "tsd.storage.hbase.uid_table" ) ) ) ; if ( config . enable_tree_processing ( ) ) { checks . add ( client . ensureTableExists ( config . getString ( "tsd.storage.hbase.tree_table" ) ) ) ; } if ( config . enable_realtime_ts ( ) || config . enable_realtime_uid ( ) || config . enable_tsuid_incrementing ( ) ) { checks . add ( client . ensureTableExists ( config . getString ( "tsd.storage.hbase.meta_table" ) ) ) ; } return Deferred . group ( checks ) ;
public class XMLMapHandler { /** * Write the passed map to the passed output stream using the predefined XML * layout . * @ param aMap * The map to be written . May not be < code > null < / code > . * @ param aOS * The output stream to write to . The stream is closed independent of * success or failure . May not be < code > null < / code > . * @ return { @ link ESuccess # SUCCESS } when everything went well , * { @ link ESuccess # FAILURE } otherwise . */ @ Nonnull public static ESuccess writeMap ( @ Nonnull final Map < String , String > aMap , @ Nonnull @ WillClose final OutputStream aOS ) { } }
ValueEnforcer . notNull ( aMap , "Map" ) ; ValueEnforcer . notNull ( aOS , "OutputStream" ) ; try { final IMicroDocument aDoc = createMapDocument ( aMap ) ; return MicroWriter . writeToStream ( aDoc , aOS , XMLWriterSettings . DEFAULT_XML_SETTINGS ) ; } finally { StreamHelper . close ( aOS ) ; }
public class UpgradableLock {
    /**
     * Releases a previously acquired upgrade lock.
     *
     * @param locker the lock owner releasing the upgrade
     * @throws IllegalMonitorStateException if more upgrade locks are released than held
     */
    public final void unlockFromUpgrade(L locker) {
        int upgradeCount = mUpgradeCount - 1;
        if (upgradeCount < 0) {
            throw new IllegalMonitorStateException("Too many upgrade locks released");
        }
        if (upgradeCount == 0 && mWriteCount > 0) {
            // Don't release last upgrade lock and switch write lock to
            // automatic upgrade mode.
            // NOTE(review): mUpgradeCount is intentionally NOT decremented to 0 here —
            // presumably the write lock's release path accounts for it; confirm.
            clearUpgradeLock(mState);
            return;
        }
        mUpgradeCount = upgradeCount;
        if (upgradeCount > 0) {
            // Still holding nested upgrade locks; nothing more to do.
            return;
        }
        // Fully released: clear ownership before touching the shared state word.
        mOwner = null;
        // keep looping on CAS failure if reader mucked with state
        while (!clearUpgradeLock(mState)) {
        }
        // Wake the next waiter in the upgrade queue, if any is parked.
        Node h = mUHead;
        if (h != null && h.mWaitStatus != 0) {
            unparkUpgradeSuccessor(h);
        }
    }
}
public class JobId { /** * Parse a job id string . * < p > This parsing method can be used when input is trusted , i . e . failing to parse it indicates * programming error and not bad input . * @ param id A string representation of the job ID . * @ return The JobId object . * @ see # parse ( String ) */ public static JobId fromString ( final String id ) { } }
try { return parse ( id ) ; } catch ( JobIdParseException e ) { throw new IllegalArgumentException ( e ) ; }
public class ImportImageRequest { /** * Information about the disk containers . * @ param diskContainers * Information about the disk containers . */ public void setDiskContainers ( java . util . Collection < ImageDiskContainer > diskContainers ) { } }
if ( diskContainers == null ) { this . diskContainers = null ; return ; } this . diskContainers = new com . amazonaws . internal . SdkInternalList < ImageDiskContainer > ( diskContainers ) ;
public class MobicentsDNSResolver { /** * / * ( non - Javadoc ) * @ see org . mobicents . javax . servlet . sip . dns . DNSResolver # locateURIs ( javax . servlet . sip . URI ) */ @ Override public List < SipURI > locateURIs ( SipURI uri ) { } }
List < SipURI > uris = new CopyOnWriteArrayList ( ) ; if ( uri instanceof SipURIImpl && createAddressFactory != null ) { SipURIImpl uriImpl = ( SipURIImpl ) uri ; Queue < Hop > hops = dnsServerLocator . locateHops ( uriImpl . getSipURI ( ) ) ; if ( hops != null ) { for ( Hop hop : hops ) { javax . sip . address . SipURI createSipURI ; try { // use null as user so this uri may be used potentially // as Route Header createSipURI = createAddressFactory . createSipURI ( null , hop . getHost ( ) ) ; createSipURI . setPort ( hop . getPort ( ) ) ; createSipURI . setTransportParam ( hop . getTransport ( ) ) ; SipURI sipURI = new SipURIImpl ( createSipURI , ModifiableRule . NotModifiable ) ; uris . add ( sipURI ) ; } catch ( ParseException ex ) { logger . debug ( "Error creating SipURI." , ex ) ; } } } } return uris ;
public class ClientConsumerCommons { /** * Answer a new ClientRequestResponseChannel from the { @ code configuration } . * @ param configuration the Configuration * @ return ClientRequestResponseChannel * @ throws Exception when the channel cannot be created */ static ClientRequestResponseChannel clientChannel ( final Configuration configuration , final ResponseChannelConsumer consumer , final Logger logger ) throws Exception { } }
return new ClientRequestResponseChannel ( configuration . addressOfHost , consumer , configuration . readBufferPoolSize , configuration . readBufferSize , logger ) ;
public class VersionInfoConfig { /** * Gets the representation of the version */ public VersionInfo toInfo ( ) { } }
return new VersionInfo ( parseDate ( date ) , display , full , branch , build , commit , source , sourceType ) ;