signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ExcelFunctions { /** * Returns the first characters in a text string */ public static String left ( EvaluationContext ctx , Object text , Object numChars ) { } }
int _numChars = Conversions . toInteger ( numChars , ctx ) ; if ( _numChars < 0 ) { throw new RuntimeException ( "Number of chars can't be negative" ) ; } return StringUtils . left ( Conversions . toString ( text , ctx ) , _numChars ) ;
public class Movie { /** * Gets the width of this movie ' s display area * @ return int */ public int getWidth ( ) { } }
if ( getAttributes ( ) . isDefined ( Attribute . WIDTH ) ) { final int wide = ( int ) ( getAttributes ( ) . getWidth ( ) + 0.5 ) ; if ( wide > 0 ) { return wide ; } } if ( null != m_video ) { return m_video . getVideoWidth ( ) ; } return 0 ;
public class BitcoinBlockReader { /** * Parse an AUXPowBranch * @ param rawByteBuffer ByteBuffer from which the AuxPOWBranch should be parsed * @ return AuxPOWBranch */ public BitcoinAuxPOWBranch parseAuxPOWBranch ( ByteBuffer rawByteBuffer ) { } }
byte [ ] noOfLinksVarInt = BitcoinUtil . convertVarIntByteBufferToByteArray ( rawByteBuffer ) ; long currentNoOfLinks = BitcoinUtil . getVarInt ( noOfLinksVarInt ) ; ArrayList < byte [ ] > links = new ArrayList ( ( int ) currentNoOfLinks ) ; for ( int i = 0 ; i < currentNoOfLinks ; i ++ ) { byte [ ] currentLink = new byte [ 32 ] ; rawByteBuffer . get ( currentLink , 0 , 32 ) ; links . add ( currentLink ) ; } byte [ ] branchSideBitmask = new byte [ 4 ] ; rawByteBuffer . get ( branchSideBitmask , 0 , 4 ) ; return new BitcoinAuxPOWBranch ( noOfLinksVarInt , links , branchSideBitmask ) ;
public class Math { /** * Returns the sample correlation matrix . * @ param mu the known mean of data . */ public static double [ ] [ ] cor ( double [ ] [ ] data , double [ ] mu ) { } }
double [ ] [ ] sigma = cov ( data , mu ) ; int n = data [ 0 ] . length ; double [ ] sd = new double [ n ] ; for ( int i = 0 ; i < n ; i ++ ) { sd [ i ] = sqrt ( sigma [ i ] [ i ] ) ; } for ( int i = 0 ; i < n ; i ++ ) { for ( int j = 0 ; j <= i ; j ++ ) { sigma [ i ] [ j ] /= sd [ i ] * sd [ j ] ; sigma [ j ] [ i ] = sigma [ i ] [ j ] ; } } return sigma ;
public class sdcard {
    /**
     * Check if file exists on SDCard or not.
     *
     * @param filePath - its the path of the file after SDCardDirectory (no need for
     *                 getExternalStorageDirectory())
     * @return boolean - if file exist on SDCard or not
     */
    public static boolean checkIfFileExists(String filePath) {
        // FIX: return the boolean directly instead of the redundant
        // "exists() ? true : false" ternary; also dropped the stale commented-out code.
        // NOTE(review): despite the javadoc, the path is used as-is and is NOT resolved
        // against the SD-card root here — confirm against callers.
        File file = new File(filePath);
        return file.exists();
    }
}
public class ServerJFapCommunicator { /** * Sets the CommsConnection associated with this Conversation * @ param cc */ @ Override protected void setCommsConnection ( CommsConnection cc ) { } }
if ( tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "setCommsConnection" ) ; // Retrieve Client Conversation State if necessary validateConversationState ( ) ; sConState . setCommsConnection ( cc ) ; if ( tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "setCommsConnection" ) ;
public class AbstractDomainQuery {
    /**
     * Create a match from a DomainObjectMatch specified in the context of another query.
     *
     * @param domainObjectMatch a match specified in the context of another query
     * @return a DomainObjectMatch valid in the context of this query
     */
    @SuppressWarnings("unchecked")
    public <T> DomainObjectMatch<T> createMatchFrom(DomainObjectMatch<T> domainObjectMatch) {
        DomainObjectMatch<T> ret;
        FromPreviousQueryExpression pqe;
        DomainObjectMatch<?> match;
        // A non-null delegate means the incoming match was built against the generic
        // domain model rather than a concrete class.
        DomainObjectMatch<?> delegate = APIAccess.getDelegate(domainObjectMatch);
        if (delegate != null) { // generic model
            // Re-create the delegate in this query's context, then wrap it in a
            // DomainObject-typed match so the caller-facing type stays the same.
            DomainObjectMatch<?> newDelegate = APIAccess.createDomainObjectMatch(delegate,
                    this.queryExecutor.getDomainObjectMatches().size(),
                    this.queryExecutor.getMappingInfo());
            this.queryExecutor.getDomainObjectMatches().add(newDelegate);
            pqe = new FromPreviousQueryExpression(newDelegate, delegate);
            ret = (DomainObjectMatch<T>) APIAccess.createDomainObjectMatch(DomainObject.class, newDelegate);
            match = newDelegate;
        } else {
            // Concrete model: clone the match directly into this query's context.
            ret = APIAccess.createDomainObjectMatch(domainObjectMatch,
                    this.queryExecutor.getDomainObjectMatches().size(),
                    this.queryExecutor.getMappingInfo());
            this.queryExecutor.getDomainObjectMatches().add(ret);
            pqe = new FromPreviousQueryExpression(ret, domainObjectMatch);
            match = ret;
        }
        // Record the cross-query link in the AST and in the query recorder.
        this.queryExecutor.addAstObject(pqe);
        QueryRecorder.recordAssignment(this, "createMatchFrom", match,
                QueryRecorder.reference(domainObjectMatch));
        return ret;
    }
}
public class ClosureSignatureHint { /** * A helper method which will extract the n - th generic type from a class node . * @ param type the class node from which to pick a generic type * @ param gtIndex the index of the generic type to extract * @ return the n - th generic type , or { @ link org . codehaus . groovy . ast . ClassHelper # OBJECT _ TYPE } if it doesn ' t exist . */ public static ClassNode pickGenericType ( ClassNode type , int gtIndex ) { } }
final GenericsType [ ] genericsTypes = type . getGenericsTypes ( ) ; if ( genericsTypes == null || genericsTypes . length < gtIndex ) { return ClassHelper . OBJECT_TYPE ; } return genericsTypes [ gtIndex ] . getType ( ) ;
public class GetInstanceRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( GetInstanceRequest getInstanceRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( getInstanceRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getInstanceRequest . getInstanceName ( ) , INSTANCENAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class ItemHandlerImpl {
    /**
     * Updates the existing node with the properties (and optionally children) as described
     * by {@code jsonNode}.
     *
     * @param node the node to be updated
     * @param jsonNode the JSON-encoded representation of the node or nodes to be updated.
     * @param changes the versionable changes; may not be null
     * @return the Node that was updated; never null
     * @throws javax.jcr.RepositoryException if any other error occurs
     */
    protected Node updateNode(Node node, JsonNode jsonNode, VersionableChanges changes) throws RepositoryException {
        // If the JSON object has a properties holder, then this is likely a subgraph ...
        JsonNode properties = jsonNode;
        if (jsonNode.has(PROPERTIES_HOLDER)) {
            properties = jsonNode.get(PROPERTIES_HOLDER);
        }
        // Make sure the node is checked out before any modification.
        changes.checkout(node);
        // Change the primary type first ...
        if (properties.has(PRIMARY_TYPE_PROPERTY)) {
            String primaryType = properties.get(PRIMARY_TYPE_PROPERTY).asText();
            primaryType = primaryType.trim();
            // Only switch the primary type when a non-empty value differs from the current one.
            if (primaryType.length() != 0 && !node.getPrimaryNodeType().getName().equals(primaryType)) {
                node.setPrimaryType(primaryType);
            }
        }
        Set<String> mixinsToRemove = new HashSet<String>();
        if (properties.has(MIXIN_TYPES_PROPERTY)) {
            // Next add new mixins, but don't remove old ones yet, because that needs to happen
            // only after all the children and properties have been processed
            mixinsToRemove = updateMixins(node, properties.get(MIXIN_TYPES_PROPERTY));
        }
        // Now set all the other properties ...
        for (Iterator<String> iter = properties.fieldNames(); iter.hasNext();) {
            String key = iter.next();
            // Skip the special keys that were (or will be) handled separately.
            if (PRIMARY_TYPE_PROPERTY.equals(key) || MIXIN_TYPES_PROPERTY.equals(key)
                    || CHILD_NODE_HOLDER.equals(key)) {
                continue;
            }
            setPropertyOnNode(node, key, properties.get(key));
        }
        // If the JSON object has a children holder, then we need to update the list of
        // children and child nodes ...
        if (hasChildren(jsonNode)) {
            updateChildren(node, jsonNode, changes);
        }
        // after all the children and properties have been processed, remove mixins because
        // that will trigger validation
        for (String mixinToRemove : mixinsToRemove) {
            node.removeMixin(mixinToRemove);
        }
        return node;
    }
}
public class URLRewritingPolicy {
    /**
     * Finds all matching instances of the regular expression and replaces them with
     * the replacement value.
     *
     * @param headerValue   the header text to rewrite
     * @param fromRegex     the regular expression to match against the header value
     * @param toReplacement the replacement text for each match
     * @return the header value with every regex match replaced
     */
    private String doHeaderReplaceAll(String headerValue, String fromRegex, String toReplacement) {
        // Delegate directly to the regex engine; every occurrence is replaced.
        final String rewritten = headerValue.replaceAll(fromRegex, toReplacement);
        return rewritten;
    }
}
public class MemcachedClient { /** * Get the values for multiple keys from the cache . * @ param keyIter Iterator that produces the keys * @ return a map of the values ( for each value that exists ) * @ throws OperationTimeoutException if the global operation timeout is * exceeded * @ throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ @ Override public Map < String , Object > getBulk ( Iterator < String > keyIter ) { } }
return getBulk ( keyIter , transcoder ) ;
public class PropertiesTableModel {
    /**
     * {@inheritDoc}
     *
     * Resolves the property key for {@code rowIndex} and returns either the key itself
     * (KEY column) or the value stored under that key (VALUE column).
     */
    @Override
    public Object getValueAt(final int rowIndex, final int columnIndex) {
        // NOTE(review): "data" is read both by integer row index and by string key below,
        // so it is presumably an ordered map-like structure (e.g. an OrderedMap whose
        // get(int) returns the n-th key) — confirm against the field's declaration.
        final String key = (String) data.get(rowIndex);
        final PropertiesColumns column = PropertiesColumns.values()[columnIndex];
        switch (column) {
            case KEY:
                return key;
            case VALUE:
                return data.get(key);
        }
        // Unknown column: nothing to report.
        return null;
    }
}
public class MutableBigInteger { /** * A primitive used for division by long . * Specialized version of the method divadd . * dh is a high part of the divisor , dl is a low part */ private int divaddLong ( int dh , int dl , int [ ] result , int offset ) { } }
long carry = 0 ; long sum = ( dl & LONG_MASK ) + ( result [ 1 + offset ] & LONG_MASK ) ; result [ 1 + offset ] = ( int ) sum ; sum = ( dh & LONG_MASK ) + ( result [ offset ] & LONG_MASK ) + carry ; result [ offset ] = ( int ) sum ; carry = sum >>> 32 ; return ( int ) carry ;
public class PutVoiceConnectorOriginationRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( PutVoiceConnectorOriginationRequest putVoiceConnectorOriginationRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( putVoiceConnectorOriginationRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( putVoiceConnectorOriginationRequest . getVoiceConnectorId ( ) , VOICECONNECTORID_BINDING ) ; protocolMarshaller . marshall ( putVoiceConnectorOriginationRequest . getOrigination ( ) , ORIGINATION_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class vrid { /** * Use this API to fetch all the vrid resources that are configured on netscaler . */ public static vrid [ ] get ( nitro_service service ) throws Exception { } }
vrid obj = new vrid ( ) ; vrid [ ] response = ( vrid [ ] ) obj . get_resources ( service ) ; return response ;
public class DisconfItemCoreProcessorImpl {
    /**
     * Updates a single configuration item: fetches its current value from the disconf
     * server (when remote configuration is enabled), injects it into the local store,
     * and registers a watch for subsequent changes.
     *
     * @param keyName           the configuration key to update
     * @param disconfCenterItem the item descriptor; must not be null
     * @throws Exception if the item descriptor is null (cannot be found)
     */
    private void updateOneConfItem(String keyName, DisconfCenterItem disconfCenterItem) throws Exception {
        if (disconfCenterItem == null) {
            throw new Exception("cannot find disconfCenterItem " + keyName);
        }
        String value = null;
        // Only download from the remote server when disconf is enabled; otherwise the
        // default (null) value is injected below.
        if (DisClientConfig.getInstance().ENABLE_DISCONF) {
            // Download the configuration value.
            try {
                String url = disconfCenterItem.getRemoteServerUrl();
                value = fetcherMgr.getValueFromServer(url);
                if (value != null) {
                    LOGGER.debug("value: " + value);
                }
            } catch (Exception e) {
                // Best effort: fall back to the local value when the remote fetch fails.
                LOGGER.error("cannot use remote configuration: " + keyName, e);
                LOGGER.info("using local variable: " + keyName);
            }
            LOGGER.debug("download ok.");
        }
        // Inject the (possibly null) value into the store.
        disconfStoreProcessor.inject2Store(keyName, new DisconfValue(value, null));
        LOGGER.debug("inject ok.");
        // Watch for remote changes to this item.
        if (DisClientConfig.getInstance().ENABLE_DISCONF) {
            if (watchMgr != null) {
                DisConfCommonModel disConfCommonModel = disconfStoreProcessor.getCommonModel(keyName);
                watchMgr.watchPath(this, disConfCommonModel, keyName, DisConfigTypeEnum.ITEM, value);
                LOGGER.debug("watch ok.");
            } else {
                LOGGER.warn("cannot monitor {} because watch mgr is null", keyName);
            }
        }
    }
}
public class TunnelConnection { /** * Closes the underlying ssh session causing all tunnels to be closed . */ public void close ( ) throws IOException { } }
if ( session != null && session . isConnected ( ) ) { session . disconnect ( ) ; } session = null ; // unnecessary , but seems right to undo what we did for ( Tunnel tunnel : tunnels ) { tunnel . setAssignedLocalPort ( 0 ) ; }
public class MoreMeters { /** * Returns a newly - created immutable { @ link Map } which contains all values of { @ link Meter } s in the * specified { @ link MeterRegistry } . The format of the key string is : * < ul > * < li > { @ code < name > # < statistic > { tagName = tagValue , . . . } } < / li > * < li > e . g . { @ code " armeria . server . activeRequests # value { method = greet } " } < / li > * < li > e . g . { @ code " someSubsystem . someValue # sumOfSquares " } ( no tags ) < / li > * < / ul > * Note : It is not recommended to use this method for the purposes other than testing . */ public static Map < String , Double > measureAll ( MeterRegistry registry ) { } }
requireNonNull ( registry , "registry" ) ; final ImmutableMap . Builder < String , Double > builder = ImmutableMap . builder ( ) ; registry . forEachMeter ( meter -> Streams . stream ( meter . measure ( ) ) . forEach ( measurement -> { final String fullName = measurementName ( meter . getId ( ) , measurement ) ; final double value = measurement . getValue ( ) ; builder . put ( fullName , value ) ; } ) ) ; return builder . build ( ) ;
public class AbstractSynchronizationFuture { /** * Start the internal synchronization task . */ protected void init ( ) { } }
// create a synchronisation task which makes sure that the change requested by // the internal future has at one time been synchronized to the remote synchronisationFuture = GlobalCachedExecutorService . submit ( ( ) -> { dataProvider . addDataObserver ( notifyChangeObserver ) ; try { dataProvider . waitForData ( ) ; T result = internalFuture . get ( ) ; waitForSynchronization ( result ) ; } catch ( CouldNotPerformException ex ) { ExceptionPrinter . printHistory ( "Could not sync with internal future!" , ex , logger ) ; } finally { dataProvider . removeDataObserver ( notifyChangeObserver ) ; } return null ; } ) ;
public class ShardingProperties {
    /**
     * Get property value.
     *
     * @param shardingPropertiesConstant sharding properties constant
     * @param <T> class type of return value
     * @return property value, coerced to the constant's declared type and cached
     */
    @SuppressWarnings("unchecked")
    public <T> T getValue(final ShardingPropertiesConstant shardingPropertiesConstant) {
        // Serve from the per-constant cache when the value was already resolved.
        if (cachedProperties.containsKey(shardingPropertiesConstant)) {
            return (T) cachedProperties.get(shardingPropertiesConstant);
        }
        String value = props.getProperty(shardingPropertiesConstant.getKey());
        if (Strings.isNullOrEmpty(value)) {
            // getProperty only sees String values; fall back to a raw Object lookup,
            // and finally to the constant's declared default.
            Object obj = props.get(shardingPropertiesConstant.getKey());
            if (null == obj) {
                value = shardingPropertiesConstant.getDefaultValue();
            } else {
                value = obj.toString();
            }
        }
        // Coerce the string to the constant's declared primitive type.
        Object result;
        if (boolean.class == shardingPropertiesConstant.getType()) {
            result = Boolean.valueOf(value);
        } else if (int.class == shardingPropertiesConstant.getType()) {
            result = Integer.valueOf(value);
        } else if (long.class == shardingPropertiesConstant.getType()) {
            result = Long.valueOf(value);
        } else {
            result = value;
        }
        // Cache the coerced value for subsequent lookups.
        cachedProperties.put(shardingPropertiesConstant, result);
        return (T) result;
    }
}
public class Ifc4FactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertIfcSpatialZoneTypeEnumToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class CronMapper { /** * Creates a CronMapper that maps a cron4j expression to a quartz expression . * @ return a CronMapper for mapping from cron4j to quartz */ public static CronMapper fromCron4jToQuartz ( ) { } }
return new CronMapper ( CronDefinitionBuilder . instanceDefinitionFor ( CronType . CRON4J ) , CronDefinitionBuilder . instanceDefinitionFor ( CronType . QUARTZ ) , setQuestionMark ( ) ) ;
public class GetReservationCoverageRequest { /** * The measurement that you want your reservation coverage reported in . * Valid values are < code > Hour < / code > , < code > Unit < / code > , and < code > Cost < / code > . You can use multiple values in a * request . * @ param metrics * The measurement that you want your reservation coverage reported in . < / p > * Valid values are < code > Hour < / code > , < code > Unit < / code > , and < code > Cost < / code > . You can use multiple values * in a request . */ public void setMetrics ( java . util . Collection < String > metrics ) { } }
if ( metrics == null ) { this . metrics = null ; return ; } this . metrics = new java . util . ArrayList < String > ( metrics ) ;
public class HttpOutboundServiceContextImpl {
    /**
     * Once we know we are reconnected to the target server, reset the TCP
     * buffers and start the async resend.
     */
    protected void nowReconnectedAsync() {
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "Reconnected async for " + this);
        }
        // reset the data buffers first
        if (!resetWriteBuffers()) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                Tr.debug(tc, "Resetting buffers (async) failed");
            }
            // otherwise pass the error along to the channel above us, or close
            // the connection if nobody is above
            IOException ioe = new IOException("Failed reconnect");
            if (null != getAppWriteCallback()) {
                getAppWriteCallback().error(getVC(), ioe);
            } else {
                // nobody above us, just close the connection
                getLink().getDeviceLink().close(getVC(), ioe);
            }
            return;
        }
        // now that we've reconnected, we should reset the "broken" flag. First
        // we set it to the default and then recheck the request msg
        setPersistent(true);
        updatePersistence(getRequestImpl());
        // in case we previously read any partial data, clean out the response
        resetRead();
        // attempt to write the data; a non-null return means the write completed
        // immediately rather than going asynchronous
        VirtualConnection rc = getTSC().getWriteInterface().write(TCPWriteRequestContext.WRITE_ALL_DATA,
                HttpOSCWriteCallback.getRef(), isForceAsync(), getWriteTimeout());
        if (null != rc) {
            // if we've finished writing part of a request, let the channel
            // above know that it can write more, otherwise start the read
            // for the response
            if (!isMessageSent()) {
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "Calling callback.complete of app channel.");
                }
                getAppWriteCallback().complete(getLink().getVirtualConnection());
            } else {
                if (isReadAheadEnabled()) {
                    // after a reconnect, there is no more read-ahead active
                    this.bReadAheadEnabled = false;
                }
                // force an async read for the response now. Avoid trying to
                // re-use any existing read buffer to skip complexity with
                // adjusting for partial reads before the reconnect.
                setupJITRead(getHttpConfig().getIncomingHdrBufferSize());
                getTSC().getReadInterface().read(1, HttpOSCReadCallback.getRef(), true, getReadTimeout());
            }
        }
    }
}
public class RippleEffectDrawer { /** * Performs the entire Ripple Effect drawing frame by frame animating the process * Calls the { @ link ActionButton # postInvalidate ( ) } after each { @ link # currentRadius } update * to draw the current frame animating the ripple effect drawing * @ param canvas canvas , which the Ripple Effect is drawing on */ void draw ( Canvas canvas ) { } }
updateRadius ( ) ; drawRipple ( canvas ) ; ViewInvalidator invalidator = getActionButton ( ) . getInvalidator ( ) ; if ( isDrawingInProgress ( ) ) { invalidator . requireInvalidation ( ) ; LOGGER . trace ( "Drawing Ripple Effect in progress, invalidating the Action Button" ) ; } else if ( isDrawingFinished ( ) && ! isPressed ( ) ) { invalidator . requireDelayedInvalidation ( ) ; invalidator . setInvalidationDelay ( POST_INVALIDATION_DELAY_MS ) ; LOGGER . trace ( "Completed Ripple Effect drawing, posting the last invalidate" ) ; }
public class XDSConsumerAuditor {
    /**
     * Audits an ITI-16 Registry Query event for XDS.a Document Consumer actors.
     *
     * @param eventOutcome The event outcome indicator
     * @param registryEndpointUri The endpoint of the registry in this transaction
     * @param consumerUserName The user name of the consumer issuing the query
     * @param adhocQueryRequestPayload The payload of the adhoc query request element
     * @param patientId The patient ID queried (if query pertained to a patient id)
     */
    public void auditRegistryQueryEvent(RFC3881EventOutcomeCodes eventOutcome, String registryEndpointUri,
            String consumerUserName, String adhocQueryRequestPayload, String patientId) {
        // No-op when auditing is switched off.
        if (!isAuditorEnabled()) {
            return;
        }
        // Delegate to the generic query auditor with ITI-16 (Registry SQL Query) codes.
        // Note: consumerUserName is passed twice, filling both consumer-side user slots.
        auditQueryEvent(true, new IHETransactionEventTypeCodes.RegistrySQLQuery(), eventOutcome,
                getAuditSourceId(), getAuditEnterpriseSiteId(), getSystemUserId(), getSystemAltUserId(),
                getSystemUserName(), getSystemNetworkId(), consumerUserName, consumerUserName, true,
                registryEndpointUri, null, "", adhocQueryRequestPayload, "", patientId, null, null);
    }
}
public class LoadBalancerProbesInner { /** * Gets load balancer probe . * @ param resourceGroupName The name of the resource group . * @ param loadBalancerName The name of the load balancer . * @ param probeName The name of the probe . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the ProbeInner object if successful . */ public ProbeInner get ( String resourceGroupName , String loadBalancerName , String probeName ) { } }
return getWithServiceResponseAsync ( resourceGroupName , loadBalancerName , probeName ) . toBlocking ( ) . single ( ) . body ( ) ;
public class WTree { /** * { @ inheritDoc } */ @ Override public Set < String > getRequestValue ( final Request request ) { } }
if ( isPresent ( request ) ) { return getNewSelections ( request ) ; } else { return getValue ( ) ; }
public class ModuleTypeLoader {
    /**
     * @param name The name
     * @return The result of stripping all trailing occurrences of array brackets ("[]")
     *         from <code>name</code>. Examples:
     * <pre>
     * entity.Coverage       =&gt; entity.Coverage
     * entity.Coverage[]     =&gt; entity.Coverage
     * entity.Coverage[][][] =&gt; entity.Coverage
     * </pre>
     */
    static String stripArrayBrackets(String name) {
        if (name == null) {
            return "";
        }
        // Walk backwards from the end, two characters at a time, over each trailing "[]".
        int end = name.length();
        while (end > 2 && name.charAt(end - 2) == '[' && name.charAt(end - 1) == ']') {
            end -= 2;
        }
        assert end <= name.length();
        // Avoid allocating a new string when nothing was stripped.
        return (end == name.length()) ? name : name.substring(0, end);
    }
}
public class MonitoringConfigurationMarshaller { /** * Marshall the given parameter object . */ public void marshall ( MonitoringConfiguration monitoringConfiguration , ProtocolMarshaller protocolMarshaller ) { } }
if ( monitoringConfiguration == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( monitoringConfiguration . getConfigurationType ( ) , CONFIGURATIONTYPE_BINDING ) ; protocolMarshaller . marshall ( monitoringConfiguration . getMetricsLevel ( ) , METRICSLEVEL_BINDING ) ; protocolMarshaller . marshall ( monitoringConfiguration . getLogLevel ( ) , LOGLEVEL_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class VocabularyHolder {
    /**
     * This method removes low-frequency words based on their frequency change between
     * activations. I.e. if word has appeared only once, and it's retained the same
     * frequency over consequence activations, we can assume it can be removed freely.
     */
    protected synchronized void activateScavenger() {
        int initialSize = vocabulary.size();
        // Snapshot the values so removals below don't disturb iteration.
        List<VocabularyWord> words = new ArrayList<>(vocabulary.values());
        for (VocabularyWord word : words) {
            // scavenging could be applied only to non-special tokens that are below minWordFrequency
            if (word.isSpecial() || word.getCount() >= minWordFrequency || word.getFrequencyShift() == null) {
                word.setFrequencyShift(null);
                continue;
            }
            // save current word counter to byte array at specified position
            word.getFrequencyShift()[word.getRetentionStep()] = (byte) word.getCount();
            /*
                we suppose that we're hunting only low-freq words that already passed few activations
                so, we assume word personal threshold as 20% of minWordFrequency, but not less then 1.

                so, if after few scavenging cycles wordCount is still <= activation - just remove word.
                otherwise nullify word.frequencyShift to avoid further checks
             */
            int activation = Math.max(minWordFrequency / 5, 2);
            logger.debug("Current state> Activation: [" + activation + "], retention info: "
                    + Arrays.toString(word.getFrequencyShift()));
            if (word.getCount() <= activation && word.getFrequencyShift()[this.retentionDelay - 1] > 0) {
                // if final word count at latest retention point is the same as at the beginning - just remove word
                if (word.getFrequencyShift()[this.retentionDelay - 1] <= activation
                        && word.getFrequencyShift()[this.retentionDelay - 1] == word.getFrequencyShift()[0]) {
                    vocabulary.remove(word.getWord());
                }
            }
            // shift retention history to the left
            if (word.getRetentionStep() < retentionDelay - 1) {
                word.incrementRetentionStep();
            } else {
                for (int x = 1; x < retentionDelay; x++) {
                    word.getFrequencyShift()[x - 1] = word.getFrequencyShift()[x];
                }
            }
        }
        logger.info("Scavenger was activated. Vocab size before: [" + initialSize + "], after: ["
                + vocabulary.size() + "]");
    }
}
public class RaftSessionInvoker { /** * Submits an operation attempt . * @ param attempt The attempt to submit . */ private < T extends OperationRequest , U extends OperationResponse > void invoke ( OperationAttempt < T , U > attempt ) { } }
if ( state . getState ( ) == PrimitiveState . CLOSED ) { attempt . fail ( new PrimitiveException . ClosedSession ( "session closed" ) ) ; } else { attempts . put ( attempt . sequence , attempt ) ; attempt . send ( ) ; attempt . future . whenComplete ( ( r , e ) -> attempts . remove ( attempt . sequence ) ) ; }
public class SibRaManagedConnectionFactory {
    /**
     * Creates a managed connection containing a core SPI connection. If the
     * request information already contains a core SPI connection, this will be
     * a clone of that connection. If a new core SPI connection is required then
     * the credentials will be those from the container <code>Subject</code>,
     * if passed, or failing that, those from the request information. The map
     * of connection properties comes from the request information.
     *
     * @param subject
     *            the container provided <code>Subject</code> if container
     *            managed authentication has been selected, otherwise
     *            <code>null</code>
     * @param requestInfo
     *            the request information
     * @throws ResourceAdapterInternalException
     *             if the request parameter was <code>null</code> or not a
     *             <code>SibRaConnectionRequestInfo</code>
     * @throws ResourceException
     *             if an attempt to create or clone a core SPI connection fails
     */
    public ManagedConnection createManagedConnection(final Subject subject,
            final ConnectionRequestInfo requestInfo)
            throws ResourceAdapterInternalException, ResourceException {
        if (TRACE.isEntryEnabled()) {
            SibTr.entry(this, TRACE, "createManagedConnection",
                    new Object[] { SibRaUtils.subjectToString(subject), requestInfo });
        }
        if (requestInfo == null) {
            // This typically indicates that the connection mangaer is trying
            // to obtain an XAResource during transaction recovery. This is not
            // supported as transaction recovery should be performed via the
            // SibRaXaResourceFactory.
            final ResourceAdapterInternalException exception = new ResourceAdapterInternalException(
                    NLS.getString("NULL_REQUEST_INFO_CWSIV0352"));
            if (TRACE.isEventEnabled()) {
                SibTr.exception(this, TRACE, exception);
            }
            throw exception;
        }
        final ManagedConnection managedConnection;
        // Check it is one of our request info objects
        if (requestInfo instanceof SibRaConnectionRequestInfo) {
            final SibRaConnectionRequestInfo sibRaRequestInfo = (SibRaConnectionRequestInfo) requestInfo;
            // Decode the subject and request info using a connection info
            // object
            final SibRaConnectionInfo connectionInfo = new SibRaConnectionInfo(this, subject, sibRaRequestInfo);
            final SICoreConnection coreConnection;
            try {
                // Determine whether the request has previously been allocated
                // a different managed connection ...
                if (sibRaRequestInfo.getCoreConnection() == null) {
                    // ... if not, create a new core connection
                    coreConnection = connectionInfo.createConnection();
                } else {
                    // ... otherwise, clone the connection from the request
                    coreConnection = sibRaRequestInfo.getCoreConnection().cloneConnection();
                }
            } catch (SIException exception) {
                // Record the failure for first-failure data capture, then rethrow
                // as a resource exception with an NLS-formatted message.
                FFDCFilter.processException(exception,
                        "com.ibm.ws.sib.ra.impl.SibRaManagedConnectionFactory.createManagedConnection",
                        FFDC_PROBE_2, this);
                if (TRACE.isEventEnabled()) {
                    SibTr.exception(this, TRACE, exception);
                }
                throw new ResourceException(NLS.getFormattedMessage("CONNECTION_CLONE_CWSIV0353",
                        new Object[] { exception }, null), exception);
            } catch (SIErrorException exception) {
                FFDCFilter.processException(exception,
                        "com.ibm.ws.sib.ra.impl.SibRaManagedConnectionFactory.createManagedConnection",
                        FFDC_PROBE_3, this);
                if (TRACE.isEventEnabled()) {
                    SibTr.exception(this, TRACE, exception);
                }
                throw new ResourceException(NLS.getFormattedMessage("CONNECTION_CLONE_CWSIV0353",
                        new Object[] { exception }, null), exception);
            }
            try {
                // Wrap the core connection in a managed connection.
                managedConnection = new SibRaManagedConnection(this, connectionInfo, coreConnection);
                managedConnection.setLogWriter(_logWriter);
            } catch (SIException ex) {
                FFDCFilter.processException(ex,
                        "com.ibm.ws.sib.ra.impl.SibRaManagedConnectionFactory.createManagedConnection",
                        FFDC_PROBE_5, this);
                if (TRACE.isEventEnabled()) {
                    SibTr.exception(this, TRACE, ex);
                }
                throw new ResourceException(NLS.getFormattedMessage("CREATE_MANAGED_CONNECTION_CWSIV0355",
                        new Object[] { ex }, null), ex);
            }
        } else {
            // Connection manager error if it is passing us someone else's
            // request information
            throw new ResourceAdapterInternalException(NLS.getFormattedMessage(
                    "UNRECOGNISED_REQUEST_INFO_CWSIV0354",
                    new Object[] { requestInfo, SibRaConnectionRequestInfo.class }, null));
        }
        if (TRACE.isEntryEnabled()) {
            SibTr.exit(this, TRACE, "createManagedConnection", managedConnection);
        }
        return managedConnection;
    }
}
public class Crypto { /** * Generates Private Key from Base64 encoded string * @ param key Base64 encoded string which represents the key * @ return The PrivateKey * @ throws MangooEncryptionException if getting private key from string fails */ public PrivateKey getPrivateKeyFromString ( String key ) throws MangooEncryptionException { } }
Objects . requireNonNull ( key , Required . KEY . toString ( ) ) ; try { return KeyFactory . getInstance ( ALGORITHM ) . generatePrivate ( new PKCS8EncodedKeySpec ( decodeBase64 ( key ) ) ) ; } catch ( InvalidKeySpecException | NoSuchAlgorithmException e ) { throw new MangooEncryptionException ( "Failed to get private key from string" , e ) ; }
public class PoolThreadCache {
    /**
     * Try to allocate a tiny buffer out of the cache. Returns {@code true} if successful, {@code false} otherwise.
     *
     * @param area         the arena the buffer would otherwise be allocated from
     * @param buf          the buffer to initialize from the cached memory
     * @param reqCapacity  the capacity the caller actually requested
     * @param normCapacity the normalized capacity used to select the size class
     */
    boolean allocateTiny(PoolArena<?> area, PooledByteBuf<?> buf, int reqCapacity, int normCapacity) {
        // Select the per-thread tiny cache matching this arena/size class and
        // attempt the allocation from it; false means the caller must fall back to the arena.
        return allocate(cacheForTiny(area, normCapacity), buf, reqCapacity);
    }
}
public class CollationKey {
    /**
     * Produces a bound for the sort order of a given collation key and a strength level.
     * This API does not attempt to find a bound for the CollationKey String representation,
     * hence null will be returned in its place.
     *
     * Resulting bounds can be used to produce a range of strings that are between upper and
     * lower bounds. For example, if bounds are produced for a sortkey of string "smith",
     * strings between upper and lower bounds with primary strength would include "Smith",
     * "SMITH", "sMiTh". There are two upper bounds that can be produced: BoundMode.UPPER
     * matches strings of the same length, while BoundMode.UPPER_LONG also matches strings
     * that merely start with the source string (e.g. "Smithsonian").
     *
     * Collation keys produced may be compared using the <TT>compare</TT> API.
     *
     * @param boundType  Mode of bound required: BoundMode.LOWER (lower inclusive bound),
     *                   BoundMode.UPPER (upper bound matching same-length strings) or
     *                   BoundMode.UPPER_LONG (matches strings sharing the starting substring).
     * @param noOfLevels Strength levels required in the resulting bound (for most uses, the
     *                   recommended value is PRIMARY). This strength should be less than the
     *                   maximum strength of this CollationKey.
     * @return the resulting bounded CollationKey with a valid sort order but a null String
     *         representation.
     * @exception IllegalArgumentException thrown when the strength level requested is higher
     *            than or equal to the strength in this CollationKey. The exception message
     *            carries the maximum strength to use; the caller can retry with it.
     * @see CollationKey
     * @see CollationKey.BoundMode
     * @see Collator#PRIMARY
     */
    public CollationKey getBound(int boundType, int noOfLevels) {
        // Scan the raw key bytes until we have skipped the requested number of
        // strength levels OR reach the end of the key.
        int offset = 0;
        int keystrength = Collator.PRIMARY;
        if (noOfLevels > Collator.PRIMARY) {
            while (offset < m_key_.length && m_key_[offset] != 0) {
                if (m_key_[offset++] == Collation.LEVEL_SEPARATOR_BYTE) {
                    keystrength++;
                    noOfLevels--;
                    // Stop on the level separator itself (offset--) once enough levels
                    // were consumed or the key ends.
                    if (noOfLevels == Collator.PRIMARY
                            || offset == m_key_.length || m_key_[offset] == 0) {
                        offset--;
                        break;
                    }
                }
            }
        }
        // Any levels left over means the key does not carry the requested strength.
        if (noOfLevels > 0) {
            throw new IllegalArgumentException(
                    "Source collation key has only " + keystrength
                    + " strength level. Call getBound() again "
                    + " with noOfLevels < " + keystrength);
        }
        // READ ME: this code assumes that the values for BoundMode variables will not
        // change. They are set so that the enum value corresponds to the number of
        // extra bytes each bound type needs.
        byte resultkey[] = new byte[offset + boundType + 1];
        System.arraycopy(m_key_, 0, resultkey, 0, offset);
        switch (boundType) {
            case BoundMode.LOWER:
                // Lower bound just gets terminated. No extra bytes.
                break;
            case BoundMode.UPPER:
                // Upper bound needs one extra byte.
                resultkey[offset++] = 2;
                break;
            case BoundMode.UPPER_LONG:
                // Upper long bound needs two extra bytes.
                resultkey[offset++] = (byte) 0xFF;
                resultkey[offset++] = (byte) 0xFF;
                break;
            default:
                throw new IllegalArgumentException("Illegal boundType argument");
        }
        // Terminate the bound key; no source string is associated with it.
        resultkey[offset] = 0;
        return new CollationKey(null, resultkey, offset);
    }
}
public class InstanceAggregatedAssociationOverview { /** * The number of associations for the instance ( s ) . * @ param instanceAssociationStatusAggregatedCount * The number of associations for the instance ( s ) . * @ return Returns a reference to this object so that method calls can be chained together . */ public InstanceAggregatedAssociationOverview withInstanceAssociationStatusAggregatedCount ( java . util . Map < String , Integer > instanceAssociationStatusAggregatedCount ) { } }
setInstanceAssociationStatusAggregatedCount ( instanceAssociationStatusAggregatedCount ) ; return this ;
public class CrawlToCsv { /** * Get the options . * @ return the specific CrawlToCsv options */ @ Override protected Options getOptions ( ) { } }
final Options options = super . getOptions ( ) ; final Option filenameOption = new Option ( "f" , "the name of the csv output file, default name is " + DEFAULT_FILENAME + " [optional]" ) ; filenameOption . setArgName ( "FILENAME" ) ; filenameOption . setLongOpt ( "filename" ) ; filenameOption . setRequired ( false ) ; filenameOption . setArgs ( 1 ) ; options . addOption ( filenameOption ) ; return options ;
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EClass getIfcTopologicalRepresentationItem ( ) { } }
if ( ifcTopologicalRepresentationItemEClass == null ) { ifcTopologicalRepresentationItemEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 725 ) ; } return ifcTopologicalRepresentationItemEClass ;
public class InstanceMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * Emits every Instance attribute through the protocol marshaller using its
     * generated binding descriptor. The sequence of calls mirrors the wire model;
     * any failure is wrapped in an SdkClientException.
     *
     * @param instance           the model object to serialize; must not be null
     * @param protocolMarshaller the marshaller receiving each field/binding pair
     * @throws SdkClientException if the instance is null or marshalling fails
     */
    public void marshall(Instance instance, ProtocolMarshaller protocolMarshaller) {
        if (instance == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // One marshall call per modeled attribute, each paired with its binding.
            protocolMarshaller.marshall(instance.getAgentVersion(), AGENTVERSION_BINDING);
            protocolMarshaller.marshall(instance.getAmiId(), AMIID_BINDING);
            protocolMarshaller.marshall(instance.getArchitecture(), ARCHITECTURE_BINDING);
            protocolMarshaller.marshall(instance.getArn(), ARN_BINDING);
            protocolMarshaller.marshall(instance.getAutoScalingType(), AUTOSCALINGTYPE_BINDING);
            protocolMarshaller.marshall(instance.getAvailabilityZone(), AVAILABILITYZONE_BINDING);
            protocolMarshaller.marshall(instance.getBlockDeviceMappings(), BLOCKDEVICEMAPPINGS_BINDING);
            protocolMarshaller.marshall(instance.getCreatedAt(), CREATEDAT_BINDING);
            protocolMarshaller.marshall(instance.getEbsOptimized(), EBSOPTIMIZED_BINDING);
            protocolMarshaller.marshall(instance.getEc2InstanceId(), EC2INSTANCEID_BINDING);
            protocolMarshaller.marshall(instance.getEcsClusterArn(), ECSCLUSTERARN_BINDING);
            protocolMarshaller.marshall(instance.getEcsContainerInstanceArn(), ECSCONTAINERINSTANCEARN_BINDING);
            protocolMarshaller.marshall(instance.getElasticIp(), ELASTICIP_BINDING);
            protocolMarshaller.marshall(instance.getHostname(), HOSTNAME_BINDING);
            protocolMarshaller.marshall(instance.getInfrastructureClass(), INFRASTRUCTURECLASS_BINDING);
            protocolMarshaller.marshall(instance.getInstallUpdatesOnBoot(), INSTALLUPDATESONBOOT_BINDING);
            protocolMarshaller.marshall(instance.getInstanceId(), INSTANCEID_BINDING);
            protocolMarshaller.marshall(instance.getInstanceProfileArn(), INSTANCEPROFILEARN_BINDING);
            protocolMarshaller.marshall(instance.getInstanceType(), INSTANCETYPE_BINDING);
            protocolMarshaller.marshall(instance.getLastServiceErrorId(), LASTSERVICEERRORID_BINDING);
            protocolMarshaller.marshall(instance.getLayerIds(), LAYERIDS_BINDING);
            protocolMarshaller.marshall(instance.getOs(), OS_BINDING);
            protocolMarshaller.marshall(instance.getPlatform(), PLATFORM_BINDING);
            protocolMarshaller.marshall(instance.getPrivateDns(), PRIVATEDNS_BINDING);
            protocolMarshaller.marshall(instance.getPrivateIp(), PRIVATEIP_BINDING);
            protocolMarshaller.marshall(instance.getPublicDns(), PUBLICDNS_BINDING);
            protocolMarshaller.marshall(instance.getPublicIp(), PUBLICIP_BINDING);
            protocolMarshaller.marshall(instance.getRegisteredBy(), REGISTEREDBY_BINDING);
            protocolMarshaller.marshall(instance.getReportedAgentVersion(), REPORTEDAGENTVERSION_BINDING);
            protocolMarshaller.marshall(instance.getReportedOs(), REPORTEDOS_BINDING);
            protocolMarshaller.marshall(instance.getRootDeviceType(), ROOTDEVICETYPE_BINDING);
            protocolMarshaller.marshall(instance.getRootDeviceVolumeId(), ROOTDEVICEVOLUMEID_BINDING);
            protocolMarshaller.marshall(instance.getSecurityGroupIds(), SECURITYGROUPIDS_BINDING);
            protocolMarshaller.marshall(instance.getSshHostDsaKeyFingerprint(), SSHHOSTDSAKEYFINGERPRINT_BINDING);
            protocolMarshaller.marshall(instance.getSshHostRsaKeyFingerprint(), SSHHOSTRSAKEYFINGERPRINT_BINDING);
            protocolMarshaller.marshall(instance.getSshKeyName(), SSHKEYNAME_BINDING);
            protocolMarshaller.marshall(instance.getStackId(), STACKID_BINDING);
            protocolMarshaller.marshall(instance.getStatus(), STATUS_BINDING);
            protocolMarshaller.marshall(instance.getSubnetId(), SUBNETID_BINDING);
            protocolMarshaller.marshall(instance.getTenancy(), TENANCY_BINDING);
            protocolMarshaller.marshall(instance.getVirtualizationType(), VIRTUALIZATIONTYPE_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class PoolWaitFuture { /** * { @ inheritDoc } */ @ Override public T get ( ) throws InterruptedException , ExecutionException { } }
try { return get ( 0 , TimeUnit . MILLISECONDS ) ; } catch ( TimeoutException ex ) { throw new ExecutionException ( ex ) ; }
public class RBFInterpolation { /** * Interpolate the function at given point . */ public double interpolate ( double ... x ) { } }
if ( x . length != this . x [ 0 ] . length ) { throw new IllegalArgumentException ( String . format ( "Invalid input vector size: %d, expected: %d" , x . length , this . x [ 0 ] . length ) ) ; } double sum = 0.0 , sumw = 0.0 ; for ( int i = 0 ; i < this . x . length ; i ++ ) { double f = rbf . f ( Math . distance ( x , this . x [ i ] ) ) ; sumw += w [ i ] * f ; sum += f ; } return normalized ? sumw / sum : sumw ;
public class DFSck { /** * Print fsck usage information */ static void printUsage ( ) { } }
System . err . println ( "Usage: DFSck <path> [-list-corruptfileblocks | " + "[-move | -delete | -openforwrite ] " + "[-files [-blocks [-locations | -racks]]]] " + "[-limit <limit>] [-service serviceName]" + "[-(zero/one)]" ) ; System . err . println ( "\t<path>\tstart checking from this path" ) ; System . err . println ( "\t-move\tmove corrupted files to /lost+found" ) ; System . err . println ( "\t-delete\tdelete corrupted files" ) ; System . err . println ( "\t-files\tprint out files being checked" ) ; System . err . println ( "\t-openforwrite\tprint out files opened for write" ) ; System . err . println ( "\t-list-corruptfileblocks\tprint out list of missing " + "blocks and files they belong to" ) ; System . err . println ( "\t-blocks\tprint out block report" ) ; System . err . println ( "\t-locations\tprint out locations for every block" ) ; System . err . println ( "\t-racks\tprint out network topology for data-node locations" ) ; System . err . println ( "\t-limit\tlimit output to <limit> corrupt files. " + "The default value of the limit is 500." ) ; System . err . println ( "\t\tBy default fsck ignores files opened for write, " + "use -openforwrite to report such files. They are usually " + " tagged CORRUPT or HEALTHY depending on their block " + "allocation status" ) ; ToolRunner . printGenericCommandUsage ( System . err ) ;
public class UserZoneEventHandler { /** * ( non - Javadoc ) * @ see com . tvd12 . ezyfox . sfs2x . serverhandler . ServerUserEventHandler # init ( ) */ @ Override protected void init ( ) { } }
handlers = new ZoneEventHandlerCenter ( ) . addHandlers ( handlerClasses , context . getUserClass ( ) , context . getGameUserClasses ( ) ) ;
public class CdnClient { /** * Delete an existing domain acceleration * @ param request The request containing user - defined domain information . * @ return Result of the deleteDomain operation returned by the service . */ public DeleteDomainResponse deleteDomain ( DeleteDomainRequest request ) { } }
checkNotNull ( request , "The parameter request should NOT be null." ) ; InternalRequest internalRequest = createRequest ( request , HttpMethodName . DELETE , DOMAIN , request . getDomain ( ) ) ; return invokeHttpClient ( internalRequest , DeleteDomainResponse . class ) ;
public class WMultiSelectPairRenderer { /** * Renders the options in list order . * @ param multiSelectPair the WMultiSelectPair to paint . * @ param options the options to render * @ param startIndex the starting option index * @ param xml the XmlStringBuilder to paint to . * @ param renderSelectionsOnly true to only render selected options , false to render all options . * @ return the number of options which were rendered . */ private int renderUnorderedOptions ( final WMultiSelectPair multiSelectPair , final List < ? > options , final int startIndex , final XmlStringBuilder xml , final boolean renderSelectionsOnly ) { } }
List < ? > selections = multiSelectPair . getSelected ( ) ; int optionIndex = startIndex ; for ( Object option : options ) { if ( option instanceof OptionGroup ) { xml . appendTagOpen ( "ui:optgroup" ) ; xml . appendAttribute ( "label" , ( ( OptionGroup ) option ) . getDesc ( ) ) ; xml . appendClose ( ) ; // Recurse to render options inside option groups . List < ? > nestedOptions = ( ( OptionGroup ) option ) . getOptions ( ) ; optionIndex += renderUnorderedOptions ( multiSelectPair , nestedOptions , optionIndex , xml , renderSelectionsOnly ) ; xml . appendEndTag ( "ui:optgroup" ) ; } else { renderOption ( multiSelectPair , option , optionIndex ++ , xml , selections , renderSelectionsOnly ) ; } } return optionIndex - startIndex ;
public class BooleanUtils {
    /**
     * <p>Converts an Integer to a Boolean using the convention that {@code zero}
     * is {@code false}.</p>
     * <p>{@code null} will be converted to {@code null}.</p>
     * <p>NOTE: This returns null and will throw a NullPointerException if unboxed to a boolean.</p>
     * <pre>
     *   BooleanUtils.toBooleanObject(Integer.valueOf(0)) = Boolean.FALSE
     *   BooleanUtils.toBooleanObject(Integer.valueOf(1)) = Boolean.TRUE
     *   BooleanUtils.toBooleanObject(null)               = null
     * </pre>
     *
     * @param value the Integer to convert
     * @return Boolean.TRUE if non-zero, Boolean.FALSE if zero, {@code null} if {@code null} input
     */
    public static Boolean toBooleanObject(final Integer value) {
        if (value == null) {
            return null;
        }
        // Canonical boxed constants, never new Boolean instances.
        return value.intValue() != 0 ? Boolean.TRUE : Boolean.FALSE;
    }
}
public class DbService { /** * Returns all model classes registered on this datasource * @ return model classes talk to this datasource */ public Set < Class > entityClasses ( ) { } }
EntityMetaInfoRepo repo = app ( ) . entityMetaInfoRepo ( ) . forDb ( id ) ; return null == repo ? C . < Class > set ( ) : repo . entityClasses ( ) ;
public class HttpClient { /** * Expects the Absolute URL for the Request * @ param uri * @ return * @ throws HibiscusException */ public HttpClient setURI ( final String uri ) throws HibiscusException { } }
try { setURI ( new URI ( uri ) ) ; } catch ( URISyntaxException e ) { throw new HibiscusException ( e ) ; } return this ;
public class af_config_info { /** * < pre > * Use this operation to delete a property . * < / pre > */ public static af_config_info delete ( nitro_service client , af_config_info resource ) throws Exception { } }
resource . validate ( "delete" ) ; return ( ( af_config_info [ ] ) resource . delete_resource ( client ) ) [ 0 ] ;
public class PrototypeMeasurementFilter { /** * Find the IncludeExcludePatterns for filtering a given metric . * The result is the union of all the individual pattern entries * where their specified metric name patterns matches the actual metric name . */ public IncludeExcludePatterns metricToPatterns ( String metric ) { } }
IncludeExcludePatterns foundPatterns = metricNameToPatterns . get ( metric ) ; if ( foundPatterns != null ) { return foundPatterns ; } // Since the keys in the prototype can be regular expressions , // need to look at all of them and can potentially match multiple , // each having a different set of rules . foundPatterns = new IncludeExcludePatterns ( ) ; for ( MeterFilterPattern meterPattern : includePatterns ) { if ( meterPattern . namePattern . matcher ( metric ) . matches ( ) ) { foundPatterns . include . addAll ( meterPattern . values ) ; } } for ( MeterFilterPattern meterPattern : excludePatterns ) { if ( meterPattern . namePattern . matcher ( metric ) . matches ( ) ) { foundPatterns . exclude . addAll ( meterPattern . values ) ; } } metricNameToPatterns . put ( metric , foundPatterns ) ; return foundPatterns ;
public class ServiceBackedDataModel {
    /**
     * Calls the service to obtain data. Implementations should make a service call using the
     * callback provided. If needCount is set, the implementation should also request the total
     * number of items from the server (this is normally done in the same call that requests a
     * page but may be optional for performance reasons). By default, this calls
     * {@link #callFetchService(int, int, boolean, AsyncCallback)} with the {@code request}'s members.
     *
     * NOTE: subclasses must override one of the two callFetchService methods.
     *
     * @param request  the paged request carrying offset, count and needCount
     * @param callback receives the fetched page (or failure)
     */
    protected void callFetchService(PagedRequest request, AsyncCallback<R> callback) {
        // Unpack the request and delegate to the (offset, count, needCount) overload.
        callFetchService(request.offset, request.count, request.needCount, callback);
    }
}
public class AWSIoTAnalyticsClient { /** * Sets or updates the AWS IoT Analytics logging options . * Note that if you update the value of any < code > loggingOptions < / code > field , it takes up to one minute for the * change to take effect . Also , if you change the policy attached to the role you specified in the roleArn field * ( for example , to correct an invalid policy ) it takes up to 5 minutes for that change to take effect . * @ param putLoggingOptionsRequest * @ return Result of the PutLoggingOptions operation returned by the service . * @ throws InvalidRequestException * The request was not valid . * @ throws InternalFailureException * There was an internal failure . * @ throws ServiceUnavailableException * The service is temporarily unavailable . * @ throws ThrottlingException * The request was denied due to request throttling . * @ sample AWSIoTAnalytics . PutLoggingOptions * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / iotanalytics - 2017-11-27 / PutLoggingOptions " target = " _ top " > AWS * API Documentation < / a > */ @ Override public PutLoggingOptionsResult putLoggingOptions ( PutLoggingOptionsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executePutLoggingOptions ( request ) ;
public class AbstractDocumentSource { /** * Resolves all { @ link SwaggerExtension } instances configured to be added to the Swagger configuration . * @ return List of { @ link SwaggerExtension } which should be added to the swagger configuration * @ throws GenerateException if the swagger extensions could not be created / resolved */ protected List < SwaggerExtension > resolveSwaggerExtensions ( ) throws GenerateException { } }
List < String > clazzes = apiSource . getSwaggerExtensions ( ) ; List < SwaggerExtension > resolved = new ArrayList < SwaggerExtension > ( ) ; if ( clazzes != null ) { for ( String clazz : clazzes ) { SwaggerExtension extension = null ; // Try to get a parameterized constructor for extensions that are log - enabled . try { try { Constructor < ? > constructor = Class . forName ( clazz ) . getConstructor ( Log . class ) ; extension = ( SwaggerExtension ) constructor . newInstance ( LOG ) ; } catch ( NoSuchMethodException nsme ) { extension = ( SwaggerExtension ) Class . forName ( clazz ) . newInstance ( ) ; } } catch ( Exception e ) { throw new GenerateException ( "Cannot load Swagger extension: " + clazz , e ) ; } resolved . add ( extension ) ; } } return resolved ;
public class AMIImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public void eUnset ( int featureID ) { } }
switch ( featureID ) { case AfplibPackage . AMI__DSPLCMNT : setDSPLCMNT ( DSPLCMNT_EDEFAULT ) ; return ; } super . eUnset ( featureID ) ;
public class Vector4f { /** * Set this { @ link Vector4f } to the values of the given < code > v < / code > . * Note that due to the given vector < code > v < / code > storing the components in double - precision , * there is the possibility to lose precision . * @ param v * the vector whose values will be copied into this * @ return this */ public Vector4f set ( Vector4dc v ) { } }
return set ( ( float ) v . x ( ) , ( float ) v . y ( ) , ( float ) v . z ( ) , ( float ) v . w ( ) ) ;
public class MtasTokenCollection { /** * Check . * @ param autoRepair the auto repair * @ param makeUnique the make unique * @ throws MtasParserException the mtas parser exception */ public void check ( Boolean autoRepair , Boolean makeUnique ) throws MtasParserException { } }
if ( autoRepair ) { autoRepair ( ) ; } if ( makeUnique ) { makeUnique ( ) ; } checkTokenCollectionIndex ( ) ; for ( Integer i : tokenCollectionIndex ) { // minimal properties if ( tokenCollection . get ( i ) . getId ( ) == null || tokenCollection . get ( i ) . getPositionStart ( ) == null || tokenCollection . get ( i ) . getPositionEnd ( ) == null || tokenCollection . get ( i ) . getValue ( ) == null ) { clear ( ) ; break ; } }
public class WidgetPopupMenuModel {
    /**
     * Resets a widget's settings. For a dashboard link that has stored
     * UserWidgetParameters, those parameters are deleted and the widget panel
     * refreshed. Otherwise, drill-down chart widgets and plain chart widgets are
     * rebuilt from their runtime/default runtime model and persisted back to the
     * dashboard.
     *
     * @param feedbackPanel panel used to surface errors when no dashboard parent exists
     * @param component     the component the popup was opened from; used to locate parents
     * @param widget        the widget whose settings are being reset
     * @param target        the ajax target driving the partial refresh
     */
    private void resetSettings(FeedbackPanel feedbackPanel, Component component, Widget widget, AjaxRequestTarget target) {
        // On reset settings we must delete UserWidgetParameters if any.
        try {
            UserWidgetParameters wp = dashboardService.getUserWidgetParameters(widget.getId());
            if (wp != null) {
                storageService.removeEntityById(wp.getId());
                dashboardService.resetCache(widget.getId());
                final WidgetPanel widgetPanel = component.findParent(WidgetPanel.class);
                ModalWindow.closeCurrent(target);
                // target.add(widgetPanel);
                // ChartRendererPanel uses container.replace("chart");
                // we should add widgetView again instead of a simple target.add(widgetPanel),
                // otherwise we will see two refreshes.
                widgetPanel.refresh(target);
                return;
            }
        } catch (NotFoundException ex) {
            // should not happen
            Log.error(ex.getMessage(), ex);
        }
        if ((widget instanceof DrillDownWidget) && (((DrillDownWidget) widget).getEntity() instanceof Chart)) {
            // Drill-down chart widget: rebuild from the runtime model, then persist.
            final WidgetPanel widgetPanel = component.findParent(WidgetPanel.class);
            ChartUtil.updateWidget(widget, ChartUtil.getRuntimeModel(storageService.getSettings(),
                    (EntityWidget) widget, reportService, dataSourceService, false));
            try {
                if (component.findParent(DashboardPanel.class) == null) {
                    // No dashboard parent: report the error instead of refreshing.
                    errorRefresh();
                    target.add(feedbackPanel);
                    return;
                } else {
                    ModalWindow.closeCurrent(target);
                }
                dashboardService.modifyWidget(getDashboardId(widget.getId()), widget);
            } catch (NotFoundException e) {
                // never happening
            }
            widgetPanel.refresh(target);
        } else if (widget instanceof ChartWidget) {
            // Plain chart widget: rebuild from the default runtime model, then persist.
            final WidgetPanel widgetPanel = component.findParent(WidgetPanel.class);
            if (component.findParent(DashboardPanel.class) == null) {
                errorRefresh();
                target.add(feedbackPanel);
                return;
            } else {
                ModalWindow.closeCurrent(target);
            }
            ChartUtil.updateWidget(widget, ChartUtil.getDefaultRuntimeModel(storageService.getSettings(),
                    (ChartWidget) widget, reportService, dataSourceService));
            try {
                dashboardService.modifyWidget(getDashboardId(widget.getId()), widget);
            } catch (NotFoundException e) {
                // never happening
            }
            // target.add(widgetPanel);
            // ChartRendererPanel uses container.replace("chart");
            // we should add widgetView again instead of a simple target.add(widgetPanel),
            // otherwise we will see two refreshes.
            widgetPanel.refresh(target);
        }
    }
}
public class KeyCount { /** * Returns true if field corresponding to fieldID is set ( has been assigned a value ) and false otherwise */ public boolean isSet ( _Fields field ) { } }
if ( field == null ) { throw new IllegalArgumentException ( ) ; } switch ( field ) { case KEY : return isSetKey ( ) ; case COUNT : return isSetCount ( ) ; } throw new IllegalStateException ( ) ;
public class PlainDeserializer { /** * < p > Deserializes the response content to a type which is assignable to { @ link CharSequence } . < / p > * @ see AbstractDeserializer # run ( InvocationContext , HttpResponse ) */ @ Override public CharSequence deserialize ( InvocationContext context , HttpResponse response ) { } }
try { HttpEntity entity = response . getEntity ( ) ; return entity == null ? "" : EntityUtils . toString ( entity ) ; } catch ( Exception e ) { throw new DeserializerException ( new StringBuilder ( "Plain deserialization failed for request <" ) . append ( context . getRequest ( ) . getName ( ) ) . append ( "> on endpoint <" ) . append ( context . getEndpoint ( ) . getName ( ) ) . append ( ">" ) . toString ( ) , e ) ; }
public class MmffAtomTypeMatcher {
    /**
     * Obtain the MMFF symbolic types for the atoms of the provided structure.
     *
     * @param container structure representation
     * @param graph     adj list data structure
     * @param bonds     bond lookup map
     * @param mmffArom  flags which bonds are aromatic by MMFF model
     * @return MMFF symbolic types for each atom index
     */
    String[] symbolicTypes(final IAtomContainer container, final int[][] graph, final EdgeToBondMap bonds, final Set<IBond> mmffArom) {
        // Array of symbolic types; MMFF refers to these as 'SYMB' and the numeric
        // value as 'TYPE'.
        final String[] symbs = new String[container.getAtomCount()];
        checkPreconditions(container);
        assignPreliminaryTypes(container, symbs);
        // Aromatic types, set by upgrading preliminary types in specified positions
        // and conditions. This requires a fair bit of code and is delegated to a
        // separate class.
        aromaticTypes.assign(container, symbs, bonds, graph, mmffArom);
        // Special case: 'NCN+' matches entries that the validation suite say should
        // actually be 'NC=N'. We can achieve 100% compliance by checking if NCN+ is
        // still next to CNN+ or CIM+ after aromatic types are assigned.
        fixNCNTypes(symbs, graph);
        assignHydrogenTypes(container, symbs, graph);
        return symbs;
    }
}
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EClass getIfcMaterialList ( ) { } }
if ( ifcMaterialListEClass == null ) { ifcMaterialListEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 366 ) ; } return ifcMaterialListEClass ;
public class GenericStatsDecorator { /** * Get array of Strings from metrics - one from each with given Extractor . * @ param metrics metrics to extract values from . * @ param e extractor that extracts values * @ return array of corresponding strings . */ private static String [ ] extract ( List < ? extends IGenericMetrics > metrics , Extractor e ) { } }
List < String > strings = new ArrayList < > ( metrics . size ( ) ) ; for ( IGenericMetrics metric : metrics ) { strings . add ( e . extract ( metric ) ) ; } return strings . toArray ( new String [ strings . size ( ) ] ) ;
public class UrlUtil { /** * Tries to open an { @ link InputStream } to the given { @ link URL } . * @ param url * URL which should be opened * @ return opened stream * @ throws net . sf . qualitycheck . exception . IllegalNullArgumentException * if the given argument is { @ code null } * @ throws CanNotOpenStreamException * if no stream to the given { @ code URL } can be established */ public static InputStream open ( @ Nonnull final URL url ) { } }
Check . notNull ( url , "url" ) ; final InputStream ret ; try { ret = url . openStream ( ) ; } catch ( final IOException e ) { throw new CanNotOpenStreamException ( url . toString ( ) , e ) ; } return ret ;
public class PipedInputStream { /** * { @ inheritDoc } * < p > Unlike most streams , { @ code PipedInputStream } returns 0 rather than throwing * { @ code IOException } if the stream has been closed . Unconnected and broken pipes also * return 0. * @ throws IOException if an I / O error occurs */ @ Override public synchronized int available ( ) throws IOException { } }
if ( buffer == null || in == - 1 ) { return 0 ; } return in <= out ? buffer . length - out + in : in - out ;
public class VoltTable { /** * Make a printable , short string for a varbinary . * String includes a CRC and the contents of the varbinary in hex . * Contents longer than 13 chars are truncated and elipsized . * Yes , " elipsized " is totally a word . * Example : " bin [ crc : 1298399436 , value : 0xABCDEF12345 . . . ] " * @ param bin The bytes to print out . * @ return A string representation that is printable and short . */ public static String varbinaryToPrintableString ( byte [ ] bin ) { } }
PureJavaCrc32 crc = new PureJavaCrc32 ( ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "bin[crc:" ) ; crc . update ( bin ) ; sb . append ( crc . getValue ( ) ) ; sb . append ( ",value:0x" ) ; String hex = Encoder . hexEncode ( bin ) ; if ( hex . length ( ) > 13 ) { sb . append ( hex . substring ( 0 , 10 ) ) ; sb . append ( "..." ) ; } else { sb . append ( hex ) ; } sb . append ( "]" ) ; return sb . toString ( ) ;
public class KriptonBinderResponseBodyConverter { /** * / * ( non - Javadoc ) * @ see retrofit2 . Converter # convert ( java . lang . Object ) */ @ Override public T convert ( ResponseBody value ) throws IOException { } }
try { return ( T ) binderContext . parse ( value . byteStream ( ) , clazz ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; return null ; } finally { value . close ( ) ; }
public class JSONCompareUtil {
    /**
     * Creates a cardinality map from {@code coll}: each distinct element is mapped
     * to the number of times it occurs in the collection.
     *
     * @param coll the collection of items to convert
     * @param <T>  the type of elements in the input collection
     * @return the cardinality map (empty for an empty collection)
     */
    public static <T> Map<T, Integer> getCardinalityMap(final Collection<T> coll) {
        // Use a properly parameterized map (the previous raw Map lost type safety)
        // and Integer.valueOf instead of the deprecated new Integer(...) constructor.
        final Map<T, Integer> count = new HashMap<T, Integer>();
        for (final T item : coll) {
            final Integer current = count.get(item);
            count.put(item, current == null ? Integer.valueOf(1) : Integer.valueOf(current.intValue() + 1));
        }
        return count;
    }
}
public class OrmLiteDefaultContentProvider { /** * @ see android . content . ContentProvider # getType ( android . net . Uri ) */ @ Override public String getType ( Uri uri ) { } }
if ( ! controller . hasPreinitialized ( ) ) { throw new IllegalStateException ( "Controller has not been initialized." ) ; } int patternCode = controller . getUriMatcher ( ) . match ( uri ) ; MatcherPattern pattern = controller . findMatcherPattern ( patternCode ) ; if ( pattern == null ) { throw new IllegalArgumentException ( "unknown uri : " + uri . toString ( ) ) ; } return pattern . getMimeTypeVndString ( ) ;
public class DefaultApplicationContext { /** * Start the environment . */ protected void startEnvironment ( ) { } }
Environment defaultEnvironment = getEnvironment ( ) ; defaultEnvironment . start ( ) ; registerSingleton ( Environment . class , defaultEnvironment ) ; registerSingleton ( new AnnotationProcessorListener ( ) ) ;
public class CertificateUtils { /** * Find a PrivateKeyInfo in the PEM object details . Returns null if the PEM object type is unknown . */ @ CheckForNull private static PrivateKeyInfo getPrivateKeyInfoOrNull ( Object pemObject ) throws NoSuchAlgorithmException { } }
PrivateKeyInfo privateKeyInfo = null ; if ( pemObject instanceof PEMKeyPair ) { PEMKeyPair pemKeyPair = ( PEMKeyPair ) pemObject ; privateKeyInfo = pemKeyPair . getPrivateKeyInfo ( ) ; } else if ( pemObject instanceof PrivateKeyInfo ) { privateKeyInfo = ( PrivateKeyInfo ) pemObject ; } else if ( pemObject instanceof ASN1ObjectIdentifier ) { // no idea how it can be used final ASN1ObjectIdentifier asn1ObjectIdentifier = ( ASN1ObjectIdentifier ) pemObject ; LOG . trace ( "Ignoring asn1ObjectIdentifier {}" , asn1ObjectIdentifier ) ; } else { LOG . warn ( "Unknown object '{}' from PEMParser" , pemObject ) ; } return privateKeyInfo ;
public class XMLStreamReader {

    /**
     * Utility method that initialize a XMLStreamReader, initialize it, and
     * return an AsyncWork which is unblocked when characters are available to be read.
     *
     * @param io buffered input to read the XML from
     * @param charactersBufferSize size of each characters buffer
     * @param maxBuffers maximum number of buffers to keep
     * @return an AsyncWork unblocked with the reader once the first event has
     *         been read, or unblocked with the error if startup/first-read fails
     */
    public static AsyncWork<XMLStreamReader, Exception> start(IO.Readable.Buffered io, int charactersBufferSize, int maxBuffers) {
        AsyncWork<XMLStreamReader, Exception> result = new AsyncWork<>();
        // First task: create the reader and start the character stream; it is
        // scheduled only once the IO is ready to be read.
        new Task.Cpu.FromRunnable("Start reading XML " + io.getSourceDescription(), io.getPriority(), () -> {
            XMLStreamReader reader = new XMLStreamReader(io, charactersBufferSize, maxBuffers);
            try {
                Starter start = new Starter(io, reader.defaultEncoding, reader.charactersBuffersSize, reader.maxBuffers);
                reader.stream = start.start();
                // Second task: once the character stream can be read, advance to the
                // first XML event, then unblock the result with the ready reader.
                reader.stream.canStartReading().listenAsync(new Task.Cpu.FromRunnable("Start reading XML " + io.getSourceDescription(), io.getPriority(), () -> {
                    try {
                        reader.next();
                        result.unblockSuccess(reader);
                    } catch (Exception e) {
                        result.unblockError(e);
                    }
                }), true);
            } catch (Exception e) {
                // Stream startup failed: propagate the error to the caller.
                result.unblockError(e);
            }
        }).startOn(io.canStartReading(), true);
        return result;
    }
}
public class OrderManager { /** * Place an order and retry if Exception occur * @ param order - new BitfinexOrder to place * @ throws BitfinexClientException * @ throws InterruptedException */ public void placeOrderAndWaitUntilActive ( final BitfinexNewOrder order ) throws BitfinexClientException , InterruptedException { } }
final BitfinexApiKeyPermissions capabilities = client . getApiKeyPermissions ( ) ; if ( ! capabilities . isOrderWritePermission ( ) ) { throw new BitfinexClientException ( "Unable to wait for order " + order + " connection has not enough capabilities: " + capabilities ) ; } order . setApiKey ( client . getConfiguration ( ) . getApiKey ( ) ) ; final Callable < Boolean > orderCallable = ( ) -> placeOrderOrderOnAPI ( order ) ; // Bitfinex does not implement a happens - before relationship . Sometimes // canceling a stop - loss order and placing a new stop - loss order results // in an ' ERROR , reason is Invalid order : not enough exchange balance ' // error for some seconds . The retryer tries to place the order up to // three times final Retryer < Boolean > retryer = new Retryer < > ( ORDER_RETRIES , RETRY_DELAY_IN_MS , TimeUnit . MILLISECONDS , orderCallable ) ; retryer . execute ( ) ; if ( retryer . getNeededExecutions ( ) > 1 ) { logger . info ( "Nedded {} executions for placing the order" , retryer . getNeededExecutions ( ) ) ; } if ( ! retryer . isSuccessfully ( ) ) { final Exception lastException = retryer . getLastException ( ) ; if ( lastException == null ) { throw new BitfinexClientException ( "Unable to execute order" ) ; } else { throw new BitfinexClientException ( lastException ) ; } }
public class DescribeContinuousExportsRequest { /** * The unique IDs assigned to the exports . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setExportIds ( java . util . Collection ) } or { @ link # withExportIds ( java . util . Collection ) } if you want to * override the existing values . * @ param exportIds * The unique IDs assigned to the exports . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeContinuousExportsRequest withExportIds ( String ... exportIds ) { } }
if ( this . exportIds == null ) { setExportIds ( new java . util . ArrayList < String > ( exportIds . length ) ) ; } for ( String ele : exportIds ) { this . exportIds . add ( ele ) ; } return this ;
public class NoCompTreeMap { /** * Returns an unmodifiable set view of the map entries . */ public final Set < Map . Entry < K , V > > entrySet ( ) { } }
return new AbstractSet < Map . Entry < K , V > > ( ) { public Iterator < Map . Entry < K , V > > iterator ( ) { return entryIterator ( ) ; } public int size ( ) { return size ; } } ;
public class DefinitionsDocument {

    /**
     * Builds definition title.
     *
     * @param markupDocBuilder the markupDocBuilder do use for output
     * @param title definition title
     * @param anchor optional anchor (null => auto-generate from title)
     */
    private void buildDefinitionTitle(MarkupDocBuilder markupDocBuilder, String title, String anchor) {
        // Definitions are always rendered as level-2 section titles.
        markupDocBuilder.sectionTitleWithAnchorLevel2(title, anchor);
    }
}
public class MultipleAlignmentScorer {

    /**
     * Calculates and puts the RMSD and the average TM-Score of the
     * MultipleAlignment.
     *
     * @param alignment the alignment to score; scores are stored on it
     * @throws StructureException
     * @see #getAvgTMScore(MultipleAlignment)
     * @see #getRMSD(MultipleAlignment)
     */
    public static void calculateScores(MultipleAlignment alignment) throws StructureException {
        // Superimpose all structures and store the RMSD over the transformed atoms.
        List<Atom[]> transformed = MultipleAlignmentTools.transformAtoms(alignment);
        alignment.putScore(RMSD, getRMSD(transformed));

        // Collect each structure's full length, used to normalize the TM-score.
        List<Integer> structureLengths = new ArrayList<Integer>(alignment.size());
        for (Atom[] structure : alignment.getAtomArrays()) {
            structureLengths.add(structure.length);
        }
        alignment.putScore(AVGTM_SCORE, getAvgTMScore(transformed, structureLengths));
    }
}
public class PollingBasedFileMonitor { /** * Polls the file system for a file change ( synchronized ) * @ param currentTimeMillis * @ return true if the file has changed */ private synchronized boolean poll ( long currentTimeMillis ) { } }
long timeDiffMillis = currentTimeMillis - _lastPolled ; if ( timeDiffMillis > POLL_THRESHOLD_MILLIS ) { final long lastModifiedBefore = _lastModified ; _lastModified = _file . lastModified ( ) ; _lastPolled = System . currentTimeMillis ( ) ; return _lastModified != lastModifiedBefore ; } return false ;
public class RespokeGroup {

    /**
     * Notify the group that a connection has joined. This is used internally to the SDK and should not be called directly by your client application.
     *
     * @param connection The connection that has joined the group
     */
    public void connectionDidJoin(final RespokeConnection connection) {
        // Track the new member immediately (on the calling thread).
        members.add(connection);
        // Deliver the listener callback on the Android main (UI) thread.
        new Handler(Looper.getMainLooper()).post(new Runnable() {
            @Override
            public void run() {
                // The listener is held weakly; it may have been collected by the
                // time this runs, in which case the event is silently dropped.
                Listener listener = listenerReference.get();
                if (null != listener) {
                    listener.onJoin(connection, RespokeGroup.this);
                }
            }
        });
    }
}
public class CPDefinitionSpecificationOptionValueUtil {

    /**
     * Returns a range of all the cp definition specification option values where CPDefinitionId = &#63; and CPSpecificationOptionId = &#63;.
     *
     * <p>
     * Useful when paginating results. Returns a maximum of <code>end - start</code> instances. <code>start</code> and <code>end</code> are not primary keys, they are indexes in the result set. Thus, <code>0</code> refers to the first result in the set. Setting both <code>start</code> and <code>end</code> to {@link QueryUtil#ALL_POS} will return the full result set. If <code>orderByComparator</code> is specified, then the query will include the given ORDER BY logic. If <code>orderByComparator</code> is absent and pagination is required (<code>start</code> and <code>end</code> are not {@link QueryUtil#ALL_POS}), then the query will include the default ORDER BY logic from {@link CPDefinitionSpecificationOptionValueModelImpl}. If both <code>orderByComparator</code> and pagination are absent, for performance reasons, the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order.
     * </p>
     *
     * @param CPDefinitionId the cp definition ID
     * @param CPSpecificationOptionId the cp specification option ID
     * @param start the lower bound of the range of cp definition specification option values
     * @param end the upper bound of the range of cp definition specification option values (not inclusive)
     * @return the range of matching cp definition specification option values
     */
    public static List<CPDefinitionSpecificationOptionValue> findByC_CSO(long CPDefinitionId, long CPSpecificationOptionId, int start, int end) {
        // Thin static facade: delegate straight to the persistence implementation.
        return getPersistence().findByC_CSO(CPDefinitionId, CPSpecificationOptionId, start, end);
    }
}
public class InstanceFleetTimelineMarshaller {

    /**
     * Marshall the given parameter object.
     *
     * @param instanceFleetTimeline the object to marshall; must not be null
     * @param protocolMarshaller the protocol marshaller receiving each field
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(InstanceFleetTimeline instanceFleetTimeline, ProtocolMarshaller protocolMarshaller) {
        if (instanceFleetTimeline == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each field with its static marshalling binding.
            protocolMarshaller.marshall(instanceFleetTimeline.getCreationDateTime(), CREATIONDATETIME_BINDING);
            protocolMarshaller.marshall(instanceFleetTimeline.getReadyDateTime(), READYDATETIME_BINDING);
            protocolMarshaller.marshall(instanceFleetTimeline.getEndDateTime(), ENDDATETIME_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class AuthzFacadeImpl { /** * / * ( non - Javadoc ) * @ see com . att . authz . facade . AuthzFacade # cacheClear ( com . att . authz . env . AuthzTrans , java . lang . String , java . lang . Integer ) */ @ Override public Result < Void > cacheClear ( AuthzTrans trans , String cname , String segments ) { } }
TimeTaken tt = trans . start ( CACHE_CLEAR + cname + ", segments[" + segments + ']' , Env . SUB | Env . ALWAYS ) ; try { String [ ] segs = segments . split ( "\\s*,\\s*" ) ; int isegs [ ] = new int [ segs . length ] ; for ( int i = 0 ; i < segs . length ; ++ i ) { try { isegs [ i ] = Integer . parseInt ( segs [ i ] ) ; } catch ( NumberFormatException nfe ) { isegs [ i ] = - 1 ; } } return service . cacheClear ( trans , cname , isegs ) ; } catch ( Exception e ) { trans . error ( ) . log ( e , IN , CACHE_CLEAR ) ; return Result . err ( e ) ; } finally { tt . done ( ) ; }
public class ExternalType { /** * { @ inheritDoc } */ @ Override public final void to ( ObjectOutput out ) throws IOException { } }
if ( out == null ) throw new NullPointerException ( ) ; // delegate to the equivalent internal method _to ( out ) ;
public class AmazonAlexaForBusinessClient {

    /**
     * Makes a private skill unavailable for enrolled users and prevents them from enabling it on their devices.
     *
     * @param disassociateSkillFromUsersRequest
     * @return Result of the DisassociateSkillFromUsers operation returned by the service.
     * @throws ConcurrentModificationException
     *         There is a concurrent modification of resources.
     * @throws NotFoundException
     *         The resource is not found.
     * @sample AmazonAlexaForBusiness.DisassociateSkillFromUsers
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/alexaforbusiness-2017-11-09/DisassociateSkillFromUsers"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public DisassociateSkillFromUsersResult disassociateSkillFromUsers(DisassociateSkillFromUsersRequest request) {
        // Apply client-side request handlers/mutations, then execute the call.
        request = beforeClientExecution(request);
        return executeDisassociateSkillFromUsers(request);
    }
}
public class ImageModerationsImpl {

    /**
     * Fuzzily match an image against one of your custom Image Lists. You can create and manage your custom image lists using &lt;a href="/docs/services/578ff44d2703741568569ab9/operations/578ff7b12703741568569abe"&gt;this&lt;/a&gt; API.
     * Returns ID and tags of matching image.&lt;br/&gt;
     * &lt;br/&gt;
     * Note: Refresh Index must be run on the corresponding Image List before additions and removals are reflected in the response.
     *
     * @param imageStream The image file.
     * @param listId The list Id.
     * @param cacheImage Whether to retain the submitted image for future use; defaults to false if omitted.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the MatchResponse object
     */
    public Observable<ServiceResponse<MatchResponse>> matchFileInputWithServiceResponseAsync(byte[] imageStream, String listId, Boolean cacheImage) {
        // Validate required client configuration and parameters up front.
        if (this.client.baseUrl() == null) {
            throw new IllegalArgumentException("Parameter this.client.baseUrl() is required and cannot be null.");
        }
        if (imageStream == null) {
            throw new IllegalArgumentException("Parameter imageStream is required and cannot be null.");
        }
        // Substitute the parameterized {baseUrl} host segment (AutoRest pattern).
        String parameterizedHost = Joiner.on(", ").join("{baseUrl}", this.client.baseUrl());
        // Wrap the raw bytes as a request body; content type is fixed to image/gif.
        RequestBody imageStreamConverted = RequestBody.create(MediaType.parse("image/gif"), imageStream);
        // Issue the call and map the raw retrofit response into a typed ServiceResponse.
        return service.matchFileInput(listId, cacheImage, imageStreamConverted, this.client.acceptLanguage(), parameterizedHost, this.client.userAgent()).flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<MatchResponse>>>() {
            @Override
            public Observable<ServiceResponse<MatchResponse>> call(Response<ResponseBody> response) {
                try {
                    ServiceResponse<MatchResponse> clientResponse = matchFileInputDelegate(response);
                    return Observable.just(clientResponse);
                } catch (Throwable t) {
                    // Deserialization/validation failures are surfaced as an error Observable.
                    return Observable.error(t);
                }
            }
        });
    }
}
public class JavaParser {

    /**
     * ANTLR-generated rule (do not hand-edit):
     * src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1157:1 : andExpression : equalityExpression ( '&' equalityExpression )* ;
     */
    public final void andExpression() throws RecognitionException {
        int andExpression_StartIndex = input.index();
        try {
            // Memoization: skip if this rule was already parsed at this position while backtracking.
            if (state.backtracking > 0 && alreadyParsedRule(input, 115)) {
                return;
            }
            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1158:5: ( equalityExpression ( '&' equalityExpression )* )
            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1158:9: equalityExpression ( '&' equalityExpression )*
            {
                pushFollow(FOLLOW_equalityExpression_in_andExpression5186);
                equalityExpression();
                state._fsp--;
                if (state.failed) return;
                // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1158:28: ( '&' equalityExpression )*
                loop147: while (true) {
                    int alt147 = 2;
                    int LA147_0 = input.LA(1);
                    // Token 34 is '&': keep consuming '&' equalityExpression pairs.
                    if ((LA147_0 == 34)) {
                        alt147 = 1;
                    }
                    switch (alt147) {
                        case 1:
                            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1158:30: '&' equalityExpression
                            {
                                match(input, 34, FOLLOW_34_in_andExpression5190);
                                if (state.failed) return;
                                pushFollow(FOLLOW_equalityExpression_in_andExpression5192);
                                equalityExpression();
                                state._fsp--;
                                if (state.failed) return;
                            }
                            break;
                        default:
                            break loop147;
                    }
                }
            }
        } catch (RecognitionException re) {
            reportError(re);
            recover(input, re);
        } finally {
            // do for sure before leaving
            if (state.backtracking > 0) {
                memoize(input, 115, andExpression_StartIndex);
            }
        }
    }
}
public class dnsaction { /** * Use this API to unset the properties of dnsaction resource . * Properties that need to be unset are specified in args array . */ public static base_response unset ( nitro_service client , dnsaction resource , String [ ] args ) throws Exception { } }
dnsaction unsetresource = new dnsaction ( ) ; unsetresource . actionname = resource . actionname ; return unsetresource . unset_resource ( client , args ) ;
public class ToleranceRangeRule { /** * Validate the Tolerance Range of this IMolecularFormula . * @ param formula Parameter is the IMolecularFormula * @ return A double value meaning 1.0 True , 0.0 False */ @ Override public double validate ( IMolecularFormula formula ) throws CDKException { } }
logger . info ( "Start validation of " , formula ) ; double totalExactMass = MolecularFormulaManipulator . getTotalExactMass ( formula ) ; if ( Math . abs ( totalExactMass - mass ) > tolerance ) return 0.0 ; else return 1.0 ;
public class Server { /** * This is a wrapper around { @ link ReadableByteChannel # read ( ByteBuffer ) } . * If the amount of data is large , it writes to channel in smaller chunks . * This is to avoid jdk from creating many direct buffers as the size of * ByteBuffer increases . There should not be any performance degredation . * @ see ReadableByteChannel # read ( ByteBuffer ) */ private static int channelRead ( ReadableByteChannel channel , ByteBuffer buffer ) throws IOException { } }
return ( buffer . remaining ( ) <= NIO_BUFFER_LIMIT ) ? channel . read ( buffer ) : channelIO ( channel , null , buffer ) ;
public class MessageControllerManager { /** * without jdk8 , @ Repeatable doesn ' t work , so we use @ JsTopicControls annotation and parse it * @ param topic * @ return */ JsTopicMessageController getJsTopicMessageControllerFromJsTopicControls ( String topic ) { } }
logger . debug ( "Looking for messageController for topic '{}' from JsTopicControls annotation" , topic ) ; Instance < JsTopicMessageController < ? > > select = topicMessageController . select ( new JsTopicCtrlsAnnotationLiteral ( ) ) ; if ( select . isUnsatisfied ( ) ) { return null ; } return getJsTopicMessageControllerFromIterable ( topic , select ) ;
public class Base64Utils { /** * Base64 - decode the given byte array from an UTF - 8 String . * @ param src the encoded UTF - 8 String ( may be { @ code null } ) * @ return the original byte array ( or { @ code null } if the input was { @ code null } ) * @ since 2.0 */ public static byte [ ] decodeFromString ( String src ) { } }
if ( src == null ) { return null ; } if ( src . length ( ) == 0 ) { return new byte [ 0 ] ; } byte [ ] result ; try { result = delegate . decode ( src . getBytes ( DEFAULT_CHARSET . displayName ( ) ) ) ; } catch ( UnsupportedEncodingException e ) { // should not happen , UTF - 8 is always supported throw new IllegalStateException ( e ) ; } return result ;
public class AbstractPlane4F {

    /**
     * {@inheritDoc}
     *
     * <p>Replaces each plane-equation component (a, b, c, d) with its
     * absolute value in a single {@code set} call.
     */
    @Override
    public void absolute() {
        this.set(
                Math.abs(getEquationComponentA()),
                Math.abs(getEquationComponentB()),
                Math.abs(getEquationComponentC()),
                Math.abs(getEquationComponentD()));
    }
}
public class ConfigurationContext { /** * Guicey bundle manual disable registration from * { @ link ru . vyarus . dropwizard . guice . GuiceBundle . Builder # disableBundles ( Class [ ] ) } . * @ param bundles modules to disable */ @ SuppressWarnings ( "PMD.UseVarargs" ) public void disableBundle ( final Class < ? extends GuiceyBundle > [ ] bundles ) { } }
for ( Class < ? extends GuiceyBundle > bundle : bundles ) { registerDisable ( ConfigItem . Bundle , bundle ) ; }
public class BeanO {

    /**
     * Obtain the <code>Identity</code> of the bean associated with
     * this <code>BeanO</code>. <p>
     *
     * @return the caller identity, or {@code null} when no security
     *         collaborator is configured
     * @throws IllegalStateException if invoked from ejbTimeout, where this
     *         call is disallowed by the EJB specification
     */
    @Override
    @Deprecated
    public java.security.Identity getCallerIdentity() {
        EJSDeployedSupport s = EJSContainer.getMethodContext();

        // Method not allowed from ejbTimeout.                          LI2281.07
        if (s != null && s.methodInfo.ivInterface == MethodInterface.TIMED_OBJECT) {
            IllegalStateException ise = new IllegalStateException("getCallerIdentity() not " + "allowed from ejbTimeout");
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                Tr.debug(tc, "getCallerIdentity: " + ise);
            throw ise;
        }

        // Without a security collaborator there is no caller identity to report.
        EJBSecurityCollaborator<?> securityCollaborator = container.ivSecurityCollaborator;
        if (securityCollaborator == null) {
            return null; // d740575
        }

        // Delegate to the collaborator-aware overload with the current method context.
        return getCallerIdentity(securityCollaborator, s);
    }
}
public class CalendarThinTableModel {

    /**
     * Create a CalendarItem Proxy using this field list.
     * Usually, you use CalendarItemFieldListProxy, or override it.
     *
     * @param fieldList the field list to wrap
     * @return a proxy exposing the configured start/end/description/status fields
     */
    public CalendarItem getFieldListProxy(FieldList fieldList) {
        // Factory method: subclasses may override to supply a different proxy type.
        return new CalendarItemFieldListProxy(fieldList, m_strStartDateTimeField, m_strEndDateTimeField, m_strDescriptionField, m_strStatusField);
    }
}
public class CommerceNotificationQueueEntryPersistenceImpl {

    /**
     * Clears the cache for all commerce notification queue entries.
     *
     * <p>
     * The {@link EntityCache} and {@link FinderCache} are both cleared by this method.
     * </p>
     */
    @Override
    public void clearCache() {
        // Drop all cached entity instances for this model...
        entityCache.clearCache(CommerceNotificationQueueEntryImpl.class);

        // ...and every category of cached finder results (by-PK, paginated
        // lists, and unpaginated lists).
        finderCache.clearCache(FINDER_CLASS_NAME_ENTITY);
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);
    }
}
public class Ifc2x3tc1FactoryImpl {

    /**
     * Converts a literal string into the corresponding IfcObjectTypeEnum value.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public IfcObjectTypeEnum createIfcObjectTypeEnumFromString(EDataType eDataType, String initialValue) {
        IfcObjectTypeEnum result = IfcObjectTypeEnum.get(initialValue);
        // EMF convention: unknown literals are a hard error, not a null result.
        if (result == null)
            throw new IllegalArgumentException("The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
        return result;
    }
}
public class CmsFormatterSelectWidget { /** * Updates the select options from the given entity . < p > * @ param entity a top - level content entity */ public void update ( CmsEntity entity ) { } }
String removeAllFlag = CmsEntity . getValueForPath ( entity , m_valuePath ) ; boolean allRemoved = Boolean . valueOf ( removeAllFlag ) . booleanValue ( ) ; replaceOptions ( allRemoved ? m_optionsAllRemoved : m_optionsDefault ) ;
public class JSONTokener { /** * Returns the next { @ code length } characters of the input . * < p > The returned string shares its backing character array with this * tokener ' s input string . If a reference to the returned string may be held * indefinitely , you should use { @ code new String ( result ) } to copy it first * to avoid memory leaks . * @ throws JSONException if the remaining input is not long enough to * satisfy this request . */ public String next ( int length ) throws JSONException { } }
if ( pos + length > in . length ( ) ) { throw syntaxError ( length + " is out of bounds" ) ; } String result = in . substring ( pos , pos + length ) ; pos += length ; return result ;