signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class StreamingMergeSortedGrouper { /** * Returns the minimum buffer capacity required for this grouper . This grouper keeps track read / write indexes * and they cannot point the same array slot at the same time . Since the read / write indexes move circularly , one * extra slot is needed in addition to the read / write slots . Finally , the required minimum buffer capacity is * 3 * record size . * @ return required minimum buffer capacity */ public static < KeyType > int requiredBufferCapacity ( KeySerde < KeyType > keySerde , AggregatorFactory [ ] aggregatorFactories ) { } }
int recordSize = keySerde . keySize ( ) ; for ( AggregatorFactory aggregatorFactory : aggregatorFactories ) { recordSize += aggregatorFactory . getMaxIntermediateSizeWithNulls ( ) ; } return recordSize * 3 ;
public class FeedCommProcessor { /** * Sends a message to the server synchronously . This will return only when the message has been sent . * @ param messageWithData the message to send * @ throws IOException if the message failed to be sent */ public void sendSync ( BasicMessageWithExtraData < ? extends BasicMessage > messageWithData ) throws Exception { } }
if ( ! isConnected ( ) ) { throw new IllegalStateException ( "WebSocket connection was closed. Cannot send any messages" ) ; } BasicMessage message = messageWithData . getBasicMessage ( ) ; configurationAuthentication ( message ) ; if ( messageWithData . getBinaryData ( ) == null ) { String messageString = ApiDeserializer . toHawkularFormat ( message ) ; @ SuppressWarnings ( "resource" ) Buffer buffer = new Buffer ( ) . writeUtf8 ( messageString ) ; RequestBody requestBody = RequestBody . create ( WebSocket . TEXT , buffer . readByteArray ( ) ) ; FeedCommProcessor . this . webSocket . sendMessage ( requestBody ) ; } else { BinaryData messageData = ApiDeserializer . toHawkularFormat ( message , messageWithData . getBinaryData ( ) ) ; RequestBody requestBody = new RequestBody ( ) { @ Override public MediaType contentType ( ) { return WebSocket . BINARY ; } @ Override public void writeTo ( BufferedSink bufferedSink ) throws IOException { emitToSink ( messageData , bufferedSink ) ; } } ; FeedCommProcessor . this . webSocket . sendMessage ( requestBody ) ; }
public class GenericShuffleJXTable { /** * Shuffle selected left rows to right table . */ public void shuffleSelectedLeftRowsToRightTable ( ) { } }
final int [ ] selectedRows = leftTable . getSelectedRows ( ) ; final int lastIndex = selectedRows . length - 1 ; for ( int i = lastIndex ; - 1 < i ; i -- ) { final int selectedRow = selectedRows [ i ] ; final T row = leftTable . getGenericTableModel ( ) . removeAt ( selectedRow ) ; rightTable . getGenericTableModel ( ) . add ( row ) ; }
public class NettyMessagingTransport { /** * Returns a link for the remote address if already cached ; otherwise , returns null . * @ param remoteAddr the remote address * @ return a link if already cached ; otherwise , null */ public < T > Link < T > get ( final SocketAddress remoteAddr ) { } }
final LinkReference linkRef = this . addrToLinkRefMap . get ( remoteAddr ) ; return linkRef != null ? ( Link < T > ) linkRef . getLink ( ) : null ;
public class TopLevelDomainsInner { /** * Gets all legal agreements that user needs to accept before purchasing a domain . * Gets all legal agreements that user needs to accept before purchasing a domain . * @ param name Name of the top - level domain . * @ param agreementOption Domain agreement options . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the PagedList & lt ; TldLegalAgreementInner & gt ; object */ public Observable < ServiceResponse < Page < TldLegalAgreementInner > > > listAgreementsWithServiceResponseAsync ( final String name , final TopLevelDomainAgreementOption agreementOption ) { } }
return listAgreementsSinglePageAsync ( name , agreementOption ) . concatMap ( new Func1 < ServiceResponse < Page < TldLegalAgreementInner > > , Observable < ServiceResponse < Page < TldLegalAgreementInner > > > > ( ) { @ Override public Observable < ServiceResponse < Page < TldLegalAgreementInner > > > call ( ServiceResponse < Page < TldLegalAgreementInner > > page ) { String nextPageLink = page . body ( ) . nextPageLink ( ) ; if ( nextPageLink == null ) { return Observable . just ( page ) ; } return Observable . just ( page ) . concatWith ( listAgreementsNextWithServiceResponseAsync ( nextPageLink ) ) ; } } ) ;
public class DeploymentRequestServlet { /** * Update the deployment */ public void handleUpdateDeployment ( String jsonp , HttpServletRequest request , HttpServletResponse response , AuthenticationResult ar ) throws IOException , ServletException { } }
String deployment = request . getParameter ( "deployment" ) ; if ( deployment == null || deployment . length ( ) == 0 ) { response . getWriter ( ) . print ( buildClientResponse ( jsonp , ClientResponse . UNEXPECTED_FAILURE , "Failed to get deployment information." ) ) ; response . setStatus ( HttpServletResponse . SC_BAD_REQUEST ) ; return ; } try { DeploymentType newDeployment = m_mapper . readValue ( deployment , DeploymentType . class ) ; if ( newDeployment == null ) { response . getWriter ( ) . print ( buildClientResponse ( jsonp , ClientResponse . UNEXPECTED_FAILURE , "Failed to parse deployment information." ) ) ; return ; } DeploymentType currentDeployment = this . getDeployment ( ) ; if ( currentDeployment . getUsers ( ) != null ) { newDeployment . setUsers ( currentDeployment . getUsers ( ) ) ; } // reset the host count so that it wont fail the deployment checks newDeployment . getCluster ( ) . setHostcount ( currentDeployment . getCluster ( ) . getHostcount ( ) ) ; String dep = CatalogUtil . getDeployment ( newDeployment ) ; if ( dep == null || dep . trim ( ) . length ( ) <= 0 ) { response . getWriter ( ) . print ( buildClientResponse ( jsonp , ClientResponse . UNEXPECTED_FAILURE , "Failed to build deployment information." ) ) ; return ; } Object [ ] params = new Object [ ] { null , dep } ; SyncCallback cb = new SyncCallback ( ) ; httpClientInterface . callProcedure ( request . getRemoteHost ( ) , ar , BatchTimeoutOverrideType . NO_TIMEOUT , cb , "@UpdateApplicationCatalog" , params ) ; cb . waitForResponse ( ) ; ClientResponseImpl r = ClientResponseImpl . class . cast ( cb . getResponse ( ) ) ; if ( r . getStatus ( ) == ClientResponse . SUCCESS ) { response . getWriter ( ) . print ( buildClientResponse ( jsonp , ClientResponse . SUCCESS , "Deployment Updated." ) ) ; } else { response . getWriter ( ) . print ( HTTPClientInterface . asJsonp ( jsonp , r . toJSONString ( ) ) ) ; } } catch ( JsonParseException e ) { response . 
setStatus ( HttpServletResponse . SC_BAD_REQUEST ) ; response . getWriter ( ) . print ( buildClientResponse ( jsonp , ClientResponse . UNEXPECTED_FAILURE , "Unparsable JSON" ) ) ; } catch ( Exception ex ) { m_log . error ( "Failed to update deployment from API" , ex ) ; response . setStatus ( HttpServletResponse . SC_INTERNAL_SERVER_ERROR ) ; response . getWriter ( ) . print ( buildClientResponse ( jsonp , ClientResponse . UNEXPECTED_FAILURE , Throwables . getStackTraceAsString ( ex ) ) ) ; }
public class CPInstancePersistenceImpl { /** * Removes all the cp instances where CPDefinitionId = & # 63 ; and displayDate & lt ; & # 63 ; and status = & # 63 ; from the database . * @ param CPDefinitionId the cp definition ID * @ param displayDate the display date * @ param status the status */ @ Override public void removeByC_LtD_S ( long CPDefinitionId , Date displayDate , int status ) { } }
for ( CPInstance cpInstance : findByC_LtD_S ( CPDefinitionId , displayDate , status , QueryUtil . ALL_POS , QueryUtil . ALL_POS , null ) ) { remove ( cpInstance ) ; }
public class UpdateAppRequest { /** * List of server groups in the application to update . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setServerGroups ( java . util . Collection ) } or { @ link # withServerGroups ( java . util . Collection ) } if you want to * override the existing values . * @ param serverGroups * List of server groups in the application to update . * @ return Returns a reference to this object so that method calls can be chained together . */ public UpdateAppRequest withServerGroups ( ServerGroup ... serverGroups ) { } }
if ( this . serverGroups == null ) { setServerGroups ( new java . util . ArrayList < ServerGroup > ( serverGroups . length ) ) ; } for ( ServerGroup ele : serverGroups ) { this . serverGroups . add ( ele ) ; } return this ;
public class IntrinsicNearestNeighborAffinityMatrixBuilder {
    /**
     * Compute the sparse pij using the nearest neighbors only.
     *
     * Phase 1 optimizes a per-point bandwidth (sigma/beta) to hit the configured perplexity;
     * phase 2 symmetrizes and rescales the resulting sparse affinity matrix in place.
     *
     * @param ids ID range
     * @param knnq kNN query
     * @param square Use squared distances
     * @param numberOfNeighbours Number of neighbors to get
     * @param pij Output of distances (one row per point, allocated here)
     * @param indices Output of indexes (column indexes matching pij rows)
     * @param initialScale Initial scaling factor
     */
    protected void computePij(DBIDRange ids, KNNQuery<?> knnq, boolean square, int numberOfNeighbours, double[][] pij, int[][] indices, double initialScale) {
        // Optional statistics/progress instrumentation; all of these stay null when disabled.
        Duration timer = LOG.isStatistics() ? LOG.newDuration(this.getClass().getName() + ".runtime.neighborspijmatrix").begin() : null;
        final double logPerp = FastMath.log(perplexity);
        // Scratch arrays, resizable; +10 slack avoids most regrowth when ties add extra neighbors.
        DoubleArray dists = new DoubleArray(numberOfNeighbours + 10);
        IntegerArray inds = new IntegerArray(numberOfNeighbours + 10);
        // Compute nearest-neighbor sparse affinity matrix
        FiniteProgress prog = LOG.isVerbose() ? new FiniteProgress("Finding neighbors and optimizing perplexity", ids.size(), LOG) : null;
        MeanVariance mv = LOG.isStatistics() ? new MeanVariance() : null;
        Mean mid = LOG.isStatistics() ? new Mean() : null;
        for (DBIDArrayIter ix = ids.iter(); ix.valid(); ix.advance()) {
            dists.clear();
            inds.clear();
            // +1 because the query point itself is expected among its own neighbors.
            KNNList neighbours = knnq.getKNNForDBID(ix, numberOfNeighbours + 1);
            convertNeighbors(ids, ix, square, neighbours, dists, inds, mid);
            // computeSigma fills the freshly allocated pij row as a side effect of the last argument.
            double beta = computeSigma(ix.getOffset(), dists, perplexity, logPerp, pij[ix.getOffset()] = new double[dists.size()]);
            if (mv != null) {
                mv.put(beta > 0 ? FastMath.sqrt(.5 / beta) : 0.); // Sigma
            }
            indices[ix.getOffset()] = inds.toArray();
            LOG.incrementProcessed(prog);
        }
        LOG.ensureCompleted(prog);
        if (mid != null) {
            LOG.statistics(new DoubleStatistic(getClass() + ".average-original-id", mid.getMean()));
        }
        // Sum of the sparse affinity matrix:
        double sum = 0.;
        for (int i = 0; i < pij.length; i++) {
            final double[] pij_i = pij[i];
            for (int offi = 0; offi < pij_i.length; offi++) {
                int j = indices[i][offi];
                if (j > i) {
                    continue; // Exploit symmetry.
                }
                assert (i != j);
                // Only count pairs that are mutual neighbors (entry exists in both rows).
                int offj = containsIndex(indices[j], i);
                if (offj >= 0) { // Found
                    sum += FastMath.sqrt(pij_i[offi] * pij[j][offj]);
                }
            }
        }
        final double scale = initialScale / (2 * sum);
        // Second pass: symmetrize (geometric mean of the two directed affinities) and scale;
        // non-mutual neighbor entries are zeroed out.
        for (int i = 0; i < pij.length; i++) {
            final double[] pij_i = pij[i];
            for (int offi = 0; offi < pij_i.length; offi++) {
                int j = indices[i][offi];
                assert (i != j);
                int offj = containsIndex(indices[j], i);
                if (offj >= 0) { // Found
                    assert (indices[j][offj] == i);
                    // Exploit symmetry:
                    if (i < j) {
                        final double val = FastMath.sqrt(pij_i[offi] * pij[j][offj]);
                        // Symmetrize; MIN_PIJ keeps entries strictly positive.
                        pij_i[offi] = pij[j][offj] = MathUtil.max(val * scale, MIN_PIJ);
                    }
                } else { // Not found, so zero.
                    pij_i[offi] = 0;
                }
            }
        }
        if (LOG.isStatistics()) { // timer != null, mv != null
            LOG.statistics(timer.end());
            LOG.statistics(new DoubleStatistic(NearestNeighborAffinityMatrixBuilder.class.getName() + ".sigma.average", mv.getMean()));
            LOG.statistics(new DoubleStatistic(NearestNeighborAffinityMatrixBuilder.class.getName() + ".sigma.stddev", mv.getSampleStddev()));
        }
    }
}
public class RootDirectory {
    /**
     * Relativizes the path specified against the path of this directory.
     *
     * @param pWatchedDirectory unused here — at the root there is no parent directory
     *                          whose key would need to be consulted
     * @param pPath the path to relativize against this root directory's path
     * @return the relative path from this directory to {@code pPath}
     */
    @Override
    Path relativizeAgainstRoot(final WatchedDirectory pWatchedDirectory, final Path pPath) {
        // Because we are on the last root directory possible we can ignore the
        // directory key here.
        return getPath().relativize(pPath);
    }
}
public class ReceiveQueueProxy { /** * Receive the next remote message . * pend ( don ) Need to keep retrying if timeout as some firewalls won ' t wait forever . * @ param strCommand Command to perform remotely . * @ return boolean success . */ public BaseMessage receiveRemoteMessage ( ) throws RemoteException { } }
BaseTransport transport = this . createProxyTransport ( RECEIVE_REMOTE_MESSAGE ) ; Object strReturn = transport . sendMessageAndGetReply ( ) ; Object objReturn = transport . convertReturnObject ( strReturn ) ; return ( BaseMessage ) objReturn ;
public class BELScriptParser {
    /**
     * BELScript.g:81:1: unset_statement_group : 'UNSET' STATEMENT_GROUP_KEYWORD ;
     *
     * NOTE: ANTLR-generated parser rule — do not hand-edit; regenerate from BELScript.g instead.
     * Matches the literal 'UNSET' followed by the STATEMENT_GROUP_KEYWORD token and builds the
     * corresponding flat AST; on a recognition error an error node is substituted.
     */
    public final BELScriptParser.unset_statement_group_return unset_statement_group() throws RecognitionException {
        BELScriptParser.unset_statement_group_return retval = new BELScriptParser.unset_statement_group_return();
        retval.start = input.LT(1);
        Object root_0 = null;
        Token string_literal29 = null;
        Token STATEMENT_GROUP_KEYWORD30 = null;
        Object string_literal29_tree = null;
        Object STATEMENT_GROUP_KEYWORD30_tree = null;
        // Paraphrase gives friendlier error messages while inside this rule.
        paraphrases.push("in unset statement group.");
        try {
            // BELScript.g:84:5: ( 'UNSET' STATEMENT_GROUP_KEYWORD )
            // BELScript.g:85:5: 'UNSET' STATEMENT_GROUP_KEYWORD
            {
                root_0 = (Object) adaptor.nil();
                string_literal29 = (Token) match(input, 26, FOLLOW_26_in_unset_statement_group340);
                string_literal29_tree = (Object) adaptor.create(string_literal29);
                adaptor.addChild(root_0, string_literal29_tree);
                STATEMENT_GROUP_KEYWORD30 = (Token) match(input, STATEMENT_GROUP_KEYWORD, FOLLOW_STATEMENT_GROUP_KEYWORD_in_unset_statement_group342);
                STATEMENT_GROUP_KEYWORD30_tree = (Object) adaptor.create(STATEMENT_GROUP_KEYWORD30);
                adaptor.addChild(root_0, STATEMENT_GROUP_KEYWORD30_tree);
            }
            retval.stop = input.LT(-1);
            retval.tree = (Object) adaptor.rulePostProcessing(root_0);
            adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
            paraphrases.pop();
        } catch (RecognitionException re) {
            // Standard ANTLR recovery: report, resync, and emit an error node covering the span.
            reportError(re);
            recover(input, re);
            retval.tree = (Object) adaptor.errorNode(input, retval.start, input.LT(-1), re);
        } finally {
        }
        return retval;
    }
}
public class Entity { /** * Return the list of fields including inherited ones . * @ return The list */ public ArrayList < Field > getAllFields ( ) { } }
final ArrayList < Field > r = new ArrayList < Field > ( ) ; final List < String > ids = new ArrayList < String > ( ) ; if ( getFields ( ) != null ) { for ( Field field : getFields ( ) ) { r . add ( field ) ; ids . add ( field . getId ( ) ) ; } } if ( getExtendzEntity ( ) != null ) { for ( Field field : getExtendzEntity ( ) . getAllFields ( ) ) { if ( ! ids . contains ( field . getId ( ) ) ) { r . add ( field ) ; } } } return r ;
public class ForEachStatementTransformer { /** * Helper for creating iterative loops . Note that after calling this method , you should compile and call * gw . internal . gosu . ir . nodes . statement . IRForEachStatement # setBody ( gw . internal . gosu . ir . nodes . IRStatement ) on the * IRForEachStatement . Since the body often depends on symbols introduced in the loop , you must usually compile it * after the loop has been created . Thus it cannot be an argument to this function . */ public static IRForEachStatement makeLoop ( TopLevelTransformationContext cc , IRExpression rootExpression , IType type , Symbol identifier , Symbol indexSymbol ) { } }
return new ForEachStatementTransformer ( cc , null ) . makeLoopImpl ( cc , rootExpression , type , identifier , indexSymbol , null ) ;
public class ConstantValueExpression { /** * Tests if the value is a string that would represent a prefix if used as a LIKE pattern . * The value must end in a ' % ' and contain no other wildcards ( ' _ ' or ' % ' ) . */ public boolean isPrefixPatternString ( ) { } }
String patternString = getValue ( ) ; int length = patternString . length ( ) ; if ( length == 0 ) { return false ; } // ' _ ' is not allowed . int disallowedWildcardPos = patternString . indexOf ( '_' ) ; if ( disallowedWildcardPos != - 1 ) { return false ; } int firstWildcardPos = patternString . indexOf ( '%' ) ; // Indexable filters have only a trailing ' % ' . // NOTE : not bothering to check for silly synonym patterns with multiple trailing ' % ' s . if ( firstWildcardPos != length - 1 ) { return false ; } return true ;
public class XcodeProjectWriter { /** * Add documentation group to map of objects . * @ param objects * object map . * @ param sourceTree * source tree description . * @ return documentation group . */ private PBXObjectRef addDocumentationGroup ( final Map objects , final String sourceTree ) { } }
final List productsList = new ArrayList ( ) ; final PBXObjectRef products = createPBXGroup ( "Documentation" , sourceTree , productsList ) ; objects . put ( products . getID ( ) , products . getProperties ( ) ) ; return products ;
public class UpdateDeviceStatusRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( UpdateDeviceStatusRequest updateDeviceStatusRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( updateDeviceStatusRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( updateDeviceStatusRequest . getAccessToken ( ) , ACCESSTOKEN_BINDING ) ; protocolMarshaller . marshall ( updateDeviceStatusRequest . getDeviceKey ( ) , DEVICEKEY_BINDING ) ; protocolMarshaller . marshall ( updateDeviceStatusRequest . getDeviceRememberedStatus ( ) , DEVICEREMEMBEREDSTATUS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class OptionGroup { /** * Set the selected option of this group to < code > name < / code > . * @ param option the option that is selected * @ throws AlreadySelectedException if an option from this group has * already been selected . */ public void setSelected ( Option option ) throws AlreadySelectedException { } }
if ( option == null ) { // reset the option previously selected selected = null ; return ; } // if no option has already been selected or the // same option is being reselected then set the // selected member variable if ( selected == null || selected . equals ( option . getKey ( ) ) ) { selected = option . getKey ( ) ; } else { throw new AlreadySelectedException ( this , option ) ; }
public class MessageBuilder { /** * Creates a REQUEST message . * @ param request the ByteBuffer represents the request . * @ return a protobuf message . */ public static Message buildRequest ( ByteBuffer request ) { } }
Request req = Request . newBuilder ( ) . setRequest ( ByteString . copyFrom ( request ) ) . build ( ) ; return Message . newBuilder ( ) . setType ( MessageType . REQUEST ) . setRequest ( req ) . build ( ) ;
public class ConcurrentTaskScheduler { /** * Specify the { @ link java . util . concurrent . ScheduledExecutorService } to * delegate to . * Autodetects a JSR - 236 * @ param scheduledExecutor * the new scheduled executor * { @ link javax . enterprise . concurrent . ManagedScheduledExecutorService } * in order to use it for trigger - based scheduling if possible , * instead of Spring ' s local trigger management . * Note : This will only apply to { @ link TaskScheduler } * invocations . If you want the given executor to apply to * { @ link org . springframework . scheduling . SchedulingTaskExecutor } * invocations as well , pass the same executor reference to * { @ link # setConcurrentExecutor } . * @ see # setConcurrentExecutor */ public final void setScheduledExecutor ( ScheduledExecutorService scheduledExecutor ) { } }
if ( scheduledExecutor != null ) { this . scheduledExecutor = scheduledExecutor ; this . enterpriseConcurrentScheduler = managedScheduledExecutorServiceClass != null && managedScheduledExecutorServiceClass . isInstance ( scheduledExecutor ) ; } else { this . scheduledExecutor = Executors . newSingleThreadScheduledExecutor ( ) ; this . enterpriseConcurrentScheduler = false ; }
public class BeanContextServicesSupport {
    /**
     * Get a service instance on behalf of the specified child of this context, by calling the
     * registered service provider, or by delegating to the parent context.
     *
     * Lock ordering note: the provider lookup and invocation happen under
     * {@code globalHierarchyLock} (with a short nested hold of {@code children}), while the
     * service-record bookkeeping afterwards synchronizes on the child only — preserve this order.
     *
     * @param child the child that request service
     * @param requestor the requestor object
     * @param serviceClass the service class
     * @param serviceSelector the service selectors
     * @param bcsrl the <code>BeanContextServiceRevokedListener</code>
     * @return a service instance on behalf of the specified child of this context, or null if
     *         neither a local provider nor the delegate could supply one
     * @throws IllegalArgumentException if <code>child</code> is not a child of this context
     * @throws TooManyListenersException
     * @see com.googlecode.openbeans.beancontext.BeanContextServices#getService(com.googlecode.openbeans.beancontext.BeanContextChild,
     *      java.lang.Object, java.lang.Class, java.lang.Object,
     *      com.googlecode.openbeans.beancontext.BeanContextServiceRevokedListener)
     */
    public Object getService(BeanContextChild child, Object requestor, Class serviceClass,
            Object serviceSelector, BeanContextServiceRevokedListener bcsrl)
            throws TooManyListenersException {
        if (child == null || requestor == null || serviceClass == null || bcsrl == null) {
            throw new NullPointerException();
        }
        BCSSChild bcssChild = null;
        BeanContextServiceProvider provider = null;
        Object service = null;
        boolean isDelegate = false;
        synchronized (globalHierarchyLock) {
            // check child
            synchronized (children) {
                bcssChild = (BCSSChild) children.get(child);
            }
            if (bcssChild == null) {
                throw new IllegalArgumentException(Messages.getString("beans.65"));
            }
            // try local service
            provider = getLocalServiceProvider(serviceClass);
            if (provider != null) {
                service = provider.getService(getBeanContextServicesPeer(), requestor, serviceClass, serviceSelector);
            }
            // no local service, try delegate
            if (service == null && proxy != null) {
                provider = proxy;
                service = proxy.getService(getBeanContextServicesPeer(), requestor, serviceClass, serviceSelector, bcsrl);
                isDelegate = true;
            }
        }
        if (service != null) {
            // save record so the service can be revoked/released for this child later
            synchronized (child) {
                if (bcssChild.serviceRecords == null) {
                    bcssChild.serviceRecords = new ArrayList<ServiceRecord>();
                }
                bcssChild.serviceRecords.add(new ServiceRecord(provider, child, requestor, serviceClass, bcsrl, service, isDelegate));
            }
        }
        return service;
    }
}
public class PojoDataParser {
    /**
     * {@inheritDoc}
     *
     * Delegates to the three-argument overload with a null parent component.
     */
    @NonNull
    @Override
    public List<BaseCell> parseComponent(JSONArray data, ServiceManager serviceManager) {
        return parseComponent(data, null, serviceManager);
    }
}
public class JmsFactoryFactoryImpl { /** * For use by the JCA resource adaptor . */ @ Override public QueueConnectionFactory createQueueConnectionFactory ( JmsJcaConnectionFactory jcaConnectionFactory , JmsJcaManagedQueueConnectionFactory jcaManagedQueueConnectionFactory ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "createQueueConnectionFactory" , new Object [ ] { jcaConnectionFactory , jcaManagedQueueConnectionFactory } ) ; QueueConnectionFactory qcf = new JmsQueueConnectionFactoryImpl ( jcaConnectionFactory , jcaManagedQueueConnectionFactory ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "createQueueConnectionFactory" , qcf ) ; return qcf ;
public class DateTime { /** * 调整日期和时间 < br > * 如果此对象为可变对象 , 返回自身 , 否则返回新对象 , 设置是否可变对象见 { @ link # setMutable ( boolean ) } * @ param datePart 调整的部分 { @ link DateField } * @ param offset 偏移量 , 正数为向后偏移 , 负数为向前偏移 * @ return 如果此对象为可变对象 , 返回自身 , 否则返回新对象 */ public DateTime offset ( DateField datePart , int offset ) { } }
final Calendar cal = toCalendar ( ) ; cal . add ( datePart . getValue ( ) , offset ) ; DateTime dt = mutable ? this : ObjectUtil . clone ( this ) ; return dt . setTimeInternal ( cal . getTimeInMillis ( ) ) ;
public class SimplePojoFactory { /** * Implementation of { @ link # newInstance ( Class ) } for regular class . * @ param < POJO > is the generic type of the { @ link net . sf . mmm . util . pojo . api . Pojo } to create . * @ param pojoType is the { @ link Class } reflecting the { @ link net . sf . mmm . util . pojo . api . Pojo } to create . * @ return the new instance of the given { @ code pojoType } . * @ throws InstantiationFailedException if the instantiation failed . */ protected < POJO > POJO newInstanceForClass ( Class < POJO > pojoType ) throws InstantiationFailedException { } }
try { return pojoType . newInstance ( ) ; } catch ( Exception e ) { throw new InstantiationFailedException ( e , pojoType ) ; }
public class HBaseDataHandler { /** * Populate entity from hBase data . * @ param entity * the entity * @ param hbaseData * the hbase data * @ param m * the m * @ param rowKey * the row key * @ return the object */ private Object populateEntityFromHBaseData ( Object entity , HBaseDataWrapper hbaseData , EntityMetadata m , Object rowKey ) { } }
try { Map < String , Object > relations = new HashMap < String , Object > ( ) ; if ( entity . getClass ( ) . isAssignableFrom ( EnhanceEntity . class ) ) { relations = ( ( EnhanceEntity ) entity ) . getRelations ( ) ; entity = ( ( EnhanceEntity ) entity ) . getEntity ( ) ; } MetamodelImpl metaModel = ( MetamodelImpl ) kunderaMetadata . getApplicationMetadata ( ) . getMetamodel ( m . getPersistenceUnit ( ) ) ; EntityType entityType = metaModel . entity ( m . getEntityClazz ( ) ) ; Set < Attribute > attributes = ( ( AbstractManagedType ) entityType ) . getAttributes ( ) ; writeValuesToEntity ( entity , hbaseData , m , metaModel , attributes , m . getRelationNames ( ) , relations , - 1 , "" ) ; if ( ! relations . isEmpty ( ) ) { return new EnhanceEntity ( entity , rowKey , relations ) ; } return entity ; } catch ( PropertyAccessException e1 ) { throw new RuntimeException ( e1 ) ; }
public class ParserTokenStream { /** * Looks ahead in the token stream * @ param distance The number of tokens to look ahead * @ return The token a given distance from the current position in the stream or null if end of stream */ public ParserToken lookAhead ( int distance ) { } }
while ( distance >= buffer . size ( ) && tokenIterator . hasNext ( ) ) { buffer . addLast ( tokenIterator . next ( ) ) ; } return buffer . size ( ) > distance ? buffer . getLast ( ) : null ;
public class CipherRegistry { /** * Register multiple ciphers . * @ param ciphers Ciphers to be registered . * @ return CipherRegistry instance ( for chaining ) . */ public static CipherRegistry registerCiphers ( String [ ] ... ciphers ) { } }
for ( String [ ] cipher : ciphers ) { instance . register ( cipher ) ; } return instance ;
public class AbstractSettings { /** * / * ( non - Javadoc ) * @ see nyla . solutions . core . util . Settings # getPropertyCharacter ( java . lang . Class , java . lang . String , char ) */ @ Override public Character getPropertyCharacter ( Class < ? > aClass , String key , char defaultValue ) { } }
String results = getProperty ( aClass , key , "" ) ; if ( results . length ( ) == 0 ) return Character . valueOf ( defaultValue ) ; else return Character . valueOf ( results . charAt ( 0 ) ) ; // return first character
public class CredentialsUtils { /** * Saves proxy credentials to a file . This method ensures that the stored * proxy is saved with the appropriate file permissions . * @ param proxyFileName * the file where the proxy will be saved * @ param uc * the credential to be saved * @ param encoding * the private key encoding * @ throws IOException * in case of errors writing to the proxy file */ public static void saveProxyCredentials ( String proxyFileName , X509Credential uc , PrivateKeyEncoding encoding ) throws IOException { } }
File f = new File ( proxyFileName ) ; RandomAccessFile raf = new RandomAccessFile ( f , "rws" ) ; FileChannel channel = raf . getChannel ( ) ; FilePermissionHelper . setProxyPermissions ( proxyFileName ) ; channel . truncate ( 0 ) ; ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; saveProxyCredentials ( baos , uc , encoding ) ; baos . close ( ) ; channel . write ( ByteBuffer . wrap ( baos . toByteArray ( ) ) ) ; channel . close ( ) ; raf . close ( ) ;
public class Morphia { /** * Maps a set of classes * @ param entityClasses the classes to map * @ return this */ public synchronized Morphia map ( final Class ... entityClasses ) { } }
if ( entityClasses != null && entityClasses . length > 0 ) { for ( final Class entityClass : entityClasses ) { if ( ! mapper . isMapped ( entityClass ) ) { mapper . addMappedClass ( entityClass ) ; } } } return this ;
public class IntentLogger { /** * private IntentLogger ( ) { } */ public static void dump ( Intent intent ) { } }
if ( intent == null ) { QuickUtils . log . v ( "no intent found" ) ; return ; } Bundle extras = intent . getExtras ( ) ; QuickUtils . log . v ( "Intent[@" + Integer . toHexString ( intent . hashCode ( ) ) + "] content:" ) ; QuickUtils . log . v ( "Action : " + intent . getAction ( ) ) ; QuickUtils . log . v ( "Category : " + intent . getCategories ( ) ) ; QuickUtils . log . v ( "Data : " + intent . getDataString ( ) ) ; QuickUtils . log . v ( "Component: " + intent . getComponent ( ) . getPackageName ( ) + "/" + intent . getComponent ( ) . getClassName ( ) ) ; dumpFlags ( intent . getFlags ( ) ) ; QuickUtils . log . v ( "HasExtras: " + ( extras != null && ! extras . isEmpty ( ) ) ) ; dumpExtras ( extras ) ;
public class CliUtils { /** * Executes the specified command line and blocks until the process has finished or till the * timeout is reached . The output of the process is captured , returned , as well as logged with * info ( stdout ) and error ( stderr ) level , respectively . * @ param cli * the command line * @ param loggerName * the name of the logger to use ( passed to { @ link LoggerFactory # getLogger ( String ) } ) ; * if { @ code null } this class ' name is used * @ param logMessagePrefix * if non - { @ code null } consumed lines are prefix with this string * @ param inputStream * the process input to read from , must be thread safe * @ param timeoutInSeconds * a positive integer to specify timeout , zero and negative integers for no timeout * @ return the process ' output */ public static CliOutput executeCommandLine ( final Commandline cli , final String loggerName , final String logMessagePrefix , final InputStream inputStream , final int timeoutInSeconds ) { } }
try { String cliString = CommandLineUtils . toString ( cli . getShellCommandline ( ) ) ; LOGGER . info ( "Executing command-line: {}" , cliString ) ; LoggingStreamConsumer stdOut = new LoggingStreamConsumer ( loggerName , logMessagePrefix , false ) ; LoggingStreamConsumer stdErr = new LoggingStreamConsumer ( loggerName , logMessagePrefix , true ) ; int exitCode = CommandLineUtils . executeCommandLine ( cli , inputStream , stdOut , stdErr , timeoutInSeconds ) ; return new CliOutput ( stdOut . getOutput ( ) , stdErr . getOutput ( ) , exitCode ) ; } catch ( CommandLineException ex ) { throw new CliException ( "Error executing command-line process." , ex ) ; }
public class SARLRuntime { /** * Replies the bootstrap name declared within the given path , corresponding to a JAR file or a folder . * < p > The SRE bootstrap detection is based on the service definition within META - INF folder . * @ param path the path to test . * @ return the bootstrap or { @ code null } if none . * @ since 0.7 * @ see # containsPackedBootstrap ( IPath ) * @ see # containsUnpackedBootstrap ( IPath ) */ public static String getDeclaredBootstrap ( IPath path ) { } }
try { final IFile location = ResourcesPlugin . getWorkspace ( ) . getRoot ( ) . getFile ( path ) ; if ( location != null ) { final IPath pathLocation = location . getLocation ( ) ; if ( pathLocation != null ) { final File file = pathLocation . toFile ( ) ; if ( file . exists ( ) ) { if ( file . isDirectory ( ) ) { return getDeclaredBootstrapInFolder ( file ) ; } if ( file . isFile ( ) ) { return getDeclaredBootstrapInJar ( file ) ; } return null ; } } } final File file = path . makeAbsolute ( ) . toFile ( ) ; if ( file . exists ( ) ) { if ( file . isDirectory ( ) ) { return getDeclaredBootstrapInJar ( file ) ; } if ( file . isFile ( ) ) { return getDeclaredBootstrapInFolder ( file ) ; } } } catch ( Exception exception ) { } return null ;
public class WatchDir { /** * Static factory method to create a simple { @ link WatchDir } instance that * already spawned an Thread to listen . To close the thread call the * { @ link WatchDir # close ( ) } method . * @ param aDir * The directory to be watched . May not be < code > null < / code > . * @ param bRecursive * < code > true < / code > to watch the directory recursive , * < code > false < / code > to watch just this directory . * @ param aCallback * The callback to be invoked if something changed . May not be * < code > null < / code > . * @ return The newly created { @ link WatchDir } instance and never * < code > null < / code > . * @ throws IOException * In case something goes wrong . */ @ Nonnull @ ReturnsMutableCopy public static WatchDir createAsyncRunningWatchDir ( @ Nonnull final Path aDir , final boolean bRecursive , @ Nonnull final IWatchDirCallback aCallback ) throws IOException { } }
final WatchDir ret = new WatchDir ( aDir , bRecursive ) ; ret . callbacks ( ) . add ( aCallback ) ; ret . runAsync ( ) ; return ret ;
public class CondensedCallStackElement { /** * Returns the signature with the packages names shortened to only include the first character . * @ return the condensed signature . e . g . { @ code void org . jboss . resteasy . plugins . server . servlet . HttpServletDispatcher . service ( HttpServletRequest , HttpServletResponse ) } * would return { @ code void o . j . r . p . s . s . HttpServletDispatcher . service ( HttpServletRequest , * HttpServletResponse ) } */ @ Override public String getSignature ( ) { } }
String signature = super . getSignature ( ) ; if ( signature . indexOf ( '(' ) == - 1 ) { return signature ; } if ( LOGGER . isDebugEnabled ( ) ) { LOGGER . debug ( "Condensing signature [{}]" , signature ) ; } int returnTypeSpace = signature . indexOf ( " " ) ; StringBuilder sb = new StringBuilder ( 100 ) ; sb . append ( signature . substring ( 0 , returnTypeSpace + 1 ) ) ; // Have to replace " . . " to " . " for classes generated by guice . e . g . // Object com . cerner . beadledom . health . resource . AvailabilityResource . . FastClassByGuice . . 77b09000 . newInstance ( int , Object [ ] ) String [ ] parts = signature . replaceAll ( "\\.\\." , "." ) . substring ( returnTypeSpace + 1 ) . split ( "\\." ) ; for ( int i = 0 ; i < parts . length - 2 ; i ++ ) { // Shorten each package name to only include the first character sb . append ( parts [ i ] . charAt ( 0 ) ) . append ( "." ) ; } sb . append ( parts [ parts . length - 2 ] ) . append ( "." ) . append ( parts [ parts . length - 1 ] ) ; return sb . toString ( ) ;
public class FDistort {
    /**
     * Specifies the input and output image and resets the configuration to defaults:
     * BILINEAR interpolation, black (0) image border, caching off.
     *
     * @return this instance, for fluent chaining
     */
    public FDistort init(ImageBase input, ImageBase output) {
        this.input = input;
        this.output = output;
        inputType = input.getImageType();
        // Defaults; interp() and border() configure internal state, so call order matters.
        interp(InterpolationType.BILINEAR);
        border(0);
        cached = false;
        // Clear any previously configured transform so it must be set again for the new images.
        distorter = null;
        outputToInput = null;
        return this;
    }
}
public class GrpcServerService { /** * A grpc method that requests the user to be authenticated and have the role " ROLE _ GREET " . */ @ Override @ Secured ( "ROLE_TEST" ) public void sayHello ( final HelloRequest req , final StreamObserver < HelloReply > responseObserver ) { } }
final HelloReply reply = HelloReply . newBuilder ( ) . setMessage ( "Hello ==> " + req . getName ( ) ) . build ( ) ; responseObserver . onNext ( reply ) ; responseObserver . onCompleted ( ) ;
public class JobsInner { /** * Exports all jobs for a given Shared Access Signatures ( SAS ) URL . The SAS URL expires within 15 minutes of its creation . * @ param vaultName The name of the Recovery Services vault . * @ param resourceGroupName The name of the resource group associated with the Recovery Services vault . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < Void > exportAsync ( String vaultName , String resourceGroupName , final ServiceCallback < Void > serviceCallback ) { } }
return ServiceFuture . fromResponse ( exportWithServiceResponseAsync ( vaultName , resourceGroupName ) , serviceCallback ) ;
public class JRubyJellyScript {
    /**
     * Invokes another Jelly tag library from JRuby.
     *
     * @param rcon the JRuby-side context holding the current Jelly context and output
     * @param context the Jelly context the tag runs in
     * @param output the XML output the tag writes to
     * @param uri namespace URI identifying the tag library
     * @param localName the tag name within that library
     * @param attributes tag attributes keyed by Ruby symbol; may be {@code null}
     * @param proc optional Ruby block used as the tag body; may be {@code null}
     * @throws JellyException if the tag cannot be resolved or executed
     */
    public void invokeTaglib(final IJRubyContext rcon, JellyContext context, XMLOutput output,
            String uri, String localName, Map<RubySymbol, ?> attributes, final RubyProc proc)
            throws JellyException {
        // Resolve the tag implementation for the given namespace URI and tag name.
        TagScript tagScript = createTagScript(context, uri, localName);
        if (attributes != null) {
            // Ruby symbol keys become attribute names; values are wrapped as constant expressions.
            for (Entry<RubySymbol, ?> e : attributes.entrySet()) {
                tagScript.addAttribute(e.getKey().asJavaString(), new ConstantExpression(e.getValue()));
            }
        }
        if (proc != null) {
            final Ruby runtime = ((IRubyObject) rcon).getRuntime();
            // The tag body yields back into the Ruby block. While the block runs, the
            // JRuby context temporarily points at the nested Jelly context/output so
            // tags invoked from inside the block see the correct state.
            tagScript.setTagBody(new Script() {
                public Script compile() throws JellyException {
                    return this;
                }

                public void run(JellyContext context, XMLOutput output) throws JellyTagException {
                    // Save the current context/output so they can be restored afterwards.
                    JellyContext oc = rcon.getJellyContext();
                    XMLOutput oo = rcon.getOutput();
                    try {
                        rcon.setJellyContext(context);
                        rcon.setOutput(output);
                        proc.getBlock().yield(runtime.getCurrentContext(), null);
                    } finally {
                        // Always restore, even if the Ruby block raised.
                        rcon.setJellyContext(oc);
                        rcon.setOutput(oo);
                    }
                }
            });
        }
        tagScript.run(context, output);
    }
}
public class LabelOperationMetadata {
    /**
     * Returns the audio-transcription details of this operation's {@code details} oneof,
     * or the empty default instance when a different oneof case is set.
     *
     * <code>.google.cloud.datalabeling.v1beta1.LabelAudioTranscriptionOperationMetadata audio_transcription_details = 10;</code>
     */
    public com.google.cloud.datalabeling.v1beta1.LabelAudioTranscriptionOperationMetadataOrBuilder getAudioTranscriptionDetailsOrBuilder() {
        // Case 10 corresponds to the audio_transcription_details field of the oneof.
        if (detailsCase_ == 10) {
            return (com.google.cloud.datalabeling.v1beta1.LabelAudioTranscriptionOperationMetadata) details_;
        }
        // Protobuf convention: never return null, return the default instance instead.
        return com.google.cloud.datalabeling.v1beta1.LabelAudioTranscriptionOperationMetadata.getDefaultInstance();
    }
}
public class JobMaster {
    /**
     * Forwards a task's checkpoint acknowledgement to the job's {@link CheckpointCoordinator}.
     * The acknowledgement is processed asynchronously on the RPC service executor so this
     * method does not block the caller.
     *
     * TODO: This method needs a leader session ID
     */
    @Override
    public void acknowledgeCheckpoint(final JobID jobID, final ExecutionAttemptID executionAttemptID,
            final long checkpointId, final CheckpointMetrics checkpointMetrics,
            final TaskStateSnapshot checkpointState) {
        final CheckpointCoordinator checkpointCoordinator = executionGraph.getCheckpointCoordinator();
        final AcknowledgeCheckpoint ackMessage = new AcknowledgeCheckpoint(
                jobID, executionAttemptID, checkpointId, checkpointMetrics, checkpointState);
        if (checkpointCoordinator != null) {
            // Process off-thread; failures are logged rather than propagated to the sender.
            getRpcService().execute(() -> {
                try {
                    checkpointCoordinator.receiveAcknowledgeMessage(ackMessage);
                } catch (Throwable t) {
                    log.warn("Error while processing checkpoint acknowledgement message", t);
                }
            });
        } else {
            // No coordinator: only an error while RUNNING - otherwise the job is shutting
            // down and a stray ack is expected, so log at debug.
            String errorMessage = "Received AcknowledgeCheckpoint message for job {} with no CheckpointCoordinator";
            if (executionGraph.getState() == JobStatus.RUNNING) {
                log.error(errorMessage, jobGraph.getJobID());
            } else {
                log.debug(errorMessage, jobGraph.getJobID());
            }
        }
    }
}
public class FilterAbstractReplace { /** * This method initializes filterReplaceDialog * @ return org . parosproxy . paros . extension . filter . FilterReplaceDialog */ protected FilterReplaceDialog getFilterReplaceDialog ( ) { } }
if ( filterReplaceDialog == null ) { filterReplaceDialog = new FilterReplaceDialog ( getView ( ) . getMainFrame ( ) , true ) ; } return filterReplaceDialog ;
public class AnalyzerJobBuilder { /** * Builds a temporary list of all listeners , both global and local * @ return */ private List < AnalyzerChangeListener > getAllListeners ( ) { } }
List < AnalyzerChangeListener > globalChangeListeners = getAnalysisJobBuilder ( ) . getAnalyzerChangeListeners ( ) ; List < AnalyzerChangeListener > list = new ArrayList < AnalyzerChangeListener > ( globalChangeListeners . size ( ) + _localChangeListeners . size ( ) ) ; list . addAll ( globalChangeListeners ) ; list . addAll ( _localChangeListeners ) ; return list ;
public class InventoryAggregator { /** * Nested aggregators to further refine aggregation for an inventory type . * @ param aggregators * Nested aggregators to further refine aggregation for an inventory type . */ public void setAggregators ( java . util . Collection < InventoryAggregator > aggregators ) { } }
if ( aggregators == null ) { this . aggregators = null ; return ; } this . aggregators = new com . amazonaws . internal . SdkInternalList < InventoryAggregator > ( aggregators ) ;
public class VersionInfo { /** * Generate version string separated by dots with * the specified digit width . Version digit 0 * after < code > minDigits < / code > will be trimmed off . * @ param minDigits Minimum number of version digits * @ param maxDigits Maximum number of version digits * @ return A tailored version string * @ deprecated This API is ICU internal only . ( For use in CLDR , etc . ) * @ hide original deprecated declaration * @ hide draft / provisional / internal are hidden on Android */ @ Deprecated public String getVersionString ( int minDigits , int maxDigits ) { } }
if ( minDigits < 1 || maxDigits < 1 || minDigits > 4 || maxDigits > 4 || minDigits > maxDigits ) { throw new IllegalArgumentException ( "Invalid min/maxDigits range" ) ; } int [ ] digits = new int [ 4 ] ; digits [ 0 ] = getMajor ( ) ; digits [ 1 ] = getMinor ( ) ; digits [ 2 ] = getMilli ( ) ; digits [ 3 ] = getMicro ( ) ; int numDigits = maxDigits ; while ( numDigits > minDigits ) { if ( digits [ numDigits - 1 ] != 0 ) { break ; } numDigits -- ; } StringBuilder verStr = new StringBuilder ( 7 ) ; verStr . append ( digits [ 0 ] ) ; for ( int i = 1 ; i < numDigits ; i ++ ) { verStr . append ( "." ) ; verStr . append ( digits [ i ] ) ; } return verStr . toString ( ) ;
public class CommitsApi { /** * Get a Pager of the comments of a commit in a project . * < pre > < code > GitLab Endpoint : GET / projects / : id / repository / commits / : sha / comments < / code > < / pre > * @ param projectIdOrPath the project in the form of an Integer ( ID ) , String ( path ) , or Project instance * @ param sha a commit hash or name of a branch or tag * @ param itemsPerPage the number of Comment instances that will be fetched per page * @ return a List of Comment instances for the specified project ID / sha pair * @ throws GitLabApiException GitLabApiException if any exception occurs during execution */ public Pager < Comment > getComments ( Object projectIdOrPath , String sha , int itemsPerPage ) throws GitLabApiException { } }
return new Pager < Comment > ( this , Comment . class , itemsPerPage , null , "projects" , getProjectIdOrPath ( projectIdOrPath ) , "repository" , "commits" , sha , "comments" ) ;
public class GetRequestValidatorRequestMarshaller {
    /**
     * Marshalls the given request object's REST API id and request-validator id into the
     * protocol marshaller.
     *
     * @throws SdkClientException if the request is {@code null} or marshalling fails
     */
    public void marshall(GetRequestValidatorRequest getRequestValidatorRequest, ProtocolMarshaller protocolMarshaller) {
        if (getRequestValidatorRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(getRequestValidatorRequest.getRestApiId(), RESTAPIID_BINDING);
            protocolMarshaller.marshall(getRequestValidatorRequest.getRequestValidatorId(), REQUESTVALIDATORID_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure, preserving the original cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class CropFilter {
    /**
     * Crops the already-decoded image according to the crop request parameters and returns
     * the cropped region. See the class documentation for details on parameters.
     */
    protected RenderedImage doFilter(BufferedImage pImage, ServletRequest pRequest, ImageServletResponse pResponse) {
        // Get crop coordinates; -1 means "not specified" and is resolved by getBounds.
        int x = ServletUtil.getIntParameter(pRequest, PARAM_CROP_X, -1);
        int y = ServletUtil.getIntParameter(pRequest, PARAM_CROP_Y, -1);
        int width = ServletUtil.getIntParameter(pRequest, PARAM_CROP_WIDTH, -1);
        int height = ServletUtil.getIntParameter(pRequest, PARAM_CROP_HEIGHT, -1);
        boolean uniform = ServletUtil.getBooleanParameter(pRequest, PARAM_CROP_UNIFORM, false);
        int units = getUnits(ServletUtil.getParameter(pRequest, PARAM_CROP_UNITS, null));
        // Get crop bounds resolved against the actual image dimensions.
        Rectangle bounds = getBounds(x, y, width, height, units, uniform, pImage);
        // Return cropped version (a subimage sharing the original's data buffer).
        return pImage.getSubimage((int) bounds.getX(), (int) bounds.getY(),
                (int) bounds.getWidth(), (int) bounds.getHeight());
        // return scaled . getSubimage ( x , y , width , height ) ;
    }
}
public class ImportTypeImpl {
    /**
     * Sets the value of the feature identified by {@code featureID}; unknown features are
     * delegated to the superclass.
     *
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public void eSet(int featureID, Object newValue) {
        switch (featureID) {
            case DroolsPackage.IMPORT_TYPE__NAME:
                setName((String) newValue);
                return;
        }
        super.eSet(featureID, newValue);
    }
}
public class RxBroadcast { /** * Create { @ link Observable } that wraps { @ link BroadcastReceiver } and emits received intents . * < em > This is only useful in conjunction with Ordered Broadcasts , e . g . , * { @ link Context # sendOrderedBroadcast ( Intent , String ) } < / em > * @ param context the context the { @ link BroadcastReceiver } will be created * from * @ param intentFilter the filter for the particular intent * @ param broadcastPermission String naming a permissions that a broadcaster must hold * in order to send an Intent to you . If null , no permission * is required . * @ param handler Handler identifying the thread that will receive the * Intent . If null , the main thread of the process will be used . * @ param orderedBroadcastAbortStrategy the strategy to use for Ordered Broadcasts * @ return { @ link Observable } of { @ link Intent } that matches the filter */ public static Observable < Intent > fromBroadcast ( Context context , IntentFilter intentFilter , String broadcastPermission , Handler handler , OrderedBroadcastAbortStrategy orderedBroadcastAbortStrategy ) { } }
BroadcastWithPermissionsRegistrar broadcastWithPermissionsRegistrar = new BroadcastWithPermissionsRegistrar ( context , intentFilter , broadcastPermission , handler ) ; return createBroadcastObservable ( broadcastWithPermissionsRegistrar , orderedBroadcastAbortStrategy ) ;
public class ClobImpl { /** * Retrieves the character position at which the specified string < code > searchstr < / code > begins * within the < code > CLOB < / code > value that this < code > Clob < / code > object represents . The search for * < code > searchstr < / code > begins at position < code > start < / code > . * @ param searchstr the byte array for which to search * @ param start the position at which to begin searching ; the first position is 1 * @ return the position at which the pattern appears , else - 1 * @ exception SQLException if there is an error accessing the < code > CLOB < / code > */ @ Override public long position ( String searchstr , long start ) throws SQLException { } }
return stringData . indexOf ( searchstr , ( int ) start ) ;
public class LogTransferListener { /** * ( non - Javadoc ) * @ see org . sonatype . aether . util . listener . AbstractTransferListener # transferSucceeded * ( org . sonatype . aether . transfer . TransferEvent ) */ @ Override public void transferSucceeded ( TransferEvent event ) { } }
TransferResource resource = event . getResource ( ) ; downloads . remove ( resource ) ; long contentLength = event . getTransferredBytes ( ) ; if ( contentLength >= 0 ) { long duration = System . currentTimeMillis ( ) - resource . getTransferStartTime ( ) ; double kbPerSec = ( contentLength / 1024.0 ) / ( duration / 1000.0 ) ; StringBuilder sb = new StringBuilder ( ) . append ( "Completed" ) . append ( event . getRequestType ( ) == TransferEvent . RequestType . PUT ? " upload of " : " download of " ) . append ( resource . getResourceName ( ) ) . append ( event . getRequestType ( ) == TransferEvent . RequestType . PUT ? " into " : " from " ) . append ( resource . getRepositoryUrl ( ) ) . append ( ", transferred " ) . append ( contentLength >= 1024 ? toKB ( contentLength ) + " KB" : contentLength + " B" ) . append ( " at " ) . append ( new DecimalFormat ( "0.0" , new DecimalFormatSymbols ( Locale . ENGLISH ) ) . format ( kbPerSec ) ) . append ( "KB/sec" ) ; log . fine ( sb . toString ( ) ) ; }
public class AppearancePreferenceFragment { /** * Creates and returns a listener , which allows to adapt the elevation of a preference * fragment ' s button bar , when the value of the corresponding preference has been changed . * @ return The listener , which has been created , as an instance of the type { @ link * OnPreferenceChangeListener } */ private OnPreferenceChangeListener createPreferenceFragmentButtonBarElevationChangeListener ( ) { } }
return new OnPreferenceChangeListener ( ) { @ Override public boolean onPreferenceChange ( final Preference preference , final Object newValue ) { int elevation = Integer . valueOf ( ( String ) newValue ) ; setButtonBarElevation ( elevation ) ; return true ; } } ;
public class StringUtils {
    /**
     * Removes the leading delimiter from a string.
     *
     * @param str String to process; must not be {@code null}
     * @param delimiter Delimiter to remove; must not be {@code null}
     * @return the string with the leading delimiter removed, or the string unchanged when it
     *         does not start with the delimiter
     */
    public static String removeLeadingDelimiter(String str, String delimiter) {
        if (str.startsWith(delimiter)) {
            // Single-argument substring is equivalent to substring(n, str.length()).
            return str.substring(delimiter.length());
        }
        return str;
    }
}
public class SessionImpl {
    /**
     * Transiently remaps a namespace prefix to a URI within this session only.
     *
     * <p>The URI must already be registered in the repository's namespace registry, and the
     * prefix must not already be mapped persistently (registry) or transiently (this session)
     * to some other URI, since that would make content stored under the old URI unreadable.
     *
     * {@inheritDoc}
     */
    public void setNamespacePrefix(String prefix, String uri) throws NamespaceException, RepositoryException {
        checkLive();
        NamespaceRegistryImpl nrg = (NamespaceRegistryImpl) workspace.getNamespaceRegistry();
        // The target URI must already exist in the persistent registry.
        if (!nrg.isUriRegistered(uri)) {
            throw new NamespaceException("The specified uri:" + uri + " is not among "
                    + "those registered in the NamespaceRegistry");
        }
        // Reject prefixes that are already persistently mapped in the registry.
        if (nrg.isPrefixMaped(prefix)) {
            throw new NamespaceException("A prefix '" + prefix + "' is currently already mapped to "
                    + nrg.getURI(prefix) + " URI persistently in the repository NamespaceRegistry "
                    + "and cannot be remapped to a new URI using this method, since this would make any "
                    + "content stored using the old URI unreadable.");
        }
        // Reject prefixes already transiently mapped within this session.
        if (namespaces.containsKey(prefix)) {
            throw new NamespaceException("A prefix '" + prefix + "' is currently already mapped to "
                    + namespaces.get(prefix) + " URI transiently within this Session and cannot be "
                    + "remapped to a new URI using this method, since this would make any "
                    + "content stored using the old URI unreadable.");
        }
        nrg.validateNamespace(prefix, uri);
        // Record the mapping in both directions for this session.
        namespaces.put(prefix, uri);
        prefixes.put(uri, prefix);
    }
}
public class MethodCompiler { /** * Invoke instance method ; special handling for superclass , private , and instance initialization method invocations * < p > Stack : . . . , objectref , [ arg1 , [ arg2 . . . ] ] = & gt ; . . . * @ param cls * @ param name * @ param parameters * @ throws IOException */ public void invokespecial ( Class < ? > cls , String name , Class < ? > ... parameters ) throws IOException { } }
invokespecial ( El . getMethod ( cls , name , parameters ) ) ;
public class MaterialPathAnimator { /** * Helper method to apply the path animator with callback . * @ param source Source widget to apply the Path Animator * @ param target Target widget to apply the Path Animator * @ param callback The callback method to be called when the path animator is applied */ public static void animate ( Widget source , Widget target , Functions . Func callback ) { } }
animate ( source . getElement ( ) , target . getElement ( ) , callback ) ;
public class StandardBullhornData { /** * Makes the " fast find " api call * HTTP Method : GET * @ param query fast find query string * @ param params optional FastFindParams . * @ return a ListWrapper containing the records plus some additional information */ protected FastFindListWrapper handleFastFindForEntities ( String query , FastFindParams params ) { } }
Map < String , String > uriVariables = restUriVariablesFactory . getUriVariablesForFastFind ( query , params ) ; String url = restUrlFactory . assembleFastFindUrl ( params ) ; String jsonString = this . performGetRequest ( url , String . class , uriVariables ) ; return restJsonConverter . jsonToEntityDoNotUnwrapRoot ( jsonString , FastFindListWrapper . class ) ;
public class BatchGetDevEndpointsRequest {
    /**
     * The list of DevEndpoint names, which may be the names returned from the
     * <code>ListDevEndpoint</code> operation.
     *
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setDevEndpointNames(java.util.Collection)} or
     * {@link #withDevEndpointNames(java.util.Collection)} if you want to override the existing values.
     *
     * @param devEndpointNames the DevEndpoint names to append
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public BatchGetDevEndpointsRequest withDevEndpointNames(String... devEndpointNames) {
        // Lazily create the backing list, presized to the varargs length.
        if (this.devEndpointNames == null) {
            setDevEndpointNames(new java.util.ArrayList<String>(devEndpointNames.length));
        }
        for (String ele : devEndpointNames) {
            this.devEndpointNames.add(ele);
        }
        return this;
    }
}
public class ExceptionalStream {
    /**
     * Streams the values of a single column of the given {@code ResultSet}.
     *
     * <p>It's the user's responsibility to close the input <code>resultSet</code> after the
     * stream is finished.
     *
     * @param resultSet the result set to iterate; not closed by the returned stream
     * @param columnIndex starts from 0, not 1.
     * @return a lazily-evaluated stream of column values
     */
    public static <T> ExceptionalStream<T, SQLException> rows(final ResultSet resultSet, final int columnIndex) {
        N.checkArgNotNull(resultSet, "resultSet");
        N.checkArgNotNegative(columnIndex, "columnIndex");
        final ExceptionalIterator<T, SQLException> iter = new ExceptionalIterator<T, SQLException>() {
            // JDBC column indexes are 1-based, so shift the caller's 0-based index.
            private final int newColumnIndex = columnIndex + 1;
            // True when the cursor is already advanced to an unconsumed row.
            private boolean hasNext = false;

            @Override
            public boolean hasNext() throws SQLException {
                // Only advance the cursor once per unconsumed row, however often hasNext() is called.
                if (hasNext == false) {
                    hasNext = resultSet.next();
                }
                return hasNext;
            }

            @Override
            public T next() throws SQLException {
                if (!hasNext()) {
                    throw new NoSuchElementException("No more rows");
                }
                final T next = (T) JdbcUtil.getColumnValue(resultSet, newColumnIndex);
                // Mark the buffered row as consumed.
                hasNext = false;
                return next;
            }

            @Override
            public void skip(long n) throws SQLException {
                N.checkArgNotNegative(n, "n");
                // A buffered (already fetched) row counts as one of the n rows to skip.
                final long m = hasNext ? n - 1 : n;
                JdbcUtil.skip(resultSet, m);
                hasNext = false;
            }
        };
        return newStream(iter);
    }
}
public class JMJson { /** * To json file file . * @ param < D > the type parameter * @ param dataObject the data object * @ param returnJsonFile the return json file * @ return the file */ public static < D > File toJsonFile ( D dataObject , File returnJsonFile ) { } }
try { jsonMapper . writeValue ( returnJsonFile , dataObject ) ; return returnJsonFile ; } catch ( Exception e ) { return JMExceptionManager . handleExceptionAndReturnNull ( log , e , "toJsonFile" , dataObject ) ; }
public class WebFacesConfigDescriptorImpl { /** * Returns all < code > referenced - bean < / code > elements * @ return list of < code > referenced - bean < / code > */ public List < FacesConfigReferencedBeanType < WebFacesConfigDescriptor > > getAllReferencedBean ( ) { } }
List < FacesConfigReferencedBeanType < WebFacesConfigDescriptor > > list = new ArrayList < FacesConfigReferencedBeanType < WebFacesConfigDescriptor > > ( ) ; List < Node > nodeList = model . get ( "referenced-bean" ) ; for ( Node node : nodeList ) { FacesConfigReferencedBeanType < WebFacesConfigDescriptor > type = new FacesConfigReferencedBeanTypeImpl < WebFacesConfigDescriptor > ( this , "referenced-bean" , model , node ) ; list . add ( type ) ; } return list ;
public class CPDefinitionInventoryUtil { /** * Returns the last cp definition inventory in the ordered set where uuid = & # 63 ; . * @ param uuid the uuid * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching cp definition inventory , or < code > null < / code > if a matching cp definition inventory could not be found */ public static CPDefinitionInventory fetchByUuid_Last ( String uuid , OrderByComparator < CPDefinitionInventory > orderByComparator ) { } }
return getPersistence ( ) . fetchByUuid_Last ( uuid , orderByComparator ) ;
public class Configuration { /** * { @ inheritDoc } */ @ Override public void load ( Properties properties ) { } }
String storageConfigurationClassName = properties . getProperty ( "configuration.storageConfiguration" ) ; try { storageConfiguration = ConfigurableFactory . getConfiguration ( ( Class < StorageConfiguration > ) Class . forName ( storageConfigurationClassName ) ) ; } catch ( ClassNotFoundException ex ) { throw new RuntimeException ( ex ) ; } concurrencyConfiguration = ConfigurableFactory . getConfiguration ( ConcurrencyConfiguration . class ) ;
public class CmsInheritanceContainerEditor { /** * Toggles the visibility of hidden elements . < p > */ protected void toggleElementVisibility ( ) { } }
if ( CmsDomUtil . hasClass ( HIDE_ELEMENTS_CLASS , getGroupContainerWidget ( ) . getElement ( ) ) ) { getGroupContainerWidget ( ) . removeStyleName ( HIDE_ELEMENTS_CLASS ) ; } else { getGroupContainerWidget ( ) . addStyleName ( HIDE_ELEMENTS_CLASS ) ; } getGroupContainerWidget ( ) . refreshHighlighting ( ) ;
public class MapTileGame { /** * MapTile */ @ Override public void create ( int tileWidth , int tileHeight , int widthInTile , int heightInTile ) { } }
Check . superiorStrict ( tileWidth , 0 ) ; Check . superiorStrict ( tileHeight , 0 ) ; Check . superiorStrict ( widthInTile , 0 ) ; Check . superiorStrict ( heightInTile , 0 ) ; clear ( ) ; this . tileWidth = tileWidth ; this . tileHeight = tileHeight ; this . widthInTile = widthInTile ; this . heightInTile = heightInTile ; radius = ( int ) Math . ceil ( StrictMath . sqrt ( widthInTile * widthInTile + heightInTile * ( double ) heightInTile ) ) ; tiles = new ArrayList < > ( heightInTile ) ; for ( int v = 0 ; v < heightInTile ; v ++ ) { tiles . add ( v , new ArrayList < Tile > ( widthInTile ) ) ; for ( int h = 0 ; h < widthInTile ; h ++ ) { tiles . get ( v ) . add ( h , null ) ; } }
public class AfplibPackageImpl {
    /**
     * Lazily resolves and caches the DXD EClass from the registered Afplib package.
     *
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getDXD() {
        if (dxdEClass == null) {
            // Classifier index 232 is the generated position of DXD in the package metamodel.
            dxdEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(232);
        }
        return dxdEClass;
    }
}
public class Elvis { /** * called by the Elvis operator from generated bytecode * @ param pc * @ param scope * @ param varNames * @ return */ public static boolean operate ( PageContext pc , double scope , Collection . Key [ ] varNames ) { } }
return _operate ( pc , scope , varNames , 0 ) ;
public class _SharedRendererUtils { /** * This method is different in the two versions of _ SharedRendererUtils . */ private static void log ( FacesContext context , String msg , Exception e ) { } }
log . log ( Level . SEVERE , msg , e ) ;
public class OfferService { /** * Creates an offer via the API . * @ param amount Amount in cents & rsaquo ; 0. * @ param currency ISO 4217 formatted currency code . * @ param interval Defining how often the { @ link Client } should be charged . * @ param name Your name for this offer * @ param trialPeriodDays Give it a try or charge directly . Can be < code > null < / code > . * @ return { @ link Offer } object with id , which represents a PAYMILL offer . */ public Offer create ( Integer amount , String currency , Interval . Period interval , String name , Integer trialPeriodDays ) { } }
ValidationUtils . validatesAmount ( amount ) ; ValidationUtils . validatesCurrency ( currency ) ; ValidationUtils . validatesIntervalPeriod ( interval ) ; ValidationUtils . validatesName ( name ) ; ValidationUtils . validatesTrialPeriodDays ( trialPeriodDays ) ; ParameterMap < String , String > params = new ParameterMap < String , String > ( ) ; params . add ( "amount" , String . valueOf ( amount ) ) ; params . add ( "currency" , currency ) ; params . add ( "interval" , interval . toString ( ) ) ; params . add ( "name" , name ) ; if ( trialPeriodDays != null ) params . add ( "trial_period_days" , String . valueOf ( trialPeriodDays ) ) ; return RestfulUtils . create ( OfferService . PATH , params , Offer . class , super . httpClient ) ;
public class FileUtil {
    /**
     * Creates a file from the supplied root using the specified child components. For example:
     * <code>newFile(new File("dir"), "subdir", "anotherdir", "file.txt")</code> creates a file
     * with the Unix path <code>dir/subdir/anotherdir/file.txt</code>.
     */
    public static File newFile(File root, String... parts) {
        File result = root;
        // Append each component as a child of the previous path.
        for (int i = 0; i < parts.length; i++) {
            result = new File(result, parts[i]);
        }
        return result;
    }
}
public class Rapids {
    /**
     * Parse and return the next expression from the rapids string, dispatching on the first
     * non-whitespace character:
     * '(' a nested function application expression ')'
     * '{' a nested function definition expression '}'
     * '[' a numeric list expression, till ']'
     * '"' a String (double quote): attached_token
     * "'" a String (single quote): attached_token
     * digits: a double
     * letters or other specials: an ID
     */
    private AstRoot parseNext() {
        switch (skipWS()) {
            case '(':
                return parseFunctionApplication();
            case '{':
                return parseFunctionDefinition();
            case '[':
                return parseList();
            case '\"':
            case '\'':
                return new AstStr(string());
            case ' ':
                // skipWS signals end-of-input with a space character.
                throw new IllegalASTException("Expected an expression but ran out of text");
            default:
                // Anything else is a number or an identifier.
                return parseNumberOrId();
        }
    }
}
public class JobID { /** * Construct a JobId object from given string * @ return constructed JobId object or null if the given String is null * @ throws IllegalArgumentException if the given string is malformed */ public static JobID forName ( String str ) throws IllegalArgumentException { } }
if ( str == null ) return null ; try { String [ ] parts = str . split ( "_" ) ; if ( parts . length == 3 ) { if ( parts [ 0 ] . equals ( JOB ) ) { return new org . apache . hadoop . mapred . JobID ( parts [ 1 ] , Integer . parseInt ( parts [ 2 ] ) ) ; } } } catch ( Exception ex ) { // fall below } throw new IllegalArgumentException ( "JobId string : " + str + " is not properly formed" ) ;
public class ColumnPrefixDistributedRowLock { /** * Delete locks columns . Set force = true to remove locks that haven ' t * expired yet . * This operation first issues a read to cassandra and then deletes columns * in the response . * @ param force - Force delete of non expired locks as well * @ return Map of locks released * @ throws Exception */ public Map < String , Long > releaseLocks ( boolean force ) throws Exception { } }
Map < String , Long > locksToDelete = readLockColumns ( ) ; MutationBatch m = keyspace . prepareMutationBatch ( ) . setConsistencyLevel ( consistencyLevel ) ; ColumnListMutation < String > row = m . withRow ( columnFamily , key ) ; long now = getCurrentTimeMicros ( ) ; for ( Entry < String , Long > c : locksToDelete . entrySet ( ) ) { if ( force || ( c . getValue ( ) > 0 && c . getValue ( ) < now ) ) { row . deleteColumn ( c . getKey ( ) ) ; } } m . execute ( ) ; return locksToDelete ;
public class NodeTypeManagerImpl { /** * JSR - 170 stuff = = = = = */ public NodeType findNodeType ( InternalQName nodeTypeName ) throws NoSuchNodeTypeException , RepositoryException { } }
NodeTypeData ntdata = typesManager . getNodeType ( nodeTypeName ) ; if ( ntdata != null ) { return new NodeTypeImpl ( ntdata , typesManager , this , locationFactory , valueFactory , dataManager ) ; } throw new NoSuchNodeTypeException ( "Nodetype not found " + nodeTypeName . getAsString ( ) ) ;
public class E { /** * Throws out an { @ link InvalidArgException } with error message specified * when ` tester ` is ` true ` . * @ param tester * when ` true ` then throws out the exception . * @ param msg * the error message format pattern . * @ param args * the error message format arguments . */ public static void invalidArgIf ( boolean tester , String msg , Object ... args ) { } }
if ( tester ) { throw invalidArg ( msg , args ) ; }
public class CollectionUtil {
    /**
     * Copies the given elements into a new {@link Vector}, preserving order.
     *
     * @param objects elements to copy; must not be a {@code null} array
     *                (a {@code NullPointerException} results, matching {@link Arrays#asList})
     * @param <T>     element type
     * @return a new mutable {@link Vector} containing {@code objects} in order
     */
    @SafeVarargs // safe: the varargs array is only read, never stored or exposed
    public static <T> Vector<T> toVector(T... objects) {
        return new Vector<T>(Arrays.asList(objects));
    }
}
public class LocalisationManager { /** * Method getXmitQueuePoint * < p > Return the itemstream representing a transmit queue to a remote ME * @ param meUuid * @ return */ public PtoPXmitMsgsItemStream getXmitQueuePoint ( SIBUuid8 meUuid ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "getXmitQueuePoint" , meUuid ) ; PtoPXmitMsgsItemStream stream = null ; if ( _xmitQueuePoints != null ) { synchronized ( _xmitQueuePoints ) { stream = ( PtoPXmitMsgsItemStream ) _xmitQueuePoints . get ( meUuid ) ; } } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "getXmitQueuePoint" , stream ) ; return stream ;
public class RootMetricContext { /** * Start all { @ link ContextAwareReporter } s managed by the { @ link RootMetricContext } . */ public void startReporting ( ) { } }
this . reportingStarted = true ; for ( ContextAwareReporter reporter : this . reporters ) { try { reporter . start ( ) ; } catch ( Throwable throwable ) { log . error ( String . format ( "Failed to start reporter with class %s" , reporter . getClass ( ) . getCanonicalName ( ) ) , throwable ) ; } }
public class HttpServerImpl { /** * Add a HttpHandler to Jetty Handler . * @ param httpHandler */ @ Override public void addHttpHandler ( final HttpHandler httpHandler ) { } }
LOG . log ( Level . INFO , "addHttpHandler: {0}" , httpHandler . getUriSpecification ( ) ) ; jettyHandler . addHandler ( httpHandler ) ;
public class WhileyFileParser { /** * Parse a sequence of arguments separated by commas that ends in a * right - brace : * < pre > * ArgumentList : : = [ Expr ( ' , ' Expr ) * ] ' ) ' * < / pre > * Note , when this function is called we ' re assuming the left brace was * already parsed . * @ param scope * The enclosing scope for this statement , which determines the * set of visible ( i . e . declared ) variables and also the current * indentation level . * @ param terminated * This indicates that the expression is known to be terminated * ( or not ) . An expression that ' s known to be terminated is one * which is guaranteed to be followed by something . This is * important because it means that we can ignore any newline * characters encountered in parsing this expression , and that * we ' ll never overrun the end of the expression ( i . e . because * there ' s guaranteed to be something which terminates this * expression ) . A classic situation where terminated is true is * when parsing an expression surrounded in braces . In such case , * we know the right - brace will always terminate this expression . * @ return */ private Tuple < Expr > parseInvocationArguments ( EnclosingScope scope ) { } }
boolean firstTime = true ; ArrayList < Expr > args = new ArrayList < > ( ) ; while ( eventuallyMatch ( RightBrace ) == null ) { if ( ! firstTime ) { match ( Comma ) ; } else { firstTime = false ; } // NOTE : we require the following expression be a " non - tuple " // expression . That is , it cannot be composed using ' , ' unless // braces enclose the entire expression . This is because the outer // invocation expression is used ' , ' to distinguish arguments . // However , expression is guaranteed to be terminated either by ' ) ' // or by ' , ' . Expr e = parseExpression ( scope , true ) ; args . add ( e ) ; } return new Tuple < > ( args ) ;
public class ShardedCounterServiceImpl { /** * Does the work of incrementing or decrementing the value of a single shard for the counter named * { @ code counterName } . * @ param counterName * @ param amount The amount to mutate a counter shard by . This value will be negative for a decrement , and positive * for an increment . * @ param incrementOperationId * @ param optShardNumber An optionally specified shard number to increment . If not specified , a random shard number * will be chosen . * @ return An instance of { @ link CounterOperation } with information about the increment / decrement . */ private CounterOperation mutateCounterShard ( final String counterName , final long amount , final Optional < Integer > optShardNumber , final UUID incrementOperationId ) { } }
// Precondition Checks are performed by calling methods . // This get is transactional and strongly consistent , but we perform it here so that the increment doesn ' t have // to be part of an XG transaction . Since the CounterData isn ' t expected to mutate that often , we want // increment / decrement operations to be as speedy as possibly . In this way , the IncrementWork can take place in // a non - XG transaction . final CounterDataGetCreateContainer counterDataGetCreateContainer = this . getOrCreateCounterData ( counterName ) ; final CounterData counterData = counterDataGetCreateContainer . getCounterData ( ) ; // Create the Work to be done for this increment , which will be done inside of a TX . See // " https : / / developers . google . com / appengine / docs / java / datastore / transactions # Java _ Isolation _ and _ consistency " final Work < CounterOperation > atomicIncrementShardWork = new IncrementShardWork ( counterData , incrementOperationId , optShardNumber , amount , counterDataGetCreateContainer . isNewCounterDataCreated ( ) ) ; // Note that this operation is idempotent from the perspective of a ConcurrentModificationException . In that // case , the increment operation will fail and will not have been applied . An Objectify retry of the // increment / decrement will occur , however and a successful increment / decrement will only ever happen once ( if // the Appengine datastore is functioning properly ) . See the Javadoc in the API about DatastoreTimeoutException // or // DatastoreFailureException in cases where transactions have been committed and eventually will be applied // successfully . " // We use the " counterShardOperationInTx " to force this thread to wait until the work inside of // " atomicIncrementShardWork " completes . This is because we don ' t want to increment memcache ( below ) until after // that operation ' s transaction successfully commits . final CounterOperation counterShardOperationInTx = ObjectifyService . ofy ( ) . 
transact ( atomicIncrementShardWork ) ; // Try to increment this counter in memcache atomically , but only if we ' re not inside of a parent caller ' s // transaction . If that ' s the case , then we can ' t know if the parent TX will fail upon commit , which would // happen after our call to memcache . if ( isParentTransactionActive ( ) ) { // If a parent - transaction is active , then don ' t update memcache . Instead , clear it out since we can ' t know // if the parent commit will actually stick . this . memcacheSafeDelete ( counterName ) ; } else { // Otherwise , try to increment memcache . If the memcache operation fails , it ' s ok because memcache is merely // a cache of the actual count data , and will eventually become accurate when the cache is reloaded via a // call to # getCount . long amountToMutateCache = counterShardOperationInTx . getAppliedAmount ( ) ; if ( amount < 0 ) { amountToMutateCache *= - 1L ; } this . incrementMemcacheAtomic ( counterName , amountToMutateCache ) ; } return counterShardOperationInTx ;
public class DisassociateQualificationFromWorkerRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DisassociateQualificationFromWorkerRequest disassociateQualificationFromWorkerRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( disassociateQualificationFromWorkerRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( disassociateQualificationFromWorkerRequest . getWorkerId ( ) , WORKERID_BINDING ) ; protocolMarshaller . marshall ( disassociateQualificationFromWorkerRequest . getQualificationTypeId ( ) , QUALIFICATIONTYPEID_BINDING ) ; protocolMarshaller . marshall ( disassociateQualificationFromWorkerRequest . getReason ( ) , REASON_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class FutureShell { /** * Set the Future to delegate calls to * @ param delegate the delegate future */ public synchronized void setDelegate ( Future < V > delegate ) { } }
if ( delegateHolder . isCancelled ( ) ) { delegate . cancel ( mayInterruptWhenCancellingDelegate ) ; } else { this . delegate = delegate ; delegateHolder . complete ( delegate ) ; }
public class ListPolicyAttachmentsRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListPolicyAttachmentsRequest listPolicyAttachmentsRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listPolicyAttachmentsRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listPolicyAttachmentsRequest . getDirectoryArn ( ) , DIRECTORYARN_BINDING ) ; protocolMarshaller . marshall ( listPolicyAttachmentsRequest . getPolicyReference ( ) , POLICYREFERENCE_BINDING ) ; protocolMarshaller . marshall ( listPolicyAttachmentsRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; protocolMarshaller . marshall ( listPolicyAttachmentsRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; protocolMarshaller . marshall ( listPolicyAttachmentsRequest . getConsistencyLevel ( ) , CONSISTENCYLEVEL_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class AVIMConversation { /** * 查询消息记录 , 上拉时使用 。 * @ param msgId 消息id , 从消息id开始向前查询 * @ param timestamp 查询起始的时间戳 , 返回小于这个时间的记录 。 * 客户端时间不可靠 , 请用 0 代替 System . currentTimeMillis ( ) * @ param limit 返回条数限制 * @ param callback */ public void queryMessages ( final String msgId , final long timestamp , final int limit , final AVIMMessagesQueryCallback callback ) { } }
if ( StringUtil . isEmpty ( msgId ) && timestamp == 0 ) { this . queryMessages ( limit , callback ) ; return ; } // 如果屏蔽了本地缓存则全部走网络 if ( ! AVIMOptions . getGlobalOptions ( ) . isMessageQueryCacheEnabled ( ) ) { queryMessagesFromServer ( msgId , timestamp , limit , null , 0 , new AVIMMessagesQueryCallback ( ) { @ Override public void done ( List < AVIMMessage > messages , AVIMException e ) { if ( callback != null ) { if ( e != null ) { callback . internalDone ( e ) ; } else { callback . internalDone ( messages , null ) ; } } } } ) ; return ; } // 先去本地缓存查询消息 storage . getMessage ( msgId , timestamp , conversationId , new AVIMMessageStorage . StorageMessageCallback ( ) { @ Override public void done ( final AVIMMessage indicatorMessage , final boolean isIndicateMessageBreakPoint ) { if ( indicatorMessage == null || isIndicateMessageBreakPoint ) { String startMsgId = msgId ; long startTS = timestamp ; int requestLimit = limit ; queryMessagesFromServer ( startMsgId , startTS , requestLimit , null , 0 , new AVIMMessagesQueryCallback ( ) { @ Override public void done ( List < AVIMMessage > messages , AVIMException e ) { if ( e != null ) { callback . internalDone ( e ) ; } else { List < AVIMMessage > cachedMsgs = new LinkedList < AVIMMessage > ( ) ; if ( indicatorMessage != null ) { // add indicatorMessage to remove breakpoint . cachedMsgs . add ( indicatorMessage ) ; } if ( messages != null ) { cachedMsgs . addAll ( messages ) ; } processContinuousMessages ( cachedMsgs ) ; queryMessagesFromCache ( msgId , timestamp , limit , callback ) ; } } } ) ; } else { // 本地缓存过而且不是breakPoint storage . getMessages ( msgId , timestamp , limit , conversationId , new AVIMMessageStorage . StorageQueryCallback ( ) { @ Override public void done ( List < AVIMMessage > messages , List < Boolean > breakpoints ) { processStorageQueryResult ( messages , breakpoints , msgId , timestamp , limit , callback ) ; } } ) ; } } } ) ;
public class WaitQueue { /** * Signal one waiting thread */ public boolean signal ( ) { } }
if ( ! hasWaiters ( ) ) return false ; while ( true ) { RegisteredSignal s = queue . poll ( ) ; if ( s == null || s . signal ( ) != null ) return s != null ; }
public class MetaTypeRegistry { /** * - - - old api */ RegistryEntry getRegistryEntry ( ConfigElement element ) { } }
if ( element . getParent ( ) == null || element . childAttributeName == null ) return getRegistryEntryByPidOrAlias ( element . getNodeName ( ) ) ; Map < String , RegistryEntry > parentMap = childAliasMap . get ( element . childAttributeName ) ; if ( parentMap == null ) return null ; return parentMap . get ( element . getParent ( ) . getNodeName ( ) ) ;
public class ExpressionEvaluatorManager {
    /**
     * Evaluates {@code expression} via the "active" ExpressionEvaluator for the
     * given page context, coercing the result to {@code expectedType}.
     *
     * @param attributeName name of the attribute being evaluated (for diagnostics)
     * @param expression    the expression text to evaluate
     * @param expectedType  the type the result should be coerced to
     * @param tag           the tag in whose context evaluation occurs
     * @param pageContext   the current page context
     * @return the evaluation result
     * @throws JspException if evaluation fails
     */
    public static Object evaluate(String attributeName, String expression, Class expectedType,
            Tag tag, PageContext pageContext) throws JspException {
        // Delegate straight to the shared evaluator instance.
        return EVALUATOR.evaluate(attributeName, expression, expectedType, tag, pageContext);
    }
}
public class ImageComponent { /** * Preforms all necessary actions to ensure that the viewer is resized to its proper size . It does that by invoking * { @ code validate ( ) } on the viewer ' s validateRoot . It also issues a { @ code repaint ( ) } . */ private void resizeNow ( ) { } }
invalidate ( ) ; // find the validate root ; adapted from the package - private SwingUtilities . getValidateRoot Container root = null ; Container c = this ; for ( ; c != null ; c = c . getParent ( ) ) { if ( ! c . isDisplayable ( ) || c instanceof CellRendererPane ) { return ; } if ( c . isValidateRoot ( ) ) { root = c ; break ; } } if ( root == null ) return ; for ( ; c != null ; c = c . getParent ( ) ) { if ( ! c . isDisplayable ( ) || ! c . isVisible ( ) ) { return ; } if ( c instanceof Window || c instanceof Applet ) { break ; } } if ( c == null ) return ; root . validate ( ) ; repaint ( ) ;
public class HttpServer { /** * Creates a new downstream connection as { @ link LinkedIOSubchannel } * of the network connection , a { @ link HttpRequestDecoder } and a * { @ link HttpResponseEncoder } . * @ param event * the accepted event * @ param netChannel the network - level subchannel the connection was accepted on */ @ Handler ( channels = NetworkChannel . class ) public void onAccepted ( Accepted event , IOSubchannel netChannel ) { } }
// All per-connection wiring happens inside the WebAppMsgChannel constructor, so the
// new instance is intentionally not kept in a local variable here.
// NOTE(review): assumes the constructor registers the channel with the framework
// (constructor-only usage suggests self-registration) - confirm in WebAppMsgChannel.
new WebAppMsgChannel ( event , netChannel ) ;
public class TCPChannelFactory { /** * @ see com . ibm . wsspi . channelfw . ChannelFactory # destroy ( ) */ public void destroy ( ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . entry ( tc , "destroy" ) ; } // Go through the hashtable and destroy all long - lived , single instance , // resources related to a given TCP Channel type ( NIO , AIO , etc . . . ) for ( ChannelTermination ct : this . terminationList . values ( ) ) { ct . terminate ( ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "destroy" ) ; }
public class coparameter { /** * Use this API to unset the properties of coparameter resource . * Properties that need to be unset are specified in args array . */ public static base_response unset ( nitro_service client , coparameter resource , String [ ] args ) throws Exception { } }
coparameter unsetresource = new coparameter ( ) ; return unsetresource . unset_resource ( client , args ) ;
public class AuthenticationProtocol { /** * Get a list of available authentication methods for the user . It is * advisable to call this method after contsructing the protocol instance * and setting a < a href = " BannerDisplay . html " > BannerDisplay < / a > . If the * server has a banner message to display it is most likely that the server * will send it before completing this list . * @ param username * @ param servicename * @ return a comma delimited list of authentication methods . * @ throws SshException */ public String getAuthenticationMethods ( String username , String servicename ) throws SshException { } }
sendRequest ( username , servicename , "none" , null ) ; try { readMessage ( ) ; transport . disconnect ( TransportProtocol . PROTOCOL_ERROR , "Unexpected response received from Authentication Protocol" ) ; throw new SshException ( "Unexpected response received from Authentication Protocol" , SshException . PROTOCOL_VIOLATION ) ; } catch ( AuthenticationResult result ) { state = result . getResult ( ) ; EventServiceImplementation . getInstance ( ) . fireEvent ( ( new Event ( this , J2SSHEventCodes . EVENT_AUTHENTICATION_METHODS_RECEIVED , true ) ) . addAttribute ( J2SSHEventCodes . ATTRIBUTE_AUTHENTICATION_METHODS , result . getAuthenticationMethods ( ) ) ) ; return result . getAuthenticationMethods ( ) ; }
public class GraphInferenceGrpcClient { /** * Runs inference on the remote graph using a custom { @ link OperandsAdapter } to * convert between the caller ' s operand type and the wire representation . * @ param graphId id of the graph to execute * @ param value caller - side operands to feed to the graph * @ param adapter converts { @ code value } to the wire form and the result back * @ param < T > caller - side operand type * @ return the inference result converted back to { @ code T } via the adapter */ public < T > T output ( long graphId , T value , OperandsAdapter < T > adapter ) { } }
// Pipeline: adapter.input(value) -> wire form; this.output(graphId, ...) performs the
// remote call; adapter.output(...) maps the reply back to the caller's type T.
return adapter . output ( this . output ( graphId , adapter . input ( value ) ) ) ;
public class Packer { /** * Put native long in variable length format ( support negative value , but size is longer ) * @ param value * @ return * @ see # getVLong ( ) */ public Packer putVLong ( long value ) { } }
ensureCapacity ( bufPosition + 8 + 2 ) ; // org . apache . lucene . util . packed . AbstractBlockPackedWriter int i = 0 ; while ( ( ( value & ~ 0x7FL ) != 0L ) && ( ( i += 7 ) < 64 ) ) { buf [ bufPosition ++ ] = ( ( byte ) ( ( value & 0x7FL ) | 0x80L ) ) ; value >>>= 7 ; } buf [ bufPosition ++ ] = ( ( byte ) value ) ; return this ;
public class BulkProcessor { /** * Logs flushing messages and performs backoff waiting if there is a wait time for retry . */ private void initFlushOperation ( String bulkLoggingID , boolean retryOperation , long retriedDocs , long waitTime ) { } }
if ( retryOperation ) { if ( waitTime > 0L ) { debugLog ( bulkLoggingID , "Retrying [%d] entries after backing off for [%s] ms" , retriedDocs , TimeValue . timeValueMillis ( waitTime ) ) ; try { Thread . sleep ( waitTime ) ; } catch ( InterruptedException e ) { debugLog ( bulkLoggingID , "Thread interrupted - giving up on retrying..." ) ; throw new EsHadoopException ( "Thread interrupted - giving up on retrying..." , e ) ; } } else { debugLog ( bulkLoggingID , "Retrying [%d] entries immediately (without backoff)" , retriedDocs ) ; } } else { debugLog ( bulkLoggingID , "Sending batch of [%d] bytes/[%s] entries" , data . length ( ) , dataEntries ) ; }
public class Smb2CloseRequest { /** * Builds the response object paired with this close request , carrying the * request ' s file id and file name forward so the response can be matched to * the handle being closed . * { @ inheritDoc } * @ see jcifs . internal . smb2 . ServerMessageBlock2 # createResponse ( jcifs . Configuration , * jcifs . internal . smb2 . ServerMessageBlock2) */ @ Override protected Smb2CloseResponse createResponse ( CIFSContext tc , ServerMessageBlock2Request < Smb2CloseResponse > req ) { } }
// Note: the req parameter is unused; the response is built solely from this
// request's own state (fileId, fileName) plus the context's configuration.
return new Smb2CloseResponse ( tc . getConfig ( ) , this . fileId , this . fileName ) ;
public class CmsGitCheckin { /** * Adds a module to the modules that should be exported . * If called at least once , the explicitly added modules will be exported * instead of the default modules . * @ param moduleName the name of the module to export . */ public void addModuleToExport ( final String moduleName ) { } }
// Lazily create the export set on first use; a non-null set overrides the defaults.
// NOTE(review): not synchronized - assumes configuration happens on one thread; confirm.
if ( m_modulesToExport == null ) { m_modulesToExport = new HashSet < String > ( ) ; } m_modulesToExport . add ( moduleName ) ;