signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class DSClientFactory { /** * Gets the custom retry policy . * @ param props * the props * @ return the custom retry policy * @ throws ClassNotFoundException * @ throws IllegalAccessException * @ throws NoSuchMethodException * @ throws InvocationTargetException * @ throws Exception * the exception */ private com . datastax . driver . core . policies . RetryPolicy getCustomRetryPolicy ( Properties props ) { } }
String customRetryPolicy = ( String ) props . get ( CUSTOM_RETRY_POLICY ) ; Class < ? > clazz = null ; Method getter = null ; try { clazz = Class . forName ( customRetryPolicy ) ; com . datastax . driver . core . policies . RetryPolicy retryPolicy = ( com . datastax . driver . core . policies . RetryPolicy ) KunderaCoreUtils . createNewInstance ( clazz ) ; if ( retryPolicy != null ) { return retryPolicy ; } getter = clazz . getDeclaredMethod ( GET_INSTANCE ) ; return ( com . datastax . driver . core . policies . RetryPolicy ) getter . invoke ( null , ( Object [ ] ) null ) ; } catch ( ClassNotFoundException e ) { logger . error ( e . getMessage ( ) ) ; throw new KunderaException ( "Please make sure class " + customRetryPolicy + " set in property file exists in classpath " + e . getMessage ( ) ) ; } catch ( IllegalAccessException e ) { logger . error ( e . getMessage ( ) ) ; throw new KunderaException ( "Method " + getter . getName ( ) + " must be declared public " + e . getMessage ( ) ) ; } catch ( NoSuchMethodException e ) { logger . error ( e . getMessage ( ) ) ; throw new KunderaException ( "Please make sure getter method of " + clazz . getSimpleName ( ) + " is named \"getInstance()\"" ) ; } catch ( InvocationTargetException e ) { logger . error ( e . getMessage ( ) ) ; throw new KunderaException ( "Error while executing \"getInstance()\" method of Class " + clazz . getSimpleName ( ) + ": " + e . getMessage ( ) ) ; }
public class AmazonElasticFileSystemClient { /** * Enables lifecycle management by creating a new < code > LifecycleConfiguration < / code > object . A * < code > LifecycleConfiguration < / code > object defines when files in an Amazon EFS file system are automatically * transitioned to the lower - cost EFS Infrequent Access ( IA ) storage class . A < code > LifecycleConfiguration < / code > * applies to all files in a file system . * Each Amazon EFS file system supports one lifecycle configuration , which applies to all files in the file system . * If a < code > LifecycleConfiguration < / code > object already exists for the specified file system , a * < code > PutLifecycleConfiguration < / code > call modifies the existing configuration . A * < code > PutLifecycleConfiguration < / code > call with an empty < code > LifecyclePolicies < / code > array in the request * body deletes any existing < code > LifecycleConfiguration < / code > and disables lifecycle management . * < note > * You can enable lifecycle management only for EFS file systems created after the release of EFS infrequent access . * < / note > * In the request , specify the following : * < ul > * < li > * The ID for the file system for which you are creating a lifecycle management configuration . * < / li > * < li > * A < code > LifecyclePolicies < / code > array of < code > LifecyclePolicy < / code > objects that define when files are moved * to the IA storage class . The array can contain only one < code > " TransitionToIA " : " AFTER _ 30 _ DAYS " < / code > * < code > LifecyclePolicy < / code > item . * < / li > * < / ul > * This operation requires permissions for the < code > elasticfilesystem : PutLifecycleConfiguration < / code > operation . * To apply a < code > LifecycleConfiguration < / code > object to an encrypted file system , you need the same AWS Key * Management Service ( AWS KMS ) permissions as when you created the encrypted file system . 
* @ param putLifecycleConfigurationRequest * @ return Result of the PutLifecycleConfiguration operation returned by the service . * @ throws BadRequestException * Returned if the request is malformed or contains an error such as an invalid parameter value or a missing * required parameter . * @ throws InternalServerErrorException * Returned if an error occurred on the server side . * @ throws FileSystemNotFoundException * Returned if the specified < code > FileSystemId < / code > value doesn ' t exist in the requester ' s AWS account . * @ throws IncorrectFileSystemLifeCycleStateException * Returned if the file system ' s lifecycle state is not " available " . * @ sample AmazonElasticFileSystem . PutLifecycleConfiguration * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / elasticfilesystem - 2015-02-01 / PutLifecycleConfiguration " * target = " _ top " > AWS API Documentation < / a > */ @ Override public PutLifecycleConfigurationResult putLifecycleConfiguration ( PutLifecycleConfigurationRequest request ) { } }
request = beforeClientExecution ( request ) ; return executePutLifecycleConfiguration ( request ) ;
public class FastUnsafeOffHeapMemoryInitialiser { /** * protected for test purposes ( this is also why the method cannot be static ) */ protected void unsafeZeroMemoryOptimisedForSmallBuffer ( long address , long sizeInBytes ) { } }
long endAddress = address + sizeInBytes ; long endOfLastLong = address + ( ( sizeInBytes >> 3 ) << 3 ) ; for ( long i = address ; i < endOfLastLong ; i += 8L ) { UNSAFE . putLong ( i , 0L ) ; } for ( long i = endOfLastLong ; i < endAddress ; i ++ ) { UNSAFE . putByte ( i , ( byte ) 0 ) ; }
public class ServiceStatistics {

    /**
     * Returns <code>true</code> if the given <code>throwable</code> or one of
     * its causes is an instance of one of the given <code>throwableTypes</code>.
     * Cyclic cause chains are detected and treated as "not found".
     */
    public static boolean containsThrowableOfType(Throwable throwable, Class<?>... throwableTypes) {
        final List<Throwable> visited = new ArrayList<Throwable>();
        for (Throwable current = throwable; current != null; current = current.getCause()) {
            if (visited.contains(current)) {
                // infinite loop in causes
                return false;
            }
            for (Class<?> candidateType : throwableTypes) {
                if (candidateType.isAssignableFrom(current.getClass())) {
                    return true;
                }
            }
            visited.add(current);
        }
        // end of the list of causes without a match
        return false;
    }
}
public class PriorityQueue { /** * Reconstitutes the { @ code PriorityQueue } instance from a stream * ( that is , deserializes it ) . * @ param s the stream * @ throws ClassNotFoundException if the class of a serialized object * could not be found * @ throws java . io . IOException if an I / O error occurs */ private void readObject ( java . io . ObjectInputStream s ) throws java . io . IOException , ClassNotFoundException { } }
// Read in size , and any hidden stuff s . defaultReadObject ( ) ; // Read in ( and discard ) array length s . readInt ( ) ; queue = new Object [ size ] ; // Read in all elements . for ( int i = 0 ; i < size ; i ++ ) queue [ i ] = s . readObject ( ) ; // Elements are guaranteed to be in " proper order " , but the // spec has never explained what that might be . heapify ( ) ;
public class CmsClientSitemapEntry { /** * Updates all entry properties apart from it ' s position - info and sub - entries . < p > * @ param source the source entry to update from */ public void update ( CmsClientSitemapEntry source ) { } }
copyMembers ( source ) ; // position values < 0 are considered as not set if ( source . getPosition ( ) >= 0 ) { setPosition ( source . getPosition ( ) ) ; }
public class PrcCheckOut {

    /**
     * <p>Retrieve new orders to redone.</p>
     *
     * @param pRqVs request scoped vars
     * @param pBur buyer
     * @return purchase with new orders to redone
     * @throws Exception - an exception
     */
    public final Purch retNewOrds(final Map<String, Object> pRqVs,
        final OnlineBuyer pBur) throws Exception {
        // Restrict ORM retrieval to the ID/version columns only; the same
        // needed-fields set is reused for every entity type below by keying
        // the request map with "<SimpleClassName>neededFields".
        Set<String> ndFl = new HashSet<String>();
        ndFl.add("itsId");
        ndFl.add("itsVersion");
        // --- own (non-S.E.) orders with status "new" (STAT=0) for this buyer ---
        String tbn = CustOrder.class.getSimpleName();
        pRqVs.put(tbn + "neededFields", ndFl);
        List<CustOrder> orders = getSrvOrm().retrieveListWithConditions(pRqVs,
            CustOrder.class, "where STAT=0 and BUYER=" + pBur.getItsId());
        pRqVs.remove(tbn + "neededFields");
        for (CustOrder cuOr : orders) {
            // redo all lines:
            // itsOwner and other data will be set farther only for used lines!!!
            // unused lines will be removed from DB
            // order-level tax lines:
            tbn = CustOrderTxLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            cuOr.setTaxes(getSrvOrm().retrieveListWithConditions(pRqVs,
                CustOrderTxLn.class, "where ITSOWNER=" + cuOr.getItsId()));
            pRqVs.remove(tbn + "neededFields");
            // goods lines:
            tbn = CustOrderGdLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            cuOr.setGoods(getSrvOrm().retrieveListWithConditions(pRqVs,
                CustOrderGdLn.class, "where ITSOWNER=" + cuOr.getItsId()));
            pRqVs.remove(tbn + "neededFields");
            // service lines:
            tbn = CustOrderSrvLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            cuOr.setServs(getSrvOrm().retrieveListWithConditions(pRqVs,
                CustOrderSrvLn.class, "where ITSOWNER=" + cuOr.getItsId()));
            pRqVs.remove(tbn + "neededFields");
            // per-goods-line item tax lines:
            tbn = CuOrGdTxLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            for (CustOrderGdLn gl : cuOr.getGoods()) {
                gl.setItTxs(getSrvOrm().retrieveListWithConditions(pRqVs,
                    CuOrGdTxLn.class, "where ITSOWNER=" + gl.getItsId()));
            }
            pRqVs.remove(tbn + "neededFields");
            // per-service-line item tax lines:
            tbn = CuOrSrTxLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            for (CustOrderSrvLn sl : cuOr.getServs()) {
                sl.setItTxs(getSrvOrm().retrieveListWithConditions(pRqVs,
                    CuOrSrTxLn.class, "where ITSOWNER=" + sl.getItsId()));
            }
            pRqVs.remove(tbn + "neededFields");
        }
        // --- S.E. seller orders with status "new" (STAT=0), same structure ---
        tbn = CuOrSe.class.getSimpleName();
        pRqVs.put(tbn + "neededFields", ndFl);
        List<CuOrSe> sorders = getSrvOrm().retrieveListWithConditions(pRqVs,
            CuOrSe.class, "where STAT=0 and BUYER=" + pBur.getItsId());
        pRqVs.remove(tbn + "neededFields");
        for (CuOrSe cuOr : sorders) {
            // order-level tax lines:
            tbn = CuOrSeTxLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            cuOr.setTaxes(getSrvOrm().retrieveListWithConditions(pRqVs,
                CuOrSeTxLn.class, "where ITSOWNER=" + cuOr.getItsId()));
            pRqVs.remove(tbn + "neededFields");
            // goods lines:
            tbn = CuOrSeGdLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            cuOr.setGoods(getSrvOrm().retrieveListWithConditions(pRqVs,
                CuOrSeGdLn.class, "where ITSOWNER=" + cuOr.getItsId()));
            pRqVs.remove(tbn + "neededFields");
            // service lines:
            tbn = CuOrSeSrLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            cuOr.setServs(getSrvOrm().retrieveListWithConditions(pRqVs,
                CuOrSeSrLn.class, "where ITSOWNER=" + cuOr.getItsId()));
            pRqVs.remove(tbn + "neededFields");
            // per-goods-line item tax lines:
            tbn = CuOrSeGdTxLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            for (CuOrSeGdLn gl : cuOr.getGoods()) {
                gl.setItTxs(getSrvOrm().retrieveListWithConditions(pRqVs,
                    CuOrSeGdTxLn.class, "where ITSOWNER=" + gl.getItsId()));
            }
            pRqVs.remove(tbn + "neededFields");
            // per-service-line item tax lines:
            tbn = CuOrSeSrTxLn.class.getSimpleName();
            pRqVs.put(tbn + "neededFields", ndFl);
            for (CuOrSeSrLn sl : cuOr.getServs()) {
                sl.setItTxs(getSrvOrm().retrieveListWithConditions(pRqVs,
                    CuOrSeSrTxLn.class, "where ITSOWNER=" + sl.getItsId()));
            }
            pRqVs.remove(tbn + "neededFields");
        }
        // bundle both order lists into the purchase result
        Purch pur = new Purch();
        pur.setOrds(orders);
        pur.setSords(sorders);
        return pur;
    }
}
public class Vector3d { /** * / * ( non - Javadoc ) * @ see org . joml . Vector3dc # reflect ( org . joml . Vector3dc , org . joml . Vector3d ) */ public Vector3d reflect ( Vector3dc normal , Vector3d dest ) { } }
return reflect ( normal . x ( ) , normal . y ( ) , normal . z ( ) , dest ) ;
public class InitReactorRunner {

    /**
     * Aggregates all the listeners into one and returns it.
     *
     * At this point plugins are not loaded yet, so we fall back to the META-INF/services look up to discover implementations.
     * As such there's no way for plugins to participate into this process.
     */
    private ReactorListener buildReactorListener() throws IOException {
        // Discover externally-registered listeners via the service loader, then
        // append the built-in logging listener.
        List<ReactorListener> r = Lists.newArrayList(
            ServiceLoader.load(InitReactorListener.class, Thread.currentThread().getContextClassLoader()));
        r.add(new ReactorListener() {
            // log level for routine start/complete/attained messages is configurable
            final Level level = Level.parse(Configuration.getStringConfigParameter("initLogLevel", "FINE"));

            public void onTaskStarted(Task t) {
                LOGGER.log(level, "Started {0}", getDisplayName(t));
            }

            public void onTaskCompleted(Task t) {
                LOGGER.log(level, "Completed {0}", getDisplayName(t));
            }

            public void onTaskFailed(Task t, Throwable err, boolean fatal) {
                // failures are always logged at SEVERE, regardless of the configured level
                LOGGER.log(SEVERE, "Failed " + getDisplayName(t), err);
            }

            public void onAttained(Milestone milestone) {
                Level lv = level;
                String s = "Attained " + milestone.toString();
                if (milestone instanceof InitMilestone) {
                    lv = Level.INFO; // noteworthy milestones --- at least while we debug problems further
                    onInitMilestoneAttained((InitMilestone) milestone);
                    s = milestone.toString();
                }
                LOGGER.log(lv, s);
            }
        });
        return new ReactorListener.Aggregator(r);
    }
}
public class ProjectFile { /** * Find the earliest task start date . We treat this as the * start date for the project . * @ return start date */ public Date getStartDate ( ) { } }
Date startDate = null ; for ( Task task : m_tasks ) { // If a hidden " summary " task is present we ignore it if ( NumberHelper . getInt ( task . getUniqueID ( ) ) == 0 ) { continue ; } // Select the actual or forecast start date . Note that the // behaviour is different for milestones . The milestone end date // is always correct , the milestone start date may be different // to reflect a missed deadline . Date taskStartDate ; if ( task . getMilestone ( ) == true ) { taskStartDate = task . getActualFinish ( ) ; if ( taskStartDate == null ) { taskStartDate = task . getFinish ( ) ; } } else { taskStartDate = task . getActualStart ( ) ; if ( taskStartDate == null ) { taskStartDate = task . getStart ( ) ; } } if ( taskStartDate != null ) { if ( startDate == null ) { startDate = taskStartDate ; } else { if ( taskStartDate . getTime ( ) < startDate . getTime ( ) ) { startDate = taskStartDate ; } } } } return ( startDate ) ;
public class KDTree { /** * Assigns instances to centers using KDTree . * @ param centersthe current centers * @ param assignmentsthe centerindex for each instance * @ param pcthe threshold value for pruning . * @ throws Exception If there is some problem * assigning instances to centers . */ public void centerInstances ( Instances centers , int [ ] assignments , double pc ) throws Exception { } }
int [ ] centList = new int [ centers . numInstances ( ) ] ; for ( int i = 0 ; i < centers . numInstances ( ) ; i ++ ) centList [ i ] = i ; determineAssignments ( m_Root , centers , centList , assignments , pc ) ;
public class ELKIServiceRegistry {

    /**
     * Try loading alternative names.
     *
     * Resolution order: value + FACTORY_POSTFIX, bare value, package-prefixed
     * value + FACTORY_POSTFIX, package-prefixed bare value, then registered aliases.
     *
     * @param restrictionClass Context class, for prepending a package name.
     * @param value Class name requested
     * @param e Cache entry, may be null
     * @param <C> Generic type
     * @return Class, or null
     */
    private static <C> Class<?> tryAlternateNames(Class<? super C> restrictionClass, String value, Entry e) {
        // single reusable buffer for all candidate name constructions
        StringBuilder buf = new StringBuilder(value.length() + 100);
        // Try with FACTORY_POSTFIX first:
        Class<?> clazz = tryLoadClass(buf.append(value).append(FACTORY_POSTFIX).toString());
        if (clazz != null) {
            return clazz;
        }
        clazz = tryLoadClass(value); // Without FACTORY_POSTFIX.
        if (clazz != null) {
            return clazz;
        }
        buf.setLength(0);
        // Try prepending the package name:
        clazz = tryLoadClass(buf.append(restrictionClass.getPackage().getName())
            .append('.').append(value).append(FACTORY_POSTFIX).toString());
        if (clazz != null) {
            return clazz;
        }
        // Remove FACTORY_POSTFIX again (truncate the buffer rather than rebuild).
        buf.setLength(buf.length() - FACTORY_POSTFIX.length());
        String value2 = buf.toString(); // Will also be used below.
        clazz = tryLoadClass(value2);
        if (clazz != null) {
            return clazz;
        }
        // Last, try aliases: pairs of (alias, target) flattened into the array.
        if (e != null && e.aliaslen > 0) {
            for (int i = 0; i < e.aliaslen; i += 2) {
                if (e.aliases[i].equalsIgnoreCase(value) || e.aliases[i].equalsIgnoreCase(value2)) {
                    return findImplementation(restrictionClass, e.aliases[++i]);
                }
            }
        }
        return null;
    }
}
public class BinaryJedis {

    /**
     * Set a timeout on the specified key. After the timeout the key will be automatically deleted by
     * the server. A key with an associated timeout is said to be volatile in Redis terminology.
     * <p>
     * Volatile keys are stored on disk like the other keys, the timeout is persistent too like all the
     * other aspects of the dataset. Saving a dataset containing expires and stopping the server does
     * not stop the flow of time as Redis stores on disk the time when the key will no longer be
     * available as Unix time, and not the remaining milliseconds.
     * <p>
     * Since Redis 2.1.3 you can update the value of the timeout of a key already having an expire
     * set. It is also possible to undo the expire at all turning the key into a normal key using the
     * {@link #persist(byte[]) PERSIST} command.
     * <p>
     * Time complexity: O(1)
     *
     * @see <a href="http://redis.io/commands/pexpire">PEXPIRE Command</a>
     * @param key the key to expire
     * @param milliseconds timeout in milliseconds
     * @return Integer reply, specifically: 1: the timeout was set. 0: the timeout was not set since
     *         the key already has an associated timeout (this may happen only in Redis versions &lt;
     *         2.1.3, Redis &gt;= 2.1.3 will happily update the timeout), or the key does not exist.
     */
    @Override
    public Long pexpire(final byte[] key, final long milliseconds) {
        // not legal inside MULTI/pipeline: replies are deferred there
        checkIsInMultiOrPipeline();
        client.pexpire(key, milliseconds);
        return client.getIntegerReply();
    }
}
public class TheMovieDbApi { /** * Get the images that have been tagged with a specific person id . * We return all of the image results with a media object mapped for each * image . * @ param personId personId * @ param page page * @ param language language * @ return * @ throws com . omertron . themoviedbapi . MovieDbException */ public ResultList < ArtworkMedia > getPersonTaggedImages ( int personId , Integer page , String language ) throws MovieDbException { } }
return tmdbPeople . getPersonTaggedImages ( personId , page , language ) ;
public class AipFace { /** * 获取用户列表接口 * @ param groupId - 用户组id ( 由数字 、 字母 、 下划线组成 ) , 长度限制128B * @ param options - 可选参数对象 , key : value都为string类型 * options - options列表 : * start 默认值0 , 起始序号 * length 返回数量 , 默认值100 , 最大值1000 * @ return JSONObject */ public JSONObject getGroupUsers ( String groupId , HashMap < String , String > options ) { } }
AipRequest request = new AipRequest ( ) ; preOperation ( request ) ; request . addBody ( "group_id" , groupId ) ; if ( options != null ) { request . addBody ( options ) ; } request . setUri ( FaceConsts . GROUP_GETUSERS ) ; request . setBodyFormat ( EBodyFormat . RAW_JSON ) ; postOperation ( request ) ; return requestServer ( request ) ;
public class SmileIOUtil { /** * Creates a { @ link SmileParser } from the inputstream with the supplied buf { @ code inBuffer } to use . */ public static SmileParser newSmileParser ( InputStream in , byte [ ] buf , int offset , int limit ) throws IOException { } }
return newSmileParser ( in , buf , offset , limit , false , new IOContext ( DEFAULT_SMILE_FACTORY . _getBufferRecycler ( ) , in , false ) ) ;
public class StampedCommonCache { /** * { @ inheritDoc } */ @ Override public V get ( final Object key ) { } }
return doWithReadLock ( c -> c . get ( key ) ) ;
public class LineNumberingClassAdapter { /** * Visits the specified method , adding line numbering . */ @ Override public MethodVisitor visitMethod ( int access , final String name , String desc , String signature , String [ ] exceptions ) { } }
MethodVisitor mv = cv . visitMethod ( access | Opcodes . ACC_SYNTHETIC , name , desc , signature , exceptions ) ; return new LineNumberingMethodAdapter ( mv , access | Opcodes . ACC_SYNTHETIC , name , desc ) { @ Override protected void onMethodEnter ( ) { this . lineNumbers = LineNumberingClassAdapter . this . lineNumbers ; super . onMethodEnter ( ) ; } } ;
public class ReduceOps {

    /**
     * Constructs a {@code TerminalOp} that implements a mutable reduce on
     * {@code double} values.
     *
     * @param <R> the type of the result
     * @param supplier a factory to produce a new accumulator of the result type
     * @param accumulator a function to incorporate a double into an
     *        accumulator
     * @param combiner a function to combine an accumulator into another
     * @return a {@code TerminalOp} implementing the reduction
     */
    public static <R> TerminalOp<Double, R> makeDouble(Supplier<R> supplier,
                                                       ObjDoubleConsumer<R> accumulator,
                                                       BinaryOperator<R> combiner) {
        Objects.requireNonNull(supplier);
        Objects.requireNonNull(accumulator);
        Objects.requireNonNull(combiner);
        // Local sink class: Box<R> holds the mutable 'state' accumulator;
        // Sink.OfDouble lets it receive primitive doubles without boxing.
        class ReducingSink extends Box<R>
                implements AccumulatingSink<Double, R, ReducingSink>, Sink.OfDouble {
            @Override
            public void begin(long size) {
                // fresh accumulator per (sub)stream traversal
                state = supplier.get();
            }

            @Override
            public void accept(double t) {
                accumulator.accept(state, t);
            }

            @Override
            public void combine(ReducingSink other) {
                // merge a sibling's partial result (parallel execution)
                state = combiner.apply(state, other.state);
            }
        }
        return new ReduceOp<Double, R, ReducingSink>(StreamShape.DOUBLE_VALUE) {
            @Override
            public ReducingSink makeSink() {
                return new ReducingSink();
            }
        };
    }
}
public class DataSource { /** * Returns the { @ link org . apache . flink . api . java . io . SplitDataProperties } for the * { @ link org . apache . flink . core . io . InputSplit } s of this DataSource * for configurations . * < p > SplitDataProperties can help to generate more efficient execution plans . * < p > < b > * IMPORTANT : Incorrect configuration of SplitDataProperties can cause wrong results ! * @ return The SplitDataProperties for the InputSplits of this DataSource . */ @ PublicEvolving public SplitDataProperties < OUT > getSplitDataProperties ( ) { } }
if ( this . splitDataProperties == null ) { this . splitDataProperties = new SplitDataProperties < OUT > ( this ) ; } return this . splitDataProperties ;
public class MongoDBCallback {

    /**
     * Derives the Java class that should back a BSON value at the given document
     * path, consulting (and populating) the path-keyed {@code classCache}.
     *
     * tests:
     *  names at different depths
     *  maps w/ other objects as keys
     *  relatedMongoObjectMap                              relatedMongoObjectMap                      Map&lt;&gt;
     *  relatedMongoObjectMap.foo                          relatedMongoObjectMap.RelatedObject
     *  nestedSimpleMongoObject                            nestedSimpleMongoObject                    SimpleObject
     *  nestedSimpleMongoObject.relatedMongoObjectMap      nestedSimpleMongoObject.relatedObjectMap   Map&lt;&gt;
     *  nestedSimpleMongoObject.relatedMongoObjectMap.bar  nestedSimpleMongoObject.relatedObjectMap.RelatedObject
     */
    private Class deriveClass1(String path, String lastPathPart, boolean array) {
        // Resolve the container: the root class at the top level, otherwise the
        // cached class for the parent path.
        Class containerClass;
        if (path.equals(lastPathPart)) {
            containerClass = rootClass;
        } else {
            containerClass = classCache.get(rootClass.getSimpleName() + "."
                + path.substring(0, path.lastIndexOf('.')));
        }
        // Enhanced DBObject subclasses expose their pre-enhancement class via a
        // static hook; unwrap to that original class before reflecting on getters.
        if (containerClass != null && DBObject.class.isAssignableFrom(containerClass)) {
            try {
                Method m = containerClass.getMethod("__getPreEnhancedClass");
                containerClass = (Class) m.invoke(null);
            } catch (Exception e) {
                logger.warn("DBObject without __getPreEnhancedClass() method. Was the container class enhanced?");
            }
        }
        // If we don't have a container class at this point, we are in a part of the result document that
        // does not correspond to the object model. Return a basic MongoDB object.
        if (containerClass == null) {
            return array ? BasicDBList.class : BasicDBObject.class;
        }
        Method getter;
        try {
            //noinspection ConstantConditions
            getter = containerClass.getMethod("get"
                + Character.toUpperCase(lastPathPart.charAt(0)) + lastPathPart.substring(1));
        } catch (NoSuchMethodException e) {
            // Missing getter: fall back to a generic BSON container. Internal
            // ("__"-prefixed) and known explain-output paths are not warned about.
            if (!EXPLAIN_PATHS_TO_IGNORE.contains(lastPathPart) && !lastPathPart.startsWith("__")) {
                logger.warn("No getter for: {} ({})", lastPathPart, e.getMessage());
            }
            return array ? BasicDBList.class : BasicDBObject.class;
        }
        Type returnType = getter.getGenericReturnType();
        String qualifiedPath = rootClass.getSimpleName() + "." + path;
        if (Class.class.isAssignableFrom(returnType.getClass())) {
            // Plain (non-generic) return type: cache and return its enhanced form.
            //noinspection unchecked
            Class enhancedClass = EnhancerHelper.getDirtyableDBObjectEnhancer((Class) returnType).getEnhancedClass();
            classCache.put(qualifiedPath, enhancedClass);
            return enhancedClass;
        } else if (ParameterizedType.class.isAssignableFrom(returnType.getClass())) {
            // Generic container: extract the element type (Map values or Iterable
            // elements), cache the raw container type for this path, and cache the
            // enhanced element class under "path." for the children.
            ParameterizedType parameterizedType = (ParameterizedType) returnType;
            Class rawType = (Class) parameterizedType.getRawType();
            Class rawClass;
            Class enhancedClass;
            if (Map.class.isAssignableFrom(rawType)) {
                rawClass = (Class) parameterizedType.getActualTypeArguments()[1];
            } else if (Iterable.class.isAssignableFrom(rawType)) {
                rawClass = (Class) parameterizedType.getActualTypeArguments()[0];
            } else {
                throw new RuntimeException("unknown type: " + rawType);
            }
            classCache.put(qualifiedPath, rawType);
            if (!DBObjectUtil.needsNoConversion(rawClass)) {
                //noinspection unchecked
                enhancedClass = EnhancerHelper.getDirtyableDBObjectEnhancer(rawClass).getEnhancedClass();
                classCache.put(qualifiedPath + ".", enhancedClass);
            }
            return rawType;
        } else {
            throw new RuntimeException("Don't know how to handle: " + qualifiedPath);
        }
    }
}
public class CorporationApi { /** * Get all corporation ALSC logs ( asynchronously ) Returns logs recorded in * the past seven days from all audit log secure containers ( ALSC ) owned by * a given corporation - - - This route is cached for up to 600 seconds - - - * Requires one of the following EVE corporation role ( s ) : Director SSO * Scope : esi - corporations . read _ container _ logs . v1 * @ param corporationId * An EVE corporation ID ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param page * Which page of results to return ( optional , default to 1) * @ param token * Access token to use if unable to set a header ( optional ) * @ param callback * The callback to be executed when the API call finishes * @ return The request call * @ throws ApiException * If fail to process the API call , e . g . serializing the request * body object */ public com . squareup . okhttp . Call getCorporationsCorporationIdContainersLogsAsync ( Integer corporationId , String datasource , String ifNoneMatch , Integer page , String token , final ApiCallback < List < CorporationContainersLogsResponse > > callback ) throws ApiException { } }
com . squareup . okhttp . Call call = getCorporationsCorporationIdContainersLogsValidateBeforeCall ( corporationId , datasource , ifNoneMatch , page , token , callback ) ; Type localVarReturnType = new TypeToken < List < CorporationContainersLogsResponse > > ( ) { } . getType ( ) ; apiClient . executeAsync ( call , localVarReturnType , callback ) ; return call ;
public class LBufferAPI { /** * Read the given source byte array , then overwrite this buffer ' s contents * @ param src source byte array * @ param srcOffset offset in source byte array to read from * @ param destOffset offset in this buffer to read to * @ param length max number of bytes to read * @ return the number of bytes read */ public int readFrom ( byte [ ] src , int srcOffset , long destOffset , int length ) { } }
int readLen = ( int ) Math . min ( src . length - srcOffset , Math . min ( size ( ) - destOffset , length ) ) ; ByteBuffer b = toDirectByteBuffer ( destOffset , readLen ) ; b . position ( 0 ) ; b . put ( src , srcOffset , readLen ) ; return readLen ;
public class HumanTaskConfig { /** * Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task . * @ param taskKeywords * Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task . */ public void setTaskKeywords ( java . util . Collection < String > taskKeywords ) { } }
if ( taskKeywords == null ) { this . taskKeywords = null ; return ; } this . taskKeywords = new java . util . ArrayList < String > ( taskKeywords ) ;
public class system { /** * Toggles the SoftKeyboard Input be careful where you call this from as if you want to * hide the keyboard and its already hidden it will be shown */ public static void toggleKeyboard ( ) { } }
InputMethodManager imm = ( ( InputMethodManager ) QuickUtils . getContext ( ) . getSystemService ( Context . INPUT_METHOD_SERVICE ) ) ; imm . toggleSoftInput ( 0 , 0 ) ;
public class LocalizationMessages {

    /**
     * Unknown parameter(s) for {0}.{1} method annotated with @OnError annotation: {2}. This method will be ignored.
     *
     * @param arg0 endpoint class name (message placeholder {0})
     * @param arg1 method name (message placeholder {1})
     * @param arg2 the unknown parameter(s) (message placeholder {2})
     * @return the localized message with the arguments substituted
     */
    public static String ENDPOINT_UNKNOWN_PARAMS(Object arg0, Object arg1, Object arg2) {
        return localizer.localize(localizableENDPOINT_UNKNOWN_PARAMS(arg0, arg1, arg2));
    }
}
public class FuncSystemProperty { /** * Retrieve a propery bundle from a specified file * @ param file The string name of the property file . The name * should already be fully qualified as path / filename * @ param target The target property bag the file will be placed into . */ public void loadPropertyFile ( String file , Properties target ) { } }
try { // Use SecuritySupport class to provide priveleged access to property file SecuritySupport ss = SecuritySupport . getInstance ( ) ; InputStream is = ss . getResourceAsStream ( ObjectFactory . findClassLoader ( ) , file ) ; // get a buffered version BufferedInputStream bis = new BufferedInputStream ( is ) ; target . load ( bis ) ; // and load up the property bag from this bis . close ( ) ; // close out after reading } catch ( Exception ex ) { // ex . printStackTrace ( ) ; throw new org . apache . xml . utils . WrappedRuntimeException ( ex ) ; }
public class ModuleDepInfo { /** * A return value of null means that the associated module should be included in the expanded * dependency list unconditionally . A return value consisting of an empty list means that it * should not be included at all . * If the list is not empty , then the list elements are the has ! plugin prefixes that should be * used with this module . One module id per list entry specifying the same module name should be * used . * { @ link TreeSet } is used to obtain predictable ordering of terms in compound has conditionals , * mostly for unit tests . * @ param formulaCache * the formula cache * @ return The list of has ! plugin prefixes for this module . */ public Collection < String > getHasPluginPrefixes ( Map < ? , ? > formulaCache ) { } }
formula = formula . simplify ( formulaCache ) ; if ( formula . isTrue ( ) ) { return null ; } if ( formula . isFalse ( ) ) { return Collections . emptySet ( ) ; } Set < String > result = new HashSet < String > ( ) ; for ( BooleanTerm term : formula ) { StringBuffer sb = new StringBuffer ( pluginName ) . append ( "!" ) ; // $ NON - NLS - 1 $ for ( BooleanVar featureVar : new TreeSet < BooleanVar > ( term ) ) { sb . append ( featureVar . name ) . append ( "?" ) ; // $ NON - NLS - 1 $ if ( ! featureVar . state ) { sb . append ( ":" ) ; // $ NON - NLS - 1 $ } } result . add ( sb . toString ( ) ) ; } return result ;
public class MockHttpSession {
  /**
   * Serializes the attributes of this session into an object that can be
   * turned into a byte array with standard Java serialization.
   * <p>
   * Non-serializable values are skipped; if such a value implements
   * {@link HttpSessionBindingListener} it is notified via {@code valueUnbound},
   * mirroring how servlet containers unbind attributes they cannot persist.
   * All attributes are cleared from this session afterwards.
   *
   * @return a map of this session's serializable attribute name/value pairs
   */
  @Nonnull
  public Serializable serializeState() {
    final ICommonsMap<String, Object> aState = new CommonsHashMap<>();
    for (final Map.Entry<String, Object> entry : m_aAttributes.entrySet()) {
      final String sName = entry.getKey();
      final Object aValue = entry.getValue();
      if (aValue instanceof Serializable) {
        aState.put(sName, aValue);
      } else {
        // Not serializable... Servlet containers usually automatically
        // unbind the attribute in this case.
        if (aValue instanceof HttpSessionBindingListener) {
          ((HttpSessionBindingListener) aValue).valueUnbound(new HttpSessionBindingEvent(this, sName, aValue));
        }
      }
    }
    // State has been moved into the returned map; drop the live attributes.
    m_aAttributes.clear();
    return aState;
  }
}
public class DatabaseStoreService { /** * Declarative Services method for setting the ResourceConfigFactory service reference * @ param ref reference to service object ; type of service object is verified */ protected void setResourceConfigFactory ( ServiceReference < ResourceConfigFactory > ref ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && LoggingUtil . SESSION_LOGGER_WAS . isLoggable ( Level . FINE ) ) { LoggingUtil . SESSION_LOGGER_WAS . logp ( Level . FINE , methodClassName , "setResourceConfigFactory" , "setting " + ref ) ; } resourceConfigFactoryRef . setReference ( ref ) ;
public class RemoteQueueSession {
  /**
   * Creates a receiver for the given queue with an optional message selector.
   *
   * @see javax.jms.QueueSession#createReceiver(javax.jms.Queue, java.lang.String)
   */
  @Override
  public QueueReceiver createReceiver(Queue queue, String messageSelector) throws JMSException {
    // Read lock: multiple creations may run concurrently; close() presumably
    // takes the write lock to exclude them -- confirm against the class.
    externalAccessLock.readLock().lock();
    try {
      checkNotClosed();
      RemoteQueueReceiver receiver = new RemoteQueueReceiver(idProvider.createID(), this, DestinationTools.asRef(queue), messageSelector);
      // Register locally first, then perform the remote handshake.
      registerConsumer(receiver);
      receiver.remoteInit();
      return receiver;
    } finally {
      externalAccessLock.readLock().unlock();
    }
  }
}
public class Interval { /** * A { @ link Comparator } that only considers the end points of the intervals . It can not and must * not be used as a standalone { @ link Comparator } . It only serves to create a more readable and * modular code . */ private int compareEnds ( Interval < T > other ) { } }
if ( end == null && other . end == null ) return 0 ; if ( end == null ) return 1 ; if ( other . end == null ) return - 1 ; int compare = end . compareTo ( other . end ) ; if ( compare != 0 ) return compare ; if ( isEndInclusive ^ other . isEndInclusive ) return isEndInclusive ? 1 : - 1 ; return 0 ;
public class AmazonECSWaiters { /** * Builds a TasksRunning waiter by using custom parameters waiterParameters and other parameters defined in the * waiters specification , and then polls until it determines whether the resource entered the desired state or not , * where polling criteria is bound by either default polling strategy or custom polling strategy . */ public Waiter < DescribeTasksRequest > tasksRunning ( ) { } }
return new WaiterBuilder < DescribeTasksRequest , DescribeTasksResult > ( ) . withSdkFunction ( new DescribeTasksFunction ( client ) ) . withAcceptors ( new TasksRunning . IsSTOPPEDMatcher ( ) , new TasksRunning . IsMISSINGMatcher ( ) , new TasksRunning . IsRUNNINGMatcher ( ) ) . withDefaultPollingStrategy ( new PollingStrategy ( new MaxAttemptsRetryStrategy ( 100 ) , new FixedDelayStrategy ( 6 ) ) ) . withExecutorService ( executorService ) . build ( ) ;
public class GeometryMergeService {
  /**
   * Unions all collected geometries (using the configured precision as a
   * buffer) and hands the merged geometry to the given callback on success.
   *
   * @param callback invoked with the union result when the operation succeeds
   */
  private void merge(final GeometryFunction callback) {
    GeometryOperationService operationService = new GeometryOperationServiceImpl();
    UnionInfo unionInfo = new UnionInfo();
    unionInfo.setUsePrecisionAsBuffer(true);
    unionInfo.setPrecision(precision);
    operationService.union(geometries, unionInfo, new Callback<Geometry, Throwable>() {
      public void onSuccess(Geometry result) {
        callback.execute(result);
      }
      public void onFailure(Throwable reason) {
        // NOTE(review): failures are only printed; the callback is never
        // notified -- confirm this silent-failure behavior is intended.
        reason.printStackTrace();
      }
    });
  }
}
public class Operation {
  /**
   * Creates the deferred "eye" (identity matrix) operation.
   * <p>
   * The size is resolved when the operation is processed: if {@code A} is a
   * matrix the output copies its dimensions; if {@code A} is an integer N the
   * output is N-by-N. Any other variable type is rejected immediately.
   *
   * @param A       size source: a {@code VariableMatrix} or {@code VariableInteger}
   * @param manager allocates the temporary output matrix variable
   * @return info holding the output variable and the operation to run later
   * @throws RuntimeException if {@code A} is neither a matrix nor an integer
   */
  public static Info eye(final Variable A, ManagerTempVariables manager) {
    Info ret = new Info();
    final VariableMatrix output = manager.createMatrix();
    ret.output = output;
    if (A instanceof VariableMatrix) {
      ret.op = new Operation("eye-m") {
        @Override
        public void process() {
          // Shape is read at process time, so A may change between calls.
          DMatrixRMaj mA = ((VariableMatrix) A).matrix;
          output.matrix.reshape(mA.numRows, mA.numCols);
          CommonOps_DDRM.setIdentity(output.matrix);
        }
      };
    } else if (A instanceof VariableInteger) {
      ret.op = new Operation("eye-i") {
        @Override
        public void process() {
          int N = ((VariableInteger) A).value;
          output.matrix.reshape(N, N);
          CommonOps_DDRM.setIdentity(output.matrix);
        }
      };
    } else {
      throw new RuntimeException("Unsupported variable type " + A);
    }
    return ret;
  }
}
public class Assignment {
  /**
   * Deserializes an assignment string, e.g.
   * {@code [din]str1->inStr, [din]inStrConst=TheString, [dout]outStr1->str1}.
   * <p>
   * The leading prefix selects INPUT vs OUTPUT; {@code ->} maps between a
   * process variable and a local variable (direction depends on the type),
   * while {@code =} assigns a URL-decoded constant.
   *
   * @param assignmentData context the new assignment is attached to
   * @param sAssignment    the serialized form; may be null/empty
   * @return the parsed Assignment, or {@code null} for a null/empty input
   */
  public static Assignment deserialize(AssignmentData assignmentData, String sAssignment) {
    if (sAssignment == null || sAssignment.isEmpty()) {
      return null;
    }
    // Parse the direction prefix and strip it off.
    VariableType assignmentType = null;
    if (sAssignment.startsWith(INPUT_ASSIGNMENT_PREFIX)) {
      assignmentType = VariableType.INPUT;
      sAssignment = sAssignment.substring(INPUT_ASSIGNMENT_PREFIX.length());
    } else if (sAssignment.startsWith(OUTPUT_ASSIGNMENT_PREFIX)) {
      assignmentType = VariableType.OUTPUT;
      sAssignment = sAssignment.substring(OUTPUT_ASSIGNMENT_PREFIX.length());
    }
    String variableName = null;
    String processVariableName = null;
    String constant = null;
    if (sAssignment.contains(ASSIGNMENT_OPERATOR_TOVARIABLE)) {
      int i = sAssignment.indexOf(ASSIGNMENT_OPERATOR_TOVARIABLE);
      // For INPUT the process variable is on the left; for OUTPUT it is on
      // the right.
      if (assignmentType == VariableType.INPUT) {
        processVariableName = sAssignment.substring(0, i);
        variableName = sAssignment.substring(i + ASSIGNMENT_OPERATOR_TOVARIABLE.length());
      } else {
        variableName = sAssignment.substring(0, i);
        processVariableName = sAssignment.substring(i + ASSIGNMENT_OPERATOR_TOVARIABLE.length());
      }
    } else if (sAssignment.contains(ASSIGNMENT_OPERATOR_TOCONSTANT)) {
      int i = sAssignment.indexOf(ASSIGNMENT_OPERATOR_TOCONSTANT);
      variableName = sAssignment.substring(0, i);
      // Constants are stored URL-encoded so they survive the list separator.
      constant = stringUtils.urlDecode(sAssignment.substring(i + ASSIGNMENT_OPERATOR_TOCONSTANT.length()));
    }
    // Create the new assignment
    return new Assignment(assignmentData, variableName, assignmentType, processVariableName, constant);
  }
}
public class BaseProfile { /** * generate ant build . xml * @ param def Definition * @ param outputDir output directory */ void generateAntXml ( Definition def , String outputDir ) { } }
try { FileWriter antfw = Utils . createFile ( "build.xml" , outputDir ) ; BuildXmlGen bxGen = new BuildXmlGen ( ) ; bxGen . generate ( def , antfw ) ; antfw . close ( ) ; } catch ( IOException ioe ) { ioe . printStackTrace ( ) ; }
public class CPOptionCategoryPersistenceImpl {
  /**
   * Returns all the cp option categories.
   * <p>
   * Delegates to the paged overload with {@code ALL_POS} bounds (no paging)
   * and no ordering comparator.
   *
   * @return the cp option categories
   */
  @Override
  public List<CPOptionCategory> findAll() {
    return findAll(QueryUtil.ALL_POS, QueryUtil.ALL_POS, null);
  }
}
public class ResourceBundleMessageSource { /** * Build the cache used to store resolved messages . * @ return The cache . */ @ Nonnull protected Map < MessageKey , Optional < String > > buildMessageCache ( ) { } }
return new ConcurrentLinkedHashMap . Builder < MessageKey , Optional < String > > ( ) . maximumWeightedCapacity ( 100 ) . build ( ) ;
public class Logger {
  /**
   * Logs a message and stack trace if DEBUG logging is enabled, or a formatted
   * message and exception description if WARN logging is enabled.
   * <p>
   * Delegates to {@code logDebug} at WARN level.
   *
   * @param cause   an exception whose stack trace is printed when DEBUG is enabled
   * @param message the message to log
   */
  public final void warnDebug(final Throwable cause, final String message) {
    logDebug(Level.WARN, cause, message);
  }
}
public class ArrayUtils { /** * Checks if the given array contains the specified value . < br > * This method works with strict reference comparison . * @ param < T > * Type of array elements and < code > value < / code > * @ param array * Array to examine * @ param value * Value to search * @ return < code > true < / code > if < code > array < / code > contains * < code > value < / code > , < code > false < / code > otherwise */ public static < T > boolean arrayContainsRef ( T [ ] array , T value ) { } }
for ( int i = 0 ; i < array . length ; i ++ ) { if ( array [ i ] == value ) { return true ; } } return false ;
public class AnnotationTypeWriterImpl {
  /**
   * Adds a gap between navigation bar elements by rendering " | " (a pipe
   * surrounded by non-breaking spaces).
   *
   * @param liNav the content tree to which the gap will be added
   */
  protected void addNavGap(Content liNav) {
    liNav.addContent(getSpace());
    liNav.addContent("|");
    liNav.addContent(getSpace());
  }
}
public class BigtableDataGrpcClient {
  /** {@inheritDoc} */
  @Override
  public ReadModifyWriteRowResponse readModifyWriteRow(ReadModifyWriteRowRequest request) {
    // Fill in the client's default app profile id when the request left it
    // unset (or otherwise overridable per shouldOverrideAppProfile).
    if (shouldOverrideAppProfile(request.getAppProfileId())) {
      request = request.toBuilder().setAppProfileId(clientDefaultAppProfileId).build();
    }
    // Synchronous unary RPC: blocks until the server returns the modified row.
    return createUnaryListener(request, readWriteModifyRpc, request.getTableName()).getBlockingResult();
  }
}
public class AuthenticationAPIClient { /** * Creates a user in a DB connection using < a href = " https : / / auth0 . com / docs / api / authentication # signup " > ' / dbconnections / signup ' endpoint < / a > * Example usage : * < pre > * { @ code * client . createUser ( " { email } " , " { password } " , " { username } " , " { database connection name } " ) * . start ( new BaseCallback < DatabaseUser > ( ) { * { @ literal } Override * public void onSuccess ( DatabaseUser payload ) { } * { @ literal } @ Override * public void onFailure ( AuthenticationException error ) { } * < / pre > * @ param email of the user and must be non null * @ param password of the user and must be non null * @ param username of the user and must be non null * @ param connection of the database to create the user on * @ return a request to start */ @ SuppressWarnings ( "WeakerAccess" ) public DatabaseConnectionRequest < DatabaseUser , AuthenticationException > createUser ( @ NonNull String email , @ NonNull String password , @ NonNull String username , @ NonNull String connection ) { } }
HttpUrl url = HttpUrl . parse ( auth0 . getDomainUrl ( ) ) . newBuilder ( ) . addPathSegment ( DB_CONNECTIONS_PATH ) . addPathSegment ( SIGN_UP_PATH ) . build ( ) ; final Map < String , Object > parameters = ParameterBuilder . newBuilder ( ) . set ( USERNAME_KEY , username ) . set ( EMAIL_KEY , email ) . set ( PASSWORD_KEY , password ) . setConnection ( connection ) . setClientId ( getClientId ( ) ) . asDictionary ( ) ; final ParameterizableRequest < DatabaseUser , AuthenticationException > request = factory . POST ( url , client , gson , DatabaseUser . class , authErrorBuilder ) . addParameters ( parameters ) ; return new DatabaseConnectionRequest < > ( request ) ;
public class ConfigUtils {
  /**
   * Returns the password stored in the config under {@code key} as raw bytes.
   * <p>
   * Delegates to the charset-aware overload using UTF-8.
   *
   * @param config Config to read from
   * @param key    Key to read from
   * @return byte array containing the UTF-8 encoded password
   */
  public static byte[] passwordBytes(AbstractConfig config, String key) {
    return passwordBytes(config, key, Charsets.UTF_8);
  }
}
public class IntList { /** * Creates and returns an unmodifiable view of the given int array that * requires only a small object allocation . * @ param array the array to wrap into an unmodifiable list * @ param length the number of values of the array to use , starting from zero * @ return an unmodifiable list view of the array */ public static List < Integer > unmodifiableView ( int [ ] array , int length ) { } }
return Collections . unmodifiableList ( view ( array , length ) ) ;
public class Logo { /** * { @ inheritDoc } */ @ Override public void deserialize ( String jsonString ) { } }
final GsonBuilder builder = new GsonBuilder ( ) ; builder . excludeFieldsWithoutExposeAnnotation ( ) ; final Gson gson = builder . create ( ) ; Logo w = gson . fromJson ( jsonString , Logo . class ) ; this . nid = w . nid ; this . brand = w . brand ;
public class AmazonConnectClient {
  /**
   * Returns a <code>UserHierarchyGroupSummaryList</code>, an array of
   * <code>HierarchyGroupSummary</code> objects describing the hierarchy groups
   * in your instance.
   *
   * @param request the ListUserHierarchyGroups request
   * @return Result of the ListUserHierarchyGroups operation returned by the service
   * @throws InvalidRequestException   the request is not valid
   * @throws InvalidParameterException one or more parameters are not valid
   * @throws ResourceNotFoundException the specified resource was not found
   * @throws ThrottlingException       the throttling limit has been exceeded
   * @throws InternalServiceException  request processing failed in the service
   */
  @Override
  public ListUserHierarchyGroupsResult listUserHierarchyGroups(ListUserHierarchyGroupsRequest request) {
    // Standard SDK pattern: run pre-execution hooks, then dispatch.
    request = beforeClientExecution(request);
    return executeListUserHierarchyGroups(request);
  }
}
public class Collectors {
  /**
   * Returns a collector that gathers streamed elements into a hash set.
   *
   * @param <T> streaming type
   * @return a collector producing a {@code HashSet} of the stream's elements
   */
  public static <T> Collector<T, Set<T>> toSet() {
    return new Collector<T, Set<T>>() {
      @Override
      public Set<T> collect(Stream<? extends T> stream) {
        // Drain the stream's iterator into a new hash set.
        return Sets.newHashSet(stream.iterator());
      }
    };
  }
}
public class MolecularFormulaManipulator {
  /**
   * Counts the occurrences of the given isotope's element in a molecular
   * formula.
   * <p>
   * Converts the isotope to its element via the formula's builder, then
   * delegates to the element-based overload.
   *
   * @param formula the formula
   * @param isotope isotope of an element
   * @return number of times the element occurs
   * @see #getElementCount(IMolecularFormula, IElement)
   */
  public static int getElementCount(IMolecularFormula formula, IIsotope isotope) {
    return getElementCount(formula, formula.getBuilder().newInstance(IElement.class, isotope));
  }
}
public class FunctionArgumentInjector { /** * We consider an expression trivial if it doesn ' t contain a conditional expression or * a function . */ static boolean mayHaveConditionalCode ( Node n ) { } }
for ( Node c = n . getFirstChild ( ) ; c != null ; c = c . getNext ( ) ) { switch ( c . getToken ( ) ) { case FUNCTION : case AND : case OR : case HOOK : return true ; default : break ; } if ( mayHaveConditionalCode ( c ) ) { return true ; } } return false ;
public class MercadoBitcoinAdapters { /** * Adapts a Transaction [ ] to a Trades Object * @ param transactions The Mercado Bitcoin transactions * @ param currencyPair ( e . g . BTC / BRL or LTC / BRL ) * @ return The XChange Trades */ public static Trades adaptTrades ( MercadoBitcoinTransaction [ ] transactions , CurrencyPair currencyPair ) { } }
List < Trade > trades = new ArrayList < > ( ) ; long lastTradeId = 0 ; for ( MercadoBitcoinTransaction tx : transactions ) { final long tradeId = tx . getTid ( ) ; if ( tradeId > lastTradeId ) { lastTradeId = tradeId ; } trades . add ( new Trade ( toOrderType ( tx . getType ( ) ) , tx . getAmount ( ) , currencyPair , tx . getPrice ( ) , DateUtils . fromMillisUtc ( tx . getDate ( ) * 1000L ) , String . valueOf ( tradeId ) ) ) ; } return new Trades ( trades , lastTradeId , Trades . TradeSortType . SortByID ) ;
public class responderpolicy { /** * Use this API to rename a responderpolicy resource . */ public static base_response rename ( nitro_service client , responderpolicy resource , String new_name ) throws Exception { } }
responderpolicy renameresource = new responderpolicy ( ) ; renameresource . name = resource . name ; return renameresource . rename_resource ( client , new_name ) ;
public class Static { /** * Creates a new array instance in a type safe way . * @ param template The original array * @ param length The length of the new array * @ param < T > The base type of the new array * @ return The new array */ @ SuppressWarnings ( "unchecked" ) public static < T > T [ ] newArrayInstance ( T [ ] template , int length ) { } }
return newArrayInstance ( ( Class < T > ) template . getClass ( ) . getComponentType ( ) , length ) ;
public class MultipleEpochsIterator {
  /**
   * Resets the iterator back to the beginning.
   *
   * @throws IllegalStateException if the underlying iterator does not support reset
   */
  @Override
  public void reset() {
    if (!iter.resetSupported()) {
      throw new IllegalStateException("Cannot reset MultipleEpochsIterator with base iter that does not support reset");
    }
    epochs = 0;
    // Remember how far the last pass got before zeroing the batch counter.
    lastBatch = batch;
    batch = 0;
    iterationsCounter.set(0);
    iter.reset();
  }
}
public class DatabaseAccountsInner {
  /**
   * Changes the failover priority for the Azure Cosmos DB database account.
   * A failover priority of 0 indicates a write region. The maximum value for a
   * failover priority = (total number of regions - 1). Failover priority values
   * must be unique for each region in which the database account exists.
   *
   * @param resourceGroupName Name of an Azure resource group
   * @param accountName       Cosmos DB database account name
   * @param failoverPolicies  List of failover policies
   * @throws IllegalArgumentException thrown if parameters fail the validation
   * @return the observable for the request
   */
  public Observable<ServiceResponse<Void>> failoverPriorityChangeWithServiceResponseAsync(String resourceGroupName, String accountName, List<FailoverPolicy> failoverPolicies) {
    // Fail fast on every required argument before touching the wire.
    if (this.client.subscriptionId() == null) {
      throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
    }
    if (resourceGroupName == null) {
      throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
    }
    if (accountName == null) {
      throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
    }
    if (this.client.apiVersion() == null) {
      throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
    }
    if (failoverPolicies == null) {
      throw new IllegalArgumentException("Parameter failoverPolicies is required and cannot be null.");
    }
    Validator.validate(failoverPolicies);
    // Wrap the list in the body envelope the REST API expects.
    FailoverPolicies failoverParameters = new FailoverPolicies();
    failoverParameters.withFailoverPolicies(failoverPolicies);
    Observable<Response<ResponseBody>> observable = service.failoverPriorityChange(this.client.subscriptionId(), resourceGroupName, accountName, this.client.apiVersion(), this.client.acceptLanguage(), failoverParameters, this.client.userAgent());
    // Long-running operation: poll until the POST completes (void result).
    return client.getAzureClient().getPostOrDeleteResultAsync(observable, new TypeToken<Void>() { }.getType());
  }
}
public class PathBasedCacheExpirationFilter { /** * / * ( non - Javadoc ) * @ see org . springframework . web . filter . GenericFilterBean # initFilterBean ( ) */ @ Override protected void initFilterBean ( ) throws ServletException { } }
if ( this . resourcesElementsProvider == null ) { final ServletContext servletContext = this . getServletContext ( ) ; this . resourcesElementsProvider = ResourcesElementsProviderUtils . getOrCreateResourcesElementsProvider ( servletContext ) ; }
public class XMLFormatter { /** * Removes both XML declaration and trims all elements . * @ param xml XML to trim . * @ return trimmed version . */ public static String trim ( String xml ) { } }
String content = removeDeclaration ( xml ) ; return trimElements ( content ) ;
public class FunctionalType {
  /**
   * Returns the functional types accepted by {@code methodName} on {@code type}.
   * <p>
   * Considers only single-parameter methods with the given name; for each, the
   * parameter type (as seen from {@code type}) is checked for being a
   * functional type.
   *
   * @param type       the declared type whose methods are examined
   * @param methodName the method name to match
   * @param elements   element utilities
   * @param types      type utilities
   * @return the functional types accepted by matching methods, in method order
   */
  public static List<FunctionalType> functionalTypesAcceptedByMethod(DeclaredType type, String methodName, Elements elements, Types types) {
    TypeElement typeElement = asElement(type);
    // Errors during method discovery are deliberately ignored here.
    return methodsOn(typeElement, elements, errorType -> { }).stream()
        .filter(method -> method.getSimpleName().contentEquals(methodName) && method.getParameters().size() == 1)
        .flatMap(method -> {
          // Resolve the method's signature as a member of the concrete type
          // so type variables are substituted.
          ExecutableType methodType = (ExecutableType) types.asMemberOf(type, method);
          TypeMirror parameter = getOnlyElement(methodType.getParameterTypes());
          return maybeFunctionalType(parameter, elements, types).map(Stream::of).orElse(Stream.of());
        })
        .collect(toList());
  }
}
public class InterfaceService {
  /**
   * Resizes the current view, if present, and broadcasts the resize message.
   *
   * @param width  new width of the screen
   * @param height new height of the screen
   */
  public void resize(final int width, final int height) {
    if (currentController != null) {
      currentController.resize(width, height);
    }
    // Posted regardless of whether a controller is attached.
    messageDispatcher.postMessage(AutumnMessage.GAME_RESIZED);
  }
}
public class IndexElasticsearchUpdater { /** * Update index settings in Elasticsearch . Read also _ update _ settings . json if exists . * @ param client Elasticsearch client * @ param root dir within the classpath * @ param index Index name * @ throws Exception if the elasticsearch API call is failing */ @ Deprecated public static void updateSettings ( Client client , String root , String index ) throws Exception { } }
String settings = IndexSettingsReader . readUpdateSettings ( root , index ) ; updateIndexWithSettingsInElasticsearch ( client , index , settings ) ;
public class JmsConnectionImpl {
  /**
   * Removes a TemporaryDestination from the list of temporary destinations
   * created by sessions under this connection.
   *
   * @param tempDest the temporary destination to be removed from the list
   */
  protected void removeTemporaryDestination(JmsTemporaryDestinationInternal tempDest) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
      SibTr.entry(this, tc, "removeTemporaryDestination", System.identityHashCode(tempDest));
    synchronized (temporaryDestinations) { // synchronize against addTemporaryDestination
      temporaryDestinations.remove(tempDest);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
      SibTr.exit(this, tc, "removeTemporaryDestination");
  }
}
public class GenericCollectionTypeResolver {
  /**
   * Extracts the generic type from the given Type object.
   *
   * @param type                the Type to check
   * @param source              the source collection/map Class that we check
   * @param typeIndex           the index of the actual type argument
   * @param typeVariableMap     mapping of type variables to concrete types
   * @param typeIndexesPerLevel type argument index per nesting level
   * @param nestingLevel        the nesting level of the target type
   * @param currentLevel        the current nested level
   * @return the generic type as Class, or {@code null} if none
   */
  private static Class<?> extractType(Type type, Class<?> source, int typeIndex, Map<TypeVariable, Type> typeVariableMap, Map<Integer, Integer> typeIndexesPerLevel, int nestingLevel, int currentLevel) {
    Type resolvedType = type;
    // Substitute a known concrete binding for a type variable, if available.
    if (type instanceof TypeVariable && typeVariableMap != null) {
      Type mappedType = typeVariableMap.get(type);
      if (mappedType != null) {
        resolvedType = mappedType;
      }
    }
    if (resolvedType instanceof ParameterizedType) {
      return extractTypeFromParameterizedType((ParameterizedType) resolvedType, source, typeIndex, typeVariableMap, typeIndexesPerLevel, nestingLevel, currentLevel);
    } else if (resolvedType instanceof Class) {
      return extractTypeFromClass((Class) resolvedType, source, typeIndex, typeVariableMap, typeIndexesPerLevel, nestingLevel, currentLevel);
    } else if (resolvedType instanceof GenericArrayType) {
      // Arrays add a nesting level; recurse into the component type.
      Type compType = ((GenericArrayType) resolvedType).getGenericComponentType();
      return extractType(compType, source, typeIndex, typeVariableMap, typeIndexesPerLevel, nestingLevel, currentLevel + 1);
    } else {
      // Wildcards and unresolved variables yield no concrete class.
      return null;
    }
  }
}
public class ClassDocImpl {
  /**
   * Returns inner classes within this class.
   *
   * @param filter include only the included inner classes if filter == true
   * @return an array of ClassDocImpl representing the visible classes defined
   *         in this class. Anonymous and local classes are not included.
   */
  public ClassDoc[] innerClasses(boolean filter) {
    ListBuffer<ClassDocImpl> innerClasses = new ListBuffer<>();
    for (Symbol sym : tsym.members().getSymbols(NON_RECURSIVE)) {
      if (sym != null && sym.kind == TYP) {
        ClassSymbol s = (ClassSymbol) sym;
        // Compiler-synthesized classes are never documented.
        if ((s.flags_field & Flags.SYNTHETIC) != 0) continue;
        if (!filter || env.isVisible(s)) {
          // prepend: member scopes iterate in reverse declaration order.
          innerClasses.prepend(env.getClassDoc(s));
        }
      }
    }
    // ### Cache classes here?
    return innerClasses.toArray(new ClassDocImpl[innerClasses.length()]);
  }
}
public class Buffer {
  /**
   * Reads the next field as length-encoded bytes (MySQL-style length-encoded
   * prefix -- presumably; confirm against the protocol this buffer implements).
   * <p>
   * The first byte selects the length width: values &lt; 251 are the length
   * itself; 251 denotes NULL; 252 = 2-byte length; 253 = 3-byte length;
   * 254 = 8-byte length.
   *
   * @return the raw binary data, or {@code null} for the NULL marker (251)
   */
  public byte[] getLengthEncodedBytes() {
    int type = this.buf[this.position++] & 0xff;
    int length;
    switch (type) {
      case 251:
        // NULL marker.
        return null;
      case 252:
        length = 0xffff & readShort();
        break;
      case 253:
        length = 0xffffff & read24bitword();
        break;
      case 254:
        // 8-byte little-endian length.
        // NOTE(review): the 64-bit value is truncated to int here; lengths
        // over Integer.MAX_VALUE would be mangled -- confirm acceptable.
        length = (int) ((buf[position++] & 0xff)
            + ((long) (buf[position++] & 0xff) << 8)
            + ((long) (buf[position++] & 0xff) << 16)
            + ((long) (buf[position++] & 0xff) << 24)
            + ((long) (buf[position++] & 0xff) << 32)
            + ((long) (buf[position++] & 0xff) << 40)
            + ((long) (buf[position++] & 0xff) << 48)
            + ((long) (buf[position++] & 0xff) << 56));
        break;
      default:
        // One-byte length (< 251).
        length = type;
        break;
    }
    byte[] tmpBuf = new byte[length];
    System.arraycopy(buf, position, tmpBuf, 0, length);
    position += length;
    return tmpBuf;
  }
}
public class ConfigFactory {
  /**
   * Sets a converter for the given type. Setting a converter via this method
   * will override any default converters but not {@code Config.ConverterClass}
   * annotations.
   * <p>
   * Delegates to the singleton instance.
   *
   * @param type      the type for which to set a converter
   * @param converter the converter class to use for the specified type
   * @since 1.0.10
   */
  public static void setTypeConverter(Class<?> type, Class<? extends Converter<?>> converter) {
    INSTANCE.setTypeConverter(type, converter);
  }
}
public class DiscountCurveInterpolation { /** * Returns the zero rate for a given maturity , i . e . , - ln ( df ( T ) ) / T where T is the given maturity and df ( T ) is * the discount factor at time $ T $ . * @ param maturity The given maturity . * @ return The zero rate . */ public double getZeroRate ( double maturity ) { } }
if ( maturity == 0 ) { return this . getZeroRate ( 1.0E-14 ) ; } return - Math . log ( getDiscountFactor ( null , maturity ) ) / maturity ;
public class InsertDependsAction { /** * Get result . * @ return result */ @ Override public String getResult ( ) { } }
final List < String > result = new ArrayList < > ( ) ; for ( final Value t : value ) { final String token = t . value . trim ( ) ; // Pieces which are surrounded with braces are extension points . if ( token . startsWith ( "{" ) && token . endsWith ( "}" ) ) { final String extension = token . substring ( 1 , token . length ( ) - 1 ) ; final String extensionInputs = Integrator . getValue ( featureTable , extension ) ; if ( extensionInputs != null ) { result . add ( extensionInputs ) ; } } else { result . add ( token ) ; } } if ( ! result . isEmpty ( ) ) { return StringUtils . join ( result , "," ) ; } else { return "" ; }
public class AbstractSessionHandler {
  /**
   * Creates an entirely new Session and registers it with the session cache.
   *
   * @param id identity of session to create
   * @return the new session object, or {@code null} if creation failed
   */
  @Override
  public Session newSession(String id) {
    long created = System.currentTimeMillis();
    // -1 means "no idle timeout" when no positive default is configured.
    Session session = sessionCache.newSession(id, created, (defaultMaxIdleSecs > 0 ? defaultMaxIdleSecs * 1000L : -1));
    try {
      sessionCache.put(id, session);
      sessionsCreatedStats.increment();
      for (SessionListener listener : sessionListeners) {
        listener.sessionCreated(session);
      }
      return session;
    } catch (Exception e) {
      // NOTE(review): broad catch converts any failure (including a listener
      // throwing) into a null return -- confirm callers handle null.
      log.warn("Failed to create a new session", e);
      return null;
    }
  }
}
public class ARCWriter {
  /**
   * Generates the ARC file meta data record (the body of the first record in
   * the arc file). Produces version 1 of the ARC format, or 1.1 when metadata
   * has been supplied.
   * <p>
   * Version 1 metadata looks roughly like:
   * <pre>filedesc://testWriteRecord-JunitIAH20040110013326-2.arc 0.0.0.0 \\
   * 20040110013326 text/plain 77
   * 1 0 InternetArchive
   * URL IP-address Archive-date Content-type Archive-length</pre>
   * <p>
   * If compression is enabled, the record is gzipped in the Internet Archive
   * manner: the FEXTRA flag is set in the gzip FLG field and the IA extra
   * header field is spliced in after the fixed 10-byte gzip header. We do it
   * this roundabout way because java's GZIPOutputStream gives no access to
   * gzip header fields. See RFC1952 and alexa's av_ziparc for background.
   *
   * @param date date to put into the ARC metadata; if 17-digit it is
   *             truncated to the traditional 14 digits
   * @return byte array filled with the arc header
   * @throws IOException if gzipping produced an unexpected FLG header or the
   *                     record could not be assembled
   */
  private byte[] generateARCFileMetaData(String date) throws IOException {
    // Truncate 17-digit timestamps to the traditional 14-digit form.
    if (date != null && date.length() > 14) {
      date = date.substring(0, 14);
    }
    int metadataBodyLength = getMetadataLength();
    // If metadata body, then the minor part of the version is '1' rather
    // than '0'.
    String metadataHeaderLinesTwoAndThree = getMetadataHeaderLinesTwoAndThree("1 " + ((metadataBodyLength > 0) ? "1" : "0"));
    int recordLength = metadataBodyLength + metadataHeaderLinesTwoAndThree.getBytes(DEFAULT_ENCODING).length;
    String metadataHeaderStr = ARC_MAGIC_NUMBER + getBaseFilename() + " 0.0.0.0 " + date + " text/plain " + recordLength + metadataHeaderLinesTwoAndThree;
    ByteArrayOutputStream metabaos = new ByteArrayOutputStream(recordLength);
    // Write the metadata header.
    metabaos.write(metadataHeaderStr.getBytes(DEFAULT_ENCODING));
    // Write the metadata body, if anything to write.
    if (metadataBodyLength > 0) {
      writeMetaData(metabaos);
    }
    // Write out a LINE_SEPARATOR to end this record.
    metabaos.write(LINE_SEPARATOR);
    // Now get bytes of all just written and compress if flag set.
    byte[] bytes = metabaos.toByteArray();
    if (isCompressed()) {
      // GZIP the header but catch the gzipping into a byte array so we
      // can add the special IA GZIP header to the product. After
      // manipulations, write to the output stream (The JAVA GZIP
      // implementation does not give access to GZIP header. It
      // produces a 'default' header only). We can get away w/ these
      // manipulations because the GZIP 'default' header doesn't
      // do the 'optional' CRC'ing of the header.
      byte[] gzippedMetaData = ArchiveUtils.gzip(bytes);
      if (gzippedMetaData[3] != 0) {
        throw new IOException("The GZIP FLG header is unexpectedly " + " non-zero. Need to add smarter code that can deal " + " when already extant extra GZIP header fields.");
      }
      // Set the GZIP FLG header to '4' (FEXTRA) and the OS byte (index 9)
      // to 3 (Unix), then splice in the IA 'extra' field.
      gzippedMetaData[3] = 4;
      gzippedMetaData[9] = 3;
      byte[] assemblyBuffer = new byte[gzippedMetaData.length + ARC_GZIP_EXTRA_FIELD.length];
      // '10' in the below is a pointer past the following bytes of the
      // GZIP header: ID1 ID2 CM FLG + MTIME (4-bytes) XFL OS. See
      // RFC1952 for explanation of the abbreviations just used.
      System.arraycopy(gzippedMetaData, 0, assemblyBuffer, 0, 10);
      System.arraycopy(ARC_GZIP_EXTRA_FIELD, 0, assemblyBuffer, 10, ARC_GZIP_EXTRA_FIELD.length);
      System.arraycopy(gzippedMetaData, 10, assemblyBuffer, 10 + ARC_GZIP_EXTRA_FIELD.length, gzippedMetaData.length - 10);
      bytes = assemblyBuffer;
    }
    return bytes;
  }
}
public class XMLSocketReceiver {

    /**
     * Does the actual shutting down by closing the server socket
     * and any connected sockets that have been created.
     *
     * <p>Declared {@code synchronized} so shutdown does not interleave with
     * other synchronized state changes on this receiver.
     */
    private synchronized void doShutdown() {
        // Mark the receiver inactive first so any accept/read loops can observe it.
        active = false;
        getLogger().debug("{} doShutdown called", getName());
        // close the server socket
        closeServerSocket();
        // close all of the accepted sockets
        closeAllAcceptedSockets();
        // Withdraw the ZeroConf advertisement only if one was published.
        if (advertiseViaMulticastDNS) {
            zeroConf.unadvertise();
        }
    }
}
public class RateLimitingDecoratorFactoryFunction {

    /**
     * Creates a new decorator with the specified {@code parameter}.
     *
     * <p>Builds a throttling decorator backed by a {@code RateLimitingThrottlingStrategy}
     * configured from {@code parameter.value()}; the strategy name is taken from
     * {@code parameter.name()} when specified, otherwise {@code null} is passed.
     */
    @Override
    public Function<Service<HttpRequest, HttpResponse>, ? extends Service<HttpRequest, HttpResponse>> newDecorator(RateLimitingDecorator parameter) {
        return ThrottlingHttpService.newDecorator(
                new RateLimitingThrottlingStrategy<>(
                        parameter.value(),
                        // Fall back to null when no explicit name was given.
                        DefaultValues.isSpecified(parameter.name()) ? parameter.name() : null));
    }
}
public class LazyCanvasResizer { /** * React to a component resize event . */ @ Override public void componentResized ( ComponentEvent e ) { } }
if ( e . getComponent ( ) == component ) { double newRatio = getCurrentRatio ( ) ; if ( Math . abs ( newRatio - activeRatio ) > threshold ) { activeRatio = newRatio ; executeResize ( newRatio ) ; } }
public class DefaultGroovyMethods {

    /**
     * Sorts all array members into groups determined by the supplied mapping closure.
     * The closure should return the key that this item should be grouped by. The returned
     * LinkedHashMap will have an entry for each distinct key returned from the closure,
     * with each value being a list of items for that group.
     * Example usage:
     * <pre class="groovyTestCase">
     * Integer[] items = [1,2,3,4,5,6]
     * assert [0:[2,4,6], 1:[1,3,5]] == items.groupBy { it % 2 }
     * </pre>
     *
     * @param self an array to group
     * @param closure a closure mapping entries on keys
     * @return a new Map grouped by keys
     * @see #groupBy(Iterable, Closure)
     * @since 2.2.0
     */
    public static <K, T> Map<K, List<T>> groupBy(T[] self, @ClosureParams(FirstParam.Component.class) Closure<K> closure) {
        // Delegate to the Iterable overload by viewing the array as a fixed-size list.
        return groupBy((Iterable<T>) Arrays.asList(self), closure);
    }
}
public class CmsPropertyTable { /** * Filters the table according to given search string . < p > * @ param search string to be looked for . */ public void filterTable ( String search ) { } }
m_container . removeAllContainerFilters ( ) ; if ( CmsStringUtil . isNotEmptyOrWhitespaceOnly ( search ) ) { m_container . addContainerFilter ( new Or ( new SimpleStringFilter ( TableColumn . Name , search , true , false ) ) ) ; }
public class ByteArrayWrapper { /** * Ensure that the internal byte array is at least of length capacity . * If the byte array is null or its length is less than capacity , a new * byte array of length capacity will be allocated . * The contents of the array ( between 0 and size ) remain unchanged . * @ param capacity minimum length of internal byte array . * @ return this ByteArrayWrapper */ public ByteArrayWrapper ensureCapacity ( int capacity ) { } }
if ( bytes == null || bytes . length < capacity ) { byte [ ] newbytes = new byte [ capacity ] ; if ( bytes != null ) { copyBytes ( bytes , 0 , newbytes , 0 , size ) ; } bytes = newbytes ; } return this ;
public class GridFTPClient { /** * Sets the checksum values ahead of the transfer * @ param algorithm the checksume algorithm * @ param value the checksum value as hexadecimal number * @ exception ServerException if an error occured . */ public void setChecksum ( ChecksumAlgorithm algorithm , String value ) throws IOException , ServerException { } }
String arguments = algorithm . toFtpCmdArgument ( ) + " " + value ; Command cmd = new Command ( "SCKS" , arguments ) ; try { controlChannel . execute ( cmd ) ; } catch ( UnexpectedReplyCodeException urce ) { throw ServerException . embedUnexpectedReplyCodeException ( urce ) ; } catch ( FTPReplyParseException rpe ) { throw ServerException . embedFTPReplyParseException ( rpe ) ; }
public class SimpleHadoopFilesystemConfigStore {

    /**
     * A helper to resolve System properties and Environment variables in includes paths.
     * The method loads the list of unresolved <code>includes</code> into an in-memory
     * {@link Config} object and resolves with a fallback on {@link ConfigFactory#defaultOverrides()}.
     *
     * @param includes list of unresolved includes
     * @param runtimeConfig optional runtime config, used as the last fallback during resolution
     * @return a list of resolved includes; empty when no usable include lines exist
     */
    @VisibleForTesting
    public static List<String> resolveIncludesList(List<String> includes, Optional<Config> runtimeConfig) {
        // Create a TypeSafe Config object with key INCLUDES_KEY_NAME and value an array of includes.
        StringBuilder includesBuilder = new StringBuilder();
        for (String include : includes) {
            // Skip blank lines and comments (lines starting with '#').
            if (StringUtils.isNotBlank(include) && !StringUtils.startsWith(include, "#")) {
                includesBuilder.append(INCLUDES_KEY_NAME).append("+=").append(include).append("\n");
            }
        }
        if (includesBuilder.length() == 0) {
            return Collections.emptyList();
        }
        // Resolve against defaultOverrides and environment variables. The fallback
        // chain is built once here (the original duplicated it per branch) and the
        // runtime config, when present, is appended as the final fallback.
        Config config = ConfigFactory.parseString(includesBuilder.toString())
            .withFallback(ConfigFactory.defaultOverrides())
            .withFallback(ConfigFactory.systemEnvironment());
        if (runtimeConfig.isPresent()) {
            config = config.withFallback(runtimeConfig.get());
        }
        return config.resolve().getStringList(INCLUDES_KEY_NAME);
    }
}
public class CommonRangeBoundaries { /** * Search the range index of input record . */ private int binarySearch ( T record ) { } }
int low = 0 ; int high = this . boundaries . length - 1 ; typeComparator . extractKeys ( record , keys , 0 ) ; while ( low <= high ) { final int mid = ( low + high ) >>> 1 ; final int result = compareKeys ( flatComparators , keys , this . boundaries [ mid ] ) ; if ( result > 0 ) { low = mid + 1 ; } else if ( result < 0 ) { high = mid - 1 ; } else { return mid ; } } // key not found , but the low index is the target // bucket , since the boundaries are the upper bound return low ;
public class MtasSolrCollectionCache { /** * Verify . * @ param version the version * @ param time the time * @ return true , if successful */ private boolean verify ( String version , Long time ) { } }
if ( versionToItem . containsKey ( version ) ) { Path path = collectionCachePath . resolve ( version ) ; File file = path . toFile ( ) ; if ( file . exists ( ) && file . canRead ( ) && file . canWrite ( ) ) { if ( time != null ) { if ( ! file . setLastModified ( time ) ) { log . debug ( "couldn't change filetime " + file . getAbsolutePath ( ) ) ; } else { expirationVersion . put ( version , time + ( 1000 * lifeTime ) ) ; } } return true ; } else { return false ; } } else { return false ; }
public class ThirdPartyAudienceSegment {

    /**
     * Sets the licenseType value for this ThirdPartyAudienceSegment.
     *
     * @param licenseType Specifies the license type of the external segment.
     *        This attribute is read-only.
     */
    public void setLicenseType(com.google.api.ads.admanager.axis.v201811.LicenseType licenseType) {
        // Plain field assignment; no validation is performed here.
        this.licenseType = licenseType;
    }
}
public class Completable { /** * < strong > This method requires advanced knowledge about building operators , please consider * other standard composition methods first ; < / strong > * Returns a { @ code Completable } which , when subscribed to , invokes the { @ link CompletableOperator # apply ( CompletableObserver ) apply ( CompletableObserver ) } method * of the provided { @ link CompletableOperator } for each individual downstream { @ link Completable } and allows the * insertion of a custom operator by accessing the downstream ' s { @ link CompletableObserver } during this subscription phase * and providing a new { @ code CompletableObserver } , containing the custom operator ' s intended business logic , that will be * used in the subscription process going further upstream . * < img width = " 640 " height = " 313 " src = " https : / / raw . github . com / wiki / ReactiveX / RxJava / images / rx - operators / Completable . lift . png " alt = " " > * Generally , such a new { @ code CompletableObserver } will wrap the downstream ' s { @ code CompletableObserver } and forwards the * { @ code onError } and { @ code onComplete } events from the upstream directly or according to the * emission pattern the custom operator ' s business logic requires . In addition , such operator can intercept the * flow control calls of { @ code dispose } and { @ code isDisposed } that would have traveled upstream and perform * additional actions depending on the same business logic requirements . * Example : * < pre > < code > * / / Step 1 : Create the consumer type that will be returned by the CompletableOperator . 
apply ( ) : * public final class CustomCompletableObserver implements CompletableObserver , Disposable { * / / The downstream ' s CompletableObserver that will receive the onXXX events * final CompletableObserver downstream ; * / / The connection to the upstream source that will call this class ' onXXX methods * Disposable upstream ; * / / The constructor takes the downstream subscriber and usually any other parameters * public CustomCompletableObserver ( CompletableObserver downstream ) { * this . downstream = downstream ; * / / In the subscription phase , the upstream sends a Disposable to this class * / / and subsequently this class has to send a Disposable to the downstream . * / / Note that relaying the upstream ' s Disposable directly is not allowed in RxJava * & # 64 ; Override * public void onSubscribe ( Disposable d ) { * if ( upstream ! = null ) { * d . dispose ( ) ; * } else { * upstream = d ; * downstream . onSubscribe ( this ) ; * / / Some operators may handle the upstream ' s error while others * / / could just forward it to the downstream . * & # 64 ; Override * public void onError ( Throwable throwable ) { * downstream . onError ( throwable ) ; * / / When the upstream completes , usually the downstream should complete as well . * / / In completable , this could also mean doing some side - effects * & # 64 ; Override * public void onComplete ( ) { * System . out . println ( " Sequence completed " ) ; * downstream . onComplete ( ) ; * / / Some operators may use their own resources which should be cleaned up if * / / the downstream disposes the flow before it completed . Operators without * / / resources can simply forward the dispose to the upstream . * / / In some cases , a disposed flag may be set by this method so that other parts * / / of this class may detect the dispose and stop sending events * / / to the downstream . * & # 64 ; Override * public void dispose ( ) { * upstream . 
dispose ( ) ; * / / Some operators may simply forward the call to the upstream while others * / / can return the disposed flag set in dispose ( ) . * & # 64 ; Override * public boolean isDisposed ( ) { * return upstream . isDisposed ( ) ; * / / Step 2 : Create a class that implements the CompletableOperator interface and * / / returns the custom consumer type from above in its apply ( ) method . * / / Such class may define additional parameters to be submitted to * / / the custom consumer type . * final class CustomCompletableOperator implements CompletableOperator { * & # 64 ; Override * public CompletableObserver apply ( CompletableObserver upstream ) { * return new CustomCompletableObserver ( upstream ) ; * / / Step 3 : Apply the custom operator via lift ( ) in a flow by creating an instance of it * / / or reusing an existing one . * Completable . complete ( ) * . lift ( new CustomCompletableOperator ( ) ) * . test ( ) * . assertResult ( ) ; * < / code > < / pre > * Creating custom operators can be complicated and it is recommended one consults the * < a href = " https : / / github . com / ReactiveX / RxJava / wiki / Writing - operators - for - 2.0 " > RxJava wiki : Writing operators < / a > page about * the tools , requirements , rules , considerations and pitfalls of implementing them . * Note that implementing custom operators via this { @ code lift ( ) } method adds slightly more overhead by requiring * an additional allocation and indirection per assembled flows . Instead , extending the abstract { @ code Completable } * class and creating a { @ link CompletableTransformer } with it is recommended . * Note also that it is not possible to stop the subscription phase in { @ code lift ( ) } as the { @ code apply ( ) } method * requires a non - null { @ code CompletableObserver } instance to be returned , which is then unconditionally subscribed to * the upstream { @ code Completable } . 
For example , if the operator decided there is no reason to subscribe to the * upstream source because of some optimization possibility or a failure to prepare the operator , it still has to * return a { @ code CompletableObserver } that should immediately dispose the upstream ' s { @ code Disposable } in its * { @ code onSubscribe } method . Again , using a { @ code CompletableTransformer } and extending the { @ code Completable } is * a better option as { @ link # subscribeActual } can decide to not subscribe to its upstream after all . * < dl > * < dt > < b > Scheduler : < / b > < / dt > * < dd > { @ code lift } does not operate by default on a particular { @ link Scheduler } , however , the * { @ link CompletableOperator } may use a { @ code Scheduler } to support its own asynchronous behavior . < / dd > * < / dl > * @ param onLift the { @ link CompletableOperator } that receives the downstream ' s { @ code CompletableObserver } and should return * a { @ code CompletableObserver } with custom behavior to be used as the consumer for the current * { @ code Completable } . * @ return the new Completable instance * @ see < a href = " https : / / github . com / ReactiveX / RxJava / wiki / Writing - operators - for - 2.0 " > RxJava wiki : Writing operators < / a > * @ see # compose ( CompletableTransformer ) */ @ CheckReturnValue @ SchedulerSupport ( SchedulerSupport . NONE ) public final Completable lift ( final CompletableOperator onLift ) { } }
ObjectHelper . requireNonNull ( onLift , "onLift is null" ) ; return RxJavaPlugins . onAssembly ( new CompletableLift ( this , onLift ) ) ;
public class ApiOvhTelephony { /** * Reinitialize the phone configuration * REST : POST / telephony / { billingAccount } / line / { serviceName } / phone / resetConfig * @ param ip [ required ] The public ip phone allowed for reset * @ param billingAccount [ required ] The name of your billingAccount * @ param serviceName [ required ] */ public OvhResetPhoneInfo billingAccount_line_serviceName_phone_resetConfig_POST ( String billingAccount , String serviceName , String ip ) throws IOException { } }
String qPath = "/telephony/{billingAccount}/line/{serviceName}/phone/resetConfig" ; StringBuilder sb = path ( qPath , billingAccount , serviceName ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "ip" , ip ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , OvhResetPhoneInfo . class ) ;
public class AgentRegistrationInformationsInner {

    /**
     * Regenerate a primary or secondary agent registration key.
     *
     * @param resourceGroupName Name of an Azure Resource group.
     * @param automationAccountName The name of the automation account.
     * @param parameters The name of the agent registration key to be regenerated
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<AgentRegistrationInner> regenerateKeyAsync(String resourceGroupName, String automationAccountName, AgentRegistrationRegenerateKeyParameter parameters, final ServiceCallback<AgentRegistrationInner> serviceCallback) {
        // Bridge the Observable-based implementation to the callback-style API.
        return ServiceFuture.fromResponse(regenerateKeyWithServiceResponseAsync(resourceGroupName, automationAccountName, parameters), serviceCallback);
    }
}
public class StreamTransactionMetadataTasks {

    /**
     * Initializes stream writers for commit and abort streams.
     * This method should be called immediately after creating StreamTransactionMetadataTasks object.
     *
     * @param clientFactory Client factory reference.
     * @param config Controller event processor configuration.
     */
    @Synchronized
    public void initializeStreamWriters(final EventStreamClientFactory clientFactory, final ControllerEventProcessorConfig config) {
        // Complete each future at most once; a done future means the writer
        // was already supplied (e.g. by an earlier call).
        if (!commitWriterFuture.isDone()) {
            commitWriterFuture.complete(clientFactory.createEventWriter(
                    config.getCommitStreamName(),
                    ControllerEventProcessors.COMMIT_EVENT_SERIALIZER,
                    EventWriterConfig.builder().build()));
        }
        if (!abortWriterFuture.isDone()) {
            abortWriterFuture.complete(clientFactory.createEventWriter(
                    config.getAbortStreamName(),
                    ControllerEventProcessors.ABORT_EVENT_SERIALIZER,
                    EventWriterConfig.builder().build()));
        }
        // Mark this instance as ready once both writers are in place.
        this.setReady();
    }
}
public class ClassUtil { /** * Returns an immutable entity property name List by the specified class . * @ param cls * @ return */ public static List < String > getPropNameList ( final Class < ? > cls ) { } }
List < String > propNameList = entityDeclaredPropNameListPool . get ( cls ) ; if ( propNameList == null ) { loadPropGetSetMethodList ( cls ) ; propNameList = entityDeclaredPropNameListPool . get ( cls ) ; } return propNameList ;
public class AnonymousOdsDocument {

    /**
     * Create a new anonymous ODS document.
     *
     * @param logger the logger
     * @param xmlUtil a util for XML writing
     * @param odsElements the ods elements (file entries in zip archive)
     * @return the new document
     */
    static AnonymousOdsDocument create(final Logger logger, final XMLUtil xmlUtil, final OdsElements odsElements) {
        // Static factory: wraps the shared CommonOdsDocument logic around the elements.
        return new AnonymousOdsDocument(logger, xmlUtil, odsElements, new CommonOdsDocument(odsElements));
    }
}
public class druidGParser {

    /**
     * druidG.g:71:1: deleteStmnt returns [DeleteMeta dMeta] : DELETE WS FROM WS (id= ID WS ) WHERE WS i= intervalClause ;
     *
     * <p>ANTLR-generated rule method: parses a statement of the form
     * {@code DELETE FROM <dataSource> WHERE <interval>} into a DeleteMeta.
     * The match/FOLLOW plumbing mirrors the grammar; do not hand-edit it.
     */
    public final DeleteMeta deleteStmnt() throws RecognitionException {
        DeleteMeta dMeta = null;
        Token id = null;
        List<Interval> i = null;
        dMeta = new DeleteMeta();
        try {
            // druidG.g:73:2: ( DELETE WS FROM WS (id= ID WS ) WHERE WS i= intervalClause )
            // druidG.g:73:3: DELETE WS FROM WS (id= ID WS ) WHERE WS i= intervalClause
            {
                match(input, DELETE, FOLLOW_DELETE_in_deleteStmnt283);
                match(input, WS, FOLLOW_WS_in_deleteStmnt285);
                match(input, FROM, FOLLOW_FROM_in_deleteStmnt287);
                match(input, WS, FOLLOW_WS_in_deleteStmnt289);
                // druidG.g:73:21: (id= ID WS )
                // druidG.g:73:22: id= ID WS
                {
                    id = (Token) match(input, ID, FOLLOW_ID_in_deleteStmnt294);
                    // The matched identifier names the data source being deleted from.
                    dMeta.dataSource = (id != null ? id.getText() : null);
                    match(input, WS, FOLLOW_WS_in_deleteStmnt298);
                }
                match(input, WHERE, FOLLOW_WHERE_in_deleteStmnt303);
                match(input, WS, FOLLOW_WS_in_deleteStmnt305);
                pushFollow(FOLLOW_intervalClause_in_deleteStmnt309);
                i = intervalClause();
                state._fsp--;
                // We set this later after granularitySpec object is fully formed.
                if (i != null && !i.isEmpty()) {
                    // We already checked for list's emptiness (it is safe to access get(0)).
                    dMeta.interval = i.get(0);
                }
            }
        } catch (RecognitionException re) {
            reportError(re);
            recover(input, re);
        } finally {
            // do for sure before leaving
        }
        return dMeta;
    }
}
public class BuildsInner {

    /**
     * Patch the build properties.
     *
     * @param resourceGroupName The name of the resource group to which the container registry belongs.
     * @param registryName The name of the container registry.
     * @param buildId The build ID.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the BuildInner object if successful.
     */
    public BuildInner beginUpdate(String resourceGroupName, String registryName, String buildId) {
        // Blocking wrapper around the async implementation: waits for the single
        // response and unwraps its body.
        return beginUpdateWithServiceResponseAsync(resourceGroupName, registryName, buildId).toBlocking().single().body();
    }
}
public class StreamEx { /** * Returns a new { @ code StreamEx } of { @ code int [ ] } arrays containing all the possible combinations of length { @ code * k } consisting of numbers from 0 to { @ code n - 1 } in lexicographic order . * Example : { @ code StreamEx . ofCombinations ( 3 , 2 ) } returns the stream of three elements : { @ code [ 0 , 1 ] } , { @ code [ 0, * 2 ] } and { @ code [ 1 , 2 ] } in this order . * @ param n number of possible distinct elements * @ param k number of elements in each combination * @ return new sequential stream of possible combinations . Returns an empty stream if { @ code k } is bigger * than { @ code n } . * @ throws IllegalArgumentException if n or k is negative or number of possible combinations exceeds { @ code * Long . MAX _ VALUE } . * @ since 0.6.7 */ public static StreamEx < int [ ] > ofCombinations ( int n , int k ) { } }
checkNonNegative ( "k" , k ) ; checkNonNegative ( "n" , n ) ; if ( k > n ) { return StreamEx . empty ( ) ; } if ( k == 0 ) { return StreamEx . of ( new int [ 0 ] ) ; } long size = CombinationSpliterator . cnk ( n , k ) ; int [ ] value = new int [ k ] ; for ( int i = 0 ; i < k ; i ++ ) { value [ i ] = i ; } return StreamEx . of ( new CombinationSpliterator ( n , size , 0 , value ) ) ;
public class UniqueKeyRangeManager {

    /**
     * Request an immediate update to the persistent state.
     *
     * @param generator to be updated
     * @return the value that was stored prior to the update
     * @throws PersistenceException if the ObjectManager work fails, or if no
     *         generator with the given name is known to this manager
     */
    public long updateEntry(UniqueKeyGenerator generator) throws PersistenceException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "updateEntry", "GeneratorName=" + generator.getName());

        long currentLimit = 0L;

        // Do we know of this generator?
        if (_generators.containsKey(generator.getName())) {
            Transaction transaction = null;
            try {
                transaction = _objectManager.getTransaction();

                // Replace the ManagedObject for this generator
                UniqueKeyGeneratorManagedObject mo = (UniqueKeyGeneratorManagedObject) _generators.get(generator.getName());

                // Lock the token so we can make our changes.
                transaction.lock(mo);

                // Update the value in the managed object to the
                // new increased limit.
                synchronized (mo) {
                    currentLimit = mo.getGeneratorKeyLimit();
                    mo.setGeneratorKeyLimit(currentLimit + generator.getRange());
                }

                // Use replace to update the contents.
                transaction.replace(mo);
                transaction.commit(false);
            } catch (ObjectManagerException ome) {
                // FFDC the failure, back out any in-flight transaction work,
                // then surface the problem as a PersistenceException.
                com.ibm.ws.ffdc.FFDCFilter.processException(ome, "com.ibm.ws.sib.msgstore.persistence.objectManager.UniqueKeyRangeManager.updateEntry", "1:286:1.8", this);

                if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                    SibTr.event(tc, "Exception caught increasing range of unique key generator!", ome);

                if (transaction != null) {
                    try {
                        // Clean up our ObjectManager work.
                        transaction.backout(false);
                    } catch (ObjectManagerException e) {
                        // Backout itself failed; record it but still throw the original failure.
                        com.ibm.ws.ffdc.FFDCFilter.processException(e, "com.ibm.ws.sib.msgstore.persistence.objectManager.UniqueKeyRangeManager.updateEntry", "1:298:1.8", this);

                        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                            SibTr.event(tc, "Exception caught backing out unique key generator update!", e);
                    }
                }

                if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                    SibTr.exit(tc, "updateEntry");

                throw new PersistenceException("Exception caught increasing range of unique key generator!", ome);
            }
        } else {
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.event(tc, "No UniqueKeyGenerator matching: " + generator.getName() + " found to update!");

            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(tc, "updateEntry");

            throw new PersistenceException("No UniqueKeyGenerator matching: " + generator.getName() + " found to update!");
        }

        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "updateEntry", "return=" + currentLimit);

        return currentLimit;
    }
}
public class BRGImpl {

    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     *
     * <p>EMF-generated feature test: reports whether the given structural
     * feature currently differs from its default value.
     *
     * @generated
     */
    @Override
    public boolean eIsSet(int featureID) {
        switch (featureID) {
            case AfplibPackage.BRG__RGRP_NAME:
                // Set when the name differs from its (possibly null) default.
                return RGRP_NAME_EDEFAULT == null ? rGrpName != null : !RGRP_NAME_EDEFAULT.equals(rGrpName);
            case AfplibPackage.BRG__TRIPLETS:
                // Set when the triplets list exists and is non-empty.
                return triplets != null && !triplets.isEmpty();
        }
        return super.eIsSet(featureID);
    }
}
public class Inflector {

    /**
     * Returns a copy of the input with the first character converted to uppercase
     * and the remainder to lowercase. The input is trimmed first; {@code null}
     * yields {@code null} and a blank string yields {@code ""}.
     *
     * @param words the word to be capitalized
     * @return the string with the first character capitalized and the remaining
     *         characters lowercased
     */
    public String capitalize(String words) {
        if (words == null) {
            return null;
        }
        final String trimmed = words.trim();
        switch (trimmed.length()) {
            case 0:
                return "";
            case 1:
                return trimmed.toUpperCase();
            default:
                return Character.toUpperCase(trimmed.charAt(0)) + trimmed.substring(1).toLowerCase();
        }
    }
}
public class NetworkMessageEntity {

    /**
     * Add an action.
     *
     * @param element The action type.
     * @param value The action value.
     */
    public void addAction(M element, double value) {
        // Boxes the primitive and stores (or overwrites) the entry for this action type.
        actions.put(element, Double.valueOf(value));
    }
}
public class REWildcardStringParser { /** * Converts wildcard string mask to regular expression . * This method should reside in som utility class , but I don ' t know how proprietary the regular expression format is . . . * @ return the corresponding regular expression or { @ code null } if an error occurred . */ private String convertWildcardExpressionToRegularExpression ( final String pWildcardExpression ) { } }
if ( pWildcardExpression == null ) { if ( mDebugging ) { out . println ( DebugUtil . getPrefixDebugMessage ( this ) + "wildcard expression is null - also returning null as regexp!" ) ; } return null ; } StringBuilder regexpBuffer = new StringBuilder ( ) ; boolean convertingError = false ; for ( int i = 0 ; i < pWildcardExpression . length ( ) ; i ++ ) { if ( convertingError ) { return null ; } // Free - range character ' * ' char stringMaskChar = pWildcardExpression . charAt ( i ) ; if ( isFreeRangeCharacter ( stringMaskChar ) ) { regexpBuffer . append ( "(([a-�A-�0-9]|.|_|-)*)" ) ; } // Free - pass character ' ? ' else if ( isFreePassCharacter ( stringMaskChar ) ) { regexpBuffer . append ( "([a-�A_�0-9]|.|_|-)" ) ; } // Valid characters else if ( isInAlphabet ( stringMaskChar ) ) { regexpBuffer . append ( stringMaskChar ) ; } // Invalid character - aborting else { if ( mDebugging ) { out . println ( DebugUtil . getPrefixDebugMessage ( this ) + "one or more characters in string mask are not legal characters - returning null as regexp!" ) ; } convertingError = true ; } } return regexpBuffer . toString ( ) ;
public class Oracle9iLobHandler { /** * Frees the temporary LOBs when an exception is raised in the application * or when the LOBs are no longer needed . If the LOBs are not freed , the * space used by these LOBs are not reclaimed . * @ param clob CLOB - wrapper to free or null * @ param blob BLOB - wrapper to free or null */ private static void freeTempLOB ( ClobWrapper clob , BlobWrapper blob ) { } }
try { if ( clob != null ) { // If the CLOB is open , close it if ( clob . isOpen ( ) ) { clob . close ( ) ; } // Free the memory used by this CLOB clob . freeTemporary ( ) ; } if ( blob != null ) { // If the BLOB is open , close it if ( blob . isOpen ( ) ) { blob . close ( ) ; } // Free the memory used by this BLOB blob . freeTemporary ( ) ; } } catch ( Exception e ) { logger . error ( "Error during temporary LOB release" , e ) ; }
public class AbstractFacetAndHighlightQueryDecorator {

    /**
     * (non-Javadoc)
     *
     * @see org.springframework.data.solr.core.query.HighlightQuery#setHighlightOptions(org.springframework.data.solr.core.query.HighlightOptions)
     */
    @Override
    public <T extends SolrDataQuery> T setHighlightOptions(HighlightOptions highlightOptions) {
        // Pure delegation to the wrapped query.
        return query.setHighlightOptions(highlightOptions);
    }
}
public class StringUtil {

    /**
     * Get the item after one char delim if the delim is found (else null).
     * This operation is a simplified and optimized
     * version of {@link String#split(String, int)}.
     */
    public static String substringAfter(String value, char delim) {
        final int delimIndex = value.indexOf(delim);
        // No delimiter present: signal with null rather than the whole string.
        return delimIndex < 0 ? null : value.substring(delimIndex + 1);
    }
}
public class DefaultDOManager { /** * Update the registry and deployment cache to reflect the latest state of * reality . * @ param obj * DOReader of a service deployment object */ private synchronized void updateDeploymentMap ( DigitalObject obj , Connection c , boolean isPurge ) throws SQLException { } }
String sDep = obj . getPid ( ) ; Set < RelationshipTuple > sDefs = obj . getRelationships ( Constants . MODEL . IS_DEPLOYMENT_OF , null ) ; Set < RelationshipTuple > models = obj . getRelationships ( Constants . MODEL . IS_CONTRACTOR_OF , null ) ; /* Read in the new deployment map from the object */ Set < ServiceContext > newContext = new HashSet < ServiceContext > ( ) ; if ( ! isPurge ) { for ( RelationshipTuple sDefTuple : sDefs ) { String sDef = sDefTuple . getObjectPID ( ) ; for ( RelationshipTuple cModelTuple : models ) { String cModel = cModelTuple . getObjectPID ( ) ; newContext . add ( ServiceContext . getInstance ( cModel , sDef ) ) ; } } } /* Read in the old deployment map from the cache */ Set < ServiceContext > oldContext = m_cModelDeploymentMap . getContextFor ( sDep ) ; /* Remove any obsolete deployments from the registry / cache */ for ( ServiceContext o : oldContext ) { if ( ! newContext . contains ( o ) ) { removeDeployment ( o , obj , c ) ; } } /* Add any new deployments from the registry / cache */ for ( ServiceContext n : newContext ) { if ( ! oldContext . contains ( n ) ) { addDeployment ( n , obj , c ) ; } else { updateDeployment ( n , obj , c ) ; } }
public class HTTP {

    /**
     * Pushes request data to the open http connection.
     *
     * @param connection - the open connection of the http call
     * @param request - the parameters to be passed to the endpoint for the service
     * call
     */
    private void writeJsonDataRequest(HttpURLConnection connection, Request request) {
        // try-with-resources closes (and flushes) the writer even on failure.
        // NOTE(review): OutputStreamWriter without an explicit charset uses the
        // platform default — consider StandardCharsets.UTF_8; confirm server expectations.
        try (OutputStreamWriter wr = new OutputStreamWriter(connection.getOutputStream())) {
            wr.write(request.getJsonPayload().toString());
            wr.flush();
        } catch (IOException e) {
            // Best-effort: failures are logged, not propagated to the caller.
            log.error(e);
        }
    }
}