signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class NewRelicManager { /** * Synchronise the Synthetics configuration with the cache . * @ param cache The provider cache * @ return < CODE > true < / CODE > if the operation was successful */ public boolean syncMonitors ( NewRelicCache cache ) { } }
boolean ret = true ; if ( apiClient == null ) throw new IllegalArgumentException ( "null API client" ) ; // Get the Synthetics configuration using the REST API if ( cache . isSyntheticsEnabled ( ) ) { ret = false ; logger . info ( "Getting the monitors" ) ; cache . monitors ( ) . add ( syntheticsApiClient . monitors ( ) . list ( ) ) ; cache . setUpdatedAt ( ) ; ret = true ; } return ret ;
public class CPOptionCategoryUtil { /** * Returns a range of all the cp option categories where groupId = & # 63 ; . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CPOptionCategoryModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param groupId the group ID * @ param start the lower bound of the range of cp option categories * @ param end the upper bound of the range of cp option categories ( not inclusive ) * @ return the range of matching cp option categories */ public static List < CPOptionCategory > findByGroupId ( long groupId , int start , int end ) { } }
return getPersistence ( ) . findByGroupId ( groupId , start , end ) ;
public class UniversalProjectReader { /** * Determine if the start of the buffer matches a fingerprint byte array . * @ param buffer bytes from file * @ param fingerprint fingerprint bytes * @ return true if the file matches the fingerprint */ private boolean matchesFingerprint ( byte [ ] buffer , byte [ ] fingerprint ) { } }
return Arrays . equals ( fingerprint , Arrays . copyOf ( buffer , fingerprint . length ) ) ;
public class Assert { /** * Assert a boolean expression , throwing an { @ code IllegalStateException } if the expression * evaluates to { @ code false } . * Call { @ link # isTrue } if you wish to throw an { @ code IllegalArgumentException } on an assertion * failure . * < pre class = " code " > * Assert . state ( id = = null , * ( ) - & gt ; " ID for " + entity . getName ( ) + " must not already be initialized " ) ; * < / pre > * @ param expression a boolean expression * @ param messageSupplier a supplier for the exception message to use if the assertion fails * @ throws IllegalStateException if { @ code expression } is { @ code false } * @ since 5.0 */ public static void state ( final boolean expression , final Supplier < String > messageSupplier ) { } }
if ( ! expression ) { throw new IllegalStateException ( Assert . nullSafeGet ( messageSupplier ) ) ; }
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcElectricDistributionPoint ( ) { } }
if ( ifcElectricDistributionPointEClass == null ) { ifcElectricDistributionPointEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 187 ) ; } return ifcElectricDistributionPointEClass ;
public class Offering { /** * Sanitizes a name by replacing every not alphanumeric character with ' _ ' * @ param name the name of the offerings * @ return the sanitized name */ public static String sanitizeName ( String name ) { } }
if ( name == null ) throw new NullPointerException ( "Parameter name cannot be null" ) ; name = name . trim ( ) ; if ( name . length ( ) == 0 ) throw new IllegalArgumentException ( "Parameter name cannot be empty" ) ; StringBuilder ret = new StringBuilder ( "" ) ; for ( int i = 0 ; i < name . length ( ) ; i ++ ) { char ch = name . charAt ( i ) ; if ( Character . isLetter ( ch ) || Character . isDigit ( ch ) ) { ret . append ( ch ) ; } else { ret . append ( "_" ) ; } } return ret . toString ( ) ;
public class WorkerPools { /** * region getCurrentInstanceProvider overloads */ public < T > Provider < T > getCurrentInstanceProvider ( Class < T > type ) { } }
return getCurrentInstanceProvider ( Key . get ( type ) ) ;
public class XCostExtension { /** * Assigns ( to the given event ) multiple amounts given their key lists . The * i - th element in the key list should correspond to an i - level attribute * with the prescribed key . Note that as a side effect this method creates * attributes when it does not find an attribute with the proper key . * For example , the call : * < pre > * assignAmounts ( event , [ [ [ a ] 10.00 ] [ [ a b ] 20.00 ] [ [ a c ] 30.00 ] [ [ b ] 15.00 ] [ [ c ] 25.00 ] ] ) * < / pre > * should result into the following XES fragment : * < pre > * { @ code * < event > * < string key = " a " value = " " > * < float key = " cost : amount " value = " 10.00 " / > * < string key = " b " value = " " > * < float key = " cost : amount " value = " 20.00 " / > * < / string > * < string key = " c " value = " " > * < float key = " cost : amount " value = " 30.00 " / > * < / string > * < / string > * < string key = " b " value = " " > * < float key = " cost : amount " value = " 15.00 " / > * < / string > * < string key = " c " value = " " > * < float key = " cost : amount " value = " 25.00 " / > * < / string > * < / event > * < / pre > * @ param event * Event to assign the amounts to . * @ param amounts * Mapping from key lists to amounts which are to be assigned . */ public void assignNestedAmounts ( XEvent event , Map < List < String > , Double > amounts ) { } }
XCostAmount . instance ( ) . assignNestedValues ( event , amounts ) ;
public class CmsDomUtil { /** * Checks if the given color value is transparent . < p > * @ param backgroundColor the color value * @ return < code > true < / code > if transparent */ private static boolean isTransparent ( String backgroundColor ) { } }
// not only check ' transparent ' but also ' rgba ( 0 , 0 , 0 , 0 ) ' as returned by chrome return StyleValue . transparent . toString ( ) . equalsIgnoreCase ( backgroundColor ) || "rgba(0, 0, 0, 0)" . equalsIgnoreCase ( backgroundColor ) ;
public class RNAUtils { /** * method to get the rna sequence of the given PolymerNotation * @ param one * PolymerNotation * @ return sequence * @ throws RNAUtilsException if the polymer is not a RNA / DNA * @ throws HELM2HandledException if it contains helm2 specific features can not be casted to HELM1 Format * @ throws ChemistryException if chemistry engine can not be initialized */ public static String getSequence ( PolymerNotation one ) throws RNAUtilsException , HELM2HandledException , ChemistryException { } }
checkRNA ( one ) ; List < Nucleotide > nucleotideList = getNucleotideList ( one ) ; StringBuffer sb = new StringBuffer ( ) ; for ( int i = 0 ; i < nucleotideList . size ( ) ; i ++ ) { sb . append ( nucleotideList . get ( i ) . getNaturalAnalog ( ) ) ; } return sb . toString ( ) ;
public class User { /** * Provides the { @ link Extension } with the given URN * @ param urn The URN of the extension * @ return The extension for the given URN * @ throws IllegalArgumentException If urn is null or empty * @ throws NoSuchElementException If extension with given urn is not available */ public Extension getExtension ( String urn ) { } }
if ( urn == null || urn . isEmpty ( ) ) { throw new IllegalArgumentException ( "urn must be neither null nor empty" ) ; } if ( ! extensions . containsKey ( urn ) ) { throw new NoSuchElementException ( "extension " + urn + " is not available" ) ; } return extensions . get ( urn ) ;
public class ScriptableObject { /** * Utility method to add properties to arbitrary Scriptable object . * If destination is instance of ScriptableObject , calls * defineProperty there , otherwise calls put in destination * ignoring attributes * @ param destination ScriptableObject to define the property on * @ param propertyName the name of the property to define . * @ param value the initial value of the property * @ param attributes the attributes of the JavaScript property */ public static void defineProperty ( Scriptable destination , String propertyName , Object value , int attributes ) { } }
if ( ! ( destination instanceof ScriptableObject ) ) { destination . put ( propertyName , destination , value ) ; return ; } ScriptableObject so = ( ScriptableObject ) destination ; so . defineProperty ( propertyName , value , attributes ) ;
public class LoadBalancerFrontendIPConfigurationsInner { /** * Gets all the load balancer frontend IP configurations . * @ param resourceGroupName The name of the resource group . * @ param loadBalancerName The name of the load balancer . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the PagedList & lt ; FrontendIPConfigurationInner & gt ; object */ public Observable < Page < FrontendIPConfigurationInner > > listAsync ( final String resourceGroupName , final String loadBalancerName ) { } }
return listWithServiceResponseAsync ( resourceGroupName , loadBalancerName ) . map ( new Func1 < ServiceResponse < Page < FrontendIPConfigurationInner > > , Page < FrontendIPConfigurationInner > > ( ) { @ Override public Page < FrontendIPConfigurationInner > call ( ServiceResponse < Page < FrontendIPConfigurationInner > > response ) { return response . body ( ) ; } } ) ;
public class AmazonCloudWatchEventsClient { /** * Displays the external AWS accounts that are permitted to write events to your account using your account ' s event * bus , and the associated policy . To enable your account to receive events from other accounts , use * < a > PutPermission < / a > . * @ param describeEventBusRequest * @ return Result of the DescribeEventBus operation returned by the service . * @ throws ResourceNotFoundException * An entity that you specified does not exist . * @ throws InternalException * This exception occurs due to unexpected causes . * @ sample AmazonCloudWatchEvents . DescribeEventBus * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / events - 2015-10-07 / DescribeEventBus " target = " _ top " > AWS API * Documentation < / a > */ @ Override public DescribeEventBusResult describeEventBus ( DescribeEventBusRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeEventBus ( request ) ;
public class Version { /** * Creates a new Version from the three provided components . The version ' s pre release * and build meta data fields will be empty . Neither value must be lower than 0 and at * least one must be greater than zero * @ param major The major version . * @ param minor The minor version . * @ param patch The patch version . * @ return The version instance . */ public static final Version create ( int major , int minor , int patch ) { } }
return new Version ( major , minor , patch , EMPTY_ARRAY , EMPTY_ARRAY ) ;
public class Captions { /** * Source files for the input sidecar captions used during the transcoding process . To omit all sidecar captions , * leave < code > CaptionSources < / code > blank . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setCaptionSources ( java . util . Collection ) } or { @ link # withCaptionSources ( java . util . Collection ) } if you want * to override the existing values . * @ param captionSources * Source files for the input sidecar captions used during the transcoding process . To omit all sidecar * captions , leave < code > CaptionSources < / code > blank . * @ return Returns a reference to this object so that method calls can be chained together . */ @ Deprecated public Captions withCaptionSources ( CaptionSource ... captionSources ) { } }
if ( this . captionSources == null ) { setCaptionSources ( new com . amazonaws . internal . SdkInternalList < CaptionSource > ( captionSources . length ) ) ; } for ( CaptionSource ele : captionSources ) { this . captionSources . add ( ele ) ; } return this ;
public class ConciseSet { /** * { @ inheritDoc } */ @ Override public ConciseSet intersection ( IntSet other ) { } }
if ( isEmpty ( ) || other == null || other . isEmpty ( ) ) { return empty ( ) ; } if ( other == this ) { return clone ( ) ; } return performOperation ( convert ( other ) , Operator . AND ) ;
public class AWSRoboMakerClient { /** * Describes a simulation job . * @ param describeSimulationJobRequest * @ return Result of the DescribeSimulationJob operation returned by the service . * @ throws ResourceNotFoundException * The specified resource does not exist . * @ throws InvalidParameterException * A parameter specified in a request is not valid , is unsupported , or cannot be used . The returned message * provides an explanation of the error value . * @ throws InternalServerException * AWS RoboMaker experienced a service issue . Try your call again . * @ throws ThrottlingException * AWS RoboMaker is temporarily unable to process the request . Try your call again . * @ sample AWSRoboMaker . DescribeSimulationJob * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / robomaker - 2018-06-29 / DescribeSimulationJob " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeSimulationJobResult describeSimulationJob ( DescribeSimulationJobRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeSimulationJob ( request ) ;
public class DefaultCurieProvider { /** * ( non - Javadoc ) * @ see org . springframework . hateoas . hal . CurieProvider # getNamespacedRelFrom ( java . lang . String ) */ @ Override public HalLinkRelation getNamespacedRelFor ( LinkRelation relation ) { } }
HalLinkRelation result = HalLinkRelation . of ( relation ) ; return defaultCurie == null ? result : result . curieIfUncuried ( defaultCurie ) ;
public class DiscoveryMulticastResponder { /** * Start the responder ( if not already started ) */ public synchronized void start ( ) throws IOException { } }
if ( listenerThreads . size ( ) == 0 ) { List < InetAddress > addresses = hostAddress == null ? NetworkUtil . getMulticastAddresses ( ) : Arrays . asList ( hostAddress ) ; if ( addresses . size ( ) == 0 ) { logHandler . info ( "No suitable address found for listening on multicast discovery requests" ) ; return ; } // We start a thread for every address found for ( InetAddress addr : addresses ) { try { MulticastSocketListenerThread thread = new MulticastSocketListenerThread ( "JolokiaDiscoveryListenerThread-" + addr . getHostAddress ( ) , addr , detailsHolder , restrictor , logHandler ) ; thread . start ( ) ; listenerThreads . add ( thread ) ; } catch ( IOException exp ) { logHandler . info ( "Couldn't start discovery thread for " + addr + ": " + exp ) ; } } }
public class RandomUtil { /** * Picks a random object from the supplied iterator ( which must iterate over exactly * < code > count < / code > objects . The specified skip object will be skipped when selecting a * random value . The skipped object must exist exactly once in the set of objects returned by * the iterator . * @ return a randomly selected item . * @ exception NoSuchElementException thrown if the iterator provides fewer than * < code > count < / code > elements . */ public static < T > T pickRandom ( Iterator < T > iter , int count , T skip ) { } }
if ( count < 2 ) { throw new IllegalArgumentException ( "Must have at least two elements [count=" + count + "]" ) ; } int index = getInt ( count - 1 ) ; T value = null ; do { value = iter . next ( ) ; if ( value == skip ) { value = iter . next ( ) ; } } while ( index -- > 0 ) ; return value ;
public class Expression { /** * Applies a type conversion to this expression which is chained to all * previous conversions . * @ param toType the type to convert to . * @ param preferCast a hint that the conversion should be performed by a * type cast operation , by default is true . * @ throws IllegalArgumentException when the conversion is illegal . */ public void convertTo ( Type toType , boolean preferCast ) { } }
Type fromType = getType ( ) ; Type actual = Type . preserveType ( fromType , toType ) ; if ( actual . equals ( fromType ) ) { return ; } boolean legal = false ; if ( ! preferCast && fromType == Type . NULL_TYPE ) { preferCast = true ; } if ( fromType == null ) { legal = true ; } else if ( fromType . isPrimitive ( ) ) { if ( actual . isPrimitive ( ) ) { if ( actual . getNaturalClass ( ) != void . class ) { legal = true ; } } else { Class < ? > fromObj = fromType . getObjectClass ( ) ; Class < ? > toObj = actual . getObjectClass ( ) ; if ( toObj . isAssignableFrom ( fromObj ) ) { legal = true ; if ( fromObj != toObj ) { actual = fromType . toNonPrimitive ( ) ; } } else if ( Number . class . isAssignableFrom ( fromObj ) && actual . hasPrimitivePeer ( ) ) { if ( Number . class . isAssignableFrom ( toObj ) ) { legal = true ; convertTo ( actual . toPrimitive ( ) ) ; } else if ( Character . class . isAssignableFrom ( toObj ) ) { legal = true ; convertTo ( new Type ( char . class ) ) ; } } } } else { // From non - primitive . . . if ( actual . isPrimitive ( ) ) { if ( fromType . hasPrimitivePeer ( ) ) { legal = true ; if ( fromType . isNullable ( ) ) { // NullPointerException is possible . mExceptionPossible = true ; } Type fromPrim = fromType . toPrimitive ( ) ; if ( fromPrim . getNaturalClass ( ) != actual . getNaturalClass ( ) ) { convertTo ( fromPrim ) ; } } else { Class < ? > fromObj = fromType . getObjectClass ( ) ; Class < ? > toObj = actual . getObjectClass ( ) ; if ( Number . class . isAssignableFrom ( fromObj ) && Number . class . isAssignableFrom ( toObj ) ) { legal = true ; if ( fromType . isNullable ( ) ) { // NullPointerException is possible . mExceptionPossible = true ; } } else if ( preferCast ) { legal = true ; convertTo ( actual . toNonPrimitive ( ) , true ) ; } } } else { Class < ? > fromObj = fromType . getObjectClass ( ) ; Class < ? > toObj = actual . getObjectClass ( ) ; if ( fromObj . equals ( toObj ) ) { legal = true ; if ( fromType . 
isNonNull ( ) || ! actual . isNonNull ( ) ) { // No useful conversion applied , bail out . return ; } } else if ( fromObj . isAssignableFrom ( toObj ) ) { // Downcast . if ( preferCast ) { legal = true ; } } else if ( toObj . isAssignableFrom ( fromObj ) ) { // Upcast . legal = true ; if ( fromType . isNonNull ( ) || ! actual . isNonNull ( ) ) { // No useful conversion applied , bail out . return ; } } else if ( Number . class . isAssignableFrom ( fromObj ) && Number . class . isAssignableFrom ( toObj ) && actual . hasPrimitivePeer ( ) ) { // Conversion like Integer - > Double . legal = true ; if ( fromType . isNonNull ( ) ) { convertTo ( actual . toPrimitive ( ) , true ) ; } } // This test only captures array conversions . else if ( fromObj . getComponentType ( ) != null && toObj . getComponentType ( ) != null && actual . convertableFrom ( fromType ) >= 0 ) { legal = true ; if ( fromType . isNullable ( ) ) { // NullPointerException is possible . mExceptionPossible = true ; } } } } if ( ! legal ) { // Try String conversion . if ( actual . getNaturalClass ( ) . isAssignableFrom ( String . class ) ) { legal = true ; if ( actual . isNonNull ( ) ) { addConversion ( Type . NON_NULL_STRING_TYPE , false ) ; } else { addConversion ( Type . STRING_TYPE , false ) ; } } } if ( ! legal && ! preferCast && ! fromType . isPrimitive ( ) && ! actual . isPrimitive ( ) ) { // Even though a cast isn ' t preferred , its the last available // option . Class < ? > fromObj = fromType . getObjectClass ( ) ; Class < ? > toObj = actual . getObjectClass ( ) ; if ( fromObj . isAssignableFrom ( toObj ) ) { // Downcast . legal = true ; } else if ( toObj . isAssignableFrom ( fromObj ) ) { // Upcast . legal = true ; } } if ( legal ) { addConversion ( actual , preferCast ) ; } else { throw new IllegalArgumentException ( "Can't convert " + fromType + " to " + toType ) ; }
public class Expressions { /** * Create a new Path expression * @ param type type of expression * @ param variable variable name * @ return path expression */ public static < T > DslPath < T > dslPath ( Class < ? extends T > type , String variable ) { } }
return new DslPath < T > ( type , PathMetadataFactory . forVariable ( variable ) ) ;
public class Cache { /** * 删除哈希表 key 中的一个或多个指定域 , 不存在的域将被忽略 。 */ public Long hdel ( Object key , Object ... fields ) { } }
Jedis jedis = getJedis ( ) ; try { return jedis . hdel ( keyToBytes ( key ) , fieldsToBytesArray ( fields ) ) ; } finally { close ( jedis ) ; }
public class GridNode { /** * Get a window of values surrounding the current node . * < p > Notes : < / p > * < ul > * < li > the size has to be odd , so that the current node can be in the center . * If the size is even , size + 1 will be used . < / li > * < li > values outside the boundaries of the raster will be set to novalue . * No exception is thrown . < / li > * < / ul > * @ param size the size of the window . The window will be a matrix window [ size ] [ size ] . * @ param doCircular if < code > true < / code > the window values are set to novalue * were necessary to make it circular . * @ return the read window . */ public double [ ] [ ] getWindow ( int size , boolean doCircular ) { } }
if ( size % 2 == 0 ) { size ++ ; } double [ ] [ ] window = new double [ size ] [ size ] ; int delta = ( size - 1 ) / 2 ; if ( ! doCircular ) { for ( int c = - delta ; c <= delta ; c ++ ) { int tmpCol = col + c ; for ( int r = - delta ; r <= delta ; r ++ ) { int tmpRow = row + r ; GridNode n = new GridNode ( gridIter , cols , rows , xRes , yRes , tmpCol , tmpRow ) ; window [ r + delta ] [ c + delta ] = n . elevation ; } } } else { double radius = delta ; // rows + half cell for ( int c = - delta ; c <= delta ; c ++ ) { int tmpCol = col + c ; for ( int r = - delta ; r <= delta ; r ++ ) { int tmpRow = row + r ; double distance = sqrt ( c * c + r * r ) ; if ( distance <= radius ) { GridNode n = new GridNode ( gridIter , cols , rows , xRes , yRes , tmpCol , tmpRow ) ; window [ r + delta ] [ c + delta ] = n . elevation ; } else { window [ r + delta ] [ c + delta ] = doubleNovalue ; } } } } return window ;
public class HBaseVersion { /** * Prints out the HBase { @ link Version } enum value for the current version of HBase on the classpath . */ public static void main ( String [ ] args ) { } }
Version version = HBaseVersion . get ( ) ; System . out . println ( version . getMajorVersion ( ) ) ;
public class StopWatch { /** * Stop the current task . The results are undefined if timing methods are * called without invoking at least one pair { @ link # start ( ) } / * { @ link # stop ( ) } methods . * @ see # start ( ) */ public void stop ( ) throws IllegalStateException { } }
if ( ! this . running ) { throw new IllegalStateException ( "Can't stop StopWatch: it's not running" ) ; } long lastTime = System . currentTimeMillis ( ) - this . startTimeMillis ; this . totalTimeMillis += lastTime ; this . lastTaskInfo = new TaskInfo ( this . currentTaskName , lastTime ) ; if ( this . keepTaskList ) { this . taskList . add ( lastTaskInfo ) ; } ++ this . taskCount ; this . running = false ; this . currentTaskName = null ;
public class OAuth2SessionRef { /** * Use the refresh token to get a new token with a longer lifespan */ public synchronized void refreshToken ( ) { } }
final String refreshToken = this . response . refresh_token ; this . response = null ; this . cachedInfo = null ; final String responseStr = authService . getToken ( UserManagerOAuthService . GRANT_TYPE_REFRESH_TOKEN , null , null , clientId , clientSecret , refreshToken , null , null , null ) ; loadAuthResponse ( responseStr ) ;
public class ComputationGraph { /** * Return the layer size ( number of units ) for the specified layer . * Note that the meaning of the " layer size " can depend on the type of layer . For example : < br > * - DenseLayer , OutputLayer , recurrent layers : number of units ( nOut configuration option ) < br > * - ConvolutionLayer : the channels ( number of channels ) < br > * - Subsampling layers , global pooling layers , etc : size of 0 is always returned < br > * @ param layer Index of the layer to get the size of . Must be in range 0 to nLayers - 1 inclusive * @ return Size of the layer */ public int layerSize ( int layer ) { } }
if ( layer < 0 || layer > layers . length ) { throw new IllegalArgumentException ( "Invalid layer index: " + layer + ". Layer index must be between 0 and " + ( layers . length - 1 ) + " inclusive" ) ; } return layerSize ( layers [ layer ] . conf ( ) . getLayer ( ) . getLayerName ( ) ) ;
public class PreviousSnapshotsCalculator { /** * Returns a Map from snapshot . id to snapshot with this id . * The Map contains entries for all previous snapshots . * I . e , for each snapshot S from a given list , there is a map entry for S . id . previous ( ) */ Map < SnapshotIdentifier , CdoSnapshot > calculate ( List < CdoSnapshot > snapshots ) { } }
Map < SnapshotIdentifier , CdoSnapshot > previousSnapshots = new HashMap < > ( ) ; populatePreviousSnapshotsWithSnapshots ( previousSnapshots , snapshots ) ; List < CdoSnapshot > missingPreviousSnapshots = getMissingPreviousSnapshots ( snapshots , previousSnapshots ) ; populatePreviousSnapshotsWithSnapshots ( previousSnapshots , missingPreviousSnapshots ) ; return previousSnapshots ;
public class DataProviderFactory { /** * Creates an instance of the specified class using the specified < code > ClassLoader < / code > object . * @ throws ClassNotFoundException if the given class could not be found or could not be instantiated */ private static Class < ? > findClass ( String className , ClassLoader classLoader ) throws ClassNotFoundException { } }
try { Class < ? > spiClass ; if ( classLoader == null ) { spiClass = Class . forName ( className ) ; } else { try { spiClass = Class . forName ( className , false , classLoader ) ; } catch ( ClassNotFoundException ex ) { spiClass = Class . forName ( className ) ; } } return spiClass ; } catch ( ClassNotFoundException x ) { throw x ; } catch ( Exception x ) { throw new ClassNotFoundException ( "Factory " + className + " could not be instantiated: " + x , x ) ; }
public class AutoZone { /** * async */ void preQueryIndex ( final ZoneIndex index , final QueryHandler complete ) { } }
if ( index == null ) { complete . onFailure ( ResponseInfo . InvalidToken ) ; return ; } ZoneInfo info = zones . get ( index ) ; if ( info != null ) { complete . onSuccess ( ) ; return ; } getZoneJsonAsync ( index , new CompletionHandler ( ) { @ Override public void complete ( ResponseInfo info , JSONObject response ) { if ( info . isOK ( ) && response != null ) { try { ZoneInfo info2 = ZoneInfo . buildFromJson ( response ) ; zones . put ( index , info2 ) ; complete . onSuccess ( ) ; return ; } catch ( JSONException e ) { e . printStackTrace ( ) ; complete . onFailure ( ResponseInfo . NetworkError ) ; return ; } } complete . onFailure ( info . statusCode ) ; } } ) ;
public class FileUtil { /** * 创建文件及其父目录 , 如果这个文件存在 , 直接返回这个文件 < br > * 此方法不对File对象类型做判断 , 如果File不存在 , 无法判断其类型 * @ param fullFilePath 文件的全路径 , 使用POSIX风格 * @ return 文件 , 若路径为null , 返回null * @ throws IORuntimeException IO异常 */ public static File touch ( String fullFilePath ) throws IORuntimeException { } }
if ( fullFilePath == null ) { return null ; } return touch ( file ( fullFilePath ) ) ;
public class JDBCStorageConnection { /** * Delete Property Values . * @ param cid * Property id * @ param pdata * PropertyData * @ param update * boolean true if it ' s delete - add sequence ( update operation ) * @ param sizeHandler * accumulates changed size * @ throws IOException * i / O error * @ throws SQLException * if database error occurs * @ throws RepositoryException * @ throws InvalidItemStateException */ private void deleteValues ( String cid , PropertyData pdata , boolean update , ChangedSizeHandler sizeHandler ) throws IOException , SQLException , RepositoryException , InvalidItemStateException { } }
Set < String > storages = new HashSet < String > ( ) ; final ResultSet valueRecords = findValueStorageDescAndSize ( cid ) ; try { if ( valueRecords . next ( ) ) { do { final String storageId = valueRecords . getString ( COLUMN_VSTORAGE_DESC ) ; if ( ! valueRecords . wasNull ( ) ) { storages . add ( storageId ) ; } else { sizeHandler . accumulatePrevSize ( valueRecords . getLong ( 1 ) ) ; } } while ( valueRecords . next ( ) ) ; } // delete all values in value storage for ( String storageId : storages ) { final ValueIOChannel channel = this . containerConfig . valueStorageProvider . getChannel ( storageId ) ; try { sizeHandler . accumulatePrevSize ( channel . getValueSize ( pdata . getIdentifier ( ) ) ) ; channel . delete ( pdata . getIdentifier ( ) ) ; valueChanges . add ( channel ) ; } finally { channel . close ( ) ; } } // delete all Values in database deleteValueData ( cid ) ; } finally { JDBCUtils . freeResources ( valueRecords , null , null ) ; }
public class Util { /** * Count the number of JDBC parameters in a sql statement . * @ param query * . sql ( ) * @ return */ static int parametersCount ( Query query ) { } }
if ( query . names ( ) . isEmpty ( ) ) return countQuestionMarkParameters ( query . sql ( ) ) ; else return query . names ( ) . size ( ) ;
public class Configuration { /** * Check to see if the size is valid for any of the images types * @ param sizeToCheck * @ return */ public boolean isValidSize ( String sizeToCheck ) { } }
return isValidPosterSize ( sizeToCheck ) || isValidBackdropSize ( sizeToCheck ) || isValidProfileSize ( sizeToCheck ) || isValidLogoSize ( sizeToCheck ) ;
public class FdfWriter { /** * Removes the field value . * @ param field the field name * @ return < CODE > true < / CODE > if the field was found and removed , * < CODE > false < / CODE > otherwise */ public boolean removeField ( String field ) { } }
HashMap map = fields ; StringTokenizer tk = new StringTokenizer ( field , "." ) ; if ( ! tk . hasMoreTokens ( ) ) return false ; ArrayList hist = new ArrayList ( ) ; while ( true ) { String s = tk . nextToken ( ) ; Object obj = map . get ( s ) ; if ( obj == null ) return false ; hist . add ( map ) ; hist . add ( s ) ; if ( tk . hasMoreTokens ( ) ) { if ( obj instanceof HashMap ) map = ( HashMap ) obj ; else return false ; } else { if ( obj instanceof HashMap ) return false ; else break ; } } for ( int k = hist . size ( ) - 2 ; k >= 0 ; k -= 2 ) { map = ( HashMap ) hist . get ( k ) ; String s = ( String ) hist . get ( k + 1 ) ; map . remove ( s ) ; if ( ! map . isEmpty ( ) ) break ; } return true ;
public class AmazonRoute53ResolverClient { /** * Adds IP addresses to an inbound or an outbound resolver endpoint . If you want to adding more than one IP address , * submit one < code > AssociateResolverEndpointIpAddress < / code > request for each IP address . * To remove an IP address from an endpoint , see < a > DisassociateResolverEndpointIpAddress < / a > . * @ param associateResolverEndpointIpAddressRequest * @ return Result of the AssociateResolverEndpointIpAddress operation returned by the service . * @ throws ResourceNotFoundException * The specified resource doesn ' t exist . * @ throws InvalidParameterException * One or more parameters in this request are not valid . * @ throws InvalidRequestException * The request is invalid . * @ throws ResourceExistsException * The resource that you tried to create already exists . * @ throws InternalServiceErrorException * We encountered an unknown error . Try again in a few minutes . * @ throws LimitExceededException * The request caused one or more limits to be exceeded . * @ throws ThrottlingException * The request was throttled . Try again in a few minutes . * @ sample AmazonRoute53Resolver . AssociateResolverEndpointIpAddress * @ see < a * href = " http : / / docs . aws . amazon . com / goto / WebAPI / route53resolver - 2018-04-01 / AssociateResolverEndpointIpAddress " * target = " _ top " > AWS API Documentation < / a > */ @ Override public AssociateResolverEndpointIpAddressResult associateResolverEndpointIpAddress ( AssociateResolverEndpointIpAddressRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeAssociateResolverEndpointIpAddress ( request ) ;
public class EmailIntents { /** * Create an intent to send an email with an attachment to a single recipient * @ param address The recipient address ( or null if not specified ) * @ param subject The subject of the email ( or null if not specified ) * @ param body The body of the email ( or null if not specified ) * @ param attachment The URI of a file to attach to the email . Note that the URI must point to a location the email * application is allowed to read and has permissions to access . * @ return the intent */ public static Intent newEmailIntent ( String address , String subject , String body , Uri attachment ) { } }
return newEmailIntent ( address == null ? null : new String [ ] { address } , subject , body , attachment ) ;
public class DRUMS { /** * Determines the number of elements in each buckets by opening all files . * @ return the number of elements in the database . * @ throws IOException * @ throws FileLockException */ public long size ( ) throws FileLockException , IOException { } }
/*
 * Sums the element counts of every bucket's header-index file.
 *
 * Fix: the original closed each HeaderIndexFile only on the success path, so
 * an exception while reading a bucket leaked the open file (and its lock).
 * The close now runs in a finally block.
 */
long size = 0L;
for (int bucketId = 0; bucketId < hashFunction.getNumberOfBuckets(); bucketId++) {
    HeaderIndexFile<Data> headerIndexFile = new HeaderIndexFile<Data>(
            gp.DATABASE_DIRECTORY + "/" + hashFunction.getFilename(bucketId),
            gp.HEADER_FILE_LOCK_RETRY, gp);
    try {
        // filled bytes / bytes-per-element = number of stored elements
        size += headerIndexFile.getFilledUpFromContentStart() / gp.getElementSize();
    } finally {
        headerIndexFile.close();
    }
}
return size;
public class FDBigInteger { /** * @ requires p5 > = 0 & & p2 > = 0; * @ assignable \ nothing ; * @ ensures \ result . value ( ) = = \ old ( pow52 ( p5 , p2 ) ) ; */ public static FDBigInteger valueOfPow52 ( int p5 , int p2 ) { } }
// Fast paths for 5^p5 * 2^p2 that avoid general big-integer arithmetic.
if (p5 != 0) {
    if (p2 == 0) {
        // Pure power of five.
        return big5pow(p5);
    } else if (p5 < SMALL_5_POW.length) {
        // 5^p5 fits in a single int; fold 2^p2 in as a whole-word offset
        // plus a residual bit shift.
        int pow5 = SMALL_5_POW[p5];
        int wordcount = p2 >> 5;   // whole 32-bit words of the shift
        int bitcount = p2 & 0x1f;  // remaining 0..31 bits of the shift
        if (bitcount == 0) {
            return new FDBigInteger(new int[] { pow5 }, wordcount);
        } else {
            // The shifted value straddles two 32-bit words.
            return new FDBigInteger(new int[] { pow5 << bitcount, pow5 >>> (32 - bitcount) }, wordcount);
        }
    } else {
        // Large power of five: compute 5^p5, then shift left by p2 bits.
        return big5pow(p5).leftShift(p2);
    }
} else {
    // p5 == 0: the result is a pure power of two.
    return valueOfPow2(p2);
}
public class WFileWidgetRenderer { /** * Paints the given WFileWidget . * @ param component the WFileWidget to paint . * @ param renderContext the RenderContext to paint to . */ @ Override public void doRender ( final WComponent component , final WebXmlRenderContext renderContext ) { } }
// Renders the file widget as an XML element. Attribute emission order is the
// rendering contract, so the statement sequence below must be preserved.
WFileWidget fileWidget = (WFileWidget) component;
XmlStringBuilder xml = renderContext.getWriter();
boolean readOnly = fileWidget.isReadOnly();
xml.appendTagOpen(TAG_NAME);
xml.appendAttribute("id", component.getId());
xml.appendOptionalAttribute("class", component.getHtmlClass());
xml.appendOptionalAttribute("track", component.isTracking(), "true");
xml.appendOptionalAttribute("hidden", component.isHidden(), "true");
if (readOnly) {
    // Read-only widgets render as an empty, self-closed element.
    xml.appendAttribute("readOnly", "true");
    xml.appendEnd();
    return;
}
xml.appendOptionalAttribute("disabled", fileWidget.isDisabled(), "true");
xml.appendOptionalAttribute("required", fileWidget.isMandatory(), "true");
xml.appendOptionalAttribute("toolTip", fileWidget.getToolTip());
xml.appendOptionalAttribute("accessibleText", fileWidget.getAccessibleText());
xml.appendOptionalAttribute("acceptedMimeTypes", typesToString(fileWidget.getFileTypes()));
long maxFileSize = fileWidget.getMaxFileSize();
// Only emitted when a positive limit has been configured.
xml.appendOptionalAttribute("maxFileSize", maxFileSize > 0, maxFileSize);
List<Diagnostic> diags = fileWidget.getDiagnostics(Diagnostic.ERROR);
if (diags == null || diags.isEmpty()) {
    // No error diagnostics: self-close the element.
    xml.appendEnd();
    return;
}
// Diagnostics present: open the element, render them, then close the tag.
xml.appendClose();
DiagnosticRenderUtil.renderDiagnostics(fileWidget, renderContext);
xml.appendEndTag(TAG_NAME);
public class DomainEntry { /** * ( Deprecated ) The options for the domain entry . * < note > * In releases prior to November 29 , 2017 , this parameter was not included in the API response . It is now * deprecated . * < / note > * @ param options * ( Deprecated ) The options for the domain entry . < / p > < note > * In releases prior to November 29 , 2017 , this parameter was not included in the API response . It is now * deprecated . * @ return Returns a reference to this object so that method calls can be chained together . */ @ Deprecated public DomainEntry withOptions ( java . util . Map < String , String > options ) { } }
// Stores the (deprecated) options map, returning this instance for chaining.
setOptions(options);
return this;
public class TorrentInfo { /** * This function will map a range in a specific file into a range in the torrent . * The { @ code offset } parameter is the offset in the file , given in bytes , where * 0 is the start of the file . * The input range is assumed to be valid within the torrent . { @ code offset + size } * is not allowed to be greater than the file size . { @ code index } * must refer to a valid file , i . e . it cannot be { @ code > = numFiles ( ) } . * @ param file * @ param offset * @ param size * @ return * @ see com . frostwire . jlibtorrent . PeerRequest */ public PeerRequest mapFile ( int file , long offset , int size ) { } }
// Translate the (file, offset, size) range via the native torrent-info
// object and wrap the native result in a PeerRequest.
return new PeerRequest(ti.map_file(file, offset, size));
public class AmazonIdentityManagementClient { /** * Retrieves the specified inline policy document that is embedded in the specified IAM user . * < note > * Policies returned by this API are URL - encoded compliant with < a href = " https : / / tools . ietf . org / html / rfc3986 " > RFC * 3986 < / a > . You can use a URL decoding method to convert the policy back to plain JSON text . For example , if you * use Java , you can use the < code > decode < / code > method of the < code > java . net . URLDecoder < / code > utility class in the * Java SDK . Other languages and SDKs provide similar functionality . * < / note > * An IAM user can also have managed policies attached to it . To retrieve a managed policy document that is attached * to a user , use < a > GetPolicy < / a > to determine the policy ' s default version . Then use < a > GetPolicyVersion < / a > to * retrieve the policy document . * For more information about policies , see < a * href = " https : / / docs . aws . amazon . com / IAM / latest / UserGuide / policies - managed - vs - inline . html " > Managed Policies and * Inline Policies < / a > in the < i > IAM User Guide < / i > . * @ param getUserPolicyRequest * @ return Result of the GetUserPolicy operation returned by the service . * @ throws NoSuchEntityException * The request was rejected because it referenced a resource entity that does not exist . The error message * describes the resource . * @ throws ServiceFailureException * The request processing has failed because of an unknown error , exception or failure . * @ sample AmazonIdentityManagement . GetUserPolicy * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / iam - 2010-05-08 / GetUserPolicy " target = " _ top " > AWS API * Documentation < / a > */ @ Override public GetUserPolicyResult getUserPolicy ( GetUserPolicyRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetUserPolicy ( request ) ;
public class DefaultCacheContainer { /** * { @ inheritDoc } * @ see org . infinispan . manager . EmbeddedCacheManager # getCache ( java . lang . String , boolean ) */ @ Override public < K , V > Cache < K , V > getCache ( String cacheName , boolean start ) { } }
// Resolve the effective cache name, fetch the cache, and wrap non-null
// results in a delegating facade.
Cache<K, V> cache = this.cm.getCache(this.getCacheName(cacheName), start);
if (cache == null) {
    return null;
}
return new DelegatingCache<>(cache);
public class DatabaseJournal { /** * { @ inheritDoc } */ public void init ( String id , NamespaceResolver resolver ) throws JournalException { } }
// Initialise the base journal, then set up the database schema and SQL.
super.init(id, resolver);
init();
try {
    conHelper = createConnectionHelper(getDataSource());
    // make sure schemaObjectPrefix consists of legal name characters only
    schemaObjectPrefix = conHelper.prepareDbIdentifier(schemaObjectPrefix);
    // check if schema objects exist and create them if necessary
    if (isSchemaCheckEnabled()) {
        createCheckSchemaOperation().run();
    }
    // Make sure that the LOCAL_REVISIONS table exists (see JCR-1087)
    if (isSchemaCheckEnabled()) {
        checkLocalRevisionSchema();
    }
    buildSQLStatements();
    initInstanceRevisionAndJanitor();
} catch (Exception e) {
    // Any setup failure is wrapped in a JournalException for the caller.
    String msg = "Unable to create connection.";
    throw new JournalException(msg, e);
}
log.info("DatabaseJournal initialized.");
public class MappedBuffer { /** * Allocates a mapped buffer . * Memory will be mapped by opening and expanding the given { @ link java . io . File } to the desired { @ code count } and mapping the * file contents into memory via { @ link java . nio . channels . FileChannel # map ( java . nio . channels . FileChannel . MapMode , long , long ) } . * The resulting buffer will have a capacity of { @ code initialCapacity } . The underlying { @ link UnsafeMappedBytes } will be * initialized to the next power of { @ code 2 } . As bytes are written to the buffer , the buffer ' s capacity will double * as long as { @ code maxCapacity > capacity } . * @ param file The file to map into memory . If the file doesn ' t exist it will be automatically created . * @ param initialCapacity The initial capacity of the buffer . * @ param maxCapacity The maximum capacity of the buffer . * @ return The mapped buffer . * @ throws NullPointerException If { @ code file } is { @ code null } * @ throws IllegalArgumentException If the { @ code capacity } or { @ code maxCapacity } is greater than * { @ link Integer # MAX _ VALUE } . * @ see # allocate ( java . io . File ) * @ see # allocate ( java . io . File , java . nio . channels . FileChannel . MapMode ) * @ see # allocate ( java . io . File , long ) * @ see # allocate ( java . io . File , java . nio . channels . FileChannel . MapMode , long ) * @ see # allocate ( java . io . File , java . nio . channels . FileChannel . MapMode , long , long ) */ public static MappedBuffer allocate ( File file , long initialCapacity , long maxCapacity ) { } }
// Delegate with the default read/write mapping mode.
return allocate(file, FileChannel.MapMode.READ_WRITE, initialCapacity, maxCapacity);
public class PartitionRequestClient { /** * Sends a task event backwards to an intermediate result partition producer . * Backwards task events flow between readers and writers and therefore * will only work when both are running at the same time , which is only * guaranteed to be the case when both the respective producer and * consumer task run pipelined . */ public void sendTaskEvent ( ResultPartitionID partitionId , TaskEvent event , final RemoteInputChannel inputChannel ) throws IOException { } }
// Fail fast if this client has already been closed.
checkNotClosed();
// Fire-and-forget write; transport failures are reported asynchronously
// back to the input channel via its error callback.
tcpChannel.writeAndFlush(new TaskEventRequest(event, partitionId, inputChannel.getInputChannelId()))
        .addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (!future.isSuccess()) {
                    SocketAddress remoteAddr = future.channel().remoteAddress();
                    inputChannel.onError(new LocalTransportException(
                            String.format("Sending the task event to '%s' failed.", remoteAddr),
                            future.channel().localAddress(), future.cause()));
                }
            }
        });
public class DatePicker { /** * Set the range of selectable dates . * @ param minDay The day value of minimum date . * @ param minMonth The month value of minimum date . * @ param minYear The year value of minimum date . * @ param maxDay The day value of maximum date . * @ param maxMonth The month value of maximum date . * @ param maxYear The year value of maximum date . */ public void setDateRange ( int minDay , int minMonth , int minYear , int maxDay , int maxMonth , int maxYear ) { } }
// Forward the full min/max date bounds to the backing adapter.
mAdapter.setDateRange(minDay, minMonth, minYear, maxDay, maxMonth, maxYear);
public class PmiRegistry { /** * return the PerfLevelDescriptor for each module / instance / submodule / subinstance during runtime . */ public static PerfLevelDescriptor [ ] getAllInstrumentationLevels ( ) { } }
/*
 * Snapshot the PerfLevelDescriptor tree into an array.
 *
 * Returns null when PMI is disabled (preserved for backward compatibility
 * with existing callers). Improvement: the element-by-element copy loop is
 * replaced by a single Collection.toArray call; the cast is required because
 * the list is a raw ArrayList.
 */
if (disabled) {
    return null;
}
ArrayList res = moduleRoot.getPerfLevelDescriptors(false);
// create returned array in one bulk copy
return (PerfLevelDescriptor[]) res.toArray(new PerfLevelDescriptor[res.size()]);
public class DeprecationStatus { /** * Returns the timestamp ( in milliseconds since epoch ) on or after which the deprecation state of * this resource will be changed to { @ link Status # OBSOLETE } . Returns { @ code null } if not set . * @ throws IllegalStateException if { @ link # getObsolete ( ) } is not a valid date , time or datetime */ public Long getObsoleteMillis ( ) { } }
// An unset timestamp propagates as null; a malformed one surfaces as
// IllegalStateException, matching the documented contract.
if (obsolete == null) {
    return null;
}
try {
    return TIMESTAMP_FORMATTER.parse(obsolete, Instant.FROM).toEpochMilli();
} catch (DateTimeParseException ex) {
    throw new IllegalStateException(ex.getMessage(), ex);
}
public class JdbcKAMLoaderImpl { /** * { @ inheritDoc } */ @ Override public void loadAnnotationValues ( AnnotationValueTable avt ) throws SQLException { } }
// Batch-inserts every annotation value. Object ids for previously stored
// values are reused via valueIndexMap, which acts as a write-through cache.
PreparedStatement aps = getPreparedStatement(ANNOTATION_SQL);
Set<Entry<Integer, TableAnnotationValue>> annotationEntries = avt.getIndexValue().entrySet();
for (Entry<Integer, TableAnnotationValue> annotationEntry : annotationEntries) {
    // Table indexes are zero-based; database ids are one-based, hence +1.
    aps.setInt(1, (annotationEntry.getKey() + 1));
    TableAnnotationValue tableValue = annotationEntry.getValue();
    String value = tableValue.getAnnotationValue();
    int oid;
    Integer objectId = valueIndexMap.get(value);
    if (objectId != null) {
        oid = objectId;
    } else {
        // First occurrence: persist the value and remember its object id.
        oid = saveObject(1, value);
        valueIndexMap.put(value, oid);
    }
    aps.setInt(2, oid);
    aps.setInt(3, (tableValue.getAnnotationDefinitionId() + 1));
    aps.addBatch();
}
aps.executeBatch();
public class ResponseDownloadPerformer { protected void writeDownloadStream ( InputStream ins , OutputStream out ) throws IOException { } }
// Copy the stream, translating any IOException into the download-specific
// exception raised by throwDownloadIOException.
try {
    fromInputStreamToOutputStream(ins, out);
} catch (IOException cause) {
    throwDownloadIOException(cause);
}
public class StopBackupJobRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( StopBackupJobRequest stopBackupJobRequest , ProtocolMarshaller protocolMarshaller ) { } }
// Reject null requests up front, then marshall the single bound field,
// wrapping any failure in an SdkClientException.
if (stopBackupJobRequest == null) {
    throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
    protocolMarshaller.marshall(stopBackupJobRequest.getBackupJobId(), BACKUPJOBID_BINDING);
} catch (Exception cause) {
    throw new SdkClientException("Unable to marshall request to JSON: " + cause.getMessage(), cause);
}
public class DatatypeConverter { /** * Prints a currency symbol position value . * @ param value CurrencySymbolPosition instance * @ return currency symbol position */ public static final String printCurrencySymbolPosition ( CurrencySymbolPosition value ) { } }
// Map each symbol position onto its wire value; BEFORE and any unexpected
// value both map to "0", matching the original default handling.
switch (value) {
    case AFTER: {
        return "1";
    }
    case BEFORE_WITH_SPACE: {
        return "2";
    }
    case AFTER_WITH_SPACE: {
        return "3";
    }
    case BEFORE:
    default: {
        return "0";
    }
}
public class GISLayerWriter { /** * Flush temp buffers , write down final information in * file header ( file size . . . ) , and close the streams . * @ throws IOException in case of error . */ @ SuppressWarnings ( "resource" ) @ Override public void close ( ) throws IOException { } }
// Nothing to flush when the temporary file was never written to.
if (this.tmpFile.canRead()) {
    try {
        // Finish the temporary stream before re-reading its contents.
        this.tmpOutput.close();
        try (WritableByteChannel out = Channels.newChannel(this.output)) {
            try (ReadableByteChannel in = Channels.newChannel(new FileInputStream(this.tmpFile))) {
                // Write the header
                final int limit = HEADER_KEY.getBytes().length + 6;
                final ByteBuffer hBuffer = ByteBuffer.allocate(limit);
                hBuffer.limit(limit);
                in.read(hBuffer);
                // Patch the final element count into the header bytes before
                // forwarding them to the real output.
                hBuffer.position(HEADER_KEY.getBytes().length + 2);
                hBuffer.putInt(this.length);
                hBuffer.rewind();
                out.write(hBuffer);
                // Write the objects
                final ByteBuffer buffer = ByteBuffer.allocate(4096);
                int read;
                while ((read = in.read(buffer)) >= 0) {
                    // Limit the buffer to the bytes actually read, then reset
                    // it to full capacity for the next read.
                    buffer.rewind();
                    buffer.limit(read);
                    out.write(buffer);
                    buffer.rewind();
                    buffer.limit(buffer.capacity());
                }
            }
        }
    } finally {
        // The temporary file is no longer needed, regardless of outcome.
        this.tmpFile.delete();
    }
}
public class SasFileParser { /** * Put next page to cache and read it ' s header . * @ throws IOException if reading from the { @ link SasFileParser # sasFileStream } string is impossible . */ private void processNextPage ( ) throws IOException { } }
// Page layout offset differs between 64-bit and 32-bit SAS files.
int bitOffset = sasFileProperties.isU64() ? PAGE_BIT_OFFSET_X64 : PAGE_BIT_OFFSET_X86;
currentPageDataSubheaderPointers.clear();
try {
    sasFileStream.readFully(cachedPage, 0, sasFileProperties.getPageLength());
} catch (EOFException ex) {
    // No more pages: record end-of-file and stop processing.
    eof = true;
    return;
}
readPageHeader();
// Only metadata and AMD pages carry subheaders to process.
if (currentPageType == PAGE_META_TYPE_1 || currentPageType == PAGE_META_TYPE_2
        || currentPageType == PAGE_AMD_TYPE) {
    List<SubheaderPointer> subheaderPointers = new ArrayList<SubheaderPointer>();
    processPageMetadata(bitOffset, subheaderPointers);
    if (currentPageType == PAGE_AMD_TYPE) {
        // AMD pages may carry column information missed earlier.
        processMissingColumnInfo();
    }
}
public class DomXmlMessageValidator { /** * Validate namespaces in message . The method compares namespace declarations in the root * element of the received message to expected namespaces . Prefixes are important too , so * differing namespace prefixes will fail the validation . * @ param expectedNamespaces * @ param receivedMessage */ protected void validateNamespaces ( Map < String , String > expectedNamespaces , Message receivedMessage ) { } }
// Nothing to validate when no namespaces are expected.
if (CollectionUtils.isEmpty(expectedNamespaces)) {
    return;
}
// Namespace validation needs a payload to inspect.
if (receivedMessage.getPayload() == null || !StringUtils.hasText(receivedMessage.getPayload(String.class))) {
    throw new ValidationException("Unable to validate message namespaces - receive message payload was empty");
}
log.debug("Start XML namespace validation");
Document received = XMLUtils.parseMessagePayload(receivedMessage.getPayload(String.class));
Map<String, String> foundNamespaces = XMLUtils.lookupNamespaces(receivedMessage.getPayload(String.class));
// Declaration counts must match exactly - extra declarations also fail.
if (foundNamespaces.size() != expectedNamespaces.size()) {
    throw new ValidationException("Number of namespace declarations not equal for node "
            + XMLUtils.getNodesPathName(received.getFirstChild()) + " found " + foundNamespaces.size()
            + " expected " + expectedNamespaces.size());
}
// Each expected prefix must be present and bound to the expected URL.
for (Entry<String, String> entry : expectedNamespaces.entrySet()) {
    String namespace = entry.getKey();
    String url = entry.getValue();
    if (foundNamespaces.containsKey(namespace)) {
        if (!foundNamespaces.get(namespace).equals(url)) {
            throw new ValidationException("Namespace '" + namespace + "' values not equal: found '"
                    + foundNamespaces.get(namespace) + "' expected '" + url + "' in reference node "
                    + XMLUtils.getNodesPathName(received.getFirstChild()));
        } else {
            if (log.isDebugEnabled()) {
                log.debug("Validating namespace " + namespace + " value as expected " + url + " - value OK");
            }
        }
    } else {
        throw new ValidationException("Missing namespace " + namespace + "(" + url + ") in node "
                + XMLUtils.getNodesPathName(received.getFirstChild()));
    }
}
log.info("XML namespace validation successful: All values OK");
public class FastForward { /** * Initialization method for a FastForward client . * @ return A new instance of a FastForward client . * @ throws SocketException If a datagram socket cannot be created . */ public static FastForward setup ( InetAddress addr , int port ) throws SocketException { } }
// Open an ephemeral UDP socket for the client to send through, then
// construct the client around it.
final DatagramSocket udpSocket = new DatagramSocket();
return new FastForward(addr, port, udpSocket);
public class ProbeHandlerThread { /** * Handle the probe . */ public void run ( ) { } }
ResponseWrapper response = null;
LOGGER.info("Received probe id: " + probe.getProbeId());
// Only handle probes that we haven't handled before
// The Probe Generator needs to send a stream of identical UDP packets
// to compensate for UDP reliability issues. Therefore, the Responder
// will likely get more than 1 identical probe. We should ignore
// duplicates.
if (!isProbeHandled(probe.getProbeId())) {
    if (this.noBrowser && probe.isNaked()) {
        // Naked (browse-all) probes are refused when browsing is disabled.
        LOGGER.warn("Responder set to noBrowser mode. Discarding naked probe with id [" + probe.getProbeId() + "]");
    } else {
        // Let every registered handler produce a response in turn.
        for (ProbeHandlerPlugin handler : handlers) {
            response = handler.handleProbeEvent(probe);
            if (!response.isEmpty()) {
                LOGGER.debug("Response to probe [" + probe.getProbeId() + "] includes " + response.numberOfServices());
                Iterator<RespondToURL> respondToURLs = probe.getRespondToURLs().iterator();
                if (probe.getRespondToURLs().isEmpty())
                    LOGGER.warn("Processed probe [" + probe.getProbeId() + "] with no respondTo address. That's odd.");
                // Only the first respondTo address is used.
                if (respondToURLs.hasNext()) {
                    RespondToURL respondToURL = respondToURLs.next();
                    // we are ignoring the label for now
                    boolean success = sendResponse(respondToURL.url, probe.getRespondToPayloadType(), response);
                    if (!success) {
                        LOGGER.warn("Issue sending probe [" + probe.getProbeId() + "] response to ["
                                + respondToURL.url + "]");
                    }
                }
            } else {
                LOGGER.error("Response to probe [" + probe.getProbeId() + "] is empty. Not sending empty response.");
            }
        }
    }
    // Record the probe so later duplicates are ignored.
    markProbeAsHandled(probe.getProbeId());
    responder.probeProcessed();
} else {
    LOGGER.info("Discarding duplicate/handled probe with id: " + probe.getProbeId());
}
public class FloatArray { /** * Converts this < code > FloatArray < / code > to an array of floats . * @ return An array of floats containing the same elements as this array . */ public float [ ] toFloatArray ( ) { } }
/*
 * Copies the live portion of the backing array (the first {@code size}
 * elements) into a fresh array.
 *
 * Improvement: the element-by-element loop is replaced by a single
 * System.arraycopy bulk copy, which is the idiomatic (and typically faster)
 * form. Behaviour is unchanged, including for size == 0.
 */
float[] copy = new float[size];
System.arraycopy(elements, 0, copy, 0, size);
return copy;
public class LimesurveyRC { /** * Check if a survey is active . * @ param surveyId the survey id of the survey you want to check * @ return true if the survey is active * @ throws LimesurveyRCException the limesurvey rc exception */ public boolean isSurveyActive ( int surveyId ) throws LimesurveyRCException { } }
// Request only the "active" property for this survey and compare the
// returned flag against Limesurvey's "Y".
LsApiBody.LsApiParams params = getParamsWithKey(surveyId);
List<String> requestedSettings = new ArrayList<>();
requestedSettings.add("active");
params.setSurveySettings(requestedSettings);
String activeFlag = callRC(new LsApiBody("get_survey_properties", params))
        .getAsJsonObject().get("active").getAsString();
return "Y".equals(activeFlag);
public class DiscreteFourierTransformOps { /** * Computes the magnitude of the complex image : < br > * magnitude = sqrt ( real < sup > 2 < / sup > + imaginary < sup > 2 < / sup > ) * @ param transform ( Input ) Complex interleaved image * @ param magnitude ( Output ) Magnitude of image */ public static void magnitude ( InterleavedF32 transform , GrayF32 magnitude ) { } }
// Validate matching dimensions, then compute sqrt(re^2 + im^2) for each
// interleaved (real, imaginary) pair.
checkImageArguments(magnitude, transform);
for (int row = 0; row < transform.height; row++) {
    int srcIdx = transform.startIndex + row * transform.stride;
    int dstIdx = magnitude.startIndex + row * magnitude.stride;
    // Each source pixel occupies two floats, hence the +2 stride on srcIdx.
    for (int col = 0; col < transform.width; col++, srcIdx += 2) {
        float re = transform.data[srcIdx];
        float im = transform.data[srcIdx + 1];
        magnitude.data[dstIdx++] = (float) Math.sqrt(re * re + im * im);
    }
}
public class TableWriterServiceImpl { /** * Writes a blob page to the current output segment . * @ param page the blob to be written * @ param saveSequence * @ return true on completion */ @ InService ( TableWriterService . class ) public void writeBlobPage ( PageBlob page , int saveSequence , Result < Integer > result ) { } }
SegmentStream sOut = getBlobStream();
// NOTE(review): saveLength and saveTail are always passed as 0 here -
// presumably filled in downstream by SegmentStream.writePage; confirm
// against that method before relying on these values.
int saveLength = 0;
int saveTail = 0;
// Track the largest blob seen so the segment size can be recalculated.
if (_blobSizeMax < page.getLength()) {
    _blobSizeMax = page.getLength();
    calculateSegmentSize();
}
sOut.writePage(this, page, saveLength, saveTail, saveSequence, result);
_isBlobDirty = true;
public class AbstractVariantEndpoint { /** * Delete Variant * @ param variantId id of { @ link Variant } * @ return no content or 404 * @ statuscode 204 The Variant successfully deleted * @ statuscode 400 The requested Variant resource exists but it is not of the given type * @ statuscode 404 The requested Variant resource does not exist */ @ DELETE @ Path ( "/{variantId}" ) public Response deleteVariant ( @ PathParam ( "variantId" ) String variantId ) { } }
// 404 when the id is unknown, 400 when it belongs to a different
// variant type/platform; otherwise delete and return 204.
final Variant existing = variantService.findByVariantID(variantId);
if (existing == null) {
    return Response.status(Response.Status.NOT_FOUND)
            .entity("Could not find requested Variant").build();
}
if (!type.isInstance(existing)) {
    return Response.status(Response.Status.BAD_REQUEST)
            .entity("Requested Variant is of another type/platform").build();
}
logger.trace("Deleting: {}", existing.getClass().getSimpleName());
variantService.removeVariant(existing);
return Response.noContent().build();
public class KModuleDeploymentService { /** * This creates and fills a { @ link RuntimeEnvironmentBuilder } instance , which is later used when creating services . * A lot of the logic here is used to process the information in the { @ link DeploymentDescriptor } instance , which is * part of the { @ link DeploymentUnit } . * @ param deploymentUnit The { @ link KModuleDeploymentUnit } , which is filled by the method * @ param deployedUnit The { @ link DeployedUnit } , which is also filled by the method * @ param kieContainer The { @ link KieContainer } , which contains information needed to fill the above two arguments * @ param mode The { @ link MergeMode } used to resolve conflicts in the { @ link DeploymentDescriptor } . * @ return A { @ link RuntimeEnvironmentBuilder } instance ready for use */ protected RuntimeEnvironmentBuilder boostrapRuntimeEnvironmentBuilder ( KModuleDeploymentUnit deploymentUnit , DeployedUnit deployedUnit , KieContainer kieContainer , MergeMode mode ) { } }
/*
 * Builds a RuntimeEnvironmentBuilder from the unit's deployment descriptor:
 * merges the descriptor hierarchy, picks the persistence mode, and registers
 * environment entries, configuration, marshalling strategies, asset roles
 * and remoteable classes.
 *
 * Fixes: the redundant "descriptor != null" test in the else-if branch is
 * removed (it is always true there); the misspelled local "contaxtParams"
 * is renamed; the duplicated ClassNotFoundException / NoClassDefFoundError
 * catch bodies are collapsed into one multi-catch.
 */
DeploymentDescriptor descriptor = deploymentUnit.getDeploymentDescriptor();
if (descriptor == null || ((DeploymentDescriptorImpl) descriptor).isEmpty()) {
    // skip empty descriptors as its default can override settings
    DeploymentDescriptorManager descriptorManager = new DeploymentDescriptorManager("org.jbpm.domain");
    List<DeploymentDescriptor> descriptorHierarchy =
            DeploymentDescriptorManagerUtil.getDeploymentDescriptorHierarchy(descriptorManager, kieContainer);
    descriptor = merger.merge(descriptorHierarchy, mode);
    deploymentUnit.setDeploymentDescriptor(descriptor);
} else if (!deploymentUnit.isDeployed()) {
    // Merge the unit's own descriptor on top of the hierarchy.
    DeploymentDescriptorManager descriptorManager = new DeploymentDescriptorManager("org.jbpm.domain");
    List<DeploymentDescriptor> descriptorHierarchy =
            DeploymentDescriptorManagerUtil.getDeploymentDescriptorHierarchy(descriptorManager, kieContainer);
    descriptorHierarchy.add(0, descriptor);
    descriptor = merger.merge(descriptorHierarchy, mode);
    deploymentUnit.setDeploymentDescriptor(descriptor);
}
// first set on unit the strategy
deploymentUnit.setStrategy(descriptor.getRuntimeStrategy());
// setting up runtime environment via builder
RuntimeEnvironmentBuilder builder;
if (descriptor.getPersistenceMode() == PersistenceMode.NONE) {
    builder = RuntimeEnvironmentBuilder.Factory.get().newDefaultInMemoryBuilder();
} else {
    builder = RuntimeEnvironmentBuilder.Factory.get().newDefaultBuilder();
}
// populate various properties of the builder
EntityManagerFactory emf = EntityManagerFactoryManager.get().getOrCreate(descriptor.getPersistenceUnit());
builder.entityManagerFactory(emf);
Map<String, Object> contextParams = new HashMap<String, Object>();
contextParams.put("entityManagerFactory", emf);
contextParams.put("classLoader", kieContainer.getClassLoader());
// process object models that are globally configured (environment entries, session configuration)
for (NamedObjectModel model : descriptor.getEnvironmentEntries()) {
    Object entry = getInstanceFromModel(model, kieContainer, contextParams);
    builder.addEnvironmentEntry(model.getName(), entry);
}
for (NamedObjectModel model : descriptor.getConfiguration()) {
    Object entry = getInstanceFromModel(model, kieContainer, contextParams);
    builder.addConfiguration(model.getName(), (String) entry);
}
List<ObjectMarshallingStrategy> mStrategies = new ArrayList<>();
for (ObjectModel model : descriptor.getMarshallingStrategies()) {
    Object strategy = getInstanceFromModel(model, kieContainer, contextParams);
    mStrategies.add((ObjectMarshallingStrategy) strategy);
}
// lastly add the main default strategy
mStrategies.add(new SerializablePlaceholderResolverStrategy(ClassObjectMarshallingStrategyAcceptor.DEFAULT));
mStrategies.add(new JavaSerializableResolverStrategy(ClassObjectMarshallingStrategyAcceptor.DEFAULT));
builder.addEnvironmentEntry(EnvironmentName.OBJECT_MARSHALLING_STRATEGIES,
        mStrategies.toArray(new ObjectMarshallingStrategy[0]));
builder.addEnvironmentEntry("KieDeploymentDescriptor", descriptor);
builder.addEnvironmentEntry("KieContainer", kieContainer);
if (executorService != null) {
    builder.addEnvironmentEntry("ExecutorService", executorService);
}
if (identityProvider != null) {
    builder.addEnvironmentEntry(EnvironmentName.IDENTITY_PROVIDER, identityProvider);
}
// populate all assets with roles for this deployment unit
List<String> requiredRoles = descriptor.getRequiredRoles(DeploymentDescriptor.TYPE_VIEW);
if (requiredRoles != null && !requiredRoles.isEmpty()) {
    for (DeployedAsset desc : deployedUnit.getDeployedAssets()) {
        if (desc instanceof ProcessAssetDesc) {
            ((ProcessAssetDesc) desc).setRoles(requiredRoles);
        }
    }
}
// Classes 3: classes added from descriptor
List<String> remoteableClasses = descriptor.getClasses();
if (remoteableClasses != null && !remoteableClasses.isEmpty()) {
    for (String className : remoteableClasses) {
        Class descriptorClass;
        try {
            descriptorClass = kieContainer.getClassLoader().loadClass(className);
            logger.debug("Loaded {} into the classpath from deployment descriptor {}", className,
                    kieContainer.getReleaseId().toExternalForm());
        } catch (ClassNotFoundException | NoClassDefFoundError e) {
            // Both failure modes map to the same caller-visible error.
            throw new IllegalArgumentException("Class " + className + " not found in the project");
        }
        addClassToDeployedUnit(descriptorClass, (DeployedUnitImpl) deployedUnit);
    }
}
return builder;
public class JndiContext { /** * Will create a JNDI Context and register it as the initial context factory builder * @ return the context * @ throws NamingException * on any issue during initial context factory builder registration */ static JndiContext createJndiContext ( ) throws NamingException { } }
try { if ( ! NamingManager . hasInitialContextFactoryBuilder ( ) ) { JndiContext ctx = new JndiContext ( ) ; NamingManager . setInitialContextFactoryBuilder ( ctx ) ; return ctx ; } else { return ( JndiContext ) NamingManager . getInitialContext ( null ) ; } } catch ( Exception e ) { jqmlogger . error ( "Could not create JNDI context: " + e . getMessage ( ) ) ; NamingException ex = new NamingException ( "Could not initialize JNDI Context" ) ; ex . setRootCause ( e ) ; throw ex ; }
public class LocalProperties { public static LocalProperties forOrdering ( Ordering o ) { } }
LocalProperties props = new LocalProperties ( ) ; props . ordering = o ; props . groupedFields = o . getInvolvedIndexes ( ) ; return props ;
public class IfcObjectDefinitionImpl {
    /**
     * Returns the list of {@code IfcRelAssigns} relationships associated with
     * this object definition, via the reflective EMF feature accessor.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    public EList<IfcRelAssigns> getHasAssignments() {
        // 'true' requests that the EMF framework resolve proxies when fetching
        // the feature value.
        return (EList<IfcRelAssigns>) eGet(Ifc2x3tc1Package.Literals.IFC_OBJECT_DEFINITION__HAS_ASSIGNMENTS, true);
    }
}
public class HFactory {
    /**
     * Creates a Keyspace with the given consistency level, failover policy
     * and user credentials. For a reference to the consistency level, please
     * refer to http://wiki.apache.org/cassandra/API.
     *
     * @param keyspace the keyspace name
     * @param cluster the cluster whose connection manager will be used
     * @param consistencyLevelPolicy consistency level policy to apply
     * @param failoverPolicy failover policy to apply
     * @param credentials user credentials passed to the keyspace
     * @return a new executing keyspace bound to the cluster's connection manager
     */
    public static Keyspace createKeyspace(String keyspace, Cluster cluster, ConsistencyLevelPolicy consistencyLevelPolicy, FailoverPolicy failoverPolicy, Map<String, String> credentials) {
        // Reuses the cluster's connection manager rather than opening new connections.
        return new ExecutingKeyspace(keyspace, cluster.getConnectionManager(), consistencyLevelPolicy, failoverPolicy, credentials);
    }
}
public class CommerceDiscountUserSegmentRelPersistenceImpl { /** * Returns the first commerce discount user segment rel in the ordered set where commerceDiscountId = & # 63 ; . * @ param commerceDiscountId the commerce discount ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching commerce discount user segment rel * @ throws NoSuchDiscountUserSegmentRelException if a matching commerce discount user segment rel could not be found */ @ Override public CommerceDiscountUserSegmentRel findByCommerceDiscountId_First ( long commerceDiscountId , OrderByComparator < CommerceDiscountUserSegmentRel > orderByComparator ) throws NoSuchDiscountUserSegmentRelException { } }
CommerceDiscountUserSegmentRel commerceDiscountUserSegmentRel = fetchByCommerceDiscountId_First ( commerceDiscountId , orderByComparator ) ; if ( commerceDiscountUserSegmentRel != null ) { return commerceDiscountUserSegmentRel ; } StringBundler msg = new StringBundler ( 4 ) ; msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ; msg . append ( "commerceDiscountId=" ) ; msg . append ( commerceDiscountId ) ; msg . append ( "}" ) ; throw new NoSuchDiscountUserSegmentRelException ( msg . toString ( ) ) ;
public class MobileProcessLauncher { /** * Get program arguments to pass * @ return the program arguments to pass represented as an array of { @ link String } * @ throws IOException */ @ Override String [ ] getProgramArguments ( ) throws IOException { } }
LOGGER . entering ( ) ; List < String > args = new LinkedList < > ( Arrays . asList ( super . getProgramArguments ( ) ) ) ; // add the defaults which we don ' t already have a value for for ( Entry < String , JsonElement > entry : defaultArgs . entrySet ( ) ) { String key = entry . getKey ( ) ; if ( ! args . contains ( key ) ) { args . add ( key ) ; String val = defaultArgs . get ( key ) . getAsString ( ) ; if ( StringUtils . isNotBlank ( val ) ) { args . add ( val ) ; } } } removeSeLionArgumentsAndValues ( args ) ; LOGGER . exiting ( args . toString ( ) ) ; return args . toArray ( new String [ args . size ( ) ] ) ;
public class JimfsFileSystems { /** * Initialize and configure a new file system with the given provider and URI , using the given * configuration . */ public static JimfsFileSystem newFileSystem ( JimfsFileSystemProvider provider , URI uri , Configuration config ) throws IOException { } }
PathService pathService = new PathService ( config ) ; FileSystemState state = new FileSystemState ( removeFileSystemRunnable ( uri ) ) ; JimfsFileStore fileStore = createFileStore ( config , pathService , state ) ; FileSystemView defaultView = createDefaultView ( config , fileStore , pathService ) ; WatchServiceConfiguration watchServiceConfig = config . watchServiceConfig ; JimfsFileSystem fileSystem = new JimfsFileSystem ( provider , uri , fileStore , pathService , defaultView , watchServiceConfig ) ; pathService . setFileSystem ( fileSystem ) ; return fileSystem ;
public class TunnelingFeature {
    /**
     * Creates a new tunneling feature-get service.
     *
     * @param channelId tunneling connection channel identifier
     * @param seq tunneling connection send sequence number
     * @param featureId the requested interface feature
     * @return new tunneling feature-get service
     */
    public static TunnelingFeature newGet(final int channelId, final int seq, final InterfaceFeature featureId) {
        // A feature-get request carries no feature value; it is constructed
        // with the Success status.
        return new TunnelingFeature(KNXnetIPHeader.TunnelingFeatureGet, channelId, seq, featureId, Success);
    }
}
public class CpnlElFunctions { /** * Returns the escaped text of a rich text value as HTML text for a tag attribute . * We assume that the result is used as value for a insertion done by jQuery . html ( ) ; * in this case all ' & . . . ' escaped chars are translated back by jQuery and the XSS protection * is broken - to avoid this each ' & ' in the value is ' double escaped ' * @ param value the value to escape * @ return the HTML escaped rich text of the value */ public static String attr ( SlingHttpServletRequest request , String value , int qType ) { } }
if ( value != null ) { value = rich ( request , value ) ; value = value . replaceAll ( "&" , "&amp;" ) // prevent from unescaping in jQuery . html ( ) . replaceAll ( QTYPE_CHAR [ qType ] , QTYPE_ESC [ qType ] ) ; } return value ;
public class ConstructState { /** * Merge a { @ link ConstructState } for a child construct into this { @ link ConstructState } . * Non - override property names will be mutated as follows : key - > construct . name ( ) + infix + key * @ param construct type of the child construct . * @ param constructState { @ link ConstructState } to merge . * @ param infix infix added to each non - override key ( for example converter number if there are multiple converters ) . */ public void addConstructState ( Constructs construct , ConstructState constructState , Optional < String > infix ) { } }
addOverwriteProperties ( constructState . getOverwritePropertiesMap ( ) ) ; constructState . removeProp ( OVERWRITE_PROPS_KEY ) ; for ( String key : constructState . getPropertyNames ( ) ) { setProp ( construct . name ( ) + "." + ( infix . isPresent ( ) ? infix . get ( ) + "." : "" ) + key , constructState . getProp ( key ) ) ; } addAll ( constructState ) ;
public class SavedQueriesPanel { /** * < / editor - fold > / / GEN - END : initComponents */ private void selectQueryNode ( javax . swing . event . TreeSelectionEvent evt ) { } }
// GEN - FIRST : event _ selectQueryNode String currentQuery = "" ; DefaultMutableTreeNode node = ( DefaultMutableTreeNode ) evt . getPath ( ) . getLastPathComponent ( ) ; if ( node instanceof QueryTreeElement ) { QueryTreeElement queryElement = ( QueryTreeElement ) node ; currentQuery = queryElement . getQuery ( ) ; currentId = queryElement ; TreeNode parent = queryElement . getParent ( ) ; if ( parent == null || parent . toString ( ) . equals ( "" ) ) { fireQueryChanged ( "" , currentQuery , currentId . getID ( ) ) ; } else { fireQueryChanged ( parent . toString ( ) , currentQuery , currentId . getID ( ) ) ; } } else if ( node instanceof QueryGroupTreeElement ) { QueryGroupTreeElement groupElement = ( QueryGroupTreeElement ) node ; currentId = null ; currentQuery = null ; fireQueryChanged ( groupElement . toString ( ) , "" , "" ) ; } else if ( node == null ) { currentId = null ; currentQuery = null ; }
public class CreateBrowserQuery {
    /**
     * Serializes this packet's fields to the given raw buffer. The write order
     * must match the peer's deserialization order.
     *
     * @see net.timewalker.ffmq4.network.packet.AbstractPacket#serializeTo(net.timewalker.ffmq4.utils.RawDataOutputStream)
     */
    @Override
    protected void serializeTo(RawDataBuffer out) {
        // Write the common packet fields first.
        super.serializeTo(out);
        out.writeInt(browserId.asInt());
        DestinationSerializer.serializeTo(queue, out);
        // Selector may be null; writeNullableUTF encodes the null case.
        out.writeNullableUTF(messageSelector);
    }
}
public class CmsFocalPointController {
    /**
     * Resets the focal point: writes an empty value to the focal point
     * property on the image via RPC, then clears the local focal point state
     * once the save succeeds. Does nothing when the image is not editable.
     */
    public void reset() {
        if (isEditable()) {
            String val = "";
            CmsUUID sid = m_imageInfoProvider.get().getStructureId();
            List<CmsPropertyModification> propChanges = new ArrayList<>();
            // Two modifications with the same key but different boolean flags —
            // presumably the structure vs. resource variant of the property;
            // TODO confirm against CmsPropertyModification's constructor.
            propChanges.add(new CmsPropertyModification(sid, CmsGwtConstants.PROPERTY_IMAGE_FOCALPOINT, val, false));
            propChanges.add(new CmsPropertyModification(sid, CmsGwtConstants.PROPERTY_IMAGE_FOCALPOINT, val, true));
            final CmsPropertyChangeSet changeSet = new CmsPropertyChangeSet(sid, propChanges);
            CmsRpcAction<Void> action = new CmsRpcAction<Void>() {
                @Override
                public void execute() {
                    CmsCoreProvider.getVfsService().saveProperties(changeSet, this);
                }

                @SuppressWarnings("synthetic-access")
                @Override
                protected void onResponse(Void result) {
                    // Clear cached focal point state only after the save succeeded.
                    m_focalPoint = null;
                    m_savedFocalPoint = null;
                    m_imageInfoProvider.get().setFocalPoint(null);
                    updatePoint();
                    if (m_nextAction != null) {
                        m_nextAction.run();
                    }
                }
            };
            action.execute();
        }
    }
}
public class ObjectUtil {
    /**
     * Null-safe comparison of two objects; {@code null} sorts last.
     *
     * @param <T> the type of the compared objects
     * @param c1 first object, may be {@code null}
     * @param c2 second object, may be {@code null}
     * @return a negative value if {@code c1 < c2}, zero if equal, a positive
     *         value if {@code c1 > c2}
     * @since 3.0.7
     * @see java.util.Comparator#compare(Object, Object)
     */
    public static <T extends Comparable<? super T>> int compare(T c1, T c2) {
        // Delegates to CompareUtil, which implements the null-last ordering.
        return CompareUtil.compare(c1, c2);
    }
}
public class ApacheHTTPResponse {
    /**
     * Waits for and then returns the response body.
     *
     * @return body of the response
     * @throws InterruptedException if interrupted while awaiting the response
     * @throws BOSHException on communication failure
     */
    public AbstractBody getBody() throws InterruptedException, BOSHException {
        // Fail fast if a previous attempt already recorded an error.
        // NOTE(review): toThrow is read outside the lock here — assumes it is
        // safely published before this method runs; confirm against the rest
        // of the class.
        if (toThrow != null) {
            throw (toThrow);
        }
        lock.lock();
        try {
            // Block until the response has arrived. NOTE(review): 'sent'
            // presumably flips once the exchange completed — confirm its
            // exact meaning in the enclosing class.
            if (!sent) {
                awaitResponse();
            }
        } finally {
            lock.unlock();
        }
        return body;
    }
}
public class ErrorDetectingWrapper { /** * Old - style calls , including multiquery , has its own wacky error format : * < pre > * " error _ code " : 602, * " error _ msg " : " bogus is not a member of the user table . " , * " request _ args " : [ * " key " : " queries " , * " value " : " { " query1 " : " SELECT uid FROM user WHERE uid = 503702723 " , * " query2 " : " SELECT uid FROM user WHERE bogus = 503702723 " } " * " key " : " method " , * " value " : " fql . multiquery " * " key " : " access _ token " , * " value " : " blahblahblah " * " key " : " format " , * " value " : " json " * < / pre > * The code interpretations rely heavily on http : / / wiki . developers . facebook . com / index . php / Error _ codes * The wayback machine : https : / / web . archive . org / web / 20091223080550 / http : / / wiki . developers . facebook . com / index . php / Error _ codes */ protected void checkForOldRestStyleError ( JsonNode node ) { } }
JsonNode errorCode = node . get ( "error_code" ) ; if ( errorCode != null ) { int code = errorCode . intValue ( ) ; String msg = node . path ( "error_msg" ) . asText ( ) ; this . throwCodeAndMessage ( code , msg ) ; }
public class AbstractGISGridSet { @ Override @ Pure public int indexOf ( Object obj ) { } }
if ( this . clazz . isInstance ( obj ) ) { return this . grid . indexOf ( this . clazz . cast ( obj ) ) ; } return - 1 ;
public class DownloadService { /** * Loads JSON from a JSONP URL . * Metadata for downloadables and update centers is offered in two formats , both designed for download from the browser ( predating { @ link DownloadSettings } ) : * HTML using { @ code postMessage } for newer browsers , and JSONP as a fallback . * Confusingly , the JSONP files are given the { @ code * . json } file extension , when they are really JavaScript and should be { @ code * . js } . * This method extracts the JSON from a JSONP URL , since that is what we actually want when we download from the server . * ( Currently the true JSON is not published separately , and extracting from the { @ code * . json . html } is more work . ) * @ param src a URL to a JSONP file ( typically including { @ code id } and { @ code version } query parameters ) * @ return the embedded JSON text * @ throws IOException if either downloading or processing failed */ @ Restricted ( NoExternalUse . class ) public static String loadJSON ( URL src ) throws IOException { } }
URLConnection con = ProxyConfiguration . open ( src ) ; if ( con instanceof HttpURLConnection ) { // prevent problems from misbehaving plugins disabling redirects by default ( ( HttpURLConnection ) con ) . setInstanceFollowRedirects ( true ) ; } try ( InputStream is = con . getInputStream ( ) ) { String jsonp = IOUtils . toString ( is , "UTF-8" ) ; int start = jsonp . indexOf ( '{' ) ; int end = jsonp . lastIndexOf ( '}' ) ; if ( start >= 0 && end > start ) { return jsonp . substring ( start , end + 1 ) ; } else { throw new IOException ( "Could not find JSON in " + src ) ; } }
public class ResponseTimeRootCauseEntityMarshaller {
    /**
     * Marshalls the given parameter object into its protocol representation.
     *
     * @param responseTimeRootCauseEntity the entity to marshall; must not be null
     * @param protocolMarshaller the target protocol marshaller
     */
    public void marshall(ResponseTimeRootCauseEntity responseTimeRootCauseEntity, ProtocolMarshaller protocolMarshaller) {
        if (responseTimeRootCauseEntity == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each field under its predeclared protocol binding.
            protocolMarshaller.marshall(responseTimeRootCauseEntity.getName(), NAME_BINDING);
            protocolMarshaller.marshall(responseTimeRootCauseEntity.getCoverage(), COVERAGE_BINDING);
            protocolMarshaller.marshall(responseTimeRootCauseEntity.getRemote(), REMOTE_BINDING);
        } catch (Exception e) {
            // Wrap any low-level failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class ProjectIdValidator { /** * Check whether a string is a syntactically correct project ID . This method only checks syntax . * It does not check that the ID actually identifies a project in the Google Cloud Platform . * @ param id the alleged project ID * @ return true if it ' s correct , false otherwise */ public static boolean validate ( @ Nullable String id ) { } }
if ( id == null ) { return false ; } Matcher matcher = PROJECT_ID_REGEX . matcher ( id ) ; return matcher . matches ( ) ;
public class RecoveryLogManagerImpl {
    /**
     * Returns a RecoveryLog that can be used to access a specific recovery log.
     * Each recovery log is contained within a FailureScope. For example, the
     * transaction service on a distributed system has a transaction log in each
     * server node (ie in each FailureScope). Because of this, the caller must
     * specify the FailureScope of the required recovery log.
     * Additionally, the caller must specify information regarding the identity
     * and physical properties of the recovery log. This is done through the
     * LogProperties object provided by the client service.
     *
     * @param failureScope The required FailureScope
     * @param logProperties Contains the identity and physical properties of the
     *        recovery log.
     * @return The RecoveryLog instance.
     * @exception InvalidLogPropertiesException The RLS does not recognize or
     *        cannot support the supplied LogProperties
     */
    @Override
    public synchronized RecoveryLog getRecoveryLog(FailureScope failureScope, LogProperties logProperties) throws InvalidLogPropertiesException {
        if (tc.isEntryEnabled())
            Tr.entry(tc, "getRecoveryLog", new java.lang.Object[] { failureScope, logProperties, this }); /* 5 @ PK01151D */

        // If we're on Z, we can have a ZLogProperties (System Logger) based
        // recovery log. Otherwise, FileLogProperties and CustomLogProperties
        // are the only supported types.
        if (logProperties instanceof StreamLogProperties) {
            // final PlatformHelper ph = PlatformHelperFactory.getPlatformHelper();
            // if (ph.isZOS() == false)
            if (Configuration.isZOS() == false) {
                if (tc.isEventEnabled())
                    Tr.event(tc, "Unable to create stream based recovery log on non-ZOS platform"); /* @ LIDB2561.1A */
                if (tc.isEntryEnabled())
                    Tr.exit(tc, "getRecoveryLog");
                throw new InvalidLogPropertiesException();
            }
        } else if (!(logProperties instanceof FileLogProperties || logProperties instanceof CustomLogProperties)) {
            if (tc.isEventEnabled())
                Tr.event(tc, "Unable to create non-file based or non-Custom recovery log");
            if (tc.isEntryEnabled())
                Tr.exit(tc, "getRecoveryLog");
            throw new InvalidLogPropertiesException();
        }

        final Integer logIdentifier = new Integer(logProperties.logIdentifier());

        RecoveryLog recoveryLog = null;

        // Extract all of the logs that are currently available for the given
        // log identifier.
        HashMap logsByFailureScope = (HashMap) _recoveryLogs.get(logIdentifier);

        if (logsByFailureScope != null) {
            // One or more logs for the given identifier is already available.
            // See if any of these logs are for the given failure scope.
            recoveryLog = (RecoveryLog) logsByFailureScope.get(failureScope);
        } else {
            // There were no logs for the given identifier. Initialize the
            // hashmap for this identifier and add it to the outer map so
            // that it can be used to store the log that will be created
            // below.
            logsByFailureScope = new HashMap();
            _recoveryLogs.put(logIdentifier, logsByFailureScope);
        }

        if (recoveryLog == null) {
            if (logProperties instanceof FileLogProperties) {
                // The requested log has not been accessed already so we must create it.
                MultiScopeLog multiScopeRecoveryLog = null;
                HashMap multiScopeLogsByServerName = null; /* @253893A */
                String serverName = failureScope.serverName(); /* @253893A */

                final FileLogProperties fileLogProperties = (FileLogProperties) logProperties;

                // File-based recovery logs have two different types. A multiple
                // scope log type indicates that records for more than one failure
                // scope may be stored in the same log. A single scope log type
                // indicates that records for different failure scopes must be
                // stored in different logs.
                final int logType = fileLogProperties.logType();

                if (logType == FileLogProperties.LOG_TYPE_MULTIPLE_SCOPE) {
                    // If this is a multiple scope log then we can check to see if
                    // we have an existing log for the given identifier. If we do
                    // we can re-use it. A multi-scope log will only contain failure
                    // scopes that share a server name. In an HA-enabled environment
                    // it's possible for a log identifier to have more than one log.
                    multiScopeLogsByServerName = (HashMap) _multiScopeRecoveryLogs.get(logIdentifier); /* @253893C */

                    if (multiScopeLogsByServerName != null) /* @253893A */
                    { /* @253893A */
                        multiScopeRecoveryLog = (MultiScopeLog) multiScopeLogsByServerName.get(serverName); /* @253893A */
                    } /* @253893A */
                    else /* @253893A */
                    { /* @253893A */
                        multiScopeLogsByServerName = new HashMap(); /* @253893A */
                        _multiScopeRecoveryLogs.put(logIdentifier, multiScopeLogsByServerName); /* @253893A */
                    } /* @253893A */
                }

                if (multiScopeRecoveryLog == null) {
                    // Either a single scope log is required or there was no
                    // existing log for the given identifier. Create a new log.
                    multiScopeRecoveryLog = new MultiScopeRecoveryLog(fileLogProperties, _recoveryAgent, failureScope);

                    if (logType == FileLogProperties.LOG_TYPE_MULTIPLE_SCOPE) {
                        // If this is a multiple scope log then we store it in the map so
                        // that it can be re-used by subsequent requests for a log with the
                        // same log identifier.
                        // It is worth noting that in an environment where the only logs
                        // created are single scope logs this map will remain empty.
                        multiScopeLogsByServerName.put(serverName, multiScopeRecoveryLog); /* @253893A */
                    }
                }

                // Create a new RecoveryLog object to be returned to the caller and
                // store it in the map so that it can be re-used if necessary.
                recoveryLog = new RecoveryLogImpl(multiScopeRecoveryLog, failureScope);
            } else if (logProperties instanceof CustomLogProperties) {
                // The requested log has not been accessed already so we must create it.
                MultiScopeLog multiScopeRecoveryLog = null;
                HashMap multiScopeLogsByServerName = null; /* @253893A */
                String serverName = failureScope.serverName(); /* @253893A */

                final CustomLogProperties customLogProperties = (CustomLogProperties) logProperties;

                // Custom recovery logs have two different types. A multiple
                // scope log type indicates that records for more than one failure
                // scope may be stored in the same log. A single scope log type
                // indicates that records for different failure scopes must be
                // stored in different logs.
                final int logType = customLogProperties.logType();

                if (logType == CustomLogProperties.LOG_TYPE_MULTIPLE_SCOPE) {
                    // If this is a multiple scope log then we can check to see if
                    // we have an existing log for the given identifier. If we do
                    // we can re-use it. A multi-scope log will only contain failure
                    // scopes that share a server name. In an HA-enabled environment
                    // it's possible for a log identifier to have more than one log.
                    multiScopeLogsByServerName = (HashMap) _multiScopeRecoveryLogs.get(logIdentifier); /* @253893C */

                    if (multiScopeLogsByServerName != null) /* @253893A */
                    { /* @253893A */
                        multiScopeRecoveryLog = (MultiScopeLog) multiScopeLogsByServerName.get(serverName); /* @253893A */
                    } /* @253893A */
                    else /* @253893A */
                    { /* @253893A */
                        multiScopeLogsByServerName = new HashMap(); /* @253893A */
                        _multiScopeRecoveryLogs.put(logIdentifier, multiScopeLogsByServerName); /* @253893A */
                    } /* @253893A */
                }

                if (multiScopeRecoveryLog == null) {
                    // Either a single scope log is required or there was no
                    // existing log for the given identifier. Create a new log.
                    // Need to locate the factory for this Custom log via RecoveryDirector
                    // then create a log... this could be a single or multiple scope log
                    // at this point; check the type of returned log and if necessary
                    // wrap in a RecoveryLogImpl.
                    final String customLogId = customLogProperties.pluginId();
                    if (tc.isDebugEnabled())
                        Tr.debug(tc, "Look in properties with customLogId, " + customLogId);
                    RecoveryLogFactory factory = _customLogFactories.get(customLogId);
                    if (tc.isDebugEnabled())
                        Tr.debug(tc, "Retrieved factory, " + factory);
                    if (factory == null) {
                        if (tc.isEventEnabled())
                            Tr.event(tc, "Custom recovery log factory NOT FOUND for ", customLogId);
                        if (tc.isEntryEnabled())
                            Tr.exit(tc, "getRecoveryLog");
                        throw new InvalidLogPropertiesException();
                    }
                    recoveryLog = factory.createRecoveryLog(customLogProperties, _recoveryAgent, Configuration.getRecoveryLogComponent(), failureScope);
                    if (recoveryLog == null) {
                        if (tc.isEventEnabled())
                            Tr.event(tc, "Custom recovery log factory returned NULL recovery log", customLogId);
                        if (tc.isEntryEnabled())
                            Tr.exit(tc, "getRecoveryLog");
                        throw new InvalidLogPropertiesException();
                    }
                    if (logType == CustomLogProperties.LOG_TYPE_MULTIPLE_SCOPE && recoveryLog instanceof MultiScopeLog) {
                        // If this is a multiple scope log then we store it in the map so
                        // that it can be re-used by subsequent requests for a log with the
                        // same log identifier.
                        // It is worth noting that in an environment where the only logs
                        // created are single scope logs this map will remain empty.
                        multiScopeRecoveryLog = (MultiScopeLog) recoveryLog;
                        multiScopeLogsByServerName.put(serverName, multiScopeRecoveryLog); /* @253893A */
                        recoveryLog = new RecoveryLogImpl(multiScopeRecoveryLog, failureScope);
                    }
                } else {
                    // Create a new RecoveryLog object to be returned to the caller and
                    // store it in the map so that it can be re-used if necessary.
                    recoveryLog = new RecoveryLogImpl(multiScopeRecoveryLog, failureScope);
                }
            } else {
                // This is a stream log properties object so create the
                // z-specific log - use reflection to do this to avoid a compile
                // and runtime dependency on the z-specific code.
                // TDK - IXGRecoveryLogImpl is in the same component....
                try {
                    final Constructor ixgLogConstructor = Class.forName("com.ibm.ws390.recoverylog.spi.IXGRecoveryLogImpl").getConstructor(new Class[] { com.ibm.ws.recoverylog.spi.FailureScope.class,
                            com.ibm.ws.recoverylog.spi.StreamLogProperties.class, com.ibm.ws.recoverylog.spi.RecoveryAgent.class });

                    recoveryLog = (RecoveryLog) ixgLogConstructor.newInstance(new Object[] { failureScope, (StreamLogProperties) logProperties, _recoveryAgent });
                } catch (Exception e) {
                    FFDCFilter.processException(e, "com.ibm.ws.recoverylog.spi.RecoveryLogManagerImpl.getRecoveryLog", "278", this);
                    if (tc.isEventEnabled())
                        Tr.event(tc, "Exception caught initializing stream-based log", e);
                    if (tc.isEntryEnabled())
                        Tr.exit(tc, "getRecoveryLog", "InvalidLogPropertiesException");
                    throw new InvalidLogPropertiesException(e);
                }
            }

            // Cache the newly created log under its failure scope so subsequent
            // calls with the same identifier/scope re-use it.
            logsByFailureScope.put(failureScope, recoveryLog);
        }

        if (tc.isEntryEnabled())
            Tr.exit(tc, "getRecoveryLog", recoveryLog);
        return recoveryLog;
    }
}
public class LocalEventManager { /** * This method fires an Event * @ param event the fired Event * @ throws IllegalIDException not yet implemented * @ throws org . intellimate . izou . events . MultipleEventsException if there is currently another event getting processed */ public void fireEvent ( EventModel event ) throws IllegalIDException , org . intellimate . izou . events . MultipleEventsException { } }
if ( events == null ) return ; if ( events . isEmpty ( ) ) { events . add ( event ) ; } else { throw new org . intellimate . izou . events . MultipleEventsException ( ) ; }
public class ApplicationReportIndexService { /** * Return a global application index ( not associated with a specific { @ link ProjectModel } ) . */ public ApplicationReportIndexModel getOrCreateGlobalApplicationIndex ( ) { } }
GraphTraversal < Vertex , Vertex > pipeline = getGraphContext ( ) . getGraph ( ) . traversal ( ) . V ( ) ; pipeline . has ( WindupVertexFrame . TYPE_PROP , ApplicationReportModel . TYPE ) ; pipeline . filter ( it -> ! it . get ( ) . edges ( Direction . OUT , ApplicationReportIndexModel . APPLICATION_REPORT_INDEX_TO_PROJECT_MODEL ) . hasNext ( ) ) ; final ApplicationReportIndexModel result = pipeline . hasNext ( ) ? frame ( pipeline . next ( ) ) : create ( ) ; return result ;
public class ClientDObjectMgr { /** * Notifies the subscribers that had requested this object ( for subscription ) that it is not * available . */ protected void notifyFailure ( int oid , String message ) { } }
// let the penders know that the object is not available PendingRequest < ? > req = _penders . remove ( oid ) ; if ( req == null ) { log . warning ( "Failed to get object, but no one cares?!" , "oid" , oid ) ; return ; } for ( int ii = 0 ; ii < req . targets . size ( ) ; ii ++ ) { req . targets . get ( ii ) . requestFailed ( oid , new ObjectAccessException ( message ) ) ; }
public class FJIterate {
    /**
     * Same effect as {@link Iterate#groupBy(Iterable, Function)},
     * but executed in parallel batches, and writing output into a
     * SynchronizedPutFastListMultimap.
     */
    public static <K, V> MutableMultimap<K, V> groupBy(Iterable<V> iterable, Function<? super V, ? extends K> function, int batchSize, ForkJoinPool executor) {
        // Delegate to the overload taking an explicit target multimap; the
        // synchronized multimap is required because batches write concurrently.
        return FJIterate.groupBy(iterable, function, SynchronizedPutFastListMultimap.<K, V>newMultimap(), batchSize, executor);
    }
}
public class AmazonSQSMessagingClientWrapper {
    /**
     * Gets the queueUrl of a queue given a queue name owned by the provided accountId.
     *
     * @param queueName the queue name
     * @param queueOwnerAccountId The AWS accountId of the account that created the queue
     * @return The response from the GetQueueUrl service method, as returned by
     *         AmazonSQS, which will include queue's URL
     * @throws JMSException propagated from the request-object overload
     */
    public GetQueueUrlResult getQueueUrl(String queueName, String queueOwnerAccountId) throws JMSException {
        // Convenience overload: build the request object and delegate.
        return getQueueUrl(new GetQueueUrlRequest(queueName).withQueueOwnerAWSAccountId(queueOwnerAccountId));
    }
}
public class ExtensionHttpSessions { /** * Gets all of the sites with http sessions . * @ return all of the sites with http sessions */ public List < String > getSites ( ) { } }
List < String > sites = new ArrayList < String > ( ) ; if ( this . sessions == null ) { return sites ; } synchronized ( sessionLock ) { sites . addAll ( this . sessions . keySet ( ) ) ; } return sites ;
public class GetGatewayResponseResult {
    /**
     * Response templates of the <a>GatewayResponse</a> as a string-to-string
     * map of key-value pairs.
     *
     * @param responseTemplates
     *        Response templates of the <a>GatewayResponse</a> as a
     *        string-to-string map of key-value pairs.
     * @return Returns a reference to this object so that method calls can be
     *         chained together.
     */
    public GetGatewayResponseResult withResponseTemplates(java.util.Map<String, String> responseTemplates) {
        // Fluent variant: delegate to the plain setter and return this.
        setResponseTemplates(responseTemplates);
        return this;
    }
}
public class ListenerTimeMeasure {
    /**
     * Wraps a {@link MeasureManager} (the wrapped) into another one (the
     * wrapper) which provides the same measures, except that any
     * {@link PushMeasure} returned by the wrapper is automatically wrapped via
     * {@link #wrapMeasure(PushMeasure)}. This ensures that any
     * {@link MeasureListener} registered to the {@link PushMeasure}s provided
     * by the wrapper is considered, independently of who registers it or when.
     * An additional key can also be provided to expose this
     * {@link ListenerTimeMeasure} on the wrapper.<br/>
     * <br/>
     * The wrapped manager is not changed, thus it can be reused to register
     * {@link MeasureListener}s that we don't want to consider.
     *
     * @param wrapped the {@link MeasureManager} to wrap
     * @param measureKey the key that the wrapper should use for this
     *        {@link ListenerTimeMeasure}, <code>null</code> if it should not
     *        use it
     * @return the {@link MeasureManager} wrapper
     * @throws IllegalArgumentException if no manager is provided or if the
     *         additional key is already used
     */
    public <Value> MeasureManager wrapManager(final MeasureManager wrapped, final Object measureKey) {
        if (wrapped == null) {
            throw new IllegalArgumentException("No manager provided");
        } else if (measureKey != null && wrapped.getMeasureKeys().contains(measureKey)) {
            // The additional key must not clash with an existing measure key.
            throw new IllegalArgumentException("The key " + measureKey + " is already used by the wrapped manager " + wrapped);
        } else {
            MeasureManager wrapper;
            if (measureKey != null) {
                // Variant that additionally exposes this ListenerTimeMeasure
                // as a pull measure under measureKey.
                wrapper = new MeasureManager() {
                    @Override
                    public <T> PushMeasure<T> getPushMeasure(Object key) {
                        // Wrap so listeners registered through the wrapper are timed.
                        return wrapMeasure(wrapped.<T>getPushMeasure(key));
                    }

                    @SuppressWarnings("unchecked")
                    @Override
                    public <T> PullMeasure<T> getPullMeasure(Object key) {
                        if (key.equals(measureKey)) {
                            return (PullMeasure<T>) ListenerTimeMeasure.this;
                        } else {
                            return wrapped.<T>getPullMeasure(key);
                        }
                    }

                    @Override
                    public Collection<Object> getMeasureKeys() {
                        // The wrapped keys plus the additional measureKey.
                        Collection<Object> keys = new LinkedList<>(wrapped.getMeasureKeys());
                        keys.add(measureKey);
                        return keys;
                    }
                };
            } else {
                // Variant without the additional key: pure pass-through except
                // for push-measure wrapping.
                wrapper = new MeasureManager() {
                    @Override
                    public <T> PushMeasure<T> getPushMeasure(Object key) {
                        return wrapMeasure(wrapped.<T>getPushMeasure(key));
                    }

                    @Override
                    public <T> PullMeasure<T> getPullMeasure(Object key) {
                        return wrapped.<T>getPullMeasure(key);
                    }

                    @Override
                    public Collection<Object> getMeasureKeys() {
                        return wrapped.getMeasureKeys();
                    }
                };
            }
            return wrapper;
        }
    }
}
public class AnalyzeSpark { /** * Get a list of unique values from the specified columns . * For sequence data , use { @ link # getUniqueSequence ( List , Schema , JavaRDD ) } * @ param columnName Name of the column to get unique values from * @ param schema Data schema * @ param data Data to get unique values from * @ return List of unique values */ public static List < Writable > getUnique ( String columnName , Schema schema , JavaRDD < List < Writable > > data ) { } }
int colIdx = schema . getIndexOfColumn ( columnName ) ; JavaRDD < Writable > ithColumn = data . map ( new SelectColumnFunction ( colIdx ) ) ; return ithColumn . distinct ( ) . collect ( ) ;
public class JsonSchema { /** * { @ link InputType } of the elements composed within complex type . * @ param itemKey * @ return */ public InputType getElementTypeUsingKey ( String itemKey ) { } }
String type = this . getDataType ( ) . get ( itemKey ) . getAsString ( ) . toUpperCase ( ) ; return InputType . valueOf ( type ) ;
public class ModelFactory { /** * Create a Model for a registered Blueprint using Erector . * Values set in the model will not be overridden by defaults in the * Blueprint . * @ param < T > model class * @ param erector Erector * @ param referenceModel T the reference model instance , or null * @ param withPolicies boolean if Policies should be applied to the create * @ return T new Model * @ throws CreateModelException model failed to create */ public < T > T createModel ( Erector erector , T referenceModel , boolean withPolicies ) throws CreateModelException { } }
erector . clearCommands ( ) ; T createdModel ; try { createdModel = ( T ) createNewInstance ( erector ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } logger . trace ( "Created model {} from {} based on {}" , new Object [ ] { createdModel , erector , referenceModel } ) ; final T nonNullReferenceModel = referenceModel == null ? createdModel : referenceModel ; erector . setReference ( nonNullReferenceModel ) ; if ( withPolicies ) { List < BlueprintPolicy > blueprintPolicies = this . getBlueprintPolicies ( ) . get ( erector . getTarget ( ) ) ; if ( blueprintPolicies != null ) { logger . debug ( " Running Blueprint policies" ) ; for ( BlueprintPolicy policy : blueprintPolicies ) { Map < ModelField , Set < Command > > modelFieldCommands = null ; try { logger . info ( " processing {}" , policy ) ; modelFieldCommands = policy . process ( this , erector , createdModel ) ; } catch ( PolicyException e ) { throw new CreateModelException ( e ) ; } for ( ModelField modelField : modelFieldCommands . keySet ( ) ) { erector . addCommands ( modelField , modelFieldCommands . get ( modelField ) ) ; } } } } for ( ModelField modelField : erector . getModelFields ( ) ) { logger . trace ( "ModelField {}" , ReflectionToStringBuilder . toString ( modelField ) ) ; Object value = null ; if ( withPolicies ) { List < FieldPolicy > policiesForSingleField = this . getFieldPolicies ( ) . get ( modelField . getTarget ( ) ) ; if ( policiesForSingleField != null ) { logger . debug ( " Running Field policies" ) ; for ( FieldPolicy policy : policiesForSingleField ) { try { logger . info ( " processing {} for {}" , policy , modelField . getTarget ( ) ) ; Command command = policy . process ( this , erector , modelField , createdModel ) ; if ( command != null ) { erector . addCommand ( modelField , command ) ; } } catch ( PolicyException e ) { new CreateModelException ( e ) ; } } } } if ( erector . getCommands ( modelField ) . size ( ) > 0 ) { logger . 
debug ( " ModelField commands: {}" , erector . getCommands ( modelField ) ) ; } if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_INJECTION ) ) { // Process DefaultField if ( modelField instanceof DefaultField ) { DefaultField defaultField = ( DefaultField ) modelField ; if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_REFERENCE_INJECTION ) ) { try { value = erector . getTemplate ( ) . get ( nonNullReferenceModel , defaultField . getName ( ) ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } } // If null or the field forces , use value set in blueprint , otherwise // use the value of the reference model if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_BLUEPRINT_INJECTION ) && ( value == null || defaultField . isForce ( ) ) ) { value = defaultField . getValue ( ) ; } // If value is an instance of FieldCallBack , eval the callback and use the value if ( value != null && value instanceof Getable ) { Getable callBack = ( Getable ) value ; value = callBack . get ( nonNullReferenceModel ) ; } try { createdModel = erector . getTemplate ( ) . set ( createdModel , defaultField . getName ( ) , value ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } // Process MappedField } else if ( modelField instanceof MappedField ) { MappedField mappedField = ( MappedField ) modelField ; if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_REFERENCE_INJECTION ) ) { try { value = erector . getTemplate ( ) . get ( nonNullReferenceModel , mappedField . getName ( ) ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } } if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_BLUEPRINT_INJECTION ) && value == null && ! mappedField . isNullable ( ) ) { value = this . createModel ( mappedField . getTarget ( ) ) ; } try { createdModel = erector . getTemplate ( ) . 
set ( createdModel , mappedField . getName ( ) , value ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } // Process MappedListField } else if ( modelField instanceof MappedListField ) { MappedListField listField = ( MappedListField ) modelField ; List modelList = null ; try { value = ( List ) erector . getTemplate ( ) . construct ( listField . getTargetList ( ) ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_INJECTION ) ) { try { modelList = ( List ) erector . getTemplate ( ) . get ( nonNullReferenceModel , listField . getName ( ) ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } } if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_BLUEPRINT_INJECTION ) ) { // Inject models into List If list is null or force is true or it is an empty list that is ignored if ( ( modelList == null || listField . isForce ( ) ) || ( modelList . size ( ) == 0 && ! listField . isIgnoreEmpty ( ) ) ) { for ( int x = 0 ; x < listField . getSize ( ) ; x ++ ) { ( ( List ) value ) . add ( this . createModel ( listField . getTarget ( ) ) ) ; } } else { for ( Object object : modelList ) { ( ( List ) value ) . add ( this . createModel ( object ) ) ; } } } try { createdModel = erector . getTemplate ( ) . set ( createdModel , listField . getName ( ) , value ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } // Process MappedSetField } else if ( modelField instanceof MappedSetField ) { MappedSetField setField = ( MappedSetField ) modelField ; try { value = erector . getTemplate ( ) . construct ( setField . getTargetSet ( ) ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } Set referenceModelSet = null ; if ( ! erector . getCommands ( modelField ) . contains ( Command . 
SKIP_INJECTION ) ) { try { referenceModelSet = ( Set ) erector . getTemplate ( ) . get ( nonNullReferenceModel , setField . getName ( ) ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } } if ( ! erector . getCommands ( modelField ) . contains ( Command . SKIP_BLUEPRINT_INJECTION ) ) { // Inject models into Set If list is null or force is true or it is an empty set that is ignored if ( ( referenceModelSet == null || setField . isForce ( ) ) || ( referenceModelSet . size ( ) == 0 && ! setField . isIgnoreEmpty ( ) ) ) { for ( int x = 0 ; x < setField . getSize ( ) ; x ++ ) { ( ( Set ) value ) . add ( this . createModel ( setField . getTarget ( ) ) ) ; } } else { for ( Object object : referenceModelSet ) { ( ( Set ) value ) . add ( this . createModel ( object ) ) ; } } } try { createdModel = erector . getTemplate ( ) . set ( createdModel , setField . getName ( ) , value ) ; } catch ( BlueprintTemplateException e ) { throw new CreateModelException ( e ) ; } } } } List < Callback > afterCreateCallbacks = erector . getCallbacks ( "afterCreate" ) ; if ( afterCreateCallbacks != null ) { for ( Callback callback : afterCreateCallbacks ) { if ( callback instanceof AfterCreateCallback ) { createdModel = ( ( AfterCreateCallback < T > ) callback ) . afterCreate ( createdModel ) ; } else { // XXX : should this toss an exception ? logger . error ( "Invalid AfterCreateCallback registered for {}" , referenceModel . getClass ( ) ) ; } } } return createdModel ;
public class CmsUploadBean { /** * Starts the upload . < p > * @ return the response String ( JSON ) */ public String start ( ) { } }
// ensure that this method can only be called once if ( m_called ) { throw new UnsupportedOperationException ( ) ; } m_called = true ; // create a upload listener CmsUploadListener listener = createListener ( ) ; try { // try to parse the request parseRequest ( listener ) ; // try to create the resources on the VFS createResources ( listener ) ; // trigger update offline indexes , important for gallery search OpenCms . getSearchManager ( ) . updateOfflineIndexes ( ) ; } catch ( CmsException e ) { // an error occurred while creating the resources on the VFS , create a special error message LOG . error ( e . getMessage ( ) , e ) ; return generateResponse ( Boolean . FALSE , getCreationErrorMessage ( ) , formatStackTrace ( e ) ) ; } catch ( CmsUploadException e ) { // an expected error occurred while parsing the request , the error message is already set in the exception LOG . debug ( e . getMessage ( ) , e ) ; return generateResponse ( Boolean . FALSE , e . getMessage ( ) , formatStackTrace ( e ) ) ; } catch ( Throwable e ) { // an unexpected error occurred while parsing the request , create a non - specific error message LOG . error ( e . getMessage ( ) , e ) ; String message = m_bundle . key ( org . opencms . ade . upload . Messages . ERR_UPLOAD_UNEXPECTED_0 ) ; return generateResponse ( Boolean . FALSE , message , formatStackTrace ( e ) ) ; } finally { removeListener ( listener . getId ( ) ) ; } // the upload was successful inform the user about success return generateResponse ( Boolean . TRUE , m_bundle . key ( org . opencms . ade . upload . Messages . LOG_UPLOAD_SUCCESS_0 ) , "" ) ;