signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class CPDefinitionOptionRelLocalServiceBaseImpl { /** * Creates a new cp definition option rel with the primary key . Does not add the cp definition option rel to the database . * @ param CPDefinitionOptionRelId the primary key for the new cp definition option rel * @ return the new cp definition option rel */ @ Override @ Transactional ( enabled = false ) public CPDefinitionOptionRel createCPDefinitionOptionRel ( long CPDefinitionOptionRelId ) { } }
return cpDefinitionOptionRelPersistence . create ( CPDefinitionOptionRelId ) ;
public class Matrix { /** * Creates a new matrix that stores < i > A * B < / i > * @ param B the matrix to multiply by * @ return a new matrix < i > A * B < / i > */ public Matrix multiply ( Matrix B ) { } }
Matrix C = new DenseMatrix ( this . rows ( ) , B . cols ( ) ) ; multiply ( B , C ) ; return C ;
public class A_CmsUploadDialog { /** * Executes the submit action . < p > */ public void submit ( ) { } }
// create a JsArray containing the files to upload List < CmsFileInfo > filesToUpload = new ArrayList < CmsFileInfo > ( getFilesToUpload ( ) . values ( ) ) ; Collections . sort ( filesToUpload , CmsFileInfo . INFO_COMPARATOR ) ; CmsUploader uploader = new CmsUploader ( ) ; uploader . uploadFiles ( getUploadUri ( ) , getTargetFolder ( ) , m_isTargetRootPath , filesToUpload , getFilesToUnzip ( false ) , this ) ;
public class LazyJobLogger { /** * 检查内存中是否有日志 , 如果有就批量刷盘 */ private void checkAndFlush ( ) { } }
try { int nowSize = memoryQueue . size ( ) ; if ( nowSize == 0 ) { return ; } List < JobLogPo > batch = new ArrayList < JobLogPo > ( ) ; for ( int i = 0 ; i < nowSize ; i ++ ) { JobLogPo jobLogPo = memoryQueue . poll ( ) ; batch . add ( jobLogPo ) ; if ( batch . size ( ) >= batchFlushSize ) { flush ( batch ) ; } } if ( batch . size ( ) > 0 ) { flush ( batch ) ; } } finally { flushing . compareAndSet ( true , false ) ; }
public class ConnectionUtility { /** * 根据配置信息 , 初始化DataSourceProvider 。 */ protected static void setupDataSourceProviders ( ) { } }
List < String > providerProperties = new LinkedList < String > ( ) ; for ( Object key : configuration . keySet ( ) ) { String keyName = ( String ) key ; if ( keyName . startsWith ( PROVIDER_PROPERTY_NAME ) ) { providerProperties . add ( keyName ) ; } } dataSourceProviders = new HashMap < String , DataSourceProvider > ( ) ; for ( String keyName : providerProperties ) { String providerName = keyName . substring ( PROVIDER_PROPERTY_NAME . length ( ) , keyName . length ( ) ) ; String providerClassName = configuration . getProperty ( keyName ) ; configuration . remove ( keyName ) ; DataSourceProvider dsp = null ; try { dsp = ( DataSourceProvider ) Class . forName ( providerClassName ) . newInstance ( ) ; dataSourceProviders . put ( providerName , dsp ) ; } catch ( Throwable t ) { if ( t instanceof NoClassDefFoundError ) { log . debug ( "DataSourceProvider not initialized for '" + providerName + "' because the provider class was not found: " + t . getMessage ( ) ) ; } else { log . error ( "Cannot instantiate DataSourceProvider for '" + providerName + "': " + providerClassName , t ) ; } } }
public class RateLimitInterceptor { /** * Intercept a http call . * @ param chain the current chain of calls . * @ return a response from this call . * @ throws IOException if something goes wrong . */ @ Override public Response intercept ( Chain chain ) throws IOException { } }
final Response response = chain . proceed ( chain . request ( ) ) ; final Headers headers = response . headers ( ) ; final Map < String , List < String > > mappedHeaders = headers . toMultimap ( ) ; final RateLimits limits = new RateLimits . DefaultParser ( ) . parse ( mappedHeaders ) ; listener . onRateLimitHeaderReceived ( limits ) ; return response ;
public class TypeConverter { /** * Convert the passed source value to float * @ param aSrcValue * The source value . May not be < code > null < / code > . * @ return The converted value . * @ throws TypeConverterException * if the source value is < code > null < / code > or if no converter was * found or if the converter returned a < code > null < / code > object . * @ throws RuntimeException * If the converter itself throws an exception * @ see TypeConverterProviderBestMatch */ public static float convertToFloat ( @ Nonnull final Object aSrcValue ) { } }
if ( aSrcValue == null ) throw new TypeConverterException ( float . class , EReason . NULL_SOURCE_NOT_ALLOWED ) ; final Float aValue = convert ( aSrcValue , Float . class ) ; return aValue . floatValue ( ) ;
public class CIForm { /** * Get the type this Configuration item represents . * @ return Form */ public org . efaps . admin . ui . Form getType ( ) { } }
org . efaps . admin . ui . Form ret = null ; try { ret = org . efaps . admin . ui . Form . get ( this . uuid ) ; } catch ( final CacheReloadException e ) { CIForm . LOG . error ( "Error on retrieving Type for CIType with uuid: {}" , this . uuid ) ; } return ret ;
public class Parser { /** * 11.9 Equality Expression */ private ParseTree parseEquality ( Expression expressionIn ) { } }
SourcePosition start = getTreeStartLocation ( ) ; ParseTree left = parseRelational ( expressionIn ) ; while ( peekEqualityOperator ( ) ) { Token operator = nextToken ( ) ; ParseTree right = parseRelational ( expressionIn ) ; left = new BinaryOperatorTree ( getTreeLocation ( start ) , left , operator , right ) ; } return left ;
public class StitchedColumn { /** * content will be null if delta fit in a single block , as no stitching was performed */ @ Override public < V > V getValue ( Serializer < V > serializer ) { } }
return _content != null ? serializer . fromByteBuffer ( _content ) : _oldColumn . getValue ( serializer ) ;
public class Filters { /** * https : / / github . com / apache / hive / blob / branch - 2.0 / storage - api / src / java / org / apache / hadoop / hive / ql / io / sarg / SearchArgumentImpl . java */ private static Filter flatten ( Filter root ) { } }
if ( root instanceof BooleanFilter ) { List < Filter > children = new ArrayList < > ( ) ; children . addAll ( ( ( BooleanFilter ) root ) . getFilters ( ) ) ; // iterate through the index , so that if we add more children , // they don ' t get re - visited for ( int i = 0 ; i < children . size ( ) ; ++ i ) { Filter child = flatten ( children . get ( i ) ) ; // do we need to flatten ? if ( child . getClass ( ) == root . getClass ( ) && ! ( child instanceof NotFilter ) ) { boolean first = true ; List < Filter > grandKids = ( ( BooleanFilter ) child ) . getFilters ( ) ; for ( Filter grandkid : grandKids ) { // for the first grandkid replace the original parent if ( first ) { first = false ; children . set ( i , grandkid ) ; } else { children . add ( ++ i , grandkid ) ; } } } else { children . set ( i , child ) ; } } // if we have a singleton AND or OR , just return the child if ( children . size ( ) == 1 && ( root instanceof AndFilter || root instanceof OrFilter ) ) { return children . get ( 0 ) ; } if ( root instanceof AndFilter ) { return new AndFilter ( children ) ; } else if ( root instanceof OrFilter ) { return new OrFilter ( children ) ; } } return root ;
public class EventServiceSegment { /** * Returns the { @ link Registration } s for the event { @ code topic } . If there are no * registrations and { @ code forceCreate } , it will create a concurrent set and put it in the registration map . * @ param topic the event topic * @ param forceCreate whether to create the registration set if none exists or to return null * @ return the collection of registrations for the topic or null if none exists and { @ code forceCreate } is { @ code false } */ public Collection < Registration > getRegistrations ( String topic , boolean forceCreate ) { } }
Collection < Registration > listenerList = registrations . get ( topic ) ; if ( listenerList == null && forceCreate ) { ConstructorFunction < String , Collection < Registration > > func = key -> newSetFromMap ( new ConcurrentHashMap < Registration , Boolean > ( ) ) ; return ConcurrencyUtil . getOrPutIfAbsent ( registrations , topic , func ) ; } return listenerList ;
public class CPMeasurementUnitLocalServiceBaseImpl { /** * Performs a dynamic query on the database and returns the matching rows . * @ param dynamicQuery the dynamic query * @ return the matching rows */ @ Override public < T > List < T > dynamicQuery ( DynamicQuery dynamicQuery ) { } }
return cpMeasurementUnitPersistence . findWithDynamicQuery ( dynamicQuery ) ;
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EClass getIfcShearModulusMeasure ( ) { } }
if ( ifcShearModulusMeasureEClass == null ) { ifcShearModulusMeasureEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 863 ) ; } return ifcShearModulusMeasureEClass ;
public class TemplatingManager { /** * Processes new templates . * They are applied to all the applications , provided they match * the criteria defined by the template files . * @ param newTemplates a non - null list of templates */ public void processNewTemplates ( Collection < TemplateEntry > newTemplates ) { } }
if ( this . dm != null ) { for ( ManagedApplication ma : this . dm . applicationMngr ( ) . getManagedApplications ( ) ) { Collection < TemplateEntry > filteredTemplates = TemplateUtils . findTemplatesForApplication ( ma . getName ( ) , newTemplates ) ; generate ( ma . getApplication ( ) , filteredTemplates ) ; } }
public class SavepointV2 { /** * Converts the { @ link Savepoint } containing { @ link TaskState TaskStates } to an equivalent savepoint containing * { @ link OperatorState OperatorStates } . * @ param savepoint savepoint to convert * @ param tasks map of all vertices and their job vertex ids * @ return converted completed checkpoint * @ deprecated Only kept for backwards - compatibility with versions < 1.3 . Will be removed in the future . */ @ Deprecated public static Savepoint convertToOperatorStateSavepointV2 ( Map < JobVertexID , ExecutionJobVertex > tasks , Savepoint savepoint ) { } }
if ( savepoint . getOperatorStates ( ) != null ) { return savepoint ; } boolean expandedToLegacyIds = false ; Map < OperatorID , OperatorState > operatorStates = new HashMap < > ( savepoint . getTaskStates ( ) . size ( ) << 1 ) ; for ( TaskState taskState : savepoint . getTaskStates ( ) ) { ExecutionJobVertex jobVertex = tasks . get ( taskState . getJobVertexID ( ) ) ; // on the first time we can not find the execution job vertex for an id , we also consider alternative ids , // for example as generated from older flink versions , to provide backwards compatibility . if ( jobVertex == null && ! expandedToLegacyIds ) { tasks = ExecutionJobVertex . includeLegacyJobVertexIDs ( tasks ) ; jobVertex = tasks . get ( taskState . getJobVertexID ( ) ) ; expandedToLegacyIds = true ; } if ( jobVertex == null ) { throw new IllegalStateException ( "Could not find task for state with ID " + taskState . getJobVertexID ( ) + ". " + "When migrating a savepoint from a version < 1.3 please make sure that the topology was not " + "changed through removal of a stateful operator or modification of a chain containing a stateful " + "operator." ) ; } List < OperatorID > operatorIDs = jobVertex . getOperatorIDs ( ) ; Preconditions . checkArgument ( jobVertex . getParallelism ( ) == taskState . getParallelism ( ) , "Detected change in parallelism during migration for task " + jobVertex . getJobVertexId ( ) + "." + "When migrating a savepoint from a version < 1.3 please make sure that no changes were made " + "to the parallelism of stateful operators." ) ; Preconditions . checkArgument ( operatorIDs . size ( ) == taskState . getChainLength ( ) , "Detected change in chain length during migration for task " + jobVertex . getJobVertexId ( ) + ". " + "When migrating a savepoint from a version < 1.3 please make sure that the topology was not " + "changed by modification of a chain containing a stateful operator." ) ; for ( int subtaskIndex = 0 ; subtaskIndex < jobVertex . 
getParallelism ( ) ; subtaskIndex ++ ) { SubtaskState subtaskState ; try { subtaskState = taskState . getState ( subtaskIndex ) ; } catch ( Exception e ) { throw new IllegalStateException ( "Could not find subtask with index " + subtaskIndex + " for task " + jobVertex . getJobVertexId ( ) + ". " + "When migrating a savepoint from a version < 1.3 please make sure that no changes were made " + "to the parallelism of stateful operators." , e ) ; } if ( subtaskState == null ) { continue ; } ChainedStateHandle < OperatorStateHandle > partitioneableState = subtaskState . getManagedOperatorState ( ) ; ChainedStateHandle < OperatorStateHandle > rawOperatorState = subtaskState . getRawOperatorState ( ) ; for ( int chainIndex = 0 ; chainIndex < taskState . getChainLength ( ) ; chainIndex ++ ) { // task consists of multiple operators so we have to break the state apart for ( int operatorIndex = 0 ; operatorIndex < operatorIDs . size ( ) ; operatorIndex ++ ) { OperatorID operatorID = operatorIDs . get ( operatorIndex ) ; OperatorState operatorState = operatorStates . get ( operatorID ) ; if ( operatorState == null ) { operatorState = new OperatorState ( operatorID , jobVertex . getParallelism ( ) , jobVertex . getMaxParallelism ( ) ) ; operatorStates . put ( operatorID , operatorState ) ; } KeyedStateHandle managedKeyedState = null ; KeyedStateHandle rawKeyedState = null ; // only the head operator retains the keyed state if ( operatorIndex == operatorIDs . size ( ) - 1 ) { managedKeyedState = subtaskState . getManagedKeyedState ( ) ; rawKeyedState = subtaskState . getRawKeyedState ( ) ; } OperatorSubtaskState operatorSubtaskState = new OperatorSubtaskState ( partitioneableState != null ? partitioneableState . get ( operatorIndex ) : null , rawOperatorState != null ? rawOperatorState . get ( operatorIndex ) : null , managedKeyedState , rawKeyedState ) ; operatorState . putState ( subtaskIndex , operatorSubtaskState ) ; } } } } return new SavepointV2 ( savepoint . 
getCheckpointId ( ) , operatorStates . values ( ) , savepoint . getMasterStates ( ) ) ;
public class ReportNGUtils { /** * Convert a Throwable into a list containing all of its causes . * @ param t The throwable for which the causes are to be returned . * @ return A ( possibly empty ) list of { @ link Throwable } s . */ public List < Throwable > getCauses ( Throwable t ) { } }
List < Throwable > causes = new LinkedList < Throwable > ( ) ; Throwable next = t ; while ( next . getCause ( ) != null ) { next = next . getCause ( ) ; causes . add ( next ) ; } return causes ;
public class Timex3 { /** * getter for timexInstance - gets * @ generated * @ return value of the feature */ public int getTimexInstance ( ) { } }
if ( Timex3_Type . featOkTst && ( ( Timex3_Type ) jcasType ) . casFeat_timexInstance == null ) jcasType . jcas . throwFeatMissing ( "timexInstance" , "de.unihd.dbs.uima.types.heideltime.Timex3" ) ; return jcasType . ll_cas . ll_getIntValue ( addr , ( ( Timex3_Type ) jcasType ) . casFeatCode_timexInstance ) ;
public class ReconciliationReportRow { /** * Sets the thirdPartyRevenue value for this ReconciliationReportRow . * @ param thirdPartyRevenue * The revenue calculated based on the { @ link # costPerUnit } , { @ link * # costType } , * { @ link # thirdPartyClicks } , { @ link # thirdPartyImpressions } * and { @ link # thirdPartyLineItemDays } . * This attribute is calculated by Google and is read - only . */ public void setThirdPartyRevenue ( com . google . api . ads . admanager . axis . v201805 . Money thirdPartyRevenue ) { } }
this . thirdPartyRevenue = thirdPartyRevenue ;
public class SurfDescribeOps { /** * Faster version of { @ link # gradient } which assumes the region is entirely contained inside the * of the image . This includes the convolution kernel ' s radius . */ public static void gradient_noborder ( GrayF32 ii , double tl_x , double tl_y , double samplePeriod , int regionSize , double kernelWidth , float [ ] derivX , float [ ] derivY ) { } }
ImplSurfDescribeOps . gradientInner ( ii , tl_x , tl_y , samplePeriod , regionSize , kernelWidth , derivX , derivY ) ;
public class AbstractAmazonDynamoDBAsync { /** * Simplified method form for invoking the DescribeTable operation with an AsyncHandler . * @ see # describeTableAsync ( DescribeTableRequest , com . amazonaws . handlers . AsyncHandler ) */ @ Override public java . util . concurrent . Future < DescribeTableResult > describeTableAsync ( String tableName , com . amazonaws . handlers . AsyncHandler < DescribeTableRequest , DescribeTableResult > asyncHandler ) { } }
return describeTableAsync ( new DescribeTableRequest ( ) . withTableName ( tableName ) , asyncHandler ) ;
public class TypeUtil { /** * CHECKSTYLE : ON */ public static Object call ( Class < ? > oClass , String method , Object obj , Object [ ] arg ) throws InvocationTargetException , NoSuchMethodException { } }
// Lets just try all methods for now Method [ ] methods = oClass . getMethods ( ) ; for ( int c = 0 ; methods != null && c < methods . length ; c ++ ) { if ( ! methods [ c ] . getName ( ) . equals ( method ) ) { continue ; } if ( methods [ c ] . getParameterTypes ( ) . length != arg . length ) { continue ; } if ( Modifier . isStatic ( methods [ c ] . getModifiers ( ) ) != ( obj == null ) ) { continue ; } if ( ( obj == null ) && methods [ c ] . getDeclaringClass ( ) != oClass ) { continue ; } try { return methods [ c ] . invoke ( obj , arg ) ; } catch ( IllegalAccessException | IllegalArgumentException e ) { LOG . warn ( "IGNORE " , e ) ; } } throw new NoSuchMethodException ( method ) ;
public class BuryingLogic { /** * determines if this opcode couldn ' t be part of a conditional expression or at least is very unlikely to be so . * @ param seen * the currently parse opcode * @ return if this operation resets the looking for conditionals */ private boolean isResetOp ( int seen ) { } }
return resetOps . get ( seen ) || OpcodeUtils . isStore ( seen ) || OpcodeUtils . isReturn ( seen ) || ( ( OpcodeUtils . isInvoke ( seen ) && getSigConstantOperand ( ) . endsWith ( ")V" ) ) || ( isBranch ( seen ) && ( getBranchOffset ( ) < 0 ) ) ) ;
public class CmsAliasList { /** * Creates the button used for deleting aliases . < p > * @ return the new button */ protected PushButton createDeleteButton ( ) { } }
PushButton button = createIconButton ( I_CmsButton . DELETE_SMALL ) ; button . setTitle ( aliasMessages . removeAlias ( ) ) ; return button ;
public class xen_panwvpx_image { /** * < pre > * Use this operation to get panw XVA file . * < / pre > */ public static xen_panwvpx_image [ ] get ( nitro_service client ) throws Exception { } }
xen_panwvpx_image resource = new xen_panwvpx_image ( ) ; resource . validate ( "get" ) ; return ( xen_panwvpx_image [ ] ) resource . get_resources ( client ) ;
public class WorkflowManagerAbstract { /** * / * ( non - Javadoc ) * @ see nz . co . senanque . workflow . WorkflowManager # getProcessDefinition ( java . lang . String ) */ @ Override public ProcessDefinition getProcessDefinition ( String name ) { } }
for ( ProcessDefinition pd : m_allProcesses ) { if ( pd . getName ( ) . equals ( name ) ) { return pd ; } } return null ;
public class MysqlExportService { /** * This function will delete all the * temp files generated ny the library * unless it ' s otherwise instructed not to do * so by the preserveZipFile variable * @ param preserveZipFile bool */ public void clearTempFiles ( boolean preserveZipFile ) { } }
// delete the temp sql file File sqlFile = new File ( dirName + "/sql/" + sqlFileName ) ; if ( sqlFile . exists ( ) ) { boolean res = sqlFile . delete ( ) ; logger . debug ( LOG_PREFIX + ": " + sqlFile . getAbsolutePath ( ) + " deleted successfully? " + ( res ? " TRUE " : " FALSE " ) ) ; } else { logger . debug ( LOG_PREFIX + ": " + sqlFile . getAbsolutePath ( ) + " DOES NOT EXIST while clearing Temp Files" ) ; } File sqlFolder = new File ( dirName + "/sql" ) ; if ( sqlFolder . exists ( ) ) { boolean res = sqlFolder . delete ( ) ; logger . debug ( LOG_PREFIX + ": " + sqlFolder . getAbsolutePath ( ) + " deleted successfully? " + ( res ? " TRUE " : " FALSE " ) ) ; } else { logger . debug ( LOG_PREFIX + ": " + sqlFolder . getAbsolutePath ( ) + " DOES NOT EXIST while clearing Temp Files" ) ; } // only execute this section if the // file is not to be preserved if ( ! preserveZipFile ) { // delete the zipFile File zipFile = new File ( zipFileName ) ; if ( zipFile . exists ( ) ) { boolean res = zipFile . delete ( ) ; logger . debug ( LOG_PREFIX + ": " + zipFile . getAbsolutePath ( ) + " deleted successfully? " + ( res ? " TRUE " : " FALSE " ) ) ; } else { logger . debug ( LOG_PREFIX + ": " + zipFile . getAbsolutePath ( ) + " DOES NOT EXIST while clearing Temp Files" ) ; } // delete the temp folder File folder = new File ( dirName ) ; if ( folder . exists ( ) ) { boolean res = folder . delete ( ) ; logger . debug ( LOG_PREFIX + ": " + folder . getAbsolutePath ( ) + " deleted successfully? " + ( res ? " TRUE " : " FALSE " ) ) ; } else { logger . debug ( LOG_PREFIX + ": " + folder . getAbsolutePath ( ) + " DOES NOT EXIST while clearing Temp Files" ) ; } } logger . debug ( LOG_PREFIX + ": generated temp files cleared successfully" ) ;
public class ReportClient { /** * Get group statistic , time unit only supports DAY now . * @ param start Format : yyyy - MM - dd * @ param duration duration must between 0 and 60 * @ return { @ link GroupStatListResult } * @ throws APIConnectionException connect exception * @ throws APIRequestException request exception */ public GroupStatListResult getGroupStatistic ( String start , int duration ) throws APIConnectionException , APIRequestException { } }
Preconditions . checkArgument ( verifyDateFormat ( "DAY" , start ) , "Illegal date format" ) ; Preconditions . checkArgument ( 0 <= duration && duration <= 60 , " 0 <= duration <= 60" ) ; String url = mBaseReportPath + mV2StatisticPath + "/groups?time_unit=DAY&start=" + start + "&duration=" + duration ; ResponseWrapper responseWrapper = _httpClient . sendGet ( url ) ; return GroupStatListResult . fromResponse ( responseWrapper ) ;
public class ISUPMessageFactoryImpl { /** * ( non - Javadoc ) * @ see org . restcomm . protocols . ss7 . isup . ISUPMessageFactory # createREL ( ) */ @ Override public ReleaseMessage createREL ( ) { } }
ReleaseMessage msg = new ReleaseMessageImpl ( _REL_HOLDER . mandatoryCodes , _REL_HOLDER . mandatoryVariableCodes , _REL_HOLDER . optionalCodes , _REL_HOLDER . mandatoryCodeToIndex , _REL_HOLDER . mandatoryVariableCodeToIndex , _REL_HOLDER . optionalCodeToIndex ) ; return msg ;
public class ImageRequest { /** * Visible for testing . */ static int findBestSampleSize ( int actualWidth , int actualHeight , int desiredWidth , int desiredHeight ) { } }
double wr = ( double ) actualWidth / desiredWidth ; double hr = ( double ) actualHeight / desiredHeight ; double ratio = Math . min ( wr , hr ) ; float n = 1.0f ; while ( ( n * 2 ) <= ratio ) { n *= 2 ; } return ( int ) n ;
public class device_profile { /** * < pre > * Use this operation to add device profile . * < / pre > */ public static device_profile add ( nitro_service client , device_profile resource ) throws Exception { } }
resource . validate ( "add" ) ; return ( ( device_profile [ ] ) resource . perform_operation ( client , "add" ) ) [ 0 ] ;
public class OneHashPerFileTarget { /** * { @ inheritDoc } */ public void init ( ) throws ExecutionTargetInitializationException { } }
// Make sure the output directory exists or can be created . if ( outputDirectory != null ) { if ( outputDirectory . exists ( ) && ! outputDirectory . isDirectory ( ) ) { throw new ExecutionTargetInitializationException ( "'" + outputDirectory . getPath ( ) + "' already exists and is not a directory." ) ; } else { outputDirectory . mkdirs ( ) ; } }
public class StreamHelper { /** * Pass the content of the given input stream to the given output stream . The * input stream is automatically closed , whereas the output stream stays open ! * @ param aIS * The input stream to read from . May be < code > null < / code > . Automatically * closed ! * @ param aOS * The output stream to write to . May be < code > null < / code > . Not * automatically closed ! * @ param aBuffer * The buffer to use . May not be < code > null < / code > . * @ param aCopyByteCount * An optional mutable long object that will receive the total number of * copied bytes . Note : and optional old value is overwritten ! * @ param aLimit * An optional maximum number of bytes to copied from the input stream to * the output stream . May be < code > null < / code > to indicate no limit , * meaning all bytes are copied . * @ return < code > { @ link ESuccess # SUCCESS } < / code > if copying took place , < code > * { @ link ESuccess # FAILURE } < / code > otherwise */ @ Nonnull public static ESuccess copyInputStreamToOutputStream ( @ WillClose @ Nullable final InputStream aIS , @ WillNotClose @ Nullable final OutputStream aOS , @ Nonnull @ Nonempty final byte [ ] aBuffer , @ Nullable final MutableLong aCopyByteCount , @ Nullable final Long aLimit ) { } }
try { ValueEnforcer . notEmpty ( aBuffer , "Buffer" ) ; ValueEnforcer . isTrue ( aLimit == null || aLimit . longValue ( ) >= 0 , ( ) -> "Limit may not be negative: " + aLimit ) ; if ( aIS != null && aOS != null ) { // both streams are not null long nTotalBytesCopied ; if ( aLimit == null ) nTotalBytesCopied = _copyInputStreamToOutputStream ( aIS , aOS , aBuffer ) ; else nTotalBytesCopied = _copyInputStreamToOutputStreamWithLimit ( aIS , aOS , aBuffer , aLimit . longValue ( ) ) ; // Add to statistics s_aByteSizeHdl . addSize ( nTotalBytesCopied ) ; // Remember copied bytes ? if ( aCopyByteCount != null ) aCopyByteCount . set ( nTotalBytesCopied ) ; return ESuccess . SUCCESS ; } } catch ( final IOException ex ) { if ( ! isKnownEOFException ( ex ) ) LOGGER . error ( "Failed to copy from stream to stream" , ex instanceof IMockException ? null : ex ) ; } finally { // Ensure input stream is closed , even if output stream is null close ( aIS ) ; } return ESuccess . FAILURE ;
public class CloudWatchDestination { /** * An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setDimensionConfigurations ( java . util . Collection ) } or * { @ link # withDimensionConfigurations ( java . util . Collection ) } if you want to override the existing values . * @ param dimensionConfigurations * An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch . * @ return Returns a reference to this object so that method calls can be chained together . */ public CloudWatchDestination withDimensionConfigurations ( CloudWatchDimensionConfiguration ... dimensionConfigurations ) { } }
if ( this . dimensionConfigurations == null ) { setDimensionConfigurations ( new java . util . ArrayList < CloudWatchDimensionConfiguration > ( dimensionConfigurations . length ) ) ; } for ( CloudWatchDimensionConfiguration ele : dimensionConfigurations ) { this . dimensionConfigurations . add ( ele ) ; } return this ;
public class ZipUtils { /** * Unzip a file into a directory . The directory is created if it does not exist . * @ return the target directory */ public static File unzip ( File zip , File toDir ) throws IOException { } }
return unzip ( zip , toDir , ( Predicate < ZipEntry > ) ze -> true ) ;
public class GoogleMapsTileMath { /** * Converts pixel coordinates in given zoom level of pyramid to EPSG : 3857 * @ param px the X pixel coordinate * @ param py the Y pixel coordinate * @ param zoomLevel the zoom level * @ return The coordinate transformed to EPSG : 3857 */ public Coordinate pixelsToMeters ( double px , double py , int zoomLevel ) { } }
double res = resolution ( zoomLevel ) ; double mx = px * res - originShift ; double my = - py * res + originShift ; return new Coordinate ( mx , my ) ;
public class LogRecord { /** * Returns the default value for a new LogRecord ' s threadID . */ private int defaultThreadID ( ) { } }
long tid = Thread . currentThread ( ) . getId ( ) ; if ( tid < MIN_SEQUENTIAL_THREAD_ID ) { return ( int ) tid ; } else { Integer id = threadIds . get ( ) ; if ( id == null ) { id = nextThreadId . getAndIncrement ( ) ; threadIds . set ( id ) ; } return id ; }
public class ModuleManager { /** * Gets the service from the given bundle jar uri . Loads and starts the bundle * if it isn ' t yet loaded * @ param bundleURI bundle jar URI * @ param svcClazz the service class exposed by the bundle jar * @ return a reference to an instance of the service class */ public < T > T getService ( URI bundleURI , Class < T > svcClazz ) { } }
return m_bundles . getService ( bundleURI , svcClazz ) ;
public class ManageTagsDialog { /** * This method initializes btnDelete * @ return javax . swing . JButton */ private JButton getBtnDelete ( ) { } }
if ( btnDelete == null ) { btnDelete = new JButton ( ) ; btnDelete . setText ( Constant . messages . getString ( "history.managetags.button.delete" ) ) ; btnDelete . setMinimumSize ( new java . awt . Dimension ( 75 , 30 ) ) ; btnDelete . setPreferredSize ( new java . awt . Dimension ( 75 , 30 ) ) ; btnDelete . setMaximumSize ( new java . awt . Dimension ( 100 , 40 ) ) ; btnDelete . setEnabled ( true ) ; btnDelete . addActionListener ( new java . awt . event . ActionListener ( ) { @ Override public void actionPerformed ( java . awt . event . ActionEvent e ) { deleteTags ( tagList . getSelectedValuesList ( ) ) ; } } ) ; } return btnDelete ;
public class SOD { /** * Get the k nearest neighbors in terms of the shared nearest neighbor * distance . * The query object is excluded from the knn list . * FIXME : move this to the database layer . * @ param relation the database holding the objects * @ param simQ similarity function * @ param queryObject the query object for which the kNNs should be determined * @ return the k nearest neighbors in terms of the shared nearest neighbor * distance without the query object */ private DBIDs getNearestNeighbors ( Relation < V > relation , SimilarityQuery < V > simQ , DBIDRef queryObject ) { } }
Heap < DoubleDBIDPair > nearestNeighbors = new TiedTopBoundedHeap < > ( knn ) ; for ( DBIDIter iter = relation . iterDBIDs ( ) ; iter . valid ( ) ; iter . advance ( ) ) { if ( DBIDUtil . equal ( iter , queryObject ) ) { continue ; } double sim = simQ . similarity ( queryObject , iter ) ; if ( sim > 0. ) { nearestNeighbors . add ( DBIDUtil . newPair ( sim , iter ) ) ; } } // Collect DBIDs ArrayModifiableDBIDs dbids = DBIDUtil . newArray ( nearestNeighbors . size ( ) ) ; while ( nearestNeighbors . size ( ) > 0 ) { dbids . add ( nearestNeighbors . poll ( ) ) ; } return dbids ;
public class ChangeHandler { /** * Method can be used to inform all registered listeners about a change . * Note : In case one listener could not be notified , the others will still be informed about the change and the exception in thrown afterwards . * @ throws CouldNotPerformException is returned if the notification has failed for some listeners . * @ throws InterruptedException is thrown if the thread was externally interrupted . */ @ Override public void notifyChange ( ) throws CouldNotPerformException , InterruptedException { } }
synchronized ( LISTENER_LOCK ) { ExceptionStack exceptionStack = null ; for ( ChangeListener listener : listeners ) { try { listener . notifyChange ( ) ; } catch ( CouldNotPerformException ex ) { exceptionStack = MultiException . push ( listener , ex , exceptionStack ) ; } } MultiException . checkAndThrow ( ( ) -> "Could not notify all listeners about the change!" , exceptionStack ) ; }
public class CmsSessionsApp { /** * Get user names as String from set of sessions . < p > * @ param ids to gain usernames from * @ param andLocalized String * @ return user names as string */ protected static String getUserNames ( Set < String > ids , String andLocalized ) { } }
List < String > userNames = new ArrayList < String > ( ) ; for ( String id : ids ) { CmsSessionInfo session = OpenCms . getSessionManager ( ) . getSessionInfo ( new CmsUUID ( id ) ) ; try { String name = A_CmsUI . getCmsObject ( ) . readUser ( session . getUserId ( ) ) . getName ( ) ; if ( ! userNames . contains ( name ) ) { userNames . add ( name ) ; } } catch ( CmsException e ) { LOG . error ( "Unable to read user information" , e ) ; } } Iterator < String > iterator = userNames . iterator ( ) ; String res = "" ; while ( iterator . hasNext ( ) ) { res += iterator . next ( ) ; if ( iterator . hasNext ( ) ) { res += ", " ; } } int lastPosSeperation = res . lastIndexOf ( ", " ) ; return lastPosSeperation == - 1 ? res : res . substring ( 0 , lastPosSeperation ) + " " + andLocalized + " " + res . substring ( lastPosSeperation + 2 , res . length ( ) ) ;
public class GroovyScriptEngine { /** * Run a script identified by name with a single argument . * @ param scriptName name of the script to run * @ param argument a single argument passed as a variable named < code > arg < / code > in the binding * @ return a < code > toString ( ) < / code > representation of the result of the execution of the script * @ throws ResourceException if there is a problem accessing the script * @ throws ScriptException if there is a problem parsing the script */ public String run ( String scriptName , String argument ) throws ResourceException , ScriptException { } }
Binding binding = new Binding ( ) ; binding . setVariable ( "arg" , argument ) ; Object result = run ( scriptName , binding ) ; return result == null ? "" : result . toString ( ) ;
public class JavaClasspathParser { /** * Reads entry of a . classpath file . * @ param projectName * - the name of project containing the . classpath file * @ param projectRootAbsoluteFullPath * - the path to project containing the . classpath file * @ return the set of CLasspath ENtries extracted from the . classpath * @ throws CoreException * - exception during parsing of . classpath * @ throws IOException * - exception during parsing of . classpath * @ throws ClasspathEntry . AssertionFailedException * - exception during parsing of . classpath * @ throws URISyntaxException * - exception during parsing of . classpath */ public static IClasspathEntry [ ] [ ] readFileEntriesWithException ( String projectName , URL projectRootAbsoluteFullPath ) throws CoreException , IOException , ClasspathEntry . AssertionFailedException , URISyntaxException { } }
return readFileEntriesWithException ( projectName , projectRootAbsoluteFullPath , null ) ;
public class AlarmRuleAction { /** * 删除node */ public void doDelete ( @ Param ( "alarmRuleId" ) Long alarmRuleId , @ Param ( "pipelineId" ) Long pipelineId , Navigator nav ) throws WebxException { } }
alarmRuleService . remove ( alarmRuleId ) ; nav . redirectToLocation ( "alarmRuleList.htm?pipelineId=" + pipelineId ) ;
public class MiniProfilerFilter {
  /**
   * If profiling is supposed to occur for the current request, profile the
   * request and store the captured data for later retrieval; otherwise this
   * filter passes the request through untouched.
   */
  @Override
  public void doFilter(ServletRequest sReq, ServletResponse sRes, FilterChain chain) throws IOException, ServletException {
    HttpServletRequest req = (HttpServletRequest) sReq;
    HttpServletResponse res = (HttpServletResponse) sRes;
    if (shouldProfile(req.getRequestURI())) {
      String queryString = req.getQueryString();
      // Unique id for this request; also used as the storage key suffix below.
      String requestId = String.valueOf(counter.incrementAndGet());
      // Ids of profiled requests that redirected into this one, carried via a
      // dedicated query parameter (if present).
      String redirectRequestIds = null;
      if (!isEmpty(queryString)) {
        String[] parts = queryString.split("&");
        for (String part : parts) {
          String[] nameValue = part.split("=");
          if (REQUEST_ID_PARAM_REDIRECT.equals(nameValue[0])) {
            redirectRequestIds = nameValue[1];
            break;
          }
        }
      }
      req.setAttribute(REQUEST_ID_ATTRIBUTE, requestId);
      // Expose the full redirect chain (plus this request's id) to the client.
      res.addHeader(REQUEST_ID_HEADER, redirectRequestIds != null ? redirectRequestIds + "," + requestId : requestId);
      addIncludes(req);
      ResponseWrapper resWrapper = new ResponseWrapper(res, requestId, redirectRequestIds);
      MiniProfiler.Profile profile = null;
      long startTime = System.currentTimeMillis();
      MiniProfiler.start();
      try {
        chain.doFilter(sReq, resWrapper);
      } finally {
        // Always stop the profiler, even if the downstream chain throws.
        profile = MiniProfiler.stop();
      }
      Map<String, Object> requestData = new HashMap<String, Object>();
      requestData.put("requestURL", req.getRequestURI() + ((req.getQueryString() != null) ? "?" + req.getQueryString() : ""));
      requestData.put("timestamp", startTime);
      requestData.put("redirect", resWrapper.getDidRedirect());
      String appstatsId = resWrapper.getAppstatsId();
      if (appstatsId != null) {
        requestData.put("appstatsId", appstatsId);
      }
      requestData.put("profile", profile);
      // Persist the captured profile with an expiry so the UI can fetch it later.
      ms.put(String.format(MEMCACHE_KEY_FORMAT_STRING, requestId), requestData, Expiration.byDeltaSeconds(dataExpiry));
    } else {
      chain.doFilter(sReq, sRes);
    }
  }
}
public class GeoTagImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ SuppressWarnings ( "unchecked" ) @ Override public EList < Project > getProjects ( ) { } }
return ( EList < Project > ) eGet ( StorePackage . Literals . GEO_TAG__PROJECTS , true ) ;
public class Protobuf { /** * Streams multiple messages to { @ code file } . Reading the messages back requires to * call methods { @ code readStream ( . . . ) } . * See https : / / developers . google . com / protocol - buffers / docs / techniques # streaming */ public static < MSG extends Message > void writeStream ( Iterable < MSG > messages , File toFile , boolean append ) { } }
OutputStream out = null ; try { out = new BufferedOutputStream ( new FileOutputStream ( toFile , append ) ) ; writeStream ( messages , out ) ; } catch ( Exception e ) { throw ContextException . of ( "Unable to write messages" , e ) . addContext ( "file" , toFile ) ; } finally { IOUtils . closeQuietly ( out ) ; }
public class OSGiConfigUtils { /** * Test to see if the given configuration key starts with any known system prefixes * @ param key the key to test * @ return true if it startsWith one of the system prefixes */ public static boolean isSystemKey ( String key ) { } }
for ( String prefix : Config13Constants . SYSTEM_PREFIXES ) { if ( key . startsWith ( prefix ) ) { return true ; } } return false ;
public class Jose4jValidator {
  /**
   * Checks the JWT "aud" values against the configured allowed audiences.
   *
   * A. If no "audiences" is configured, an "aud" value matching the resource
   *    service (pre-service) URL is accepted.
   * B. If "audiences" is configured, exact matching is required; no match is
   *    an error condition (signalled by returning null).
   * C. If the JWT has no "aud" but "audiences" is configured, that is also an
   *    error condition handled by the caller.
   * D. ALL_AUDIENCES can be specified to ignore the audience check.
   *
   * @param allowedAudiences configured audiences, or null when not configured
   * @param audiences the "aud" values from the JWT
   * @return the first accepted audience, or null when none is accepted
   */
  String jwtAudienceElementCheck(List<String> allowedAudiences, List<String> audiences) {
    if (allowedAudiences == null) {
      // Point A: no configuration -- accept an "aud" that matches the
      // resource service URL.
      for (String audience : audiences) {
        if (oidcClientRequest.isPreServiceUrl(audience)) {
          return audience;
        }
      }
      return null; // Point B: nothing matched -> error condition for the caller
    }
    // Point B: "audiences" is configured -> exact matching is required.
    for (String audience : audiences) {
      for (String allowedAud : allowedAudiences) { // entries are non-null when created from the configuration instance
        // Point D: an ALL_AUDIENCES entry is presumably handled/expanded
        // upstream in the configuration -- verify against the config code.
        if (allowedAud != null && allowedAud.equals(audience))
          return audience;
      }
    }
    return null;
  }
}
public class QueryBatcherImpl {
  /**
   * Accepts a QueryBatch that was successfully retrieved from the server and a
   * QueryBatchListener that failed to apply, and retries that listener on the
   * batch. The batch is copied with a fresh forest client so the retry can use
   * a different host if the original one became unavailable.
   *
   * @param batch the batch to retry
   * @param queryBatchListener the listener to re-apply
   */
  @Override
  public void retryListener(QueryBatch batch, QueryBatchListener queryBatchListener) {
    // Resolve a (possibly new) client for the batch's forest in case the
    // original host is unavailable.
    DatabaseClient client = null;
    Forest[] forests = batch.getBatcher().getForestConfig().listForests();
    for (Forest forest : forests) {
      if (forest.equals(batch.getForest()))
        client = getMoveMgr().getForestClient(forest);
    }
    // NOTE(review): client stays null if the batch's forest is no longer in
    // the forest configuration -- confirm downstream handling.
    QueryBatchImpl retryBatch = new QueryBatchImpl()
        .withClient(client)
        .withBatcher(batch.getBatcher())
        .withTimestamp(batch.getTimestamp())
        .withServerTimestamp(batch.getServerTimestamp())
        .withItems(batch.getItems())
        // withJobTicket was set twice in the original; once is enough.
        .withJobTicket(batch.getJobTicket())
        .withJobBatchNumber(batch.getJobBatchNumber())
        .withJobResultsSoFar(batch.getJobResultsSoFar())
        .withForestBatchNumber(batch.getForestBatchNumber())
        .withForestResultsSoFar(batch.getForestResultsSoFar())
        .withForest(batch.getForest());
    queryBatchListener.processEvent(retryBatch);
  }
}
public class RegionInstanceGroupManagerClient { /** * Lists the instances in the managed instance group and instances that are scheduled to be * created . The list includes any current actions that the group has scheduled for its instances . * < p > Sample code : * < pre > < code > * try ( RegionInstanceGroupManagerClient regionInstanceGroupManagerClient = RegionInstanceGroupManagerClient . create ( ) ) { * ProjectRegionInstanceGroupManagerName instanceGroupManager = ProjectRegionInstanceGroupManagerName . of ( " [ PROJECT ] " , " [ REGION ] " , " [ INSTANCE _ GROUP _ MANAGER ] " ) ; * RegionInstanceGroupManagersListInstancesResponse response = regionInstanceGroupManagerClient . listManagedInstancesRegionInstanceGroupManagers ( instanceGroupManager ) ; * < / code > < / pre > * @ param instanceGroupManager The name of the managed instance group . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final RegionInstanceGroupManagersListInstancesResponse listManagedInstancesRegionInstanceGroupManagers ( ProjectRegionInstanceGroupManagerName instanceGroupManager ) { } }
ListManagedInstancesRegionInstanceGroupManagersHttpRequest request = ListManagedInstancesRegionInstanceGroupManagersHttpRequest . newBuilder ( ) . setInstanceGroupManager ( instanceGroupManager == null ? null : instanceGroupManager . toString ( ) ) . build ( ) ; return listManagedInstancesRegionInstanceGroupManagers ( request ) ;
public class ComponentFactory { /** * Factory method for create a new { @ link FeedbackPanel } . * @ param id * the id * @ return the { @ link FeedbackPanel } . */ public static FeedbackPanel newFeedbackPanel ( final String id ) { } }
final FeedbackPanel feedbackPanel = new FeedbackPanel ( id ) ; feedbackPanel . setOutputMarkupId ( true ) ; return feedbackPanel ;
public class GoogleAddon {
  /**
   * MapAddon implementation: lazily creates the Google map widget the first
   * time this addon is painted; subsequent calls are no-ops.
   */
  public void accept(PainterVisitor visitor, Object group, Bbox bounds, boolean recursive) {
    if (googleMap == null) {
      // Create as first child of the raster group.
      map.getRasterContext().drawGroup(null, this);
      String id = map.getRasterContext().getId(this);
      String graphicsId = map.getVectorContext().getId();
      googleMap = createGoogleMap(id, graphicsId, type.name(), showMap, getVerticalMargin(), getHorizontalMargin(), getVerticalAlignmentString());
    }
  }
}
public class CoreLabelTokenFactory { /** * Constructs a CoreLabel as a String with a corresponding BEGIN and END position . * ( Does not take substring ) . */ public CoreLabel makeToken ( String tokenText , int begin , int length ) { } }
return makeToken ( tokenText , tokenText , begin , length ) ;
public class NativeTypedArrayView { /** * Constructor properties */ @ Override protected void fillConstructorProperties ( IdFunctionObject ctor ) { } }
ctor . put ( "BYTES_PER_ELEMENT" , ctor , ScriptRuntime . wrapInt ( getBytesPerElement ( ) ) ) ;
public class PanCompilerTask { /** * Set the include globs to use for the pan compiler loadpath . * @ param includes * String of comma - or space - separated file globs */ public void setIncludes ( String includes ) { } }
// Split the string into separate file globs . String [ ] globs = includes . split ( "[\\s,]+" ) ; // Loop over these globs and create dirsets from them . // Do not set the root directory until the task is // executed . for ( String glob : globs ) { DirSet dirset = new DirSet ( ) ; dirset . setIncludes ( glob ) ; this . includes . add ( dirset ) ; }
public class AvailabilityZoneDetailMarshaller { /** * Marshall the given parameter object . */ public void marshall ( AvailabilityZoneDetail availabilityZoneDetail , ProtocolMarshaller protocolMarshaller ) { } }
if ( availabilityZoneDetail == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( availabilityZoneDetail . getName ( ) , NAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class GetAllAdUnitSizes {
  /**
   * Runs the example: fetches all ad unit sizes and prints them.
   *
   * @param adManagerServices the services factory
   * @param session the session
   * @throws ApiException if the API request failed with one or more service errors
   * @throws RemoteException if the API request failed due to other errors
   */
  public static void runExample(AdManagerServices adManagerServices, AdManagerSession session) throws RemoteException {
    // Get the InventoryService.
    InventoryServiceInterface inventoryService = adManagerServices.get(session, InventoryServiceInterface.class);
    // An empty statement selects all ad unit sizes.
    StatementBuilder statementBuilder = new StatementBuilder();
    AdUnitSize[] adUnitSizes = inventoryService.getAdUnitSizesByStatement(statementBuilder.toStatement());
    if (adUnitSizes == null) {
      System.out.println("No ad unit sizes found.");
      return;
    }
    for (int i = 0; i < adUnitSizes.length; i++) {
      System.out.printf("%d) Ad unit size of dimensions '%s' was found.%n", i, adUnitSizes[i].getFullDisplayString());
    }
  }
}
public class JobListNextOptions { /** * Set the time the request was issued . Client libraries typically set this to the current system clock time ; set it explicitly if you are calling the REST API directly . * @ param ocpDate the ocpDate value to set * @ return the JobListNextOptions object itself . */ public JobListNextOptions withOcpDate ( DateTime ocpDate ) { } }
if ( ocpDate == null ) { this . ocpDate = null ; } else { this . ocpDate = new DateTimeRfc1123 ( ocpDate ) ; } return this ;
public class E { /** * Throws out an { @ link IllegalArgumentException } with error message specified * if ` tester ` is ` true ` . * @ param tester * when ` true ` then throw out the exception . * @ param msg * the error message format pattern . * @ param args * the error message format arguments . */ public static void illegalArgumentIf ( boolean tester , String msg , Object ... args ) { } }
if ( tester ) { throw new IllegalArgumentException ( S . fmt ( msg , args ) ) ; }
public class FeatureTokens {
  /**
   * Setter for the "beginnings" feature.
   *
   * @generated
   * @param v value to set into the feature
   */
  public void setBeginnings(IntegerArray v) {
    // Generated UIMA accessor: verify the type system defines the feature
    // before writing through the low-level CAS API.
    if (FeatureTokens_Type.featOkTst && ((FeatureTokens_Type) jcasType).casFeat_beginnings == null)
      jcasType.jcas.throwFeatMissing("beginnings", "ch.epfl.bbp.uima.types.FeatureTokens");
    jcasType.ll_cas.ll_setRefValue(addr, ((FeatureTokens_Type) jcasType).casFeatCode_beginnings, jcasType.ll_cas.ll_getFSRef(v));
  }
}
public class IfcStyledItemImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ SuppressWarnings ( "unchecked" ) public EList < IfcPresentationStyleAssignment > getStyles ( ) { } }
return ( EList < IfcPresentationStyleAssignment > ) eGet ( Ifc2x3tc1Package . Literals . IFC_STYLED_ITEM__STYLES , true ) ;
public class DssatXFileOutput { /** * Try to translate 3 - bit CRID to 2 - bit version stored in the map * @ param cuData the cultivar data record * @ param id the field id for contain crop id info * @ param defVal the default value when id is not available * @ return 2 - bit crop ID */ private String translateTo2BitCrid ( Map cuData , String id , String defVal ) { } }
String crid = getValueOr ( cuData , id , "" ) ; if ( ! crid . equals ( "" ) ) { return DssatCRIDHelper . get2BitCrid ( crid ) ; } else { return defVal ; }
public class Get { /** * Retrieves the cookie expiration in the application with the provided * cookieName . If the cookie doesn ' t exist , a null value will be returned . * @ param expectedCookieName - the name of the cookie * @ return String - the expiration of the cookie */ public Date cookieExpiration ( String expectedCookieName ) { } }
Cookie cookie = cookie ( expectedCookieName ) ; if ( cookie != null ) { return cookie . getExpiry ( ) ; } return null ;
public class IotHubResourcesInner {
  /**
   * Gets a list of the consumer groups in the Event Hub-compatible
   * device-to-cloud endpoint in an IoT hub.
   *
   * @param nextPageLink the NextLink from the previous successful call to the List operation
   * @throws IllegalArgumentException thrown if parameters fail the validation
   * @throws ErrorDetailsException thrown if the request is rejected by the server
   * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
   * @return the PagedList&lt;EventHubConsumerGroupInfoInner&gt; object if successful
   */
  public PagedList<EventHubConsumerGroupInfoInner> listEventHubConsumerGroupsNext(final String nextPageLink) {
    // Fetch the first page synchronously.
    ServiceResponse<Page<EventHubConsumerGroupInfoInner>> response = listEventHubConsumerGroupsNextSinglePageAsync(nextPageLink).toBlocking().single();
    // Wrap it in a PagedList that lazily pulls subsequent pages on demand.
    return new PagedList<EventHubConsumerGroupInfoInner>(response.body()) {
      @Override
      public Page<EventHubConsumerGroupInfoInner> nextPage(String nextPageLink) {
        return listEventHubConsumerGroupsNextSinglePageAsync(nextPageLink).toBlocking().single().body();
      }
    };
  }
}
public class Memoize { /** * Convert a Supplier into one that caches it ' s result * @ param s Supplier to memoise * @ return Memoised Supplier */ public static < T > Function0 < T > memoizeSupplier ( final Supplier < T > s ) { } }
AtomicReference value = new AtomicReference < > ( UNSET ) ; return ( ) -> { Object val = value . get ( ) ; if ( val == UNSET ) { synchronized ( UNSET ) { if ( value . get ( ) == UNSET ) { val = s . get ( ) ; value . set ( val ) ; } } } return ( T ) val ; } ;
public class DelegatingSignatureSupportKeyService {
  /** {@inheritDoc} */
  @Override
  public String createSADJwt(SAD sad) throws SignatureException, IOException {
    // Pin the reloadable component for the duration of the call, delegate,
    // and always unpin in the finally block.
    ServiceableComponent<SignatureSupportKeyService> component = null;
    try {
      component = service.getServiceableComponent();
      if (null == component) {
        throw new SignatureException("SignatureSupportKeyService: Error accessing underlying component: Invalid configuration");
      } else {
        final SignatureSupportKeyService svc = component.getComponent();
        return svc.createSADJwt(sad);
      }
    } finally {
      // Release the pin so the component can be reloaded or disposed.
      if (null != component) {
        component.unpinComponent();
      }
    }
  }
}
public class CmsDbExportView { /** * Sets up the combo box for the target file . < p > */ private void setupComboBoxFile ( ) { } }
m_target . setInputPrompt ( CmsVaadinUtils . getMessageText ( Messages . GUI_DATABASEAPP_EXPORT_FILE_NAME_EMPTY_0 ) ) ; m_target . setNewItemsAllowed ( true ) ; List < String > files = CmsDbManager . getFileListFromServer ( true ) ; for ( String file : files ) { m_target . addItem ( file ) ; }
public class SAMkNN { /** * Removes distance - based all instances from the input samples that contradict those in the STM . */ private void clean ( Instances cleanAgainst , Instances toClean , boolean onlyLast ) { } }
if ( cleanAgainst . numInstances ( ) > this . kOption . getValue ( ) && toClean . numInstances ( ) > 0 ) { if ( onlyLast ) { cleanSingle ( cleanAgainst , ( cleanAgainst . numInstances ( ) - 1 ) , toClean ) ; } else { for ( int i = 0 ; i < cleanAgainst . numInstances ( ) ; i ++ ) { cleanSingle ( cleanAgainst , i , toClean ) ; } } }
public class Xcelite {
  /**
   * Gets the sheet with the specified name.
   *
   * @param sheetName the sheet name (the original javadoc said "sheetIndex"; the parameter is a name)
   * @return the XceliteSheet wrapping the workbook sheet
   * @throws XceliteException if no sheet with the given name exists
   */
  public XceliteSheet getSheet(String sheetName) {
    Sheet sheet = workbook.getSheet(sheetName);
    if (sheet == null) {
      throw new XceliteException(String.format("Could not find sheet named \"%s\"", sheetName));
    }
    return new XceliteSheetImpl(sheet, file);
  }
}
public class GeneratedDConnectionDaoImpl { /** * query - by method for field createdBy * @ param createdBy the specified attribute * @ return an Iterable of DConnections for the specified createdBy */ public Iterable < DConnection > queryByCreatedBy ( java . lang . String createdBy ) { } }
return queryByField ( null , DConnectionMapper . Field . CREATEDBY . getFieldName ( ) , createdBy ) ;
public class PeerGroup {
  /**
   * <p>Link the given wallet to this PeerGroup. This is used for three purposes:</p>
   * <ol>
   * <li>So the wallet receives broadcast transactions.</li>
   * <li>Announcing pending transactions that didn't get into the chain yet to our peers.</li>
   * <li>Set the fast catchup time using {@link PeerGroup#setFastCatchupTimeSecs(long)}, to optimize chain download.</li>
   * </ol>
   * <p>Note that this should be done before chain download commences because if you add a wallet with keys earlier
   * than the current chain head, the relevant parts of the chain won't be redownloaded for you.</p>
   * <p>The Wallet will have an event listener registered on it, so to avoid leaks remember to use
   * {@link PeerGroup#removeWallet(Wallet)} on it if you wish to keep the Wallet but lose the PeerGroup.</p>
   */
  public void addWallet(Wallet wallet) {
    lock.lock();
    try {
      checkNotNull(wallet);
      checkState(!wallets.contains(wallet)); // a wallet may only be added once
      wallets.add(wallet);
      // Let the wallet broadcast transactions through this group.
      wallet.setTransactionBroadcaster(this);
      // Register the group's wallet-event listeners on the calling thread.
      wallet.addCoinsReceivedEventListener(Threading.SAME_THREAD, walletCoinsReceivedEventListener);
      wallet.addKeyChainEventListener(Threading.SAME_THREAD, walletKeyEventListener);
      wallet.addScriptsChangeEventListener(Threading.SAME_THREAD, walletScriptsEventListener);
      // The wallet contributes to the peer filter (Bloom filtering).
      addPeerFilterProvider(wallet);
      // Tell already-connected peers about the new wallet.
      for (Peer peer : peers) {
        peer.addWallet(wallet);
      }
    } finally {
      lock.unlock();
    }
  }
}
public class SoapUtil {
  /**
   * Disables validation of HTTPS server certificates. Only to be used during
   * development and tests!
   *
   * @param client the CXF client whose HTTP conduit will be reconfigured
   */
  public static void disableTlsServerCertificateCheck(Client client) {
    if (!(client.getConduit() instanceof HTTPConduit)) {
      // Nothing to do for non-HTTP conduits.
      log.warn("Conduit not of type HTTPConduit (" + client.getConduit().getClass().getName() + ") , skip disabling server certification validation.");
      return;
    }
    log.warn("Disables server certification validation for: " + client.getEndpoint().getEndpointInfo().getAddress());
    HTTPConduit httpConduit = (HTTPConduit) client.getConduit();
    TLSClientParameters tlsParams = new TLSClientParameters();
    // SECURITY: trust-all manager accepts any certificate chain.
    TrustManager[] trustAllCerts = new TrustManager[] { new FakeTrustManager() };
    tlsParams.setTrustManagers(trustAllCerts);
    // Also disable hostname (CN) verification.
    tlsParams.setDisableCNCheck(true);
    httpConduit.setTlsClientParameters(tlsParams);
  }
}
public class LifeCycleHelper { /** * Validates a component using any { @ link Validate } methods . This is * typically done after * { @ link # assignProvidedProperties ( ComponentDescriptor , Object ) } and * { @ link # assignConfiguredProperties ( ComponentDescriptor , Object , BeanConfiguration ) } * Usually validation is light - weight , idempotent and quick , as compared to * { @ link # initialize ( ComponentDescriptor , Object , boolean ) } . * @ param descriptor * @ param component */ public void validate ( ComponentDescriptor < ? > descriptor , Object component ) { } }
InitializeCallback callback = new InitializeCallback ( true , false , _includeNonDistributedTasks ) ; callback . onEvent ( component , descriptor ) ;
public class UnsafeOperations { /** * Construct and allocate on the heap an instant of the given class , without calling the class constructor * @ param clazz Class to create instant for * @ param < T > Type of the instance to be constructed * @ return The new instance * @ throws IllegalStateException Indicates a problem occurred */ public final < T > T allocateInstance ( Class < T > clazz ) throws IllegalStateException { } }
try { @ SuppressWarnings ( "unchecked" ) final T result = ( T ) THE_UNSAFE . allocateInstance ( clazz ) ; return result ; } catch ( InstantiationException e ) { throw new IllegalStateException ( "Cannot allocate instance: " + e . getMessage ( ) , e ) ; }
public class DifferenceEngine { /** * Compare two Documents for doctype and then element differences * @ param control * @ param test * @ param listener * @ param elementQualifier * @ throws DifferenceFoundException */ protected void compareDocument ( Document control , Document test , DifferenceListener listener , ElementQualifier elementQualifier ) throws DifferenceFoundException { } }
DocumentType controlDoctype = control . getDoctype ( ) ; DocumentType testDoctype = test . getDoctype ( ) ; compare ( getNullOrNotNull ( controlDoctype ) , getNullOrNotNull ( testDoctype ) , controlDoctype , testDoctype , listener , HAS_DOCTYPE_DECLARATION ) ; if ( controlDoctype != null && testDoctype != null ) { compareNode ( controlDoctype , testDoctype , listener , elementQualifier ) ; }
public class FutureConverter { /** * Converts { @ link java . util . concurrent . CompletableFuture } to { @ link com . google . api . core . ApiFuture } . */ public static < T > ApiFuture < T > toApiFuture ( CompletableFuture < T > completableFuture ) { } }
return ApiFutureUtils . createApiFuture ( Java8FutureUtils . createValueSourceFuture ( completableFuture ) ) ;
public class SecretRewriter {
  /**
   * Decides whether the given directory should be skipped during the scan.
   * The workspace and artifacts directories are potentially large and hold no
   * secrets; plugins holds no mutable data; "." and ".." are self references.
   */
  protected boolean isIgnoredDir(File dir) {
    switch (dir.getName()) {
      case "workspace":
      case "artifacts":
      case "plugins":
      case ".":
      case "..":
        return true;
      default:
        return false;
    }
  }
}
public class DirContextAdapter {
  /** {@inheritDoc} */
  @Override
  public void setAttributeValue(String name, Object value) {
    // New entry: store the value directly (a null value is simply ignored).
    if (!updateMode && value != null) {
      originalAttrs.put(name, value);
    }
    // Updating an existing entry: stage the change. A null value produces an
    // empty attribute -- presumably interpreted downstream as attribute
    // removal; verify against the update-processing code.
    if (updateMode) {
      Attribute attribute = new NameAwareAttribute(name);
      if (value != null) {
        attribute.add(value);
      }
      updatedAttrs.put(attribute);
    }
  }
}
public class DFSClient {
  /**
   * Sets or resets quotas for a directory.
   *
   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)
   */
  void setQuota(String src, long namespaceQuota, long diskspaceQuota) throws IOException {
    // Sanity check: each quota must be positive, or one of the two sentinel
    // values (QUOTA_DONT_SET leaves the quota unchanged, QUOTA_RESET clears it).
    if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET && namespaceQuota != FSConstants.QUOTA_RESET) || (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET && diskspaceQuota != FSConstants.QUOTA_RESET)) {
      throw new IllegalArgumentException("Invalid values for quota : " + namespaceQuota + " and " + diskspaceQuota);
    }
    try {
      namenode.setQuota(src, namespaceQuota, diskspaceQuota);
    } catch (RemoteException re) {
      // Unwrap the remote exception into the specific local types callers expect.
      throw re.unwrapRemoteException(AccessControlException.class, FileNotFoundException.class, NSQuotaExceededException.class, DSQuotaExceededException.class);
    }
  }
}
public class ProcessThread { /** * Get threads holding lock this thread is trying to acquire . * @ return { @ link ThreadSet } that contains blocked thread or empty set if this thread does not hold any lock . */ public @ Nonnull SetType getBlockingThreads ( ) { } }
final ThreadType blocking = getBlockingThread ( ) ; if ( blocking == null ) return runtime . getEmptyThreadSet ( ) ; return runtime . getThreadSet ( Collections . singleton ( blocking ) ) ;
public class AbstractFormModel {
  /**
   * Registers converters for a given property name.
   *
   * @param propertyName name of property on which to register converters
   * @param toConverter converts from source to target type
   * @param fromConverter converts from target to source type
   */
  public void registerPropertyConverter(String propertyName, Converter toConverter, Converter fromConverter) {
    // NOTE(review): throws a NullPointerException if no conversion service was
    // previously registered for this property -- confirm callers guarantee that.
    DefaultConversionService propertyConversionService = (DefaultConversionService) propertyConversionServices.get(propertyName);
    propertyConversionService.addConverter(toConverter);
    propertyConversionService.addConverter(fromConverter);
  }
}
public class PublishingListener { /** * A listener for ndarray message * @ param message the message for the callback */ @ Override public void onNDArrayMessage ( NDArrayMessage message ) { } }
try ( AeronNDArrayPublisher publisher = AeronNDArrayPublisher . builder ( ) . streamId ( streamId ) . ctx ( aeronContext ) . channel ( masterUrl ) . build ( ) ) { publisher . publish ( message ) ; log . debug ( "NDArray PublishingListener publishing to channel " + masterUrl + ":" + streamId ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; }
public class TypeConversionUtil { /** * Extract the write part of a scalar attribute * @ param array * @ param writeType * @ return */ public static Object getWritePart ( final Object array , final AttrWriteType writeType ) { } }
if ( writeType . equals ( AttrWriteType . READ_WRITE ) ) { return Array . get ( array , 1 ) ; } else { return Array . get ( array , 0 ) ; }
public class HttpServer { /** * Join the listeners . * Join all listeners that are instances of ThreadPool . * @ exception InterruptedException */ public void join ( ) throws InterruptedException { } }
for ( int l = 0 ; l < _listeners . size ( ) ; l ++ ) { HttpListener listener = ( HttpListener ) _listeners . get ( l ) ; if ( listener . isStarted ( ) && listener instanceof ThreadPool ) { ( ( ThreadPool ) listener ) . join ( ) ; } }
public class CmsFlexCacheKey {
  /**
   * Appends a flex cache key value to the given buffer.
   *
   * @param str the buffer to append to
   * @param key the key to append
   * @param value the value to append
   */
  private static void appendKeyValue(StringBuffer str, String key, String value) {
    str.append(key);
    // Intentional identity (==) comparison: IS_USED is a sentinel instance,
    // distinct from any merely equal-valued string.
    if (value == IS_USED) {
      // Key is flagged as used without a concrete value: "key;"
      str.append(";");
    } else {
      // Key with a value: "key=(value);"
      str.append("=(");
      str.append(value);
      str.append(");");
    }
  }
}
public class HeapUpdateDoublesSketch {

    /**
     * Loads the Combined Buffer, min and max values from the given source Memory.
     * The resulting Combined Buffer is always in non-compact form and must be pre-allocated.
     *
     * @param srcMem the given source Memory
     * @param serVer the serialization version of the source
     * @param srcIsCompact true if the given source Memory is in compact form
     * @param combBufCap total items for the combined buffer (size in doubles)
     */
    private void srcMemoryToCombinedBuffer(final Memory srcMem, final int serVer,
            final boolean srcIsCompact, final int combBufCap) {
        final int preLongs = 2;
        final int extra = (serVer == 1) ? 3 : 2; // space for min and max values, buf alloc (SerVer 1)
        final int preBytes = (preLongs + extra) << 3;
        final int bbCnt = baseBufferCount_;
        final int k = getK();
        final long n = getN();
        final double[] combinedBuffer = new double[combBufCap]; // always non-compact

        // Load min, max
        putMinValue(srcMem.getDouble(MIN_DOUBLE));
        putMaxValue(srcMem.getDouble(MAX_DOUBLE));

        if (srcIsCompact) {
            // Load base buffer
            srcMem.getDoubleArray(preBytes, combinedBuffer, 0, bbCnt);

            // Load levels from compact srcMem: each set bit in bitPattern marks a
            // populated level of k items; compact form stores only those levels.
            long bitPattern = bitPattern_;
            if (bitPattern != 0) {
                long memOffset = preBytes + (bbCnt << 3);
                int combBufOffset = 2 * k; // levels start after the 2k base-buffer slots
                while (bitPattern != 0L) {
                    if ((bitPattern & 1L) > 0L) {
                        srcMem.getDoubleArray(memOffset, combinedBuffer, combBufOffset, k);
                        memOffset += (k << 3); // bytes, increment compactly
                    }
                    combBufOffset += k; // doubles, increment every level
                    bitPattern >>>= 1;
                }
            }
        } else { // srcMem not compact: base buffer and all levels copied in one bulk move
            final int levels = Util.computeNumLevelsNeeded(k, n);
            final int totItems = (levels == 0) ? bbCnt : (2 + levels) * k;
            srcMem.getDoubleArray(preBytes, combinedBuffer, 0, totItems);
        }
        putCombinedBuffer(combinedBuffer);
    }
}
public class DbxClientV1 {

    /**
     * Get the metadata for a given path and its children if anything has changed since the last
     * time you got them (as determined by the value of {@link DbxEntry.WithChildren#hash} from
     * the last result).
     *
     * @param path the path (starting with "/") to the file or folder (see {@link DbxPathV1})
     * @param includeMediaInfo passed through to the base call; presumably requests photo/video
     *        metadata in the result — TODO confirm against the base method's documentation
     * @param previousFolderHash the value of {@link DbxEntry.WithChildren#hash} from the last
     *        time you got the metadata for this folder (and children)
     * @return never {@code null}: {@code Maybe.Nothing} if the folder's contents still match
     *         {@code previousFolderHash}; otherwise {@code Maybe.Just(null)} if there's nothing
     *         there, or {@code Maybe.Just} with the metadata
     */
    public Maybe<DbxEntry. /*@Nullable*/ WithChildren> getMetadataWithChildrenIfChanged(
            String path, boolean includeMediaInfo, /*@Nullable*/ String previousFolderHash)
            throws DbxException {
        // Delegates to the shared base implementation, selecting the reader
        // variant that also surfaces deleted entries.
        return getMetadataWithChildrenIfChangedBase(path, includeMediaInfo, previousFolderHash,
            DbxEntry.WithChildren.ReaderMaybeDeleted);
    }
}
public class ScriptableObject { /** * Associate arbitrary application - specific value with this object . * Value can only be associated with the given object and key only once . * The method ignores any subsequent attempts to change the already * associated value . * < p > The associated values are not serialized . * @ param key key object to select particular value . * @ param value the value to associate * @ return the passed value if the method is called first time for the * given key or old value for any subsequent calls . * @ see # getAssociatedValue ( Object key ) */ public synchronized final Object associateValue ( Object key , Object value ) { } }
if ( value == null ) throw new IllegalArgumentException ( ) ; Map < Object , Object > h = associatedValues ; if ( h == null ) { h = new HashMap < Object , Object > ( ) ; associatedValues = h ; } return Kit . initHash ( h , key , value ) ;
public class CallbackRegistry {

    /**
     * To be called to set the failure exception.
     *
     * @param failure the exception
     * @return true if this result will be used (first result registered),
     *         false if a result was already recorded and this one is discarded
     */
    boolean failure(Throwable failure) {
        State<T> oldState;
        synchronized (mutex) {
            // A completed registry already has its result; later outcomes lose.
            if (state.isCompleted()) {
                return false;
            }
            oldState = state;
            state = state.getFailureState(failure);
        }
        // Callbacks are invoked on the captured pre-transition state, outside
        // the mutex, so alien callback code never runs while the lock is held.
        oldState.callFailureCallbacks(failure);
        return true;
    }
}
public class Rebuilder {

    /**
     * Rebuild the hashTable in the given Memory at its current size. Changes theta and thus count.
     * This assumes a Memory preamble of standard form with correct values of curCount and thetaLong.
     * ThetaLong and curCount will change.
     * Afterwards, caller must update local class members curCount and thetaLong from Memory.
     *
     * @param mem the Memory the given Memory
     * @param preambleLongs size of preamble in longs
     * @param lgNomLongs the log_base2 of k, the configuration parameter of the sketch
     */
    static final void quickSelectAndRebuild(final WritableMemory mem, final int preambleLongs,
            final int lgNomLongs) {
        // Note: This copies the Memory data onto the heap and then at the end copies the result
        // back to Memory. Even if we tried to do this directly into Memory it would require
        // pre-clearing, and the internal loops would be slower. The bulk copies are performed at
        // a low level and are quite fast. Measurements reveal that we are not paying much of a
        // penalty.

        // Pull data into tmp arr for QS algo
        final int lgArrLongs = extractLgArrLongs(mem);
        final int curCount = extractCurCount(mem);
        final int arrLongs = 1 << lgArrLongs;
        final long[] tmpArr = new long[arrLongs];
        final int preBytes = preambleLongs << 3;
        mem.getLongArray(preBytes, tmpArr, 0, arrLongs); // copy mem data to tmpArr

        // Do the QuickSelect on a tmp arr to create new thetaLong
        final int pivot = (1 << lgNomLongs) + 1; // (K + 1) pivot for QS
        final long newThetaLong = selectExcludingZeros(tmpArr, curCount, pivot);
        insertThetaLong(mem, newThetaLong); // UPDATE thetaLong

        // Rebuild to clean up dirty data, update count
        final long[] tgtArr = new long[arrLongs];
        final int newCurCount =
            HashOperations.hashArrayInsert(tmpArr, tgtArr, lgArrLongs, newThetaLong);
        insertCurCount(mem, newCurCount); // UPDATE curCount

        // put the rebuilt array back into memory
        mem.putLongArray(preBytes, tgtArr, 0, arrLongs);
    }
}
public class JobDetail { /** * A list of job attempts associated with this job . * @ param attempts * A list of job attempts associated with this job . */ public void setAttempts ( java . util . Collection < AttemptDetail > attempts ) { } }
if ( attempts == null ) { this . attempts = null ; return ; } this . attempts = new java . util . ArrayList < AttemptDetail > ( attempts ) ;
public class JsonDeserializationContext {

    /**
     * Trace an error with current reader state and returns a corresponding exception.
     *
     * @param message error message
     * @param reader current reader
     * @return a {@link JsonDeserializationException} carrying the given message,
     *         for the caller to throw
     */
    public JsonDeserializationException traceError(String message, JsonReader reader) {
        getLogger().log(Level.SEVERE, message);
        // Dump the reader's current state so the log pinpoints the bad input.
        traceReaderInfo(reader);
        return new JsonDeserializationException(message);
    }
}
public class AwtPaint { /** * Shifts the bitmap pattern so that it will always start at a multiple of * itself for any tile the pattern is used . This ensures that regardless of * size of the pattern it tiles correctly . * @ param origin the reference point */ @ Override public void setBitmapShaderShift ( Point origin ) { } }
if ( this . texturePaint != null ) { int relativeDx = ( ( int ) - origin . x ) % this . shaderWidth ; int relativeDy = ( ( int ) - origin . y ) % this . shaderHeight ; Rectangle rectangle = new Rectangle ( relativeDx , relativeDy , this . shaderWidth , this . shaderHeight ) ; this . texturePaint = new TexturePaint ( this . texturePaint . getImage ( ) , rectangle ) ; }
public class LongStreamEx {

    /**
     * Returns an infinite sequential ordered {@code LongStreamEx} produced by
     * iterative application of a function {@code f} to an initial element
     * {@code seed}, producing a stream consisting of {@code seed},
     * {@code f(seed)}, {@code f(f(seed))}, etc.
     *
     * <p>The first element (position {@code 0}) in the {@code LongStreamEx} will
     * be the provided {@code seed}. For {@code n > 0}, the element at position
     * {@code n} will be the result of applying the function {@code f} to the
     * element at position {@code n - 1}.
     *
     * @param seed the initial element
     * @param f a function to be applied to the previous element to produce a
     *        new element
     * @return a new sequential {@code LongStream}
     * @see #iterate(long, LongPredicate, LongUnaryOperator)
     */
    public static LongStreamEx iterate(final long seed, final LongUnaryOperator f) {
        // Delegate to the predicate-bounded overload with an always-true
        // predicate, which makes the resulting stream infinite.
        return iterate(seed, x -> true, f);
    }
}
public class SanitizedContent {

    /**
     * Converts a Soy {@link SanitizedContent} of kind TRUSTED_RESOURCE_URI into a
     * {@link TrustedResourceUrl}.
     *
     * @throws IllegalStateException if this SanitizedContent's content kind is not
     *         {@link ContentKind#TRUSTED_RESOURCE_URI}.
     */
    public TrustedResourceUrl toTrustedResourceUrl() {
        Preconditions.checkState(
            getContentKind() == ContentKind.TRUSTED_RESOURCE_URI,
            "toTrustedResourceUrl() only valid for SanitizedContent of kind TRUSTED_RESOURCE_URI, "
                + "is: %s",
            getContentKind());
        // The kind check above is what justifies the unchecked conversion below.
        return UncheckedConversions.trustedResourceUrlFromStringKnownToSatisfyTypeContract(
            getContent());
    }
}
public class LongElement {

    /**
     * Creates a new time element without a format symbol.
     * (Translated from the original German javadoc.)
     *
     * @param name name of element
     * @param defaultMin default minimum
     * @param defaultMax default maximum
     * @return a new {@code LongElement} configured with the given name and bounds
     */
    static LongElement create(String name, long defaultMin, long defaultMax) {
        return new LongElement(name, defaultMin, defaultMax);
    }
}
public class MainActivity { /** * checks are completed */ @ Nullable private static Object json ( @ Nullable Object in ) { } }
if ( in == null ) return null ; return new Object ( ) { @ Override public String toString ( ) { return "{\"fake-key\": \"fake-value\"}" ; } } ;
public class PdfContentByte {

    /**
     * Create a new uncolored tiling pattern.
     * Variables xstep and ystep are set to the same values of width and height.
     *
     * @param width the width of the pattern
     * @param height the height of the pattern
     * @param color the default color. Can be <CODE>null</CODE>
     * @return the <CODE>PdfPatternPainter</CODE> where the pattern will be created
     */
    public PdfPatternPainter createPattern(float width, float height, Color color) {
        // Delegate to the full overload, using the pattern cell size as the step.
        return createPattern(width, height, width, height, color);
    }
}