signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class RendererRecyclerViewAdapter { /** * Use { @ link # getStates ( ) } */ @ Deprecated @ NonNull public SparseArray < ViewState > getViewStates ( ) { } }
final SparseArray < ViewState > list = new SparseArray < > ( ) ; final Iterator < Map . Entry < Integer , ViewState > > iterator = getStates ( ) . entrySet ( ) . iterator ( ) ; while ( iterator . hasNext ( ) ) { final Map . Entry < Integer , ViewState > next = iterator . next ( ) ; list . put ( next . getKey ( ) , next . getValue ( ) ) ; } return list ;
public class DateTimePeriod { /** * Create a list of DateTimePeriods that represent the last year of * YearMonth periods . For example , if its currently January 2009 , this * would return periods representing " January 2009 , December 2008 , . . . February 2008" * @ param zone * @ return */ static public List < DateTimePeriod > createLastYearMonths ( DateTimeZone zone ) { } }
ArrayList < DateTimePeriod > periods = new ArrayList < DateTimePeriod > ( ) ; // get today ' s date DateTime now = new DateTime ( zone ) ; // start with today ' s current month and 11 others ( last 12 months ) for ( int i = 0 ; i < 12 ; i ++ ) { // create a new period DateTimePeriod period = createMonth ( now . getYear ( ) , now . getMonthOfYear ( ) , zone ) ; periods . add ( period ) ; // subtract 1 month now = now . minusMonths ( 1 ) ; } return periods ;
public class TimeSpinner { /** * Toggles showing more time items . If enabled , a noon and a late night time item are shown . * @ param enable True to enable , false to disable more time items . */ public void setShowMoreTimeItems ( boolean enable ) { } }
if ( enable && ! showMoreTimeItems ) { // create the noon and late night item : final Resources res = getResources ( ) ; // switch the afternoon item to 2pm : insertAdapterItem ( new TimeItem ( res . getString ( R . string . time_afternoon_2 ) , formatTime ( 14 , 0 ) , 14 , 0 , R . id . time_afternoon_2 ) , 2 ) ; removeAdapterItemById ( R . id . time_afternoon ) ; // noon item : insertAdapterItem ( new TimeItem ( res . getString ( R . string . time_noon ) , formatTime ( 12 , 0 ) , 12 , 0 , R . id . time_noon ) , 1 ) ; // late night item : addAdapterItem ( new TimeItem ( res . getString ( R . string . time_late_night ) , formatTime ( 23 , 0 ) , 23 , 0 , R . id . time_late_night ) ) ; } else if ( ! enable && showMoreTimeItems ) { // switch back the afternoon item : insertAdapterItem ( new TimeItem ( getResources ( ) . getString ( R . string . time_afternoon ) , formatTime ( 13 , 0 ) , 13 , 0 , R . id . time_afternoon ) , 3 ) ; removeAdapterItemById ( R . id . time_afternoon_2 ) ; removeAdapterItemById ( R . id . time_noon ) ; removeAdapterItemById ( R . id . time_late_night ) ; } showMoreTimeItems = enable ;
public class DFSInputStream { /** * Read bytes starting from the specified position . * @ param position start read from this position * @ param buffer read buffer * @ param offset offset into buffer * @ param length number of bytes to read * @ return actual number of bytes read */ public int read ( long position , byte [ ] buffer , int offset , int length , ReadOptions options ) throws IOException { } }
// sanity checks dfsClient . checkOpen ( ) ; if ( closed ) { throw new IOException ( "Stream closed" ) ; } DFSClient . dfsInputStreamfailures . set ( 0 ) ; long start = System . currentTimeMillis ( ) ; long filelen = getFileLength ( ) ; if ( ( position < 0 ) || ( position >= filelen ) ) { return - 1 ; } int realLen = length ; if ( ( position + length ) > filelen ) { realLen = ( int ) ( filelen - position ) ; } DFSReadProfilingData pData = DFSClient . getDFSReadProfilingData ( ) ; // determine the block and byte range within the block // corresponding to position and realLen List < LocatedBlock > blockRange = getBlockRange ( position , realLen ) ; if ( ! tryPreadFromLocal ( blockRange , position , buffer , offset , length , realLen , start ) ) { // non - local or multiple block read . int remaining = realLen ; for ( LocatedBlock blk : blockRange ) { long targetStart = position - blk . getStartOffset ( ) ; long bytesToRead = Math . min ( remaining , blk . getBlockSize ( ) - targetStart ) ; if ( dfsClient . allowParallelReads && dfsClient . parallelReadsThreadPool != null ) { fetchBlockByteRangeSpeculative ( blk , targetStart , targetStart + bytesToRead - 1 , buffer , offset , options ) ; } else { if ( pData != null ) { cliData = new FSClientReadProfilingData ( ) ; pData . addDFSClientReadProfilingData ( cliData ) ; cliData . startRead ( ) ; } fetchBlockByteRange ( blk , targetStart , targetStart + bytesToRead - 1 , buffer , offset , options ) ; if ( pData != null ) { cliData . endRead ( ) ; } } remaining -= bytesToRead ; position += bytesToRead ; offset += bytesToRead ; } assert remaining == 0 : "Wrong number of bytes read." ; } if ( dfsClient . stats != null ) { dfsClient . stats . incrementBytesRead ( realLen ) ; } long timeval = System . currentTimeMillis ( ) - start ; dfsClient . metrics . incPreadTime ( timeval ) ; dfsClient . metrics . incPreadSize ( realLen ) ; dfsClient . metrics . incPreadOps ( ) ; return realLen ;
public class CommerceWarehouseUtil { /** * Returns a range of all the commerce warehouses where groupId = & # 63 ; and commerceCountryId = & # 63 ; . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CommerceWarehouseModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param groupId the group ID * @ param commerceCountryId the commerce country ID * @ param start the lower bound of the range of commerce warehouses * @ param end the upper bound of the range of commerce warehouses ( not inclusive ) * @ return the range of matching commerce warehouses */ public static List < CommerceWarehouse > findByG_C ( long groupId , long commerceCountryId , int start , int end ) { } }
return getPersistence ( ) . findByG_C ( groupId , commerceCountryId , start , end ) ;
public class ConfigBasedDatasetsFinder { /** * Based on the { @ link # whitelistTag } , find all URI which imports the tag . Then filter out * 1 . disabled dataset URI * 2 . None leaf dataset URI * Then created { @ link ConfigBasedDataset } based on the { @ link Config } of the URIs */ @ Override public List < Dataset > findDatasets ( ) throws IOException { } }
Set < URI > leafDatasets = getValidDatasetURIs ( this . commonRoot ) ; if ( leafDatasets . isEmpty ( ) ) { return ImmutableList . of ( ) ; } // Parallel execution for copyDataset for performance consideration . final List < Dataset > result = new CopyOnWriteArrayList < > ( ) ; Iterator < Callable < Void > > callableIterator = Iterators . transform ( leafDatasets . iterator ( ) , new Function < URI , Callable < Void > > ( ) { @ Override public Callable < Void > apply ( final URI datasetURI ) { return findDatasetsCallable ( configClient , datasetURI , props , blacklistPatterns , result ) ; } } ) ; this . executeItertorExecutor ( callableIterator ) ; log . info ( "found {} datasets in ConfigBasedDatasetsFinder" , result . size ( ) ) ; return result ;
public class PublicDiffCriteria { /** * Check if there is a change between two versions of a field . * Returns true if the access flags differ , or if the inital value * of the field differs . * @ param oldInfo Info about the old version of the field . * @ param newInfo Info about the new version of the field . * @ return True if the fields differ , false otherwise . */ public boolean differs ( FieldInfo oldInfo , FieldInfo newInfo ) { } }
if ( Tools . isFieldAccessChange ( oldInfo . getAccess ( ) , newInfo . getAccess ( ) ) ) return true ; if ( oldInfo . getValue ( ) == null || newInfo . getValue ( ) == null ) { if ( oldInfo . getValue ( ) != newInfo . getValue ( ) ) return true ; } else if ( ! oldInfo . getValue ( ) . equals ( newInfo . getValue ( ) ) ) return true ; return false ;
public class ThreadPoolThriftServerImpl { /** * 初始化Thrift服务 * 启动Thrift服务之前必须要进行初始化 . * @ param serverTransport * 服务传输类型 */ protected void initServer ( TServerTransport serverTransport ) { } }
ThriftServerConfiguration configuration = getServerConfiguration ( ) ; // 使用TThreadPoolServer方式启动Thrift服务器 , 对每个连接都会单独建立一个线程 . TThreadPoolServer . Args args = new TThreadPoolServer . Args ( serverTransport ) . transportFactory ( configuration . getTransportFactory ( ) ) . protocolFactory ( configuration . getProtocolFactory ( ) ) ; // 如果不设置ExecutorService , 则默认使用ThreadPoolExecutor实现 . if ( configuration . getExecutorService ( ) != null ) args . executorService ( configuration . getExecutorService ( ) ) ; server = new TThreadPoolServer ( configuration . getServerArgsAspect ( ) . tThreadPoolServerArgsAspect ( args ) . processor ( getProcessor ( ) ) ) ; if ( configuration . getServerEventHandler ( ) != null ) server . setServerEventHandler ( configuration . getServerEventHandler ( ) ) ;
public class QueryDSL { /** * < p > lexer . < / p > * @ param expression a { @ link java . lang . String } object . * @ return a { @ link ameba . db . dsl . QueryLexer } object . */ public static QueryLexer lexer ( String expression ) { } }
QueryLexer lexer = new QueryLexer ( input ( expression ) ) ; lexer . removeErrorListeners ( ) ; lexer . addErrorListener ( ERROR_LISTENER ) ; return lexer ;
public class ByteBufferInputStream { private synchronized boolean waitForContent ( ) throws InterruptedIOException { } }
if ( _buffer != null ) { if ( _buffer . hasRemaining ( ) ) return true ; // recycle buffer recycle ( _buffer ) ; _buffer = null ; } while ( ! _closed && LazyList . size ( _buffers ) == 0 ) { try { this . wait ( _timeout ) ; } catch ( InterruptedException e ) { log . debug ( e ) ; throw new InterruptedIOException ( e . toString ( ) ) ; } } if ( _closed ) return false ; if ( LazyList . size ( _buffers ) == 0 ) throw new SocketTimeoutException ( ) ; _buffer = ( ByteBuffer ) LazyList . get ( _buffers , 0 ) ; _buffers = LazyList . remove ( _buffers , 0 ) ; return true ;
public class WordCountGenerator { /** * Build count map map . * @ param pattern the pattern * @ param text the text * @ return the map */ public static Map < String , Long > buildCountMap ( Pattern pattern , String text ) { } }
return buildCountMap ( pattern . splitAsStream ( text ) ) ;
public class DriverFactory { /** * Sets the logging level of the generated web driver . * @ param caps * The web driver ' s capabilities * @ param level * The logging level */ private void setLoggingLevel ( DesiredCapabilities caps ) { } }
final LoggingPreferences logPrefs = new LoggingPreferences ( ) ; logPrefs . enable ( LogType . BROWSER , Level . ALL ) ; logPrefs . enable ( LogType . CLIENT , Level . OFF ) ; logPrefs . enable ( LogType . DRIVER , Level . OFF ) ; logPrefs . enable ( LogType . PERFORMANCE , Level . OFF ) ; logPrefs . enable ( LogType . PROFILER , Level . OFF ) ; logPrefs . enable ( LogType . SERVER , Level . OFF ) ; caps . setCapability ( CapabilityType . LOGGING_PREFS , logPrefs ) ;
public class ParallelUtils { /** * This helper method provides a convenient way to break up a computation * across < tt > N < / tt > items into individual indices to be processed . This * method is meant for when the execution time of any given index is highly * variable , and so for load balancing purposes , should be treated as * individual jobs . If runtime is consistent , look at { @ link # run ( boolean , int , jsat . utils . concurrent . LoopChunkRunner , java . util . concurrent . ExecutorService ) * @ param parallel a boolean indicating if the work should be done in * parallel . If false , it will run single - threaded . This is for code * convenience so that only one set of code is needed to handle both cases . * @ param N the total number of items to process . * @ param ir the runnable over a contiguous range * @ param threadPool the source of threads for the computation */ public static void run ( boolean parallel , int N , IndexRunnable ir , ExecutorService threadPool ) { } }
if ( ! parallel ) { for ( int i = 0 ; i < N ; i ++ ) ir . run ( i ) ; return ; } final CountDownLatch latch = new CountDownLatch ( N ) ; IntStream . range ( 0 , N ) . forEach ( threadID -> { threadPool . submit ( ( ) -> { ir . run ( threadID ) ; latch . countDown ( ) ; } ) ; } ) ; try { latch . await ( ) ; } catch ( InterruptedException ex ) { Logger . getLogger ( ParallelUtils . class . getName ( ) ) . log ( Level . SEVERE , null , ex ) ; }
public class AbstractItem { /** * Renames this item */ @ RequirePOST @ Restricted ( NoExternalUse . class ) public HttpResponse doConfirmRename ( @ QueryParameter String newName ) throws IOException { } }
newName = newName == null ? null : newName . trim ( ) ; FormValidation validationError = doCheckNewName ( newName ) ; if ( validationError . kind != FormValidation . Kind . OK ) { throw new Failure ( validationError . getMessage ( ) ) ; } renameTo ( newName ) ; // send to the new job page // note we can ' t use getUrl ( ) because that would pick up old name in the // Ancestor . getUrl ( ) return HttpResponses . redirectTo ( "../" + newName ) ;
public class Temporals { /** * Converts a { @ code ChronoUnit } to a { @ code TimeUnit } . * This handles the seven units declared in { @ code TimeUnit } . * @ param unit the unit to convert , not null * @ return the converted unit , not null * @ throws IllegalArgumentException if the unit cannot be converted */ public static TimeUnit timeUnit ( ChronoUnit unit ) { } }
Objects . requireNonNull ( unit , "unit" ) ; switch ( unit ) { case NANOS : return TimeUnit . NANOSECONDS ; case MICROS : return TimeUnit . MICROSECONDS ; case MILLIS : return TimeUnit . MILLISECONDS ; case SECONDS : return TimeUnit . SECONDS ; case MINUTES : return TimeUnit . MINUTES ; case HOURS : return TimeUnit . HOURS ; case DAYS : return TimeUnit . DAYS ; default : throw new IllegalArgumentException ( "ChronoUnit cannot be converted to TimeUnit: " + unit ) ; }
public class ThresholdBlock_MT { /** * Computes the min - max value for each block in the image */ @ Override protected void computeStatistics ( T input , int innerWidth , int innerHeight ) { } }
final int statPixelStride = stats . getImageType ( ) . getNumBands ( ) ; final int statStride = stats . stride ; int vblocks = innerHeight / blockHeight ; if ( vblocks * blockHeight < innerHeight ) vblocks ++ ; // for ( int y = 0 ; y < innerHeight ; y + = blockHeight ) { BoofConcurrency . loopFor ( 0 , vblocks , vblock -> { BlockProcessor < T , S > processor = processors . pop ( ) ; processor . init ( blockWidth , blockHeight , thresholdFromLocalBlocks ) ; int y = vblock * blockHeight ; int indexStats = ( y / blockHeight ) * statStride ; for ( int x = 0 ; x < innerWidth ; x += blockWidth , indexStats += statPixelStride ) { processor . computeBlockStatistics ( x , y , blockWidth , blockHeight , indexStats , input , stats ) ; } // handle the case where the image ' s width isn ' t evenly divisible by the block ' s width if ( innerWidth != input . width ) { processor . computeBlockStatistics ( innerWidth , y , input . width - innerWidth , blockHeight , indexStats , input , stats ) ; } processors . recycle ( processor ) ; } ) ; // NOTE : below could be thrown into its own thread before the code above . Not easy with current thread design // handle the case where the image ' s height isn ' t evenly divisible by the block ' s height if ( innerHeight != input . height ) { BlockProcessor < T , S > processor = processors . pop ( ) ; processor . init ( blockWidth , blockHeight , thresholdFromLocalBlocks ) ; int indexStats = ( innerHeight / blockHeight ) * statStride ; int y = innerHeight ; int blockHeight = input . height - innerHeight ; for ( int x = 0 ; x < innerWidth ; x += blockWidth , indexStats += statPixelStride ) { processor . computeBlockStatistics ( x , y , blockWidth , blockHeight , indexStats , input , stats ) ; } if ( innerWidth != input . width ) { processor . computeBlockStatistics ( innerWidth , y , input . width - innerWidth , blockHeight , indexStats , input , stats ) ; } }
public class CmsContextMenu { /** * Adds a separator to this menu . < p > */ public void addSeparator ( ) { } }
CmsLabel sparator = new CmsLabel ( ) ; sparator . setStyleName ( I_CmsLayoutBundle . INSTANCE . contextmenuCss ( ) . menuItemSeparator ( ) ) ; m_panel . add ( sparator ) ;
public class SignatureInfo { /** * Constructs payload to be signed . * @ return payload to sign * @ see < a href = " https : / / cloud . google . com / storage / docs / access - control # Signed - URLs " > Signed URLs < / a > */ public String constructUnsignedPayload ( ) { } }
// TODO reverse order when V4 becomes default if ( Storage . SignUrlOption . SignatureVersion . V4 . equals ( signatureVersion ) ) { return constructV4UnsignedPayload ( ) ; } return constructV2UnsignedPayload ( ) ;
public class ST_Scale { /** * Scales the given geometry by multiplying the coordinates by the * indicated x and y scale factors , leaving the z - coordinate untouched . * @ param geom Geometry * @ param xFactor x scale factor * @ param yFactor y scale factor * @ return The geometry scaled by the given x and y scale factors */ public static Geometry scale ( Geometry geom , double xFactor , double yFactor ) { } }
return scale ( geom , xFactor , yFactor , 1.0 ) ;
public class DebugKelp { /** * Debug with a table key * @ param out the result of the debug stream * @ param path the database source * @ param tableKey the specific table to display * @ throws IOException */ public void debug ( WriteStream out , Path path , byte [ ] tableKey ) throws IOException { } }
SegmentKelpBuilder builder = new SegmentKelpBuilder ( ) ; builder . path ( path ) ; builder . create ( false ) ; builder . services ( ServicesAmp . newManager ( ) . get ( ) ) ; SegmentServiceImpl segmentService = builder . build ( ) ; for ( SegmentExtent extent : segmentService . getSegmentExtents ( ) ) { debugSegment ( out , segmentService , extent , tableKey ) ; }
public class BatchGetItemResult { /** * A map of table name to a list of items . Each object in < code > Responses < / code > consists of a table name , along * with a map of attribute data consisting of the data type and attribute value . * @ return A map of table name to a list of items . Each object in < code > Responses < / code > consists of a table name , * along with a map of attribute data consisting of the data type and attribute value . */ public java . util . Map < String , java . util . List < java . util . Map < String , AttributeValue > > > getResponses ( ) { } }
return responses ;
public class FieldFilterBuilder { /** * Filter fields with default access . * @ return This builder to support method chaining . */ public FieldFilterBuilder isDefault ( ) { } }
add ( new NegationFieldFilter ( new ModifierFieldFilter ( Modifier . PUBLIC & Modifier . PROTECTED & Modifier . PRIVATE ) ) ) ; return this ;
public class DateTimeUtil { /** * returns a date time instance by a number , the conversion from the double to date is o the base of * the CFML rules . * @ param days double value to convert to a number * @ return DateTime Instance */ public DateTime toDateTime ( double days ) { } }
long utc = Math . round ( days * DAY_MILLIS ) ; utc -= CF_UNIX_OFFSET ; utc -= getLocalTimeZoneOffset ( utc ) ; return new DateTimeImpl ( utc , false ) ;
public class ClassDoc { /** * Converts inner class names . */ public String getQualifiedTypeNameForFile ( ) { } }
String typeName = getTypeNameForFile ( ) ; String qualifiedTypeName = mDoc . qualifiedTypeName ( ) ; int packageLength = qualifiedTypeName . length ( ) - typeName . length ( ) ; if ( packageLength <= 0 ) { return typeName ; } String packagePath = qualifiedTypeName . substring ( 0 , packageLength ) ; return packagePath + typeName ;
public class SQLMetadataSegmentManager { /** * Builds a VersionedIntervalTimeline containing used segments that overlap the intervals passed . */ private VersionedIntervalTimeline < String , DataSegment > buildVersionedIntervalTimeline ( final String dataSource , final Collection < Interval > intervals , final Handle handle ) { } }
return VersionedIntervalTimeline . forSegments ( intervals . stream ( ) . flatMap ( interval -> handle . createQuery ( StringUtils . format ( "SELECT payload FROM %1$s WHERE dataSource = :dataSource AND start < :end AND %2$send%2$s > :start AND used = true" , getSegmentsTable ( ) , connector . getQuoteString ( ) ) ) . setFetchSize ( connector . getStreamingFetchSize ( ) ) . bind ( "dataSource" , dataSource ) . bind ( "start" , interval . getStart ( ) . toString ( ) ) . bind ( "end" , interval . getEnd ( ) . toString ( ) ) . map ( ( i , resultSet , context ) -> { try { return jsonMapper . readValue ( resultSet . getBytes ( "payload" ) , DataSegment . class ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } ) . list ( ) . stream ( ) ) . iterator ( ) ) ;
public class LssClient { /** * Start your pulling live session by live session id . * @ param sessionId Live session id . * @ return the response */ public StartPullSessionResponse startPullSession ( String sessionId ) { } }
StartPullSessionRequest request = new StartPullSessionRequest ( ) . withSessionId ( sessionId ) ; return startPullSession ( request ) ;
public class Entity { /** * Deprecated */ public List < List < Term > > getReferences ( ) { } }
List < List < Term > > list = new ArrayList < List < Term > > ( ) ; for ( Span < Term > span : this . references ) { list . add ( span . getTargets ( ) ) ; } return list ;
public class NodeUtil { /** * Gets the boolean value of a node that represents a expression . This method * effectively emulates the < code > Boolean ( ) < / code > JavaScript cast function . * Note : unlike getPureBooleanValue this function does not return UNKNOWN * for expressions with side - effects . */ static TernaryValue getImpureBooleanValue ( Node n ) { } }
switch ( n . getToken ( ) ) { case ASSIGN : case COMMA : // For ASSIGN and COMMA the value is the value of the RHS . return getImpureBooleanValue ( n . getLastChild ( ) ) ; case NOT : TernaryValue value = getImpureBooleanValue ( n . getLastChild ( ) ) ; return value . not ( ) ; case AND : { TernaryValue lhs = getImpureBooleanValue ( n . getFirstChild ( ) ) ; TernaryValue rhs = getImpureBooleanValue ( n . getLastChild ( ) ) ; return lhs . and ( rhs ) ; } case OR : { TernaryValue lhs = getImpureBooleanValue ( n . getFirstChild ( ) ) ; TernaryValue rhs = getImpureBooleanValue ( n . getLastChild ( ) ) ; return lhs . or ( rhs ) ; } case HOOK : { TernaryValue trueValue = getImpureBooleanValue ( n . getSecondChild ( ) ) ; TernaryValue falseValue = getImpureBooleanValue ( n . getLastChild ( ) ) ; if ( trueValue . equals ( falseValue ) ) { return trueValue ; } else { return TernaryValue . UNKNOWN ; } } case NEW : case ARRAYLIT : case OBJECTLIT : // ignoring side - effects return TernaryValue . TRUE ; case VOID : return TernaryValue . FALSE ; default : return getPureBooleanValue ( n ) ; }
public class HpelFormatter { /** * Formats a RepositoryLogRecord using the formatter ' s locale * @ param record log record to be formatted * @ return the resulting formatted string output . */ public String formatRecord ( RepositoryLogRecord record ) { } }
if ( null == record ) { throw new IllegalArgumentException ( "Record cannot be null" ) ; } return formatRecord ( record , ( Locale ) null ) ;
public class TrafficPlugin { /** * Add local layer to the map . */ private void addLocalLayer ( ) { } }
LineLayer local = TrafficLayer . getLineLayer ( Local . BASE_LAYER_ID , Local . ZOOM_LEVEL , Local . FILTER , Local . FUNCTION_LINE_COLOR , Local . FUNCTION_LINE_WIDTH , Local . FUNCTION_LINE_OFFSET ) ; LineLayer localCase = TrafficLayer . getLineLayer ( Local . CASE_LAYER_ID , Local . ZOOM_LEVEL , Local . FILTER , Local . FUNCTION_LINE_COLOR_CASE , Local . FUNCTION_LINE_WIDTH_CASE , Local . FUNCTION_LINE_OFFSET , Local . FUNCTION_LINE_OPACITY_CASE ) ; addTrafficLayersToMap ( localCase , local , placeLayerBelow ( ) ) ;
public class Config { /** * Returns the exceptions which will be recovered from . By default these will include * { @ code SocketTimeoutException } , { @ code ConnectException } , { @ code AlreadyClosedException } , and * { @ code TimeoutException } , but this set can be mutated directly to change the recoverable * exceptions . */ public Set < Class < ? extends Exception > > getRecoverableExceptions ( ) { } }
return recoverableExceptions != null ? recoverableExceptions : parent != null ? parent . getRecoverableExceptions ( ) : Collections . < Class < ? extends Exception > > emptySet ( ) ;
public class StringBuilderWriter { /** * Appends the specified character sequence to this writer . * An invocation of this method of the form < tt > out . append ( csq ) < / tt > behaves in exactly the same way as the * invocation * < pre > * out . write ( csq . toString ( ) ) * < / pre > * Depending on the specification of < tt > toString < / tt > for the character sequence < tt > csq < / tt > , the entire sequence * may not be appended . For instance , invoking the < tt > toString < / tt > method of a character buffer will return a * subsequence whose content depends upon the buffer ' s position and limit . * @ param csq The character sequence to append . If < tt > csq < / tt > is < tt > null < / tt > , then the four characters * < tt > " null " < / tt > are appended to this writer . * @ return This writer * @ since 1.5 */ public StringBuilderWriter append ( CharSequence csq ) { } }
if ( csq == null ) write ( "null" ) ; else write ( csq . toString ( ) ) ; return this ;
public class CfnResponseSender { /** * Synchronously sends response to Cloudformation S3 endpoint * @ param event cloudformation event * @ param status SUCCESS or FAILURE * @ param context AWS lambda context * @ param reason custom reason to report to cfn ( optional ) * @ param data user data return object ( optional ) * @ param physicalResourceId custom physical resource ID ( optional ) * @ param noEcho Indicates whether to mask the output of the custom resource when retrieved by using the Fn : : GetAtt function ( optional ) * @ param < T > type of custom user data to send in response * @ return true if success , false if there was an error ( check logs ) */ public < T > boolean send ( @ Nonnull final CfnRequest < ? > event , @ Nonnull final Status status , @ Nonnull final Context context , @ Nullable final String reason , @ Nullable final T data , @ Nullable final String physicalResourceId , final boolean noEcho ) { } }
// Sanitize inputs checkNotNull ( event , "event" ) ; checkNotNull ( status , "status" ) ; checkNotNull ( context , "context" ) ; // Compose response final CfnResponse < T > response = new CfnResponse < > ( ) ; response . setData ( data ) ; response . setStatus ( status ) ; response . setLogicalResourceId ( event . getLogicalResourceId ( ) ) ; response . setPhysicalResourceId ( physicalResourceId == null ? context . getLogStreamName ( ) : physicalResourceId ) ; response . setReason ( reason == null ? "See the details in CloudWatch Log Stream: " + context . getLogStreamName ( ) : reason ) ; response . setRequestId ( event . getRequestId ( ) ) ; response . setStackId ( event . getStackId ( ) ) ; response . setNoEcho ( noEcho ) ; // Send response final HttpPut put = new HttpPut ( event . getResponseURL ( ) ) ; try { final String body = mapper . writeValueAsString ( response ) ; LOGGER . info ( body ) ; put . setEntity ( new StringEntity ( body ) ) ; put . setHeader ( "Content-Type" , "" ) ; httpClient . execute ( put , new BasicResponseHandler ( ) ) ; } catch ( final IOException e ) { LOGGER . error ( "Could not send response to " + event . getResponseURL ( ) , e ) ; return false ; } return true ;
public class StreamMetadataTasks { /** * Seal a stream . * @ param scope scope . * @ param stream stream name . * @ param contextOpt optional context * @ return update status . */ public CompletableFuture < UpdateStreamStatus . Status > sealStream ( String scope , String stream , OperationContext contextOpt ) { } }
final OperationContext context = contextOpt == null ? streamMetadataStore . createContext ( scope , stream ) : contextOpt ; final long requestId = requestTracker . getRequestIdFor ( "sealStream" , scope , stream ) ; // 1 . post event for seal . SealStreamEvent event = new SealStreamEvent ( scope , stream , requestId ) ; return addIndexAndSubmitTask ( event , // 2 . set state to sealing ( ) -> streamMetadataStore . getVersionedState ( scope , stream , context , executor ) . thenCompose ( state -> { if ( state . getObject ( ) . equals ( State . SEALED ) ) { return CompletableFuture . completedFuture ( state ) ; } else { return streamMetadataStore . updateVersionedState ( scope , stream , State . SEALING , state , context , executor ) ; } } ) ) // 3 . return with seal initiated . . thenCompose ( result -> { if ( result . getObject ( ) . equals ( State . SEALED ) || result . getObject ( ) . equals ( State . SEALING ) ) { return checkDone ( ( ) -> isSealed ( scope , stream , context ) ) . thenApply ( x -> UpdateStreamStatus . Status . SUCCESS ) ; } else { return CompletableFuture . completedFuture ( UpdateStreamStatus . Status . FAILURE ) ; } } ) . exceptionally ( ex -> { log . warn ( requestId , "Exception thrown in trying to notify sealed segments {}" , ex . getMessage ( ) ) ; return handleUpdateStreamError ( ex , requestId ) ; } ) ;
public class AWSSimpleSystemsManagementClient { /** * Shares a Systems Manager document publicly or privately . If you share a document privately , you must specify the * AWS user account IDs for those people who can use the document . If you share a document publicly , you must * specify < i > All < / i > as the account ID . * @ param modifyDocumentPermissionRequest * @ return Result of the ModifyDocumentPermission operation returned by the service . * @ throws InternalServerErrorException * An error occurred on the server side . * @ throws InvalidDocumentException * The specified document does not exist . * @ throws InvalidPermissionTypeException * The permission type is not supported . < i > Share < / i > is the only supported permission type . * @ throws DocumentPermissionLimitException * The document cannot be shared with more AWS user accounts . You can share a document with a maximum of 20 * accounts . You can publicly share up to five documents . If you need to increase this limit , contact AWS * Support . * @ throws DocumentLimitExceededException * You can have at most 200 active Systems Manager documents . * @ sample AWSSimpleSystemsManagement . ModifyDocumentPermission * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / ssm - 2014-11-06 / ModifyDocumentPermission " target = " _ top " > AWS * API Documentation < / a > */ @ Override public ModifyDocumentPermissionResult modifyDocumentPermission ( ModifyDocumentPermissionRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeModifyDocumentPermission ( request ) ;
public class FileStore { /** * Returns a { @ link File } for the given filename in the store . * @ throws FileNotFoundException if no file with the given filename exists * @ throws IOException if the given filename does not refer to a file */ public File getFile ( String fileName ) throws IOException { } }
validatePathname ( fileName ) ; String pathname = storageDir + separator + fileName ; File file = new File ( pathname ) ; if ( ! file . exists ( ) ) { throw new FileNotFoundException ( pathname ) ; } if ( ! file . isFile ( ) ) { throw new IOException ( '\'' + pathname + '\'' + " is not a file" ) ; } return file ;
public class ShrinkWrapFileSystemProvider {
    /**
     * {@inheritDoc}
     *
     * @see java.nio.file.spi.FileSystemProvider#getFileAttributeView(java.nio.file.Path, java.lang.Class,
     *      java.nio.file.LinkOption[])
     */
    @Override
    public <V extends FileAttributeView> V getFileAttributeView(final Path path, final Class<V> type,
        final LinkOption... options) {
        if (path == null) {
            throw new IllegalArgumentException("path must be specified");
        }
        if (type == null) {
            throw new IllegalArgumentException("type must be specified");
        }
        if (!type.isAssignableFrom(ShrinkWrapFileAttributeView.class)) {
            // Nope, we don't support this view (NIO contract: return null for unsupported views)
            return null;
        }
        if (!(path instanceof ShrinkWrapPath)) {
            throw new IllegalArgumentException("Only " + ShrinkWrapPath.class.getSimpleName() + " is supported");
        }
        // The assignability check above guarantees this cast cannot throw.
        return type.cast(new ShrinkWrapFileAttributeView(new ShrinkWrapFileAttributes((ShrinkWrapPath) path,
            getArchive(path))));
    }
}
public class Pagelet {
    /**
     * Translates a designer widget type name into its runtime widget type.
     * NOTE(review): this method also mutates {@code attrs} as a side effect
     * (rewriting labels, removing "type", remapping "source").
     *
     * @param type the declared widget type
     * @param attrs the widget attributes; may be modified in place
     * @return the translated widget type
     */
    private String translateType(String type, Map<String, String> attrs) {
        // Default: lower-cased original type; comparisons below are against the
        // original (case-sensitive) value.
        String translated = type.toLowerCase();
        if ("select".equals(type))
            translated = "radio";
        else if ("boolean".equals(type))
            translated = "checkbox";
        else if ("list".equals(type)) {
            translated = "picklist";
            // Fall back to the name attribute when no label is present.
            String lbl = attrs.get("label");
            if (lbl == null)
                lbl = attrs.get("name");
            if ("Output Documents".equals(lbl)) {
                attrs.put("label", "Documents");
                attrs.put("unselectedLabel", "Read-Only");
                attrs.put("selectedLabel", "Writable");
            }
        }
        else if ("hyperlink".equals(type)) {
            // A hyperlink without a url degrades to plain text.
            if (attrs.containsKey("url"))
                translated = "link";
            else
                translated = "text";
        }
        else if ("rule".equals(type)) {
            if ("EXPRESSION".equals(attrs.get("type"))) {
                translated = "expression";
            }
            else if ("TRANSFORM".equals(attrs.get("type"))) {
                translated = "edit";
                attrs.put("label", "Transform");
            }
            else {
                translated = "edit";
                attrs.put("label", "Script");
            }
            // "type" attribute was only a discriminator; drop it after use.
            attrs.remove("type");
        }
        else if ("Java".equals(attrs.get("name"))) {
            translated = "edit";
        }
        else {
            // asset-driven
            String source = attrs.get("source");
            if ("Process".equals(source)) {
                translated = "asset";
                attrs.put("source", "proc");
            }
            else if ("TaskTemplates".equals(source)) {
                translated = "asset";
                attrs.put("source", "task");
            }
            else if ("RuleSets".equals(source)) {
                // Convert the comma-separated format list into a list of file
                // extensions (without the leading dot).
                String format = attrs.get("type");
                if (format != null) {
                    String exts = "";
                    String[] formats = format.split(",");
                    for (int i = 0; i < formats.length; i++) {
                        String ext = Asset.getFileExtension(formats[i]);
                        if (ext != null) {
                            if (exts.length() > 0)
                                exts += ",";
                            exts += ext.substring(1);
                        }
                    }
                    if (exts.length() > 0) {
                        translated = "asset";
                        attrs.put("source", exts);
                    }
                }
            }
        }
        return translated;
    }
}
public class SubWriterHolderWriter {
    /**
     * Add the index comment (first sentence of the member's doc comment).
     *
     * @param member the member being documented
     * @param contentTree the content tree to which the comment will be added
     */
    protected void addIndexComment(Element member, Content contentTree) {
        // Delegate with the member's first-sentence doc trees as the tag list.
        List<? extends DocTree> tags = utils.getFirstSentenceTrees(member);
        addIndexComment(member, tags, contentTree);
    }
}
public class WebSocketAddon {
    /**
     * {@inheritDoc}
     *
     * Lazily reads the configuration on first call and caches the result.
     * Enabled unless the configuration property is explicitly "false".
     * NOTE(review): lazy init is not synchronized — presumably called only
     * from a single thread during startup; confirm against callers.
     */
    public boolean isEnabled(Application application) {
        if (enabled == null) {
            enabled = !"false".equals(application.getSrcProperties().get(WEB_SOCKET_ENABLED_CONF));
            if (!enabled) {
                logger.info(Messages.get("web.socket.info.disabled"));
            }
        }
        return enabled;
    }
}
public class AmazonSimpleEmailServiceClient {
    /**
     * Deletes an existing custom verification email template.
     * You can execute this operation no more than once per second.
     *
     * @param request represents a request to delete an existing custom verification email template
     * @return Result of the DeleteCustomVerificationEmailTemplate operation returned by the service.
     * @sample AmazonSimpleEmailService.DeleteCustomVerificationEmailTemplate
     */
    @Override
    public DeleteCustomVerificationEmailTemplateResult deleteCustomVerificationEmailTemplate(
        DeleteCustomVerificationEmailTemplateRequest request) {
        // Standard generated-client dispatch: run request handlers/validation,
        // then delegate to the generated execute method.
        request = beforeClientExecution(request);
        return executeDeleteCustomVerificationEmailTemplate(request);
    }
}
public class JClassWrapper { /** * Returns the annotation . */ @ Override public JAnnotation getAnnotation ( String name ) { } }
Annotation [ ] ann = _class . getAnnotations ( ) ; for ( int i = 0 ; i < ann . length ; i ++ ) { if ( ann [ i ] . annotationType ( ) . getName ( ) . equals ( name ) ) return new JAnnotationWrapper ( ann [ i ] ) ; } return null ;
public class OutputStreamLogSink {
    /**
     * Sets the log output filename. A null or blank filename means "no file
     * output". If the sink is already started and the target file changes,
     * the stream is flagged for reopening.
     */
    public synchronized void setFilename(String filename) {
        // Normalize: treat a blank name as null.
        if (filename != null) {
            filename = filename.trim();
            if (filename.length() == 0)
                filename = null;
        }
        // Running and switching from a file to no file: drop the stream now.
        if (isStarted() && _filename != null && filename == null)
            _out = null;
        // Reopen later only when running AND the effective target changed.
        _reopen = isStarted() && ((_filename == null && filename != null) || (_filename != null && !_filename.equals(filename)));
        _filename = filename;
        // Not yet started with a file target: clear any stale stream so it is
        // (re)created on start.
        if (!isStarted() && _filename != null)
            _out = null;
    }
}
public class DatePicker {
    /**
     * initComponents, This initializes the components of the JFormDesigner panel. This function is
     * automatically generated by JFormDesigner from the JFD form design file, and should not be
     * modified by hand. This function can be modified, if needed, by using JFormDesigner.
     *
     * Implementation notes regarding JTextField: This class uses a JTextField instead of a
     * JFormattedTextField as its text input component, because a date-formatted JFormattedTextField
     * stores its value as a java.util.Date object, which is not ideal for this project. This class
     * displays and stores "local dates" only, and is designed to function independently of any time
     * zone differences. To gain the validation functionality of a JFormattedTextField, this class
     * implements a similar "commit" and "revert" capability.
     */
    private void initComponents() {
        // Generated code below — keep byte-identical; edit only via JFormDesigner.
        // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents
        dateTextField = new JTextField();
        toggleCalendarButton = new JButton();

        //======== this ========
        setLayout(new FormLayout("pref:grow, [3px,pref], [26px,pref]", "fill:pref:grow"));

        //---- dateTextField ----
        dateTextField.setMargin(new Insets(1, 3, 2, 2));
        dateTextField.setBorder(new CompoundBorder(new MatteBorder(1, 1, 1, 1, new Color(122, 138, 153)),
            new EmptyBorder(1, 3, 2, 2)));
        dateTextField.addFocusListener(new FocusAdapter() {
            @Override
            public void focusLost(FocusEvent e) {
                setTextFieldToValidStateIfNeeded();
            }
        });
        add(dateTextField, CC.xy(1, 1));

        //---- toggleCalendarButton ----
        toggleCalendarButton.setText("...");
        toggleCalendarButton.setFocusPainted(false);
        toggleCalendarButton.setFocusable(false);
        toggleCalendarButton.addMouseListener(new MouseAdapter() {
            @Override
            public void mousePressed(MouseEvent e) {
                zEventToggleCalendarButtonMousePressed(e);
            }
        });
        add(toggleCalendarButton, CC.xy(3, 1));
        // JFormDesigner - End of component initialization //GEN-END:initComponents
    }
}
public class GeoPackageExtensions {
    /**
     * Delete the extensions registered for the table.
     *
     * @param geoPackage the GeoPackage to delete from
     * @param table the table name
     * @throws GeoPackageException if the underlying SQL delete fails
     */
    private static void delete(GeoPackageCore geoPackage, String table) {
        ExtensionsDao extensionsDao = geoPackage.getExtensionsDao();
        try {
            // The extensions table is optional; nothing to delete if it is absent.
            if (extensionsDao.isTableExists()) {
                extensionsDao.deleteByTableName(table);
            }
        } catch (SQLException e) {
            // Wrap the checked SQLException with GeoPackage/table context.
            throw new GeoPackageException("Failed to delete Table extensions. GeoPackage: "
                + geoPackage.getName() + ", Table: " + table, e);
        }
    }
}
public class ImageAttribute {
    /**
     * Sets the launch permissions.
     *
     * @param launchPermissions the launch permissions; null clears the field
     */
    public void setLaunchPermissions(java.util.Collection<LaunchPermission> launchPermissions) {
        if (launchPermissions == null) {
            this.launchPermissions = null;
            return;
        }
        // Defensive copy into the SDK's internal list type.
        this.launchPermissions = new com.amazonaws.internal.SdkInternalList<LaunchPermission>(launchPermissions);
    }
}
public class AstaTextFileReader {
    /**
     * Reads permanent and consumable resource rows, sorts each set into its
     * expected order, and hands them to the reader for processing.
     *
     * @throws SQLException if a table cannot be read
     */
    private void processResources() throws SQLException {
        List<Row> permanentRows = getTable("PERMANENT_RESOURCE");
        List<Row> consumableRows = getTable("CONSUMABLE_RESOURCE");
        // Each resource type has its own ordering requirement.
        Collections.sort(permanentRows, PERMANENT_RESOURCE_COMPARATOR);
        Collections.sort(consumableRows, CONSUMABLE_RESOURCE_COMPARATOR);
        m_reader.processResources(permanentRows, consumableRows);
    }
}
public class JsonToModelConverter {
    /**
     * Converts the given json string to a {@link TableColumnOrdering}.
     * NOTE(review): parsing is positional, not a real JSON parse — it assumes
     * each column entry has exactly "identifier:<id>" followed by
     * "position:<index>"; confirm against the producer of this json.
     *
     * @param tableIdentifier an application unique table identifier
     * @param json string to convert
     * @return the converted {@link TableColumnOrdering}
     */
    public TableColumnOrdering convertTableColumnOrdering(final String tableIdentifier, final String json) {
        final String[] split = this.splitColumns(json);
        final List<Ordering> orderings = new ArrayList<>();
        for (String column : split) {
            final String[] attribute = this.splitAttributes(column);
            // attribute[0] is "<key>:<identifier>", attribute[1] is "<key>:<position>"
            final String identifier = attribute[0].split(":")[1];
            final String position = attribute[1].split(":")[1];
            orderings.add(new Ordering(identifier, Integer.valueOf(position)));
        }
        // Sort by the declared position index so the identifier list comes out
        // in display order.
        Collections.sort(orderings, new Comparator<Ordering>() {
            @Override
            public int compare(Ordering o1, Ordering o2) {
                return o1.getIndex().compareTo(o2.getIndex());
            }
        });
        final List<String> columnIdentifier = new ArrayList<>();
        for (Ordering ordering : orderings) {
            columnIdentifier.add(ordering.getIdentifier());
        }
        return new TableColumnOrdering(tableIdentifier, columnIdentifier);
    }
}
public class HtmlSequencePlotting {
    /**
     * Create a HTML file with plots for the given sequence and write it to a file (UTF-8).
     *
     * @param title Title of the page
     * @param schema Schema for the data
     * @param sequence Sequence to plot
     * @param output Destination file
     */
    public static void createHtmlSequencePlotFile(String title, Schema schema, List<List<Writable>> sequence,
        File output) throws Exception {
        // Build the HTML in memory, then persist it with an explicit charset.
        String s = createHtmlSequencePlots(title, schema, sequence);
        FileUtils.writeStringToFile(output, s, StandardCharsets.UTF_8);
    }
}
public class AdminListGroupsForUserRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol marshaller.
     *
     * @param adminListGroupsForUserRequest the request to marshall; must not be null
     * @param protocolMarshaller the marshaller receiving each bound field
     */
    public void marshall(AdminListGroupsForUserRequest adminListGroupsForUserRequest,
        ProtocolMarshaller protocolMarshaller) {
        if (adminListGroupsForUserRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // One marshall call per modeled member, using the generated bindings.
            protocolMarshaller.marshall(adminListGroupsForUserRequest.getUsername(), USERNAME_BINDING);
            protocolMarshaller.marshall(adminListGroupsForUserRequest.getUserPoolId(), USERPOOLID_BINDING);
            protocolMarshaller.marshall(adminListGroupsForUserRequest.getLimit(), LIMIT_BINDING);
            protocolMarshaller.marshall(adminListGroupsForUserRequest.getNextToken(), NEXTTOKEN_BINDING);
        } catch (Exception e) {
            // Normalize any marshalling failure into the SDK's client exception.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class FessMessages {
    /**
     * Add the created action message for the key 'errors.footer' with parameters.
     * <pre>
     * message: &lt;/ul&gt;
     * </pre>
     *
     * @param property The property name for the message. (NotNull)
     * @return this. (NotNull)
     */
    public FessMessages addErrorsFooter(String property) {
        assertPropertyNotNull(property);
        add(property, new UserMessage(ERRORS_FOOTER));
        return this;
    }
}
public class Server {
    /**
     * Performs shutdown tasks for the modules and the server: closes and
     * destroys the module context, shuts the server down, and removes this
     * instance from the server instances map. Any module-context failure is
     * recorded and re-thrown only after server shutdown completes, so the
     * server always gets its chance to shut down cleanly.
     *
     * @throws ServerShutdownException if a severe shutdown-related error occurred
     * @throws ModuleShutdownException if a severe module shutdown-related error occurred
     */
    public final void shutdown(Context context) throws ServerShutdownException, ModuleShutdownException,
        AuthzException {
        logger.info("Shutting down server");
        Throwable mse = null;
        try {
            m_moduleContext.close();
            m_moduleContext.destroy();
        } catch (Throwable e) {
            // Record but defer: server shutdown must still run.
            logger.error("Shutdown error: " + e.toString(), e);
            mse = e;
        }
        shutdownServer();
        logger.info("Server shutdown complete");
        // Deregister this instance so the VM can create a fresh one later.
        s_instances.remove(getHomeDir());
        if (mse != null) {
            throw new ServerShutdownException(mse.toString());
        }
    }
}
public class LongAdder { /** * Returns the current sum . The returned value is < em > NOT < / em > an * atomic snapshot ; invocation in the absence of concurrent * updates returns an accurate result , but concurrent updates that * occur while the sum is being calculated might not be * incorporated . * @ return the sum */ public long sum ( ) { } }
long sum = base ; Cell [ ] as = cells ; if ( as != null ) { int n = as . length ; for ( int i = 0 ; i < n ; ++ i ) { Cell a = as [ i ] ; if ( a != null ) sum += a . value ; } } return sum ;
public class CommerceVirtualOrderItemServiceBaseImpl {
    /**
     * Returns the commerce virtual order item local service.
     *
     * @return the commerce virtual order item local service
     */
    public com.liferay.commerce.product.type.virtual.order.service.CommerceVirtualOrderItemLocalService
        getCommerceVirtualOrderItemLocalService() {
        // Simple accessor for the injected local service reference.
        return commerceVirtualOrderItemLocalService;
    }
}
public class CmsUndoRedoHandler {
    /**
     * Initializes the handler to be used for the given entity, discarding any
     * previous undo/redo history and recording the entity's current state as
     * the baseline change.<p>
     *
     * @param entity the edited entity
     * @param editor the editor instance
     * @param rootHandler the root attribute handler
     */
    public void initialize(CmsEntity entity, CmsEditorBase editor, CmsRootHandler rootHandler) {
        // Reset history stacks before adopting the new entity.
        m_undo.clear();
        m_redo.clear();
        m_entity = entity;
        m_editor = editor;
        m_rootHandler = rootHandler;
        // Clone so later edits to the live entity don't mutate the baseline.
        m_current = new Change(m_entity.cloneEntity(), null, null, 0, null);
        fireStateChange();
    }
}
public class Aggregations {
    /**
     * Returns an aggregation for counting all supplied values.<br/>
     * This aggregation is similar to: <pre>SELECT COUNT(*) FROM x</pre>
     *
     * @param <Key> the input key type
     * @param <Value> the supplied value type
     * @return the count of all supplied elements
     */
    public static <Key, Value> Aggregation<Key, Value, Long> count() {
        // Wrap the internal implementation in the public adapter type.
        return new AggregationAdapter(new CountAggregation<Key, Value>());
    }
}
public class GreenPepperServerServiceImpl {
    /**
     * {@inheritDoc}
     *
     * Opens a session, resolves the specification's repository, and returns
     * all references to the specification. The session is always closed.
     */
    public List<Reference> getSpecificationReferences(Specification specification) throws GreenPepperServerException {
        try {
            sessionService.startSession();
            // Resolving the repository validates it exists before querying references.
            Repository repository = loadRepository(specification.getRepository().getUid());
            List<Reference> references = documentDao.getAllReferences(specification);
            log.debug("Retrieved Specification " + specification.getName() + " Test Cases number: "
                + references.size());
            return references;
        } catch (Exception ex) {
            // Translate any failure into the service's exception with context code.
            throw handleException(RETRIEVE_REFERENCES, ex);
        } finally {
            sessionService.closeSession();
        }
    }
}
public class PipelineApi {
    /**
     * Create a pipeline schedule variable.
     *
     * <pre><code>GitLab Endpoint: POST /projects/:id/pipeline_schedules/:pipeline_schedule_id/variables</code></pre>
     *
     * @param projectIdOrPath the project in the form of an Integer (ID), String (path), or Project instance, required
     * @param pipelineScheduleId the pipelineSchedule ID
     * @param key the key of a variable; must have no more than 255 characters; only A-Z, a-z, 0-9, and _ are allowed
     * @param value the value for the variable
     * @return a Variable instance for the newly created pipeline schedule variable
     * @throws GitLabApiException if any exception occurs during execution
     */
    public Variable createPipelineScheduleVariable(Object projectIdOrPath, Integer pipelineScheduleId, String key,
        String value) throws GitLabApiException {
        // Both key and value are mandatory form parameters.
        GitLabApiForm formData = new GitLabApiForm()
            .withParam("key", key, true)
            .withParam("value", value, true);
        Response response = post(Response.Status.CREATED, formData, "projects",
            getProjectIdOrPath(projectIdOrPath), "pipeline_schedules", pipelineScheduleId, "variables");
        return (response.readEntity(Variable.class));
    }
}
public class ParsedValues {
    /**
     * Called in context of an erroneous or-block: discards any partially
     * parsed state so the instance can be reused.
     */
    void reset() {
        if (this.keys == null) {
            // No key table yet: poison the scalar fields with a sentinel so
            // stale values cannot be mistaken for parsed results.
            this.len = Integer.MIN_VALUE;
            this.mask = Integer.MIN_VALUE;
            this.threshold = Integer.MIN_VALUE;
            this.count = Integer.MIN_VALUE;
            for (int i = 0; i < 3; i++) {
                this.ints[i] = Integer.MIN_VALUE;
            }
            this.map = null;
        } else {
            // Key table exists: replace it with a fresh, same-sized array.
            this.keys = new Object[this.keys.length];
        }
        // In both paths the element count ends at zero (overwrites the
        // sentinel assigned above).
        this.count = 0;
    }
}
public class CmsJspActionElement {
    /**
     * Include a named sub-element with parameters from the OpenCms VFS, same as
     * using the <code>&lt;cms:include file="***" element="***" /&gt;</code> tag
     * with parameters in the tag body.<p>
     *
     * @param target the target URI of the file in the OpenCms VFS (can be relative or absolute)
     * @param element the element (template selector) to display from the target
     * @param editable flag to indicate if direct edit should be enabled for the element
     * @param parameterMap a map of the request parameters
     * @throws JspException in case there were problems including the target
     * @see org.opencms.jsp.CmsJspTagInclude
     */
    public void include(String target, String element, boolean editable, Map<String, ?> parameterMap)
        throws JspException {
        // Delegate with cacheable=true (the default for this convenience overload).
        include(target, element, editable, true, parameterMap);
    }
}
public class AbstractBeanDefinition {
    /**
     * Adds a new {@link ExecutableMethod}, keyed by method name plus argument
     * types (so overloads get distinct entries).
     *
     * @param executableMethod The method
     * @return The bean definition
     */
    @SuppressWarnings("unused")
    @Internal
    @UsedByGeneratedCode
    protected final AbstractBeanDefinition<T> addExecutableMethod(ExecutableMethod<T, ?> executableMethod) {
        MethodKey key = new MethodKey(executableMethod.getMethodName(), executableMethod.getArgumentTypes());
        executableMethodMap.put(key, executableMethod);
        return this;
    }
}
public class SearchQueryBuilder { /** * / / / / / TODO Create factory methods */ public static AndQuery CreateAndQuery ( Query first , Query second ) { } }
AndQuery and = new AndQuery ( ) ; and . subqueries . add ( first ) ; and . subqueries . add ( second ) ; return and ;
public class ExcelExecutor {
    /**
     * Initialization: registers the default DataWriter for each supported
     * value type. Lookup elsewhere presumably falls back to supertypes
     * (e.g. Enum, Number) — TODO confirm against the writer-resolution code.
     */
    private void init() {
        writers.put(Boolean.class, new BooleanDataWriter());
        writers.put(Calendar.class, new CalendarDataWriter());
        writers.put(Date.class, new DateDataWriter());
        writers.put(Enum.class, new EnumDataWriter());
        writers.put(Number.class, new NumberDataWriter());
        writers.put(String.class, new StringDataWriter());
    }
}
public class CollectionUtil { /** * 矩阵转置 ( 矩阵必须每行的列数都一样 ) ( 行变列 , 列变行 ) * @ param datas 要转置的数据 * @ param < T > 数据类型 * @ return 转置后的矩阵 , 例如 [ [ 01,02 ] [ 11,12 ] ] 会变为 [ [ 01,11 ] , [ 02,12 ] ] */ public static < T > List < List < T > > matrixTransform ( List < List < T > > datas ) { } }
if ( datas . size ( ) == 0 || datas . get ( 0 ) . size ( ) == 0 ) { return datas ; } int column = datas . size ( ) ; int row = datas . get ( 0 ) . size ( ) ; List < List < T > > newData = new ArrayList < > ( row ) ; for ( int i = 0 ; i < row ; i ++ ) { List < T > list = new ArrayList < > ( column ) ; for ( int j = 0 ; j < column ; j ++ ) { list . add ( datas . get ( j ) . get ( i ) ) ; } newData . add ( list ) ; } return newData ;
public class PatternBox {
    /**
     * Constructs a pattern where first and last small molecules are substrates to the same
     * biochemical reaction.
     *
     * @param blacklist a skip-list of ubiquitous molecules
     * @return the pattern
     */
    public static Pattern reactsWith(Blacklist blacklist) {
        Pattern p = new Pattern(SmallMoleculeReference.class, "SMR1");
        // SMR1 -> its non-generic physical entity SPE1, possibly inside a complex PE1
        p.add(erToPE(), "SMR1", "SPE1");
        p.add(notGeneric(), "SPE1");
        p.add(linkToComplex(blacklist), "SPE1", "PE1");
        // PE1 participates as INPUT in a biochemical reaction Conv
        p.add(new ParticipatesInConv(RelType.INPUT, blacklist), "PE1", "Conv");
        p.add(type(BiochemicalReaction.class), "Conv");
        p.add(new InterToPartER(InterToPartER.Direction.ONESIDERS), "Conv", "SMR1");
        // Second participant PE2 on the same (input) side of the same reaction
        p.add(new ConversionSide(ConversionSide.Type.SAME_SIDE, blacklist, RelType.INPUT), "PE1", "Conv", "PE2");
        p.add(type(SmallMolecule.class), "PE2");
        p.add(linkToSpecific(), "PE2", "SPE2");
        p.add(notGeneric(), "SPE2");
        // The two participant chains must not intersect (distinct participants)
        p.add(new PEChainsIntersect(false), "SPE1", "PE1", "SPE2", "PE2");
        p.add(peToER(), "SPE2", "SMR2");
        // And they must map to two different small molecule references
        p.add(equal(false), "SMR1", "SMR2");
        p.add(new InterToPartER(InterToPartER.Direction.ONESIDERS), "Conv", "SMR2");
        return p;
    }
}
public class MyStringUtils { /** * Return tabular data * @ param labels Labels array * @ param data Data bidimensional array * @ param padding Total space between fields * @ return String */ public static String getTabularData ( String [ ] labels , String [ ] [ ] data , int padding ) { } }
int [ ] size = new int [ labels . length ] ; for ( int i = 0 ; i < labels . length ; i ++ ) { size [ i ] = labels [ i ] . length ( ) + padding ; } for ( String [ ] row : data ) { for ( int i = 0 ; i < labels . length ; i ++ ) { if ( row [ i ] . length ( ) >= size [ i ] ) { size [ i ] = row [ i ] . length ( ) + padding ; } } } StringBuffer tabularData = new StringBuffer ( ) ; for ( int i = 0 ; i < labels . length ; i ++ ) { tabularData . append ( labels [ i ] ) ; tabularData . append ( fill ( ' ' , size [ i ] - labels [ i ] . length ( ) ) ) ; } tabularData . append ( "\n" ) ; for ( int i = 0 ; i < labels . length ; i ++ ) { tabularData . append ( fill ( '=' , size [ i ] - 1 ) ) . append ( " " ) ; } tabularData . append ( "\n" ) ; for ( String [ ] row : data ) { for ( int i = 0 ; i < labels . length ; i ++ ) { tabularData . append ( row [ i ] ) ; tabularData . append ( fill ( ' ' , size [ i ] - row [ i ] . length ( ) ) ) ; } tabularData . append ( "\n" ) ; } return tabularData . toString ( ) ;
public class AbstractPrintQuery {
    /**
     * Executes exactly one complete statement and populates the result in the
     * cached result. Consults the SQL query cache first; on a miss, runs the
     * statement and (if caching is enabled) stores the rows. Each row is fed
     * to every OneSelect, and the instance list is rebuilt or re-sorted
     * depending on {@code enforceSorted}.
     *
     * @param _complStmt complete statement instance to execute
     * @param _oneSelects list of OneSelects the statement is executed for
     * @return true if the query returned at least one row, else false
     * @throws EFapsException on error
     */
    @SuppressWarnings("unchecked")
    protected boolean executeOneCompleteStmt(final String _complStmt, final List<OneSelect> _oneSelects)
        throws EFapsException {
        boolean ret = false;
        ConnectionResource con = null;
        try {
            AbstractPrintQuery.LOG.debug("Executing SQL: {}", _complStmt);
            List<Object[]> rows = null;
            boolean cached = false;
            // 1. Try the SQL cache keyed by (query key, statement text).
            if (isCacheEnabled()) {
                final QueryKey querykey = QueryKey.get(getKey(), _complStmt);
                final Cache<QueryKey, Object> cache = QueryCache.getSqlCache();
                if (cache.containsKey(querykey)) {
                    final Object object = cache.get(querykey);
                    if (object instanceof List) {
                        rows = (List<Object[]>) object;
                    }
                    // Mark as cached even if the entry wasn't a List —
                    // presumably to avoid re-querying a poisoned key; confirm.
                    cached = true;
                }
            }
            // 2. Cache miss: execute against the database and optionally cache.
            if (!cached) {
                con = Context.getThreadContext().getConnectionResource();
                final Statement stmt = con.createStatement();
                final ResultSet rs = stmt.executeQuery(_complStmt);
                final ArrayListHandler handler = new ArrayListHandler(Context.getDbType().getRowProcessor());
                rows = handler.handle(rs);
                rs.close();
                stmt.close();
                if (isCacheEnabled()) {
                    QueryCache.put((ICacheDefinition) this, QueryKey.get(getKey(), _complStmt), rows);
                }
            }
            // 3. Feed every row to every OneSelect.
            for (final Object[] row : rows) {
                for (final OneSelect onesel : _oneSelects) {
                    onesel.addObject(row);
                }
                ret = true;
            }
            // 4. Rebuild instances; column 0 is the id, and when the main
            // table has a type column, that column (typeColumnIndex - 1)
            // selects the concrete type.
            final List<Instance> tmpList = new ArrayList<>();
            final Map<Instance, Integer> sortMap = new HashMap<>();
            int i = 0;
            for (final Object[] row : rows) {
                final Instance instance;
                if (getMainType().getMainTable().getSqlColType() != null) {
                    instance = Instance.get(Type.get((Long) row[this.typeColumnIndex - 1]), (Long) row[0]);
                } else {
                    instance = Instance.get(getMainType(), (Long) row[0]);
                }
                sortMap.put(instance, i);
                tmpList.add(instance);
                i++;
            }
            // 5. Either re-sort the OneSelects to the existing instance order,
            // or replace the instance list with the freshly read order.
            if (this.enforceSorted) {
                for (final OneSelect onesel : _oneSelects) {
                    onesel.sortByInstanceList(getInstanceList(), sortMap);
                }
            } else {
                getInstanceList().clear();
                getInstanceList().addAll(tmpList);
            }
        } catch (final SQLException e) {
            throw new EFapsException(InstanceQuery.class, "executeOneCompleteStmt", e);
        }
        return ret;
    }
}
public class TopologyContext {
    /**
     * Convenience method for registering a ReducedMetric built from the given
     * reducer.
     *
     * @param name metric name
     * @param reducer the reducer to wrap
     * @param timeBucketSizeInSecs reporting bucket size in seconds
     * @return the registered ReducedMetric
     */
    @SuppressWarnings("rawtypes")
    public ReducedMetric registerMetric(String name, IReducer reducer, int timeBucketSizeInSecs) {
        return registerMetric(name, new ReducedMetric(reducer), timeBucketSizeInSecs);
    }
}
public class AbstractBaseDestinationHandler {
    /**
     * Applies an updated destination definition, reacting to changes in
     * receiveExclusive (close consumers, notify AOH/RCD) and receiveAllowed
     * (notify consumers and targetting aliases). Statement order here is
     * deliberate — see inline comments.
     *
     * @see com.ibm.ws.sib.processor.impl.interfaces.DestinationHandler#updateDefinition(com.ibm.ws.sib.admin.DestinationDefinition)
     */
    public void updateDefinition(BaseDestinationDefinition destinationDefinition) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "updateDefinition", new Object[] { destinationDefinition });

        DestinationDefinition oldDefinition = definition;
        DestinationDefinition newDefinition = (DestinationDefinition) destinationDefinition;

        // Reset values to force a reload on first reference
        _maxFailedDeliveries = -1;
        _blockedRetryInterval = -1;

        if (!isPubSub() && ((oldDefinition == null)
            || (oldDefinition.isReceiveExclusive() != newDefinition.isReceiveExclusive()))) {
            // notify the AnycastOutputHandler, *before* throwing off all consumers.
            // If this is not done, AnycastOutputHandler could
            // create a new consumer before knowing the change in the receiveExclusive value which is not good!
            notifyAOHReceiveExclusiveChange(newDefinition.isReceiveExclusive());

            // throw off all consumers attached through this destination
            ConsumerDispatcher cm = (ConsumerDispatcher) getLocalPtoPConsumerManager();
            if (cm != null) {
                cm.closeAllConsumersForReceiveExclusive();
            }

            // notify the RME RemoteConsumerDispatchers that the receiveExclusive value has changed
            notifyRCDReceiveExclusiveChange(newDefinition.isReceiveExclusive());
        }

        // definition must be updated before notifying consumer dispatcher(s) of update
        definition = (DestinationDefinition) destinationDefinition;

        if ((oldDefinition == null)
            || (oldDefinition.isReceiveAllowed() != newDefinition.isReceiveAllowed())) {
            _isReceiveAllowed = newDefinition.isReceiveAllowed();
            notifyReceiveAllowed(this);
            // Tell any aliases that inherit the receive allowed value from this
            // destination that the value has changed.
            notifyTargettingAliasesReceiveAllowed();
        }

        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "updateDefinition");
    }
}
public class CommunicationRegistry {
    /**
     * Unregisters an action: removes every event-type entry whose registered
     * value is this exact action instance (identity comparison).
     *
     * @param action the action instance to remove; ignored when null
     */
    public static void unregist(Object action) {
        if (logger.isInfoEnabled()) {
            logger.info(" Un Regist Action : " + action);
        }
        if (action != null) {
            // Iterator-based removal so entries can be dropped while iterating.
            for (Iterator<Map.Entry<EventType, Object>> iter = table.entrySet().iterator(); iter.hasNext();) {
                Map.Entry<EventType, Object> entry = iter.next();
                // Identity (==), not equals: only the same object is unregistered.
                if (action == entry.getValue()) {
                    if (logger.isInfoEnabled()) {
                        logger.info(" Find " + entry.getKey() + " For : " + action);
                    }
                    iter.remove();
                }
            }
        }
    }
}
public class RelationalOperations { /** * Returns true if polygon _ a is disjoint from point _ b . */ private static boolean polygonDisjointPoint_ ( Polygon polygon_a , Point point_b , double tolerance , ProgressTracker progress_tracker ) { } }
PolygonUtils . PiPResult result = PolygonUtils . isPointInPolygon2D ( polygon_a , point_b , tolerance ) ; if ( result == PolygonUtils . PiPResult . PiPOutside ) return true ; return false ;
public class ESFilterBuilder { /** * Populate filter clause . * @ param conditionalExpression * the conditional expression * @ return the filter clause */ private FilterClause populateFilterClause ( ComparisonExpression conditionalExpression ) { } }
String property = ( ( StateFieldPathExpression ) conditionalExpression . getLeftExpression ( ) ) . getPath ( 1 ) ; String condition = conditionalExpression . getComparisonOperator ( ) ; Expression rightExpression = conditionalExpression . getRightExpression ( ) ; Object value = ( rightExpression instanceof InputParameter ) ? kunderaQuery . getParametersMap ( ) . get ( ( rightExpression ) . toParsedText ( ) ) : rightExpression . toParsedText ( ) ; return ( condition != null && property != null ) ? kunderaQuery . new FilterClause ( property , condition , value , property ) : null ;
public class CmsShowResourceTable { /** * Gets the permission string . < p > * @ param cms CmsObject * @ param res Resource to get permission for * @ param type dialog type * @ return permission string for given resource * @ throws CmsException thrown if ACE can not be read */ private String getPermissionString ( CmsObject cms , CmsResource res , DialogType type ) throws CmsException { } }
if ( type . equals ( DialogType . User ) ) { cms . getPermissions ( res . getRootPath ( ) , m_principal . getName ( ) ) . getPermissionString ( ) ; } else if ( type . equals ( DialogType . Group ) ) { Iterator < CmsAccessControlEntry > itAces = cms . getAccessControlEntries ( res . getRootPath ( ) , false ) . iterator ( ) ; while ( itAces . hasNext ( ) ) { CmsAccessControlEntry ace = itAces . next ( ) ; if ( ace . getPrincipal ( ) . equals ( m_principal . getId ( ) ) ) { return ace . getPermissions ( ) . getPermissionString ( ) ; } } } return "" ;
public class CmsDialog { /** * Gets a formatted file state string . < p > * @ return formatted state string * @ throws CmsException if something goes wrong */ public String getState ( ) throws CmsException { } }
if ( CmsStringUtil . isNotEmpty ( getParamResource ( ) ) ) { CmsResource file = getCms ( ) . readResource ( getParamResource ( ) , CmsResourceFilter . ALL ) ; if ( getCms ( ) . isInsideCurrentProject ( getParamResource ( ) ) ) { return key ( Messages . getStateKey ( file . getState ( ) ) ) ; } else { return key ( Messages . GUI_EXPLORER_STATENIP_0 ) ; } } return "+++ resource parameter not found +++" ;
public class FlowPath { /** * This method adds Frame to tracking * PLEASE NOTE : Only works for first call , subsequent calls are no - op * @ param frame _ name */ public void registerFrame ( @ NonNull String frame_name ) { } }
if ( ! frames . containsKey ( frame_name ) ) frames . put ( frame_name , new FrameState ( frame_name ) ) ;
public class Log {
    /**
     * Commits entries up to the given index to the log.
     *
     * @param index The index up to which to commit entries.
     * @return The log.
     * @throws IllegalStateException If the log is not open.
     */
    public Log commit(long index) {
        assertIsOpen();
        // An index of 0 (or negative) is treated as "nothing to commit".
        if (index > 0) {
            assertValidIndex(index);
            // Advance the commit pointer across the segment chain.
            segments.commitIndex(index);
            // Only force data to disk when the storage policy demands it.
            if (storage.flushOnCommit()) {
                segments.currentSegment().flush();
            }
        }
        return this;
    }
}
public class HttpOSCReadAhead {
    /**
     * If an error occurs, such as the server side closing down the socket, then
     * this method will be called. Depending on what state the connection is in,
     * this error is either sent to the application channel immediately or
     * delayed until the actual read for the response would start, when it can
     * then hand the error off to the application channel.
     *
     * @param vc  virtual connection the error occurred on
     * @param rsc read request context reporting the error (unused here)
     * @param ioe the error to deliver
     */
    public void error(VirtualConnection vc, TCPReadRequestContext rsc, IOException ioe) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "error() called: vc=" + vc + " ioe=" + ioe);
        }
        HttpOutboundServiceContextImpl osc = (HttpOutboundServiceContextImpl) vc.getStateMap().get(CallbackIDs.CALLBACK_HTTPOSC);
        // A canceled read may still surface an error; swallow it silently.
        if (osc.markReadCancelSuccess()) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                Tr.debug(tc, "Ignoring error callback on canceled read");
            }
            return;
        }
        int state;
        // query/set the states in one block to avoid timing windows
        synchronized (osc.stateSyncObject) {
            state = osc.getReadState();
            osc.setCallbackState(HttpOutboundServiceContextImpl.CALLBACK_STATE_ERROR, ioe);
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "Read-ahead state: " + state);
        }
        // Dispatch on the read state captured under the lock above.
        switch (state) {
            case (HttpOutboundServiceContextImpl.READ_STATE_IDLE):
                // new connection hasn't started yet, notify app channel now
                osc.getAppReadCallback().error(vc, ioe);
                break;
            case (HttpOutboundServiceContextImpl.READ_STATE_TIME_RESET):
                // new conn has been initialized but the read for response hasn't
                // been started yet; the stored callback state delivers it later
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "Received the read-ahead immed timeout.");
                }
                break;
            case (HttpOutboundServiceContextImpl.READ_STATE_SYNC):
                // d264854: no longer possible
                // a synchronous read for the response has been started already
                osc.wakeupReadAhead();
                break;
            case (HttpOutboundServiceContextImpl.READ_STATE_ASYNC):
                // d264854: no longer possible
                // an async read for the response has been started already
                osc.setPersistent(false);
                osc.reConnect(vc, ioe);
                break;
            default:
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                    Tr.debug(tc, "Unexpected read-ahead state: " + state);
                }
                break;
        }
    }
}
public class MarkSet { /** * Marks a type as transitively reachable by the includes set . Returns true if the mark is new , * the type will be retained , and its own dependencies should be traversed . */ boolean mark ( ProtoType type ) { } }
if ( type == null ) throw new NullPointerException ( "type == null" ) ; if ( identifierSet . excludes ( type ) ) return false ; return types . add ( type ) ;
public class DBTransaction {
    /**
     * Add a column with empty value. The value will be empty byte array/empty string, not null.
     *
     * @param storeName  Name of store that owns row.
     * @param rowKey     Key of row that owns column.
     * @param columnName Name of column.
     */
    public void addColumn(String storeName, String rowKey, String columnName) {
        // Delegate to the full overload with the shared EMPTY value constant.
        addColumn(storeName, rowKey, columnName, EMPTY);
    }
}
public class ObjectFactory {
    /**
     * Create an instance of {@link JAXBElement}{@code <}{@link Customer}{@code >}.
     *
     * @param value the Customer payload to wrap
     * @return the wrapped element carrying the Intuit v3 qualified name
     */
    @XmlElementDecl(namespace = "http://schema.intuit.com/finance/v3", name = "Customer", substitutionHeadNamespace = "http://schema.intuit.com/finance/v3", substitutionHeadName = "IntuitObject")
    public JAXBElement<Customer> createCustomer(Customer value) {
        // Wrap with the pre-built qualified name; null scope = global element.
        return new JAXBElement<Customer>(_Customer_QNAME, Customer.class, null, value);
    }
}
public class MessageFormat { /** * Append characters from ' raw ' in the range [ pos , end ) to the output buffer . */ private void emit ( int pos , int end , String arg ) { } }
while ( pos < end ) { char ch = format . charAt ( pos ) ; if ( ch == '#' ) { buf . append ( arg == null ? ch : arg ) ; } else { buf . append ( ch ) ; } pos ++ ; }
public class CssEscape {
    /**
     * Render this escape.
     *
     * @param builder   The TokenBuilder to render into
     * @param asLiteral If given matcher matches this escape's literal, then render as literal, else as escape.
     * @return the length of the token in the input character stream
     */
    int render(TokenBuilder builder, CharMatcher asLiteral) {
        char ch = (char) character;
        if (asLiteral.matches(ch)) {
            // Literal form: emit the single decoded character.
            builder.append(ch);
        } else {
            // TODO could normalize space end chars
            builder.append(sequence);
        }
        // NOTE(review): returns sequence length minus one regardless of which
        // branch rendered — presumably the caller has already consumed one
        // character of the escape sequence; confirm against the tokenizer.
        return sequence.length() - 1;
    }
}
public class GetIntrospectionSchemaRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     *
     * @param getIntrospectionSchemaRequest request to serialize; must not be null
     * @param protocolMarshaller            target marshaller receiving the fields
     */
    public void marshall(GetIntrospectionSchemaRequest getIntrospectionSchemaRequest, ProtocolMarshaller protocolMarshaller) {
        if (getIntrospectionSchemaRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each request field against its protocol binding descriptor.
            protocolMarshaller.marshall(getIntrospectionSchemaRequest.getApiId(), APIID_BINDING);
            protocolMarshaller.marshall(getIntrospectionSchemaRequest.getFormat(), FORMAT_BINDING);
        } catch (Exception e) {
            // Wrap low-level marshalling failures in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class lbvserver {
    /**
     * Use this API to update lbvserver.
     *
     * Copies every updatable field from the supplied resource into a fresh
     * request object (so read-only fields are never sent) and issues the
     * NITRO update call.
     *
     * @param client   the NITRO service session to send the request on
     * @param resource the lbvserver carrying the desired field values;
     *                 "name" identifies the vserver to update
     * @return the NITRO base response for the update operation
     * @throws Exception if the NITRO call fails
     */
    public static base_response update(nitro_service client, lbvserver resource) throws Exception {
        lbvserver updateresource = new lbvserver();
        // "name" is the key that selects which vserver is updated.
        updateresource.name = resource.name;
        updateresource.ipv46 = resource.ipv46;
        updateresource.ippattern = resource.ippattern;
        updateresource.ipmask = resource.ipmask;
        updateresource.weight = resource.weight;
        updateresource.servicename = resource.servicename;
        updateresource.persistencetype = resource.persistencetype;
        updateresource.timeout = resource.timeout;
        updateresource.persistencebackup = resource.persistencebackup;
        updateresource.backuppersistencetimeout = resource.backuppersistencetimeout;
        updateresource.lbmethod = resource.lbmethod;
        updateresource.hashlength = resource.hashlength;
        updateresource.netmask = resource.netmask;
        updateresource.v6netmasklen = resource.v6netmasklen;
        updateresource.rule = resource.rule;
        updateresource.cookiename = resource.cookiename;
        updateresource.resrule = resource.resrule;
        updateresource.persistmask = resource.persistmask;
        updateresource.v6persistmasklen = resource.v6persistmasklen;
        updateresource.pq = resource.pq;
        updateresource.sc = resource.sc;
        updateresource.rtspnat = resource.rtspnat;
        updateresource.m = resource.m;
        updateresource.tosid = resource.tosid;
        updateresource.datalength = resource.datalength;
        updateresource.dataoffset = resource.dataoffset;
        updateresource.sessionless = resource.sessionless;
        updateresource.connfailover = resource.connfailover;
        updateresource.backupvserver = resource.backupvserver;
        updateresource.redirurl = resource.redirurl;
        updateresource.cacheable = resource.cacheable;
        updateresource.clttimeout = resource.clttimeout;
        updateresource.somethod = resource.somethod;
        updateresource.sothreshold = resource.sothreshold;
        updateresource.sopersistence = resource.sopersistence;
        updateresource.sopersistencetimeout = resource.sopersistencetimeout;
        updateresource.healththreshold = resource.healththreshold;
        updateresource.sobackupaction = resource.sobackupaction;
        updateresource.redirectportrewrite = resource.redirectportrewrite;
        updateresource.downstateflush = resource.downstateflush;
        updateresource.insertvserveripport = resource.insertvserveripport;
        updateresource.vipheader = resource.vipheader;
        updateresource.disableprimaryondown = resource.disableprimaryondown;
        updateresource.authenticationhost = resource.authenticationhost;
        updateresource.authentication = resource.authentication;
        updateresource.authn401 = resource.authn401;
        updateresource.authnvsname = resource.authnvsname;
        updateresource.push = resource.push;
        updateresource.pushvserver = resource.pushvserver;
        updateresource.pushlabel = resource.pushlabel;
        updateresource.pushmulticlients = resource.pushmulticlients;
        updateresource.listenpolicy = resource.listenpolicy;
        updateresource.listenpriority = resource.listenpriority;
        updateresource.tcpprofilename = resource.tcpprofilename;
        updateresource.httpprofilename = resource.httpprofilename;
        updateresource.dbprofilename = resource.dbprofilename;
        updateresource.comment = resource.comment;
        updateresource.l2conn = resource.l2conn;
        updateresource.mssqlserverversion = resource.mssqlserverversion;
        updateresource.mysqlprotocolversion = resource.mysqlprotocolversion;
        updateresource.mysqlserverversion = resource.mysqlserverversion;
        updateresource.mysqlcharacterset = resource.mysqlcharacterset;
        updateresource.mysqlservercapabilities = resource.mysqlservercapabilities;
        updateresource.appflowlog = resource.appflowlog;
        updateresource.netprofile = resource.netprofile;
        updateresource.icmpvsrresponse = resource.icmpvsrresponse;
        updateresource.newservicerequest = resource.newservicerequest;
        updateresource.newservicerequestunit = resource.newservicerequestunit;
        updateresource.newservicerequestincrementinterval = resource.newservicerequestincrementinterval;
        updateresource.minautoscalemembers = resource.minautoscalemembers;
        updateresource.maxautoscalemembers = resource.maxautoscalemembers;
        updateresource.persistavpno = resource.persistavpno;
        updateresource.skippersistency = resource.skippersistency;
        updateresource.authnprofile = resource.authnprofile;
        updateresource.macmoderetainvlan = resource.macmoderetainvlan;
        updateresource.dbslb = resource.dbslb;
        updateresource.dns64 = resource.dns64;
        updateresource.bypassaaaa = resource.bypassaaaa;
        updateresource.recursionavailable = resource.recursionavailable;
        // Send the populated request object to the NetScaler.
        return updateresource.update_resource(client);
    }
}
public class FileAccessPermissions { /** * This method parses the { @ code ugoa } prefix indicating which flags to modify . * < ul > * < li > { @ code u } indicates that the flags of the { @ link FileAccessClass # USER user } should be changed . < / li > * < li > { @ code g } indicates that the flags of the { @ link FileAccessClass # GROUP group } should be changed . < / li > * < li > { @ code o } indicates that the flags of the { @ link FileAccessClass # OTHERS others } should be changed . < / li > * < li > { @ code a } indicates that the flags of all { @ link FileAccessClass classes } should be changed . < / li > * < / ul > * @ param parse is the current state of the parser . * @ return the bit - mask with the UGO - flags . */ private static int parseUGO ( CharSequenceScanner parse ) { } }
int ugo = 0 ; while ( true ) { char c = parse . forceNext ( ) ; if ( c == 'u' ) { ugo = ugo | MASK_USER ; } else if ( c == 'g' ) { ugo = ugo | MASK_GROUP ; } else if ( c == 'o' ) { ugo = ugo | MASK_OTHERS ; } else if ( c == 'a' ) { ugo = MASK_ALL ; } else { if ( ugo == 0 ) { // if none of u / g / o / a was specified , then ' a ' is the default ugo = MASK_ALL ; } if ( c != 0 ) { // we read too far parse . stepBack ( ) ; } return ugo ; } }
public class AbstractTTTLearner { /** * Marks a node , and propagates the label up to all nodes on the path from the block root to this node . * @ param node * the node to mark * @ param label * the label to mark the node with */ private static < I , D > void markAndPropagate ( AbstractBaseDTNode < I , D > node , D label ) { } }
AbstractBaseDTNode < I , D > curr = node ; while ( curr != null && curr . getSplitData ( ) != null ) { if ( ! curr . getSplitData ( ) . mark ( label ) ) { return ; } curr = curr . getParent ( ) ; }
public class ResourceBundleWrapper { /** * This method is for super class ' s instantiateBundle method */ public static ResourceBundleWrapper getBundleInstance ( String baseName , String localeID , ClassLoader root , boolean disableFallback ) { } }
if ( root == null ) { root = ClassLoaderUtil . getClassLoader ( ) ; } ResourceBundleWrapper b ; if ( disableFallback ) { b = instantiateBundle ( baseName , localeID , null , root , disableFallback ) ; } else { b = instantiateBundle ( baseName , localeID , ULocale . getDefault ( ) . getBaseName ( ) , root , disableFallback ) ; } if ( b == null ) { String separator = "_" ; if ( baseName . indexOf ( '/' ) >= 0 ) { separator = "/" ; } throw new MissingResourceException ( "Could not find the bundle " + baseName + separator + localeID , "" , "" ) ; } return b ;
public class AmazonEC2Client {
    /**
     * Cancels an in-process import virtual machine or import snapshot task.
     *
     * @param request Contains the parameters for CancelImportTask.
     * @return Result of the CancelImportTask operation returned by the service.
     * @sample AmazonEC2.CancelImportTask
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImportTask" target="_top">AWS API
     *      Documentation</a>
     */
    @Override
    public CancelImportTaskResult cancelImportTask(CancelImportTaskRequest request) {
        // Apply registered request handlers / client defaults before dispatch.
        request = beforeClientExecution(request);
        return executeCancelImportTask(request);
    }
}
public class InjectionSink {
    /**
     * Uses immutable values, updated priority and added lines for reporting.
     *
     * @param taintedInsideMethod true if not influenced by method arguments
     * @return new bug instance filled with information
     */
    public BugInstance generateBugInstance(boolean taintedInsideMethod) {
        BugInstance bug = new BugInstance(detector, bugType, originalPriority);
        bug.addClassAndMethod(classContext.getJavaClass(), method);
        bug.addSourceLine(SourceLineAnnotation.fromVisitedInstruction(classContext, method, instructionHandle));
        addMessage(bug, "Sink method", sinkMethod);
        addMessage(bug, "Sink parameter", String.valueOf(parameterOffset));
        // Attach every unknown taint source (field reads and opaque method returns).
        for (UnknownSource source : sources) {
            if (source.getSourceType() == UnknownSourceType.FIELD) {
                addMessage(bug, "Unknown source", source.getSignatureField());
            } else if (source.getSourceType() == UnknownSourceType.RETURN) {
                // Excluded methods are deliberately not reported as sources.
                if (isExclude(source.getSignatureMethod()))
                    continue;
                addMessage(bug, "Unknown source", source.getSignatureMethod());
            }
            // if (isExclude(source.getTaintSource())) { continue; }
            // addMessage(bug, "Unknown source", source.getTaintSource());
        }
        if (sinkPriority != UNKNOWN_SINK_PRIORITY) {
            // higher priority is represented by lower integer
            if (sinkPriority < originalPriority) {
                bug.setPriority(sinkPriority);
                addMessage(bug, "Method usage", "with tainted arguments detected");
            } else if (sinkPriority > originalPriority) {
                bug.setPriority(Priorities.LOW_PRIORITY);
                addMessage(bug, "Method usage", "detected only with safe arguments");
            }
        } else if (!taintedInsideMethod) {
            addMessage(bug, "Method usage", "not detected");
        }
        // Sort so duplicates (same class + start line) become adjacent, then
        // drop all but the first of each run.
        Collections.sort(lines);
        SourceLineAnnotation annotation = null;
        for (Iterator<SourceLineAnnotation> it = lines.iterator(); it.hasNext();) {
            SourceLineAnnotation prev = annotation;
            annotation = it.next();
            if (prev != null && prev.getClassName().equals(annotation.getClassName())
                    && prev.getStartLine() == annotation.getStartLine()) {
                // keep only one annotation per line
                it.remove();
            }
        }
        for (SourceLineAnnotation sourceLine : lines) {
            bug.addSourceLine(sourceLine);
        }
        return bug;
    }
}
public class OPFHandler {
    /**
     * Returns an immutable list of all the items in the OPF, in document order,
     * including those represented by <code>link</code> elements pointing to local
     * resources.
     * Returns the empty list if the items have not been parsed yet.
     *
     * @return the list of all items, guaranteed non-null.
     */
    public List<OPFItem> getItems() {
        // Before parsing, the backing 'items' holder is still null; report an
        // empty immutable list instead of propagating the null.
        return (items != null) ? items.getItems() : ImmutableList.<OPFItem>of();
    }
}
public class StringUtils { /** * Replace all occurrences of a substring within a string with another string . * @ param inString { @ code String } to examine . * @ param oldPattern { @ code String } to replace . * @ param newPattern { @ code String } to insert . * @ return a { @ code String } with the replacements . */ public static String replace ( String inString , String oldPattern , String newPattern ) { } }
if ( ! hasLength ( inString ) || ! hasLength ( oldPattern ) || newPattern == null ) { return inString ; } int index = inString . indexOf ( oldPattern ) ; if ( index == - 1 ) { // no occurrence - > can return input as - is return inString ; } int capacity = inString . length ( ) ; if ( newPattern . length ( ) > oldPattern . length ( ) ) { capacity += 16 ; } StringBuilder sb = new StringBuilder ( capacity ) ; int pos = 0 ; // our position in the old string int patLen = oldPattern . length ( ) ; while ( index >= 0 ) { sb . append ( inString . substring ( pos , index ) ) ; sb . append ( newPattern ) ; pos = index + patLen ; index = inString . indexOf ( oldPattern , pos ) ; } // append any characters to the right of a match sb . append ( inString . substring ( pos ) ) ; return sb . toString ( ) ;
public class JoinBooleanExpressionExecutor {
    /**
     * Standard method (InternalProposalExecutor).
     *
     * Folds the boolean conditions of a chain of filter/inner-join nodes into a
     * single optimized inner join, or replaces the whole sub-tree with an empty
     * node when the folded condition is unsatisfiable.
     */
    @Override
    public NodeCentricOptimizationResults<InnerJoinNode> apply(InnerJoinOptimizationProposal proposal, IntermediateQuery query, QueryTreeComponent treeComponent)
            throws InvalidQueryOptimizationProposalException, EmptyQueryException {
        InnerJoinNode originalTopJoinNode = proposal.getFocusNode();
        // Collect the contiguous chain of filter and inner-join nodes below the focus.
        ImmutableList<JoinOrFilterNode> filterOrJoinNodes = extractFilterAndInnerJoinNodes(originalTopJoinNode, query);
        QueryNode parentNode = query.getParent(originalTopJoinNode).get();
        Optional<ImmutableExpression> optionalAggregatedFilterCondition;
        try {
            optionalAggregatedFilterCondition = joinExtractionUtils.extractFoldAndOptimizeBooleanExpressions(filterOrJoinNodes);
        }
        /*
         * The filter condition cannot be satisfied --> the join node and its sub-tree is thus removed from the tree.
         * Returns no join node.
         */
        catch (UnsatisfiableExpressionException e) {
            EmptyNode replacingEmptyNode = iqFactory.createEmptyNode(query.getVariables(originalTopJoinNode));
            treeComponent.replaceSubTree(originalTopJoinNode, replacingEmptyNode);
            // Converts it into a NodeCentricOptimizationResults<InnerJoinNode>
            return new NodeCentricOptimizationResultsImpl<>(query, Optional.of(replacingEmptyNode));
        }
        /*
         * If something has changed (several nodes folded, or the condition differs)
         */
        if ((filterOrJoinNodes.size() > 1)
                || (!optionalAggregatedFilterCondition.equals(originalTopJoinNode.getOptionalFilterCondition()))) {
            /*
             * Optimized join node
             */
            InnerJoinNode newJoinNode = iqFactory.createInnerJoinNode(optionalAggregatedFilterCondition);
            Optional<ArgumentPosition> optionalPosition = treeComponent.getOptionalPosition(parentNode, originalTopJoinNode);
            // Collapse the whole chain into the single new join node, keeping the
            // position under the parent.
            treeComponent.replaceNodesByOneNode(ImmutableList.<QueryNode>copyOf(filterOrJoinNodes), newJoinNode, parentNode, optionalPosition);
            return new NodeCentricOptimizationResultsImpl<>(query, newJoinNode);
        } else {
            // Nothing changed: return the original node untouched.
            return new NodeCentricOptimizationResultsImpl<>(query, originalTopJoinNode);
        }
    }
}
public class ElementCollectionImpl {
    /**
     * If not already created, a new <code>order-column</code> element with the given value will be created.
     * Otherwise, the existing <code>order-column</code> element will be returned.
     *
     * @return a new or existing instance of <code>OrderColumn&lt;ElementCollection&lt;T&gt;&gt;</code>
     */
    public OrderColumn<ElementCollection<T>> getOrCreateOrderColumn() {
        // getOrCreate() makes the lookup idempotent at the XML node level.
        Node node = childNode.getOrCreate("order-column");
        // NOTE(review): a fresh wrapper object is created on every call; only
        // the underlying node is reused — presumably intentional for this
        // descriptor API, but verify against sibling getOrCreate* methods.
        OrderColumn<ElementCollection<T>> orderColumn = new OrderColumnImpl<ElementCollection<T>>(this, "order-column", childNode, node);
        return orderColumn;
    }
}
public class AntiAffinityService {
    /**
     * Remove Anti-affinity policy.
     *
     * @param policyRef policy reference
     * @return OperationFuture wrapper for AntiAffinityPolicy
     */
    public OperationFuture<AntiAffinityPolicy> delete(AntiAffinityPolicy policyRef) {
        // Resolve the reference to a concrete policy to obtain its server-side id.
        client.deleteAntiAffinityPolicy(findByRef(policyRef).getId());
        // NoWaitingJobFuture: the delete call completes synchronously, so the
        // returned future has no job to poll.
        return new OperationFuture<>(policyRef, new NoWaitingJobFuture());
    }
}
public class Ifc4FactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertIfcBuildingElementPartTypeEnumToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class TextTable {
    /**
     * This method does some of the work involved with managing the creation
     * and openning of the cache, the rest is done in Log.java and
     * TextCache.java.
     * Better clarification of the role of the methods is needed.
     *
     * On failure, the previous data-source settings are restored and the old
     * cache is reconnected before the exception is rethrown.
     */
    private void openCache(Session session, String dataSourceNew, boolean isReversedNew, boolean isReadOnlyNew) {
        // Remember the current settings so they can be restored on failure.
        String dataSourceOld = dataSource;
        boolean isReversedOld = isReversed;
        boolean isReadOnlyOld = isReadOnly;
        if (dataSourceNew == null) {
            dataSourceNew = "";
        }
        // Close the current cache before switching sources.
        disconnect();
        dataSource = dataSourceNew;
        // A reversed (descending) source only makes sense with a non-empty path.
        isReversed = (isReversedNew && dataSource.length() > 0);
        try {
            connect(session, isReadOnlyNew);
        } catch (HsqlException e) {
            // Roll back to the previous source, reconnect it, then rethrow.
            dataSource = dataSourceOld;
            isReversed = isReversedOld;
            connect(session, isReadOnlyOld);
            throw e;
        }
    }
}
public class AggressiveInlineAliases {
    /**
     * Returns whether a ReferenceCollection for some aliasing variable references a property on the
     * original aliased variable that may be collapsed in CollapseProperties.
     *
     * <p>See {@link GlobalNamespace.Name#canCollapse} for what can/cannot be collapsed.
     */
    private boolean referencesCollapsibleProperty(ReferenceCollection aliasRefs, Name aliasedName, GlobalNamespace namespace) {
        for (Reference ref : aliasRefs.references) {
            if (ref.getParent() == null) {
                continue;
            }
            // Only property accesses of the form "alias.prop" are of interest.
            if (ref.getParent().isGetProp()) {
                Node propertyNode = ref.getNode().getNext();
                // e.g. if the reference is "alias.b.someProp", this will be "b".
                String propertyName = propertyNode.getString();
                // e.g. if the aliased name is "originalName", this will be "originalName.b".
                String originalPropertyName = aliasedName.getName() + "." + propertyName;
                Name originalProperty = namespace.getOwnSlot(originalPropertyName);
                // If the original property isn't in the namespace or can't be collapsed, keep going.
                if (originalProperty == null || !originalProperty.canCollapse()) {
                    continue;
                }
                return true;
            }
        }
        return false;
    }
}
public class RandomTwoParentSelection {
    /**
     * Selects two random parents.
     *
     * @return An array of parents genes. The outer array are the parents, and the inner array are the genes.
     */
    @Override
    public double[][] selectParents() {
        double[][] parents = new double[2][];
        int parent1Idx = rng.nextInt(population.size());
        int parent2Idx;
        // Re-draw until the second parent differs from the first.
        // NOTE(review): this loops forever when the population has fewer than
        // two individuals — callers must guarantee population.size() >= 2.
        do {
            parent2Idx = rng.nextInt(population.size());
        } while (parent1Idx == parent2Idx);
        parents[0] = population.get(parent1Idx).getGenes();
        parents[1] = population.get(parent2Idx).getGenes();
        return parents;
    }
}
public class OffsetTime {
    /**
     * Obtains an instance of {@code OffsetTime} from an hour, minute, second and nanosecond.
     *
     * This creates an offset time with the four specified fields.
     * This method exists primarily for writing test cases.
     * Non test-code will typically use other methods to create an offset time.
     * {@code LocalTime} has two additional convenience variants of the
     * equivalent factory method taking fewer arguments.
     * They are not provided here to reduce the footprint of the API.
     *
     * @param hour the hour-of-day to represent, from 0 to 23
     * @param minute the minute-of-hour to represent, from 0 to 59
     * @param second the second-of-minute to represent, from 0 to 59
     * @param nanoOfSecond the nano-of-second to represent, from 0 to 999,999,999
     * @param offset the zone offset, not null
     * @return the offset time, not null
     * @throws DateTimeException if the value of any field is out of range
     */
    public static OffsetTime of(int hour, int minute, int second, int nanoOfSecond, ZoneOffset offset) {
        // LocalTime.of performs the range validation for all four fields.
        return new OffsetTime(LocalTime.of(hour, minute, second, nanoOfSecond), offset);
    }
}
public class CSVParser {
    /**
     * Checks whether the character after the given index is escapable, meaning
     * the next character is either a quotation character or the escape char and
     * the parser is currently inside quotes. Precondition: the current
     * character is an escape.
     *
     * @param sNextLine the current line
     * @param bInQuotes true if the current context is quoted
     * @param nIndex    current index in line
     * @return <code>true</code> if the following character is a quote
     */
    protected boolean isNextCharacterEscapable(@Nonnull final String sNextLine, final boolean bInQuotes, final int nIndex) {
        // Escapes are only meaningful inside a quoted section.
        if (!bInQuotes) {
            return false;
        }
        // There must be at least one character after the escape to examine.
        if (sNextLine.length() <= nIndex + 1) {
            return false;
        }
        return _isCharacterEscapable(sNextLine.charAt(nIndex + 1));
    }
}