signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class BitcointoyouMarketDataServiceRaw { /** * List all public trades made at Bitcointoyou Exchange . * @ param currencyPair the trade currency pair * @ param tradeTimestamp the trade timestamp * @ param minTradeId the minimum trade ID * @ return an array of { @ link BitcointoyouPublicTrade } * @ throws IOException */ BitcointoyouPublicTrade [ ] getBitcointoyouPublicTrades ( CurrencyPair currencyPair , Long tradeTimestamp , Long minTradeId ) throws IOException { } }
String currency = currencyPair . base . toString ( ) ; try { return bitcointoyou . getTrades ( currency , tradeTimestamp , minTradeId ) ; } catch ( BitcointoyouException e ) { throw new ExchangeException ( e . getError ( ) ) ; }
public class GrowingNeuralGas { /** * Clustering neurons into k clusters . * @ param k the number of clusters . */ public void partition ( int k ) { } }
double [ ] [ ] reps = new double [ nodes . size ( ) ] [ ] ; int i = 0 ; for ( Node neuron : nodes ) reps [ i ++ ] = neuron . w ; double [ ] [ ] proximity = new double [ nodes . size ( ) ] [ ] ; for ( i = 0 ; i < nodes . size ( ) ; i ++ ) { proximity [ i ] = new double [ i + 1 ] ; for ( int j = 0 ; j < i ; j ++ ) proximity [ i ] [ j ] = Math . distance ( reps [ i ] , reps [ j ] ) ; } Linkage linkage = new UPGMALinkage ( proximity ) ; HierarchicalClustering hc = new HierarchicalClustering ( linkage ) ; y = hc . partition ( k ) ;
public class TimeZone {
    /**
     * Sets the <code>TimeZone</code> that is returned by the
     * <code>getDefault</code> method. If <code>zone</code> is null, reset the
     * default to the value it had originally when the VM first started.
     * The new default is also mirrored into java.util.TimeZone so both APIs agree.
     *
     * @param tz the new default time zone
     * @hide unsupported on Android
     */
    public static synchronized void setDefault(TimeZone tz) {
        defaultZone = tz;
        java.util.TimeZone jdkZone = null;
        if (defaultZone instanceof JavaTimeZone) {
            // Already a wrapper around a JDK zone; install the wrapped zone directly.
            jdkZone = ((JavaTimeZone) defaultZone).unwrap();
        } else {
            // Keep java.util.TimeZone default in sync so java.util.Date
            // can interoperate with android.icu.util classes.
            if (tz != null) {
                if (tz instanceof android.icu.impl.OlsonTimeZone) {
                    // Because of the lack of APIs supporting historic
                    // zone offset/dst saving in JDK TimeZone,
                    // wrapping ICU TimeZone with JDK TimeZone will
                    // cause historic offset calculation in Calendar/Date.
                    // JDK calendar implementation calls getRawOffset() and
                    // getDSTSavings() when the instance of JDK TimeZone
                    // is not an instance of JDK internal TimeZone subclass
                    // (sun.util.calendar.ZoneInfo). Ticket#6459
                    String icuID = tz.getID();
                    jdkZone = java.util.TimeZone.getTimeZone(icuID);
                    if (!icuID.equals(jdkZone.getID())) {
                        // If the ID was unknown, retry with the canonicalized
                        // ID instead. This will ensure that JDK 1.1.x
                        // compatibility IDs supported by ICU (but not
                        // necessarily supported by the platform) work.
                        // Ticket#11483
                        icuID = getCanonicalID(icuID);
                        jdkZone = java.util.TimeZone.getTimeZone(icuID);
                        if (!icuID.equals(jdkZone.getID())) {
                            // JDK does not know the ID..
                            jdkZone = null;
                        }
                    }
                }
                if (jdkZone == null) {
                    // Fall back to adapting the ICU zone behind the JDK interface.
                    jdkZone = TimeZoneAdapter.wrap(tz);
                }
            }
        }
        java.util.TimeZone.setDefault(jdkZone);
    }
}
public class GeoPackageValidate { /** * Check the file extension to see if it is a GeoPackage * @ param file * GeoPackage file * @ return true if GeoPackage extension */ public static boolean hasGeoPackageExtension ( File file ) { } }
String extension = GeoPackageIOUtils . getFileExtension ( file ) ; boolean isGeoPackage = extension != null && ( extension . equalsIgnoreCase ( GeoPackageConstants . GEOPACKAGE_EXTENSION ) || extension . equalsIgnoreCase ( GeoPackageConstants . GEOPACKAGE_EXTENDED_EXTENSION ) ) ; return isGeoPackage ;
public class VLDockingUtils { /** * Fixes an VLDocking bug consisting on dockables that belong to a docking desktop have no state on it . * @ param dockable * the dockable candidate . * @ return its dockable state . If none then registers the dockable again and ensures dockable state is not null . */ public static DockableState fixVLDockingBug ( DockingDesktop dockingDesktop , Dockable dockable ) { } }
Assert . notNull ( dockingDesktop , "dockingDesktop" ) ; Assert . notNull ( dockable , "dockable" ) ; final DockingContext dockingContext = dockingDesktop . getContext ( ) ; final DockKey dockKey = dockable . getDockKey ( ) ; DockableState dockableState = dockingDesktop . getDockableState ( dockable ) ; final Boolean thisFixApplies = ( dockingContext . getDockableByKey ( dockKey . getKey ( ) ) != null ) ; if ( ( thisFixApplies ) && ( dockableState == null ) ) { dockingDesktop . registerDockable ( dockable ) ; dockableState = dockingDesktop . getDockableState ( dockable ) ; // dockKey . setLocation ( dockableState . getLocation ( ) ) ; Assert . notNull ( dockableState , "dockableState" ) ; } return dockableState ;
public class ContainerBase { /** * { @ inheritDoc } * @ see org . jboss . shrinkwrap . api . Archive # add ( org . jboss . shrinkwrap . api . asset . Asset , * org . jboss . shrinkwrap . api . ArchivePath ) */ @ Override public T add ( Asset asset , ArchivePath target ) throws IllegalArgumentException { } }
this . getArchive ( ) . add ( asset , target ) ; return covarientReturn ( ) ;
public class PrcInvoiceGfe { /** * < p > Process entity request . < / p > * @ param pReqVars additional param , e . g . return this line ' s * document in " nextEntity " for farther process * @ param pRequestData Request Data * @ param pEntity Entity to process * @ return Entity processed for farther process or null * @ throws Exception - an exception */ @ Override public final T process ( final Map < String , Object > pReqVars , final T pEntity , final IRequestData pRequestData ) throws Exception { } }
pReqVars . put ( "DebtorCreditortaxDestinationdeepLevel" , 2 ) ; Set < String > ndFlDc = new HashSet < String > ( ) ; ndFlDc . add ( "itsId" ) ; ndFlDc . add ( "itsName" ) ; ndFlDc . add ( "isForeigner" ) ; ndFlDc . add ( "taxDestination" ) ; pReqVars . put ( "DebtorCreditorneededFields" , ndFlDc ) ; T invoice = this . prcEntityPbEditDelete . process ( pReqVars , pEntity , pRequestData ) ; pReqVars . remove ( "DebtorCreditorneededFields" ) ; pReqVars . remove ( "DebtorCreditortaxDestinationdeepLevel" ) ; return invoice ;
public class HighwayHash { /** * NOTE : The 64 - bit HighwayHash algorithm is declared stable and no longer subject to change . * @ param data array with data bytes * @ param offset position of first byte of data to read from * @ param length number of bytes from data to read * @ param key array of size 4 with the key to initialize the hash with * @ return 64 - bit hash for the given data */ public static long hash64 ( byte [ ] data , int offset , int length , long [ ] key ) { } }
HighwayHash h = new HighwayHash ( key ) ; h . processAll ( data , offset , length ) ; return h . finalize64 ( ) ;
public class BaseFixData { /** * GetRecordFromCode Method . */ public static Record getRecordFromCode ( BaseField field , String strDesc , String strFieldName ) { } }
Record recSecond = ( ( ReferenceField ) field ) . getReferenceRecord ( ) ; return BaseFixData . getRecordFromCode ( field , strDesc , strFieldName , recSecond ) ;
public class PasswordHash { /** * Compares two byte arrays in length - constant time . This comparison method is used so that password hashes cannot * be extracted from an on - line system using a timing attack and then attacked off - line . * @ param a * the first byte array * @ param b * the second byte array * @ return true if both byte arrays are the same , false if not */ private static boolean slowEquals ( byte [ ] a , byte [ ] b ) { } }
int diff = a . length ^ b . length ; for ( int i = 0 ; i < a . length && i < b . length ; i ++ ) { diff |= a [ i ] ^ b [ i ] ; } return diff == 0 ;
public class TldRegionTracker { /** * Computes the gradient and changes the reference to the current pyramid */ protected void updateCurrent ( ImagePyramid < I > image ) { } }
this . currentImage = image ; for ( int i = 0 ; i < image . getNumLayers ( ) ; i ++ ) { gradient . process ( image . getLayer ( i ) , currentDerivX [ i ] , currentDerivY [ i ] ) ; }
public class DESedeKeySpec { /** * Checks if the given DES - EDE key , starting at < code > offset < / code > * inclusive , is parity - adjusted . * @ param key a byte array which holds the key value * @ param offset the offset into the byte array * @ return true if the given DES - EDE key is parity - adjusted , false * otherwise * @ exception NullPointerException if < code > key < / code > is null . * @ exception InvalidKeyException if the given key material , starting at * < code > offset < / code > inclusive , is shorter than 24 bytes */ public static boolean isParityAdjusted ( byte [ ] key , int offset ) throws InvalidKeyException { } }
if ( key . length - offset < 24 ) { throw new InvalidKeyException ( "Wrong key size" ) ; } if ( DESKeySpec . isParityAdjusted ( key , offset ) == false || DESKeySpec . isParityAdjusted ( key , offset + 8 ) == false || DESKeySpec . isParityAdjusted ( key , offset + 16 ) == false ) { return false ; } return true ;
public class ICUService {
    /**
     * Return a snapshot of the mapping from display names to visible
     * IDs for this service. This set will not change as factories
     * are added or removed, but the supported ids will, so there is
     * no guarantee that all and only the ids in the returned map will
     * be visible and supported by the service in subsequent calls,
     * nor is there any guarantee that the current display names match
     * those in the set. The display names are sorted based on the
     * comparator provided.
     *
     * @param locale the locale used to compute display names
     * @param com comparator that orders the returned map's keys
     * @param matchID when non-null, restricts the result to IDs this key is a fallback of
     * @return a sorted map from display name to ID (the shared unmodifiable
     *         cache when unfiltered, otherwise a fresh filtered copy)
     */
    public SortedMap<String, String> getDisplayNames(ULocale locale, Comparator<Object> com, String matchID) {
        SortedMap<String, String> dncache = null;
        // dnref caches the last computed map keyed by (locale, comparator).
        LocaleRef ref = dnref;
        if (ref != null) {
            dncache = ref.get(locale, com);
        }
        // Cache miss: build under the lock; the loop retries if another
        // thread replaced dnref while we were waiting.
        while (dncache == null) {
            synchronized (this) {
                if (ref == dnref || dnref == null) {
                    dncache = new TreeMap<String, String>(com); // sorted
                    Map<String, Factory> m = getVisibleIDMap();
                    Iterator<Entry<String, Factory>> ei = m.entrySet().iterator();
                    while (ei.hasNext()) {
                        Entry<String, Factory> e = ei.next();
                        String id = e.getKey();
                        Factory f = e.getValue();
                        dncache.put(f.getDisplayName(id, locale), id);
                    }
                    dncache = Collections.unmodifiableSortedMap(dncache);
                    dnref = new LocaleRef(dncache, locale, com);
                } else {
                    // Another thread refreshed dnref; try its cache instead.
                    ref = dnref;
                    dncache = ref.get(locale, com);
                }
            }
        }

        Key matchKey = createKey(matchID);
        if (matchKey == null) {
            // No filter requested; the shared unmodifiable cache is returned as-is.
            return dncache;
        }

        // Copy the cache and drop entries whose ID the match key is not a fallback of.
        SortedMap<String, String> result = new TreeMap<String, String>(dncache);
        Iterator<Entry<String, String>> iter = result.entrySet().iterator();
        while (iter.hasNext()) {
            Entry<String, String> e = iter.next();
            if (!matchKey.isFallbackOf(e.getValue())) {
                iter.remove();
            }
        }
        return result;
    }
}
public class MtasSolrCollectionCache {
  /**
   * Gets the cached string set for the given collection id, extending the
   * entry's expiration on access. Entries whose on-disk data can no longer be
   * read back are deleted and dropped from the in-memory index.
   *
   * @param id the id
   * @return the cached hash set, or null when the id is unknown or its
   *         on-disk entry is unreadable
   * @throws IOException Signals that an I/O exception has occurred.
   */
  private HashSet<String> get(String id) throws IOException {
    if (collectionCachePath != null) {
      // Purge expired entries first; clear() also supplies the current time.
      Date date = clear();
      if (idToVersion.containsKey(id)) {
        String version = idToVersion.get(id);
        // Sliding expiration: accessing an entry extends its lifetime.
        expirationVersion.put(version, date.getTime() + (1000 * lifeTime));
        MtasSolrCollectionCacheItem newItem = read(version, date.getTime());
        if (newItem != null && newItem.id.equals(id)) {
          return newItem.data;
        } else {
          // Stored item is missing or belongs to a different id: the cache
          // file is stale/corrupt for this entry.
          log.error("couldn't get " + version);
          // delete file and remove from index
          if (!collectionCachePath.resolve(version).toFile().delete()) {
            log.debug("couldn't delete " + version);
          }
          idToVersion.remove(id);
          expirationVersion.remove(version);
          versionToItem.remove(version);
        }
      } else {
        log.error("doesn't exist anymore");
      }
      return null;
    } else {
      throw new IOException("no cachePath available, can't get data");
    }
  }
}
public class PortletEventCoordinatationService { /** * Returns a request scoped PortletEventQueue used to track events to process and events to * dispatch */ @ Override public PortletEventQueue getPortletEventQueue ( HttpServletRequest request ) { } }
request = this . portalRequestUtils . getOriginalPortalRequest ( request ) ; synchronized ( PortalWebUtils . getRequestAttributeMutex ( request ) ) { PortletEventQueue portletEventQueue = ( PortletEventQueue ) request . getAttribute ( PORTLET_EVENT_QUEUE ) ; if ( portletEventQueue == null ) { portletEventQueue = new PortletEventQueue ( ) ; request . setAttribute ( PORTLET_EVENT_QUEUE , portletEventQueue ) ; } return portletEventQueue ; }
public class RemoteCompilationProvider { /** * turns a template name and a servlet path into a */ String createTemplateServerRequest ( String servletPath , String templateName ) { } }
String pathInfo = servletPath . substring ( servletPath . indexOf ( "/" , TEMPLATE_LOAD_PROTOCOL . length ( ) ) ) ; if ( templateName != null ) { pathInfo = pathInfo + templateName ; } return pathInfo ;
public class KafkaSource {
  /**
   * Loads all per-partition watermark/offset bookkeeping from the previous
   * run's work unit states into this source's in-memory maps. Idempotent:
   * guarded by doneGettingAllPreviousOffsets so the (synchronized) work runs
   * at most once. this.previousOffsetFetchEpochTimes need to be initialized
   * once.
   *
   * @param state the source state holding the previous work unit states
   */
  private synchronized void getAllPreviousOffsetState(SourceState state) {
    if (this.doneGettingAllPreviousOffsets) {
      return;
    }
    // Reset all per-partition bookkeeping before repopulating it.
    this.previousOffsets.clear();
    this.previousLowWatermarks.clear();
    this.previousExpectedHighWatermarks.clear();
    this.previousOffsetFetchEpochTimes.clear();
    this.previousStartFetchEpochTimes.clear();
    this.previousStopFetchEpochTimes.clear();

    Map<String, Iterable<WorkUnitState>> workUnitStatesByDatasetUrns =
        state.getPreviousWorkUnitStatesByDatasetUrns();
    // Dataset state is considered enabled when the previous states are keyed
    // by real dataset URNs, i.e. not just the single empty-string key.
    if (!workUnitStatesByDatasetUrns.isEmpty()
        && !(workUnitStatesByDatasetUrns.size() == 1
            && workUnitStatesByDatasetUrns.keySet().iterator().next().equals(""))) {
      this.isDatasetStateEnabled.set(true);
    }

    for (WorkUnitState workUnitState : state.getPreviousWorkUnitStates()) {
      List<KafkaPartition> partitions = KafkaUtils.getPartitions(workUnitState);
      WorkUnit workUnit = workUnitState.getWorkunit();
      // Each multi-watermark carries one long per partition, index-aligned
      // with the partitions list.
      MultiLongWatermark watermark = workUnitState.getActualHighWatermark(MultiLongWatermark.class);
      MultiLongWatermark previousLowWatermark = workUnit.getLowWatermark(MultiLongWatermark.class);
      MultiLongWatermark previousExpectedHighWatermark =
          workUnit.getExpectedHighWatermark(MultiLongWatermark.class);
      Preconditions.checkArgument(partitions.size() == watermark.size(), String.format(
          "Num of partitions doesn't match number of watermarks: partitions=%s, watermarks=%s",
          partitions, watermark));

      for (int i = 0; i < partitions.size(); i++) {
        KafkaPartition partition = partitions.get(i);
        // Default (unset) watermark values are skipped: only real progress is recorded.
        if (watermark.get(i) != ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
          this.previousOffsets.put(partition, watermark.get(i));
        }
        if (previousLowWatermark.get(i) != ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
          this.previousLowWatermarks.put(partition, previousLowWatermark.get(i));
        }
        if (previousExpectedHighWatermark.get(i) != ConfigurationKeys.DEFAULT_WATERMARK_VALUE) {
          this.previousExpectedHighWatermarks.put(partition, previousExpectedHighWatermark.get(i));
        }
        // Fetch epoch times are recorded unconditionally for every partition.
        this.previousOffsetFetchEpochTimes.put(partition,
            KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(workUnitState,
                OFFSET_FETCH_EPOCH_TIME, i));
        this.previousStartFetchEpochTimes.put(partition,
            KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(workUnitState,
                START_FETCH_EPOCH_TIME, i));
        this.previousStopFetchEpochTimes.put(partition,
            KafkaUtils.getPropAsLongFromSingleOrMultiWorkUnitState(workUnitState,
                STOP_FETCH_EPOCH_TIME, i));
      }
    }
    this.doneGettingAllPreviousOffsets = true;
  }
}
public class CmsJspTagContainer { /** * Returns the formatter configuration for the given element . < p > * @ param cms the cms context * @ param element the element bean * @ param adeConfig the ADE configuration * @ param containerName the container name * @ param containerType the container type * @ param containerWidth the container width * @ return the formatter configuration */ public static I_CmsFormatterBean getFormatterConfigurationForElement ( CmsObject cms , CmsContainerElementBean element , CmsADEConfigData adeConfig , String containerName , String containerType , int containerWidth ) { } }
I_CmsFormatterBean formatterBean = null ; String settingsKey = CmsFormatterConfig . getSettingsKeyForContainer ( containerName ) ; if ( ( element . getFormatterId ( ) != null ) && ! element . getFormatterId ( ) . isNullUUID ( ) ) { if ( ! element . getSettings ( ) . containsKey ( settingsKey ) || element . getSettings ( ) . get ( settingsKey ) . startsWith ( CmsFormatterConfig . SCHEMA_FORMATTER_ID ) ) { for ( I_CmsFormatterBean formatter : adeConfig . getFormatters ( cms , element . getResource ( ) ) . getAllMatchingFormatters ( containerType , containerWidth ) ) { if ( element . getFormatterId ( ) . equals ( formatter . getJspStructureId ( ) ) ) { String formatterConfigId = formatter . getId ( ) ; if ( formatterConfigId == null ) { formatterConfigId = CmsFormatterConfig . SCHEMA_FORMATTER_ID + element . getFormatterId ( ) . toString ( ) ; } formatterBean = formatter ; break ; } } } else { String formatterConfigId = element . getSettings ( ) . get ( settingsKey ) ; if ( CmsUUID . isValidUUID ( formatterConfigId ) ) { formatterBean = OpenCms . getADEManager ( ) . getCachedFormatters ( cms . getRequestContext ( ) . getCurrentProject ( ) . isOnlineProject ( ) ) . getFormatters ( ) . get ( new CmsUUID ( formatterConfigId ) ) ; } } } else { if ( element . getSettings ( ) . containsKey ( settingsKey ) ) { String formatterConfigId = element . getSettings ( ) . get ( settingsKey ) ; if ( CmsUUID . isValidUUID ( formatterConfigId ) ) { formatterBean = OpenCms . getADEManager ( ) . getCachedFormatters ( cms . getRequestContext ( ) . getCurrentProject ( ) . isOnlineProject ( ) ) . getFormatters ( ) . get ( new CmsUUID ( formatterConfigId ) ) ; } } if ( formatterBean == null ) { formatterBean = adeConfig . getFormatters ( cms , element . getResource ( ) ) . getDefaultFormatter ( containerType , containerWidth ) ; } } return formatterBean ;
public class MvpBasePresenter { /** * { @ inheritDoc } */ @ Override public void detachView ( ) { } }
detachView ( true ) ; if ( viewRef != null ) { viewRef . clear ( ) ; viewRef = null ; }
public class NagiosWriter { /** * Initializes the logger . This is called when we need to create a new * logger for the given file name . * @ param fileStr * @ return a new Logger instance for the given fileStr * @ throws IOException */ protected Logger initLogger ( String fileStr ) throws IOException { } }
String loggerName = "NagiosWriter" + this . hashCode ( ) ; final PatternLayoutEncoder encoder = new PatternLayoutEncoder ( ) ; encoder . setContext ( loggerContext ) ; encoder . setPattern ( LOG_PATTERN ) ; encoder . start ( ) ; final FileAppender appender = new FileAppender ( ) ; appender . setContext ( loggerContext ) ; appender . setName ( loggerName + "File" ) ; appender . setAppend ( true ) ; appender . setBufferSize ( new FileSize ( LOG_IO_BUFFER_SIZE_BYTES ) ) ; appender . setFile ( fileStr ) ; appender . setEncoder ( encoder ) ; appender . start ( ) ; Logger logger = loggerContext . getLogger ( loggerName ) ; logger . addAppender ( appender ) ; logger . setLevel ( Level . INFO ) ; logger . setAdditive ( false ) ; return logger ;
public class ContentPump { /** * Set class loader for current thread and for Confifguration based on * Hadoop home . * @ param hdConfDir Hadoop home directory * @ param conf Hadoop configuration * @ throws MalformedURLException */ private static void setClassLoader ( File hdConfDir , Configuration conf ) throws Exception { } }
ClassLoader parent = conf . getClassLoader ( ) ; URL url = hdConfDir . toURI ( ) . toURL ( ) ; URL [ ] urls = new URL [ 1 ] ; urls [ 0 ] = url ; ClassLoader classLoader = new URLClassLoader ( urls , parent ) ; Thread . currentThread ( ) . setContextClassLoader ( classLoader ) ; conf . setClassLoader ( classLoader ) ;
public class Tree { /** * Removes the sub - node at the specified path . Returns the node that was * removed . Samples : < br > * < br > * Tree removed = node . remove ( " path . to . node " ) ; < br > * or < br > * Tree removed = node . remove ( " array [ 3 ] . subarray [ 2 ] " ) ; * @ param path * path to a sub - node node * @ return removed child , or null */ public Tree remove ( String path ) { } }
Tree child = getChild ( path , false ) ; if ( child != null ) { child . remove ( ) ; } return child ;
public class JSettingsPanel { /** * GEN - LAST : event _ jComboBoxChartTypeActionPerformed */ private void jCheckBoxDrawMarkersActionPerformed ( java . awt . event . ActionEvent evt ) { } }
// GEN - FIRST : event _ jCheckBoxDrawMarkersActionPerformed if ( jCheckBoxDrawMarkers . isSelected ( ) ) parent . getGraphPanelChart ( ) . getChartSettings ( ) . setChartMarkers ( ChartSettings . CHART_MARKERS_YES ) ; else parent . getGraphPanelChart ( ) . getChartSettings ( ) . setChartMarkers ( ChartSettings . CHART_MARKERS_NO ) ; refreshGraphPreview ( ) ;
public class ColorUtilities { /** * Convert hex color to Color . * @ return the Color object . */ public static Color fromHex ( String hex ) { } }
if ( hex . startsWith ( "#" ) ) { hex = hex . substring ( 1 ) ; } int length = hex . length ( ) ; int total = 6 ; if ( length < total ) { // we have a shortened version String token = hex ; int tokenLength = token . length ( ) ; for ( int i = 0 ; i < total ; i = i + tokenLength ) { hex += token ; } } int index = 0 ; String r = hex . substring ( index , index + 2 ) ; String g = hex . substring ( index + 2 , index + 4 ) ; String b = hex . substring ( index + 4 , index + total ) ; return new Color ( Integer . valueOf ( r , 16 ) , Integer . valueOf ( g , 16 ) , Integer . valueOf ( b , 16 ) ) ;
public class Parcel {
  /**
   * Lists all parcels, delegating to the two-argument overload with no
   * per-call API key override.
   *
   * @param params query parameters for the list call
   * @return the collection of parcels
   * @throws EasyPostException if the API request fails
   */
  public static ParcelCollection all(Map<String, Object> params) throws EasyPostException {
    return all(params, null);
  }
}
public class ProducerUtility { /** * Filters producers by decorator and returns it . * @ param decoratorName Decorator name * @ param producers { @ link List } of { @ link ProducerAO } to filter * @ param graphData Graph data */ public static ProducerDecoratorBean filterProducersByDecoratorName ( String decoratorName , List < ProducerAO > producers , Map < String , GraphDataBean > graphData ) { } }
ProducerDecoratorBean result = new ProducerDecoratorBean ( ) ; result . setName ( decoratorName ) ; List < ProducerAO > decoratedProducers = new ArrayList < > ( ) ; for ( ProducerAO producer : producers ) { IDecorator decorator = findOrCreateDecorator ( producer ) ; if ( decorator . getName ( ) . equals ( decoratorName ) ) { result . setCaptions ( decorator . getCaptions ( ) ) ; decoratedProducers . add ( producer ) ; // Filling graph data for ( StatLineAO statLine : producer . getLines ( ) ) { if ( ! "cumulated" . equals ( statLine . getStatName ( ) ) ) { for ( StatValueAO statValue : statLine . getValues ( ) ) { final String graphKey = decorator . getName ( ) + '_' + statValue . getName ( ) ; final GraphDataValueBean value = new GraphDataValueBean ( producer . getProducerId ( ) + '.' + statLine . getStatName ( ) , statValue . getRawValue ( ) ) ; GraphDataBean graphDataBean = graphData . get ( graphKey ) ; if ( graphDataBean == null ) { graphDataBean = new GraphDataBean ( decorator . getName ( ) + '_' + statValue . getJsVariableName ( ) , statValue . getName ( ) ) ; } graphDataBean . addValue ( value ) ; graphData . put ( graphKey , graphDataBean ) ; } } } } } result . setProducerBeans ( decoratedProducers ) ; return result ;
public class SailthruClient { /** * Synchronized singleton instance method using default URL string * @ param apiKey Sailthru API key string * @ param apiSecret Sailthru API secret string * @ return singleton instance of SailthruClient * @ deprecated */ public static synchronized SailthruClient getInstance ( String apiKey , String apiSecret ) { } }
if ( _instance == null ) { _instance = new SailthruClient ( apiKey , apiSecret , DEFAULT_API_URL ) ; } return _instance ;
public class SessionUtil { /** * Perform important client side validation : * validate both token url and sso url contains same prefix * ( protocol + host + port ) as the given authenticator url . * Explanation : * This provides a way for the user to ' authenticate ' the IDP it is * sending his / her credentials to . Without such a check , the user could * be coerced to provide credentials to an IDP impersonator . * @ param loginInput * @ param tokenUrl * @ param ssoUrl * @ throws SnowflakeSQLException */ private static void federatedFlowStep2 ( LoginInput loginInput , String tokenUrl , String ssoUrl ) throws SnowflakeSQLException { } }
try { if ( ! isPrefixEqual ( loginInput . getAuthenticator ( ) , tokenUrl ) || ! isPrefixEqual ( loginInput . getAuthenticator ( ) , ssoUrl ) ) { logger . debug ( "The specified authenticator {} is not supported." , loginInput . getAuthenticator ( ) ) ; throw new SnowflakeSQLException ( SqlState . SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION , ErrorCode . IDP_CONNECTION_ERROR . getMessageCode ( ) ) ; } } catch ( MalformedURLException ex ) { handleFederatedFlowError ( loginInput , ex ) ; }
public class MOEADD { /** * Calculate the perpendicular distance between the solution and reference line */ public double calculateDistance ( S individual , double [ ] lambda , double [ ] z_ , double [ ] nz_ ) { } }
double scale ; double distance ; double [ ] vecInd = new double [ problem . getNumberOfObjectives ( ) ] ; double [ ] vecProj = new double [ problem . getNumberOfObjectives ( ) ] ; // normalize the weight vector ( line segment ) double nd = norm_vector ( lambda ) ; for ( int i = 0 ; i < problem . getNumberOfObjectives ( ) ; i ++ ) { lambda [ i ] = lambda [ i ] / nd ; } // vecInd has been normalized to the range [ 0,1] for ( int i = 0 ; i < problem . getNumberOfObjectives ( ) ; i ++ ) { vecInd [ i ] = ( individual . getObjective ( i ) - z_ [ i ] ) / ( nz_ [ i ] - z_ [ i ] ) ; } scale = innerproduct ( vecInd , lambda ) ; for ( int i = 0 ; i < problem . getNumberOfObjectives ( ) ; i ++ ) { vecProj [ i ] = vecInd [ i ] - scale * lambda [ i ] ; } distance = norm_vector ( vecProj ) ; return distance ;
public class Async {
  /**
   * Convert a synchronous action call into an asynchronous function call
   * through an Observable.
   * <img width="640" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/toAsync.ans.png" alt="">
   *
   * @param <T1> the first parameter type
   * @param <T2> the second parameter type
   * @param <T3> the third parameter type
   * @param <T4> the fourth parameter type
   * @param <T5> the fifth parameter type
   * @param <T6> the sixth parameter type
   * @param action the action to convert
   * @param scheduler the Scheduler used to execute the {@code action}
   * @return a function that returns an Observable that executes the {@code action} and emits {@code null}
   * @see <a href="https://github.com/ReactiveX/RxJava/wiki/Async-Operators#wiki-toasync-or-asyncaction-or-asyncfunc">RxJava Wiki: toAsync()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh212138.aspx">MSDN: Observable.ToAsync</a>
   */
  public static <T1, T2, T3, T4, T5, T6> Func6<T1, T2, T3, T4, T5, T6, Observable<Void>> toAsync(
      final Action6<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6> action,
      final Scheduler scheduler) {
    // Adapt the void action into a function and reuse the Func6 overload.
    return toAsync(Actions.toFunc(action), scheduler);
  }
}
public class MicroHessianInput { /** * Reads a long * < pre > * L b64 b56 b48 b40 b32 b24 b16 b8 * < / pre > */ public long readLong ( ) throws IOException { } }
int tag = is . read ( ) ; if ( tag != 'L' ) throw protocolException ( "expected long" ) ; long b64 = is . read ( ) ; long b56 = is . read ( ) ; long b48 = is . read ( ) ; long b40 = is . read ( ) ; long b32 = is . read ( ) ; long b24 = is . read ( ) ; long b16 = is . read ( ) ; long b8 = is . read ( ) ; return ( ( b64 << 56 ) + ( b56 << 48 ) + ( b48 << 40 ) + ( b40 << 32 ) + ( b32 << 24 ) + ( b24 << 16 ) + ( b16 << 8 ) + b8 ) ;
public class CreateFrameRecipe { /** * This function will be called by the REST API handler to initiate making * of the recipe . It returns a { @ link Job } which will hold the created frame * once it is finished . */ public Job < Frame > exec ( ) { } }
fillMissingParameters ( ) ; Job < Frame > job = new Job < > ( dest , Frame . class . getName ( ) , "CreateFrame:original" ) ; CreateFrameExecutor cfe = new CreateFrameExecutor ( job ) ; checkParametersValidity ( ) ; buildRecipe ( cfe ) ; checkParametersValidity2 ( cfe ) ; return job . start ( cfe , cfe . workAmount ( ) ) ;
public class CmsCategoryField { /** * Adds children item to the category tree and select the categories . < p > * @ param parent the parent item * @ param children the list of children * @ param selectedCategories the list of categories to select */ private void addChildren ( CmsTreeItem parent , List < CmsCategoryTreeEntry > children , Collection < String > selectedCategories ) { } }
if ( children != null ) { for ( CmsCategoryTreeEntry child : children ) { // set the category tree item and add to parent tree item CmsTreeItem treeItem ; boolean isPartofPath = false ; Iterator < String > it = selectedCategories . iterator ( ) ; while ( it . hasNext ( ) ) { String path = it . next ( ) ; if ( path . contains ( child . getPath ( ) ) ) { isPartofPath = true ; } } if ( isPartofPath ) { m_singleSidePath = child . getSitePath ( ) ; m_valuesSet ++ ; if ( m_selectParent || ! hasSelectedChildren ( child . getChildren ( ) , selectedCategories ) ) { m_allSidePath . add ( child . getSitePath ( ) ) ; treeItem = buildTreeItem ( child , selectedCategories , false ) ; } else { treeItem = buildTreeItem ( child , selectedCategories , true ) ; } addChildren ( treeItem , child . getChildren ( ) , selectedCategories ) ; parent . addChild ( treeItem ) ; } } }
public class Distribution { /** * Creates a { @ link Distribution } . * @ param count the count of the population values . * @ param sum the sum of the population values . * @ param sumOfSquaredDeviations the sum of squared deviations of the population values . * @ param bucketOptions the bucket options used to create a histogram for the distribution . * @ param buckets { @ link Bucket } s of a histogram . * @ return a { @ code Distribution } . * @ since 0.17 */ public static Distribution create ( long count , double sum , double sumOfSquaredDeviations , BucketOptions bucketOptions , List < Bucket > buckets ) { } }
Utils . checkArgument ( count >= 0 , "count should be non-negative." ) ; Utils . checkArgument ( sumOfSquaredDeviations >= 0 , "sum of squared deviations should be non-negative." ) ; if ( count == 0 ) { Utils . checkArgument ( sum == 0 , "sum should be 0 if count is 0." ) ; Utils . checkArgument ( sumOfSquaredDeviations == 0 , "sum of squared deviations should be 0 if count is 0." ) ; } Utils . checkNotNull ( bucketOptions , "bucketOptions" ) ; List < Bucket > bucketsCopy = Collections . unmodifiableList ( new ArrayList < Bucket > ( Utils . checkNotNull ( buckets , "buckets" ) ) ) ; Utils . checkListElementNotNull ( bucketsCopy , "bucket" ) ; return new AutoValue_Distribution ( count , sum , sumOfSquaredDeviations , bucketOptions , bucketsCopy ) ;
public class AxisShufflingVertexReader { /** * Returns the value of a given axis . * @ param x Input x value . * @ param y Input y value . * @ param z Input z value . * @ param axis Axis to return . * @ return The value of a given axis . */ private double getValue ( final double x , final double y , final double z , final Axis axis ) { } }
switch ( axis ) { case X : return x ; case X_INVERTED : return - x ; case Y : return y ; case Y_INVERTED : return - y ; case Z : return z ; case Z_INVERTED : return - z ; default : throw new IllegalArgumentException ( "Unsupported axis." ) ; }
public class CommitLogSegmentManager { /** * Differs from the above because it can work on any file instead of just existing * commit log segments managed by this manager . * @ param file segment file that is no longer in use . */ void recycleSegment ( final File file ) { } }
if ( isCapExceeded ( ) || CommitLogDescriptor . fromFileName ( file . getName ( ) ) . getMessagingVersion ( ) != MessagingService . current_version ) { // ( don ' t decrease managed size , since this was never a " live " segment ) logger . debug ( "(Unopened) segment {} is no longer needed and will be deleted now" , file ) ; FileUtils . deleteWithConfirm ( file ) ; return ; } logger . debug ( "Recycling {}" , file ) ; // this wasn ' t previously a live segment , so add it to the managed size when we make it live size . addAndGet ( DatabaseDescriptor . getCommitLogSegmentSize ( ) ) ; segmentManagementTasks . add ( new Callable < CommitLogSegment > ( ) { public CommitLogSegment call ( ) { return new CommitLogSegment ( file . getPath ( ) ) ; } } ) ;
public class BodyAndHeaderParser {
    /**
     * Returns a read-only view of the remaining bytes of {@code in} and marks
     * the input as fully consumed by advancing its position to its limit.
     * The read-only view also hides any backing array from callers.
     */
    private ByteBuffer submitBuffer(ByteBuffer in) {
        final ByteBuffer view = in.asReadOnlyBuffer();
        // Consume the input so the caller cannot re-read the handed-off bytes.
        in.position(in.limit());
        return view;
    }
}
public class CFMLWriterWSPref { /** * prints the characters from the buffer and resets it * TODO : make sure that printBuffer ( ) is called at the end of the stream in case we have some * characters there ! ( flush ( ) ? ) */ synchronized void printBuffer ( ) throws IOException { } }
// TODO : is synchronized really needed here ? int len = sb . length ( ) ; if ( len > 0 ) { char [ ] chars = new char [ len ] ; sb . getChars ( 0 , len , chars , 0 ) ; sb . setLength ( 0 ) ; super . write ( chars , 0 , chars . length ) ; }
public class LuceneIndex { /** * Deletes all the { @ link Document } s . */ public void truncate ( ) { } }
Log . info ( "Truncating index" ) ; try { indexWriter . deleteAll ( ) ; } catch ( IOException e ) { Log . error ( e , "Error while truncating index" ) ; throw new RuntimeException ( e ) ; }
public class OnJqueryFunction { /** * creates a new { @ link OnJqueryFunction } instance * @ param selector The CSS selector for event delegation * @ return new { @ link OnJqueryFunction } instance */ public static OnJqueryFunction on ( final Attr events , final Attr selector , final Attr data , JavaScriptInlineFunction handler ) { } }
return new OnJqueryFunction ( events , selector , data , handler ) ;
public class AppServicePlansInner { /** * Restart all apps in an App Service plan . * Restart all apps in an App Service plan . * @ param resourceGroupName Name of the resource group to which the resource belongs . * @ param name Name of the App Service plan . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceResponse } object if successful . */ public Observable < Void > restartWebAppsAsync ( String resourceGroupName , String name ) { } }
return restartWebAppsWithServiceResponseAsync ( resourceGroupName , name ) . map ( new Func1 < ServiceResponse < Void > , Void > ( ) { @ Override public Void call ( ServiceResponse < Void > response ) { return response . body ( ) ; } } ) ;
public class ObjectExtensions { /** * The binary < code > + < / code > operator that concatenates two strings . * @ param a * an { @ link Object } . * @ param b * a { @ link String } . * @ return < code > a + b < / code > * @ since 2.3 */ @ Pure /* not guaranteed pure , since toString ( ) is invoked on the argument a */ @ Inline ( "($1 + $2)" ) public static String operator_plus ( Object a , String b ) { } }
return a + b ;
public class SourceNode { /** * Like ` String . prototype . join ` except for SourceNodes . Inserts ` aStr ` between each of ` this . children ` . * @ param aSep * The separator . */ public void join ( String aSep ) { } }
List < Object > newChildren ; int i ; int len = this . children . size ( ) ; if ( len > 0 ) { newChildren = new ArrayList < > ( ) ; for ( i = 0 ; i < len - 1 ; i ++ ) { newChildren . add ( this . children . get ( i ) ) ; newChildren . add ( aSep ) ; } newChildren . add ( this . children . get ( i ) ) ; this . children = newChildren ; }
public class PorterStemmer { /** * / * doublec ( j ) is true < = > j , ( j - 1 ) contain a double consonant . */ private final boolean doublec ( int j ) { } }
if ( j < 1 ) return false ; if ( sb . charAt ( j ) != sb . charAt ( j - 1 ) ) return false ; return cons ( j ) ;
public class WebSocket { /** Sends a message to this endpoint itself ; the message type is Object [ ] . * @ param mapconvable whether to apply convertMapTo * @ param message must not be null ; only a String , byte [ ] , a convertible JavaBean object , or Object [ ] * @ param last whether this is the final chunk * @ return 0 on success , a non-zero error code otherwise */ private CompletableFuture < Integer > send ( boolean mapconvable , Object message , boolean last ) { } }
/* NOTE(review): this block appears truncated in this view (unbalanced braces): only the branch handling a CompletableFuture message is visible. */ if ( message instanceof CompletableFuture ) { return ( ( CompletableFuture ) message ) . thenCompose ( ( json ) -> { if ( json == null || json instanceof CharSequence || json instanceof byte [ ] ) { return sendPacket ( new WebSocketPacket ( ( Serializable ) json , last ) ) ; } else if ( message instanceof WebSocketPacket ) /* NOTE(review): this tests the outer `message`, which is known here to be a CompletableFuture, so this branch can never be taken — presumably `json instanceof WebSocketPacket` was intended; confirm and fix */ { return sendPacket ( ( WebSocketPacket ) message ) ; } else { return sendPacket ( new WebSocketPacket ( getSendConvert ( ) , mapconvable , json , last ) ) ; }
public class OWLAnnotationImpl { /** * Determines if this annotation is an annotation used to deprecate an IRI . * This is the case if the annotation property has an IRI of * { @ code owl : deprecated } and the value of the annotation is * { @ code " true " ^ ^ xsd : boolean } * @ return { @ code true } if this annotation is an annotation that can be used * to deprecate an IRI , otherwise { @ code false } . */ @ Override public boolean isDeprecatedIRIAnnotation ( ) { } }
return property . isDeprecated ( ) && value instanceof OWLLiteral && ( ( OWLLiteral ) value ) . isBoolean ( ) && ( ( OWLLiteral ) value ) . parseBoolean ( ) ;
public class Transport { /** * Connect the transport * @ param timeout * @ return whether the transport was connected * @ throws TransportException */ public synchronized boolean connect ( long timeout ) throws TransportException { } }
int st = this . state ; try { switch ( st ) { case 0 : break ; case 1 : // already connecting this . thread . wait ( timeout ) ; /* wait for doConnect */ st = this . state ; switch ( st ) { case 1 : /* doConnect never returned */ this . state = 6 ; cleanupThread ( timeout ) ; throw new ConnectionTimeoutException ( "Connection timeout" ) ; case 2 : if ( this . te != null ) { /* doConnect throw Exception */ this . state = 4 ; /* error */ cleanupThread ( timeout ) ; throw this . te ; } this . state = 3 ; /* Success ! */ return true ; } break ; case 3 : return true ; // already connected case 4 : this . state = 6 ; throw new TransportException ( "Connection in error" , this . te ) ; case 5 : case 6 : log . debug ( "Trying to connect a disconnected transport" ) ; return false ; default : TransportException tex = new TransportException ( "Invalid state: " + st ) ; throw tex ; } if ( log . isDebugEnabled ( ) ) { log . debug ( "Connecting " + this . name ) ; } this . state = 1 ; this . te = null ; Thread t = new Thread ( this , this . name ) ; t . setDaemon ( true ) ; this . thread = t ; synchronized ( this . thread ) { t . start ( ) ; t . wait ( timeout ) ; /* wait for doConnect */ st = this . state ; switch ( st ) { case 1 : /* doConnect never returned */ this . state = 6 ; throw new ConnectionTimeoutException ( "Connection timeout" ) ; case 2 : if ( this . te != null ) { /* doConnect throw Exception */ this . state = 4 ; /* error */ throw this . te ; } this . state = 3 ; /* Success ! */ return true ; case 3 : return true ; default : return false ; } } } catch ( ConnectionTimeoutException e ) { cleanupThread ( timeout ) ; // allow to retry the connection this . state = 0 ; throw e ; } catch ( InterruptedException ie ) { this . state = 6 ; cleanupThread ( timeout ) ; throw new TransportException ( ie ) ; } catch ( TransportException e ) { cleanupThread ( timeout ) ; throw e ; } finally { /* * This guarantees that we leave in a valid state */ st = this . 
state ; if ( st != 0 && st != 3 && st != 4 && st != 5 && st != 6 ) { log . error ( "Invalid state: " + st ) ; this . state = 6 ; cleanupThread ( timeout ) ; } }
public class Transform3D { /** * Multiply this matrix by the tuple t and place the result back into the * tuple ( t = this * t ) . * @ param t * the tuple to be multiplied by this matrix and then replaced */ public void transform ( Tuple3D < ? > t ) { } }
double x , y , z ; x = this . m00 * t . getX ( ) + this . m01 * t . getY ( ) + this . m02 * t . getZ ( ) + this . m03 ; y = this . m10 * t . getX ( ) + this . m11 * t . getY ( ) + this . m12 * t . getZ ( ) + this . m13 ; z = this . m20 * t . getX ( ) + this . m21 * t . getY ( ) + this . m22 * t . getZ ( ) + this . m23 ; t . set ( x , y , z ) ;
public class Grid { /** * Method declaration * @ param i */ private void calcAutoWidth ( int i ) { } }
int w = 10 ; w = Math . max ( w , fMetrics . stringWidth ( sColHead [ i ] ) ) ; for ( int j = 0 ; j < iRowCount ; j ++ ) { String [ ] s = ( String [ ] ) ( vData . elementAt ( j ) ) ; w = Math . max ( w , fMetrics . stringWidth ( s [ i ] ) ) ; } iColWidth [ i ] = w + 6 ;
public class TraceMethod { /** * Stops the elapsed ( cpu ) time since creation of this TraceMethod instance . */ public void stopTime ( ) { } }
this . elapsedTime = System . currentTimeMillis ( ) - this . startTime ; this . elapsedCpuTime = ( ManagementFactory . getThreadMXBean ( ) . getCurrentThreadCpuTime ( ) - this . startCpuTime ) / 1000000 ;
public class HString { /** * Finds all matches to the given token regular expression in this HString . * @ param regex the token regex to match * @ return Stream of matches */ public Stream < HString > findAllPatterns ( @ NonNull TokenRegex regex ) { } }
return Streams . asStream ( new Iterator < HString > ( ) { TokenMatcher m = regex . matcher ( HString . this ) ; HString nextMatch = null ; private boolean advance ( ) { if ( nextMatch == null && m . find ( ) ) { nextMatch = m . group ( ) ; } return nextMatch != null ; } @ Override public boolean hasNext ( ) { return advance ( ) ; } @ Override public HString next ( ) { if ( ! advance ( ) ) { throw new NoSuchElementException ( ) ; } HString toReturn = nextMatch ; nextMatch = null ; return toReturn ; } } ) ;
public class VisLmlSyntax { /** * InputValidator implementations ' attributes . */ protected void registerValidatorAttributes ( ) { } }
// CustomValidator : addAttributeProcessor ( new CustomValidatorLmlAttribute ( ) , "validator" , "validate" , "method" , "action" , "check" ) ; // FormInputValidator : addAttributeProcessor ( new ErrorMessageLmlAttribute ( ) , "error" , "errorMsg" , "errorMessage" , "formError" ) ; addAttributeProcessor ( new HideOnEmptyInputLmlAttribute ( ) , "hideOnEmpty" , "hideErrorOnEmpty" ) ; // GreaterThanValidator : addAttributeProcessor ( new GreaterOrEqualLmlAttribute ( ) , "orEqual" , "allowEqual" , "greaterOrEqual" ) ; addAttributeProcessor ( new GreaterThanLmlAttribute ( ) , "value" , "min" , "greaterThan" ) ; // LesserThanValidator : addAttributeProcessor ( new LesserOrEqualLmlAttribute ( ) , "orEqual" , "allowEqual" , "lesserOrEqual" ) ; addAttributeProcessor ( new LesserThanLmlAttribute ( ) , "value" , "max" , "lesserThan" ) ; // FormValidator ( VisFormTable ) : addAttributeProcessor ( new FormSuccessMessageLmlAttribute ( ) , "success" , "successMsg" , "successMessage" ) ; addAttributeProcessor ( new TreatDisabledFieldsAsValidLmlAttribute ( ) , "treatDisabledFieldsAsValid" , "disabledValid" ) ; // FormValidator children : addAttributeProcessor ( new DisableOnFormErrorLmlAttribute ( ) , "disableOnError" , "disableOnFormError" , "formDisable" ) ; addAttributeProcessor ( new ErrorMessageLabelLmlAttribute ( ) , "errorMessage" , "errorLabel" , "errorMsgLabel" , "errorMessageLabel" ) ; addAttributeProcessor ( new RequireCheckedLmlAttribute ( ) , "requireChecked" , "formChecked" , "notCheckedError" , "uncheckedError" ) ; addAttributeProcessor ( new RequireUncheckedLmlAttribute ( ) , "requireUnchecked" , "requireNotChecked" , "formUnchecked" , "checkedError" ) ;
public class LevelRangeFilter { /** * Return the decision of this filter . */ public int decide ( LoggingEvent event ) { } }
if ( this . levelMin != null && event . getLevel ( ) . isGreaterOrEqual ( levelMin ) == false ) { // level of event is less than minimum return Filter . DENY ; } if ( this . levelMax != null && event . getLevel ( ) . toInt ( ) > levelMax . toInt ( ) ) { // level of event is greater than maximum // Alas , there is no Level . isGreater method . and using // a combo of isGreaterOrEqual & & ! Equal seems worse than // checking the int values of the level objects . . return Filter . DENY ; } if ( acceptOnMatch ) { // this filter set up to bypass later filters and always return // accept if level in range return Filter . ACCEPT ; } else { // event is ok for this filter ; allow later filters to have a look . . return Filter . NEUTRAL ; }
public class ServiceBasedAppLauncher { /** * Starts the { @ link ApplicationLauncher } by starting all associated services . This method also adds a shutdown hook * that invokes { @ link # stop ( ) } and the { @ link # close ( ) } methods . So { @ link # stop ( ) } and { @ link # close ( ) } need not be * called explicitly ; they can be triggered during the JVM shutdown . */ @ Override public synchronized void start ( ) { } }
if ( this . hasStarted ) { LOG . warn ( "ApplicationLauncher has already started" ) ; return ; } this . hasStarted = true ; this . serviceManager = new ServiceManager ( this . services ) ; // A listener that shutdowns the application if any service fails . this . serviceManager . addListener ( new ServiceManager . Listener ( ) { @ Override public void failure ( Service service ) { super . failure ( service ) ; LOG . error ( String . format ( "Service %s has failed." , service . getClass ( ) . getSimpleName ( ) ) , service . failureCause ( ) ) ; try { service . stopAsync ( ) ; ServiceBasedAppLauncher . this . stop ( ) ; } catch ( ApplicationException ae ) { LOG . error ( "Could not shutdown services gracefully. This may cause the application to hang." ) ; } } } ) ; Runtime . getRuntime ( ) . addShutdownHook ( new Thread ( ) { @ Override public void run ( ) { try { ServiceBasedAppLauncher . this . stop ( ) ; } catch ( ApplicationException e ) { LOG . error ( "Failed to shutdown application" , e ) ; } finally { try { ServiceBasedAppLauncher . this . close ( ) ; } catch ( IOException e ) { LOG . error ( "Failed to close application" , e ) ; } } } } ) ; LOG . info ( "Starting the Gobblin application and all its associated Services" ) ; // Start the application this . serviceManager . startAsync ( ) . awaitHealthy ( ) ;
public class LongStreamEx { /** * Returns a { @ code LongStreamEx } object which wraps given * { @ link LongStream } . * The supplied stream must not be consumed or closed when this method is * called . No operation must be performed on the supplied stream after it ' s * wrapped . * @ param stream original stream * @ return the wrapped stream * @ since 0.0.8 */ public static LongStreamEx of ( LongStream stream ) { } }
return stream instanceof LongStreamEx ? ( LongStreamEx ) stream : new LongStreamEx ( stream , StreamContext . of ( stream ) ) ;
public class BeanUtils { /** * 设置属性 * @ param bean 对象 * @ param name 属性名 * @ param clazz 设置值的类 * @ param value 属性值 * @ param < T > 和值对应的类型 * @ throws Exception 设值异常 */ public static < T > void setProperty ( Object bean , String name , Class < T > clazz , T value ) throws Exception { } }
Method method = ReflectUtils . getPropertySetterMethod ( bean . getClass ( ) , name , clazz ) ; if ( method . isAccessible ( ) ) { method . invoke ( bean , value ) ; } else { try { method . setAccessible ( true ) ; method . invoke ( bean , value ) ; } finally { method . setAccessible ( false ) ; } }
public class UnsafeRow { /** * Creates an empty UnsafeRow from a byte array with specified numBytes and numFields . * The returned row is invalid until we call copyFrom on it . */ public static UnsafeRow createFromByteArray ( int numBytes , int numFields ) { } }
final UnsafeRow row = new UnsafeRow ( numFields ) ; row . pointTo ( new byte [ numBytes ] , numBytes ) ; return row ;
public class SubmissionUtils { /** * Re - creates the approved document submitted by the client from * the submission document . * @ param submissionDoc Submission document from the submission database * @ return Document submitted by user for update */ static public JSONObject getApprovedDocumentFromSubmission ( JSONObject submissionDoc ) throws Exception { } }
JSONObject submissionInfo = submissionDoc . getJSONObject ( "nunaliit_submission" ) ; // Check if an approved version of the document is available JSONObject doc = submissionInfo . optJSONObject ( "approved_doc" ) ; if ( null != doc ) { JSONObject reserved = submissionInfo . optJSONObject ( "approved_reserved" ) ; return recreateDocumentFromDocAndReserved ( doc , reserved ) ; } else { // Use submission doc = submissionInfo . getJSONObject ( "submitted_doc" ) ; JSONObject reserved = submissionInfo . optJSONObject ( "submitted_reserved" ) ; return recreateDocumentFromDocAndReserved ( doc , reserved ) ; }
public class LoadBasedAutoScalingConfigurationMarshaller { /** * Marshall the given parameter object . */ public void marshall ( LoadBasedAutoScalingConfiguration loadBasedAutoScalingConfiguration , ProtocolMarshaller protocolMarshaller ) { } }
if ( loadBasedAutoScalingConfiguration == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( loadBasedAutoScalingConfiguration . getLayerId ( ) , LAYERID_BINDING ) ; protocolMarshaller . marshall ( loadBasedAutoScalingConfiguration . getEnable ( ) , ENABLE_BINDING ) ; protocolMarshaller . marshall ( loadBasedAutoScalingConfiguration . getUpScaling ( ) , UPSCALING_BINDING ) ; protocolMarshaller . marshall ( loadBasedAutoScalingConfiguration . getDownScaling ( ) , DOWNSCALING_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class BuildFilterProviderData { /** * Builder */ public static < T > BuildFilterProviderData < T > of ( BuildFilterProvider < T > provider , T data ) { } }
return new BuildFilterProviderData < > ( provider , data ) ;
public class StringUtils {
    /**
     * Computes the length of the longest common subsequence of s and t.
     *
     * Despite the historical name, this is the longest run of characters that
     * appear in order (not necessarily contiguously) inside both a and b; both
     * may have other extraneous characters along the way. This is like edit
     * distance but with no substitution, and a higher number means more
     * similar. For example, the result for "abcD" and "aXbc" is 3 ("abc").
     *
     * Improvements over the previous version: uses {@code Math.max} instead of
     * the project helper {@code SloppyMath.max} (identical result), relies on
     * Java's zero-initialization of arrays instead of explicit fill loops, and
     * drops a large block of dead, commented-out debug printing.
     *
     * @param s first string (may be empty)
     * @param t second string (may be empty)
     * @return the length of the longest common subsequence; 0 if either is empty
     */
    public static int longestCommonSubstring(String s, String t) {
        final int n = s.length();
        final int m = t.length();
        if (n == 0 || m == 0) {
            return 0;
        }
        // d[i][j] = LCS length of s[0..i) and t[0..j); row 0 and column 0 stay 0.
        final int[][] d = new int[n + 1][m + 1];
        for (int i = 1; i <= n; i++) {
            final char si = s.charAt(i - 1);
            for (int j = 1; j <= m; j++) {
                if (si == t.charAt(j - 1)) {
                    // Matching chars may extend the diagonal; skipping either char is also allowed.
                    d[i][j] = Math.max(Math.max(d[i - 1][j], d[i][j - 1]), d[i - 1][j - 1] + 1);
                } else {
                    // No substitution: best of skipping a char from s or from t.
                    d[i][j] = Math.max(d[i - 1][j], d[i][j - 1]);
                }
            }
        }
        return d[n][m];
    }
}
public class XlsWorksheet { /** * Returns the sheets from the given Excel XLS file . * @ param file The file with the name of the XLS file * @ return The sheet names from the XLS file * @ throws IOException if the file cannot be opened * @ throws jxl . read . biff . BiffException if there is a format error with the file */ public static String [ ] getXlsWorksheets ( File file ) throws IOException , jxl . read . biff . BiffException { } }
Workbook workbook = Workbook . getWorkbook ( file ) ; String [ ] sheets = workbook . getSheetNames ( ) ; workbook . close ( ) ; return sheets ;
public class Vectors { /** * Creates a plus function that adds given { @ code value } to it ' s argument . * @ param arg a value to be added to function ' s argument * @ return a closure object that does { @ code _ + _ } */ public static VectorFunction asPlusFunction ( final double arg ) { } }
return new VectorFunction ( ) { @ Override public double evaluate ( int i , double value ) { return value + arg ; } } ;
public class ResourceUtils { /** * Determine whether the given URL points to a resource in the file system , * that is , has protocol " file " or " vfs " . * @ param url the URL to check * @ return whether the URL has been identified as a file system URL */ public static boolean isFileURL ( URL url ) { } }
String protocol = url . getProtocol ( ) ; return ( URL_PROTOCOL_FILE . equals ( protocol ) || protocol . startsWith ( URL_PROTOCOL_VFS ) ) ;
public class MkAppTree { /** * Creates a new directory entry representing the specified node . * @ param node the node to be represented by the new entry * @ param routingObjectID the id of the routing object of the node * @ param parentDistance the distance from the routing object of the node to * the routing object of the parent node */ @ Override protected MkAppEntry createNewDirectoryEntry ( MkAppTreeNode < O > node , DBID routingObjectID , double parentDistance ) { } }
return new MkAppDirectoryEntry ( routingObjectID , parentDistance , node . getPageID ( ) , node . coveringRadiusFromEntries ( routingObjectID , this ) , null ) ;
public class DCacheBase { /** * This is a helper method to add pre - invalidation listener to all entries . */ public synchronized boolean addPreInvalidationListener ( PreInvalidationListener listener ) { } }
if ( bEnableListener && listener != null ) { if ( eventSource . getPreInvalidationListenerCount ( ) > 0 && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "addPreInvalidationListener() cacheName=" + this . cacheName + " one already exists. Overwriting old listener." ) ; } eventSource . addListener ( listener ) ; return true ; } return false ;
public class Strings {
    /**
     * Converts a word sequence into a single camel-case word that starts with
     * a lowercase letter. Separators are dropped, the character following a
     * separator is uppercased, and all other characters are lowercased.
     * Repeated separators collapse; leading/trailing separators are dropped
     * (a leading separator uppercases the first letter, matching the original
     * behavior).
     *
     * @param text - a word sequence with the given separator
     * @param separator - a word separator
     * @return a single camel-case word
     */
    public static String toLowerCamelCase(String text, char separator) {
        final StringBuilder out = new StringBuilder(text.length());
        boolean upperNext = false;
        for (int i = 0; i < text.length(); i++) {
            final char c = text.charAt(i);
            if (c == separator) {
                upperNext = true; // consume separator; capitalize the next letter
            } else {
                out.append(upperNext ? Character.toUpperCase(c) : Character.toLowerCase(c));
                upperNext = false;
            }
        }
        return out.toString();
    }
}
public class DiSHPreferenceVectorIndex { /** * Determines the preference vector with the apriori strategy . * @ param relation the database storing the objects * @ param neighborIDs the list of ids of the neighbors in each dimension * @ param msg a string buffer for debug messages * @ return the preference vector */ private long [ ] determinePreferenceVectorByApriori ( Relation < V > relation , ModifiableDBIDs [ ] neighborIDs , StringBuilder msg ) { } }
int dimensionality = neighborIDs . length ; // database for apriori UpdatableDatabase apriori_db = new HashmapDatabase ( ) ; SimpleTypeInformation < ? > bitmeta = VectorFieldTypeInformation . typeRequest ( BitVector . class , dimensionality , dimensionality ) ; for ( DBIDIter it = relation . iterDBIDs ( ) ; it . valid ( ) ; it . advance ( ) ) { long [ ] bits = BitsUtil . zero ( dimensionality ) ; boolean allFalse = true ; for ( int d = 0 ; d < dimensionality ; d ++ ) { if ( neighborIDs [ d ] . contains ( it ) ) { BitsUtil . setI ( bits , d ) ; allFalse = false ; } } if ( ! allFalse ) { SingleObjectBundle oaa = new SingleObjectBundle ( ) ; oaa . append ( bitmeta , new BitVector ( bits , dimensionality ) ) ; apriori_db . insert ( oaa ) ; } } APRIORI apriori = new APRIORI ( minpts ) ; FrequentItemsetsResult aprioriResult = apriori . run ( apriori_db ) ; // result of apriori List < Itemset > frequentItemsets = aprioriResult . getItemsets ( ) ; if ( msg != null ) { msg . append ( "\n Frequent itemsets: " ) . append ( frequentItemsets ) ; } int maxSupport = 0 , maxCardinality = 0 ; long [ ] preferenceVector = BitsUtil . zero ( dimensionality ) ; for ( Itemset itemset : frequentItemsets ) { if ( ( maxCardinality < itemset . length ( ) ) || ( maxCardinality == itemset . length ( ) && maxSupport == itemset . getSupport ( ) ) ) { preferenceVector = Itemset . toBitset ( itemset , BitsUtil . zero ( dimensionality ) ) ; maxCardinality = itemset . length ( ) ; maxSupport = itemset . getSupport ( ) ; } } if ( msg != null ) { msg . append ( "\n preference " ) . append ( BitsUtil . toStringLow ( preferenceVector , dimensionality ) ) . append ( '\n' ) ; LOG . debugFine ( msg . toString ( ) ) ; } return preferenceVector ;
public class ConfigurationImpl { /** * { @ inheritDoc } */ public boolean validOptions ( String options [ ] [ ] , DocErrorReporter reporter ) { } }
boolean helpfile = false ; boolean nohelp = false ; boolean overview = false ; boolean nooverview = false ; boolean splitindex = false ; boolean noindex = false ; boolean hasSystemName = false ; boolean hasBranchName = false ; // check shared options if ( ! generalValidOptions ( options , reporter ) ) { return false ; } // otherwise look at our options for ( int oi = 0 ; oi < options . length ; ++ oi ) { String [ ] os = options [ oi ] ; String opt = os [ 0 ] . toLowerCase ( ) ; if ( opt . equals ( "-helpfile" ) ) { if ( nohelp == true ) { reporter . printError ( getText ( "doclet.Option_conflict" , "-helpfile" , "-nohelp" ) ) ; return false ; } if ( helpfile == true ) { reporter . printError ( getText ( "doclet.Option_reuse" , "-helpfile" ) ) ; return false ; } File help = new File ( os [ 1 ] ) ; if ( ! help . exists ( ) ) { reporter . printError ( getText ( "doclet.File_not_found" , os [ 1 ] ) ) ; return false ; } helpfile = true ; } else if ( opt . equals ( "-nohelp" ) ) { if ( helpfile == true ) { reporter . printError ( getText ( "doclet.Option_conflict" , "-nohelp" , "-helpfile" ) ) ; return false ; } nohelp = true ; } else if ( opt . equals ( "-dubboconfigpath" ) ) { File dubboConfig = new File ( os [ 1 ] ) ; if ( ! dubboConfig . exists ( ) ) { reporter . printError ( getText ( "doclet.File_not_found" , os [ 1 ] ) ) ; return false ; } } else if ( opt . equals ( "-springcontextconfigpath" ) ) { File springContextConfigPath = new File ( os [ 1 ] ) ; if ( ! springContextConfigPath . exists ( ) ) { reporter . printError ( getText ( "doclet.File_not_found" , os [ 1 ] ) ) ; return false ; } } else if ( opt . equals ( "-xdocrootparent" ) ) { try { new URL ( os [ 1 ] ) ; } catch ( MalformedURLException e ) { reporter . printError ( getText ( "doclet.MalformedURL" , os [ 1 ] ) ) ; return false ; } } else if ( opt . equals ( "-overview" ) ) { if ( nooverview == true ) { reporter . 
printError ( getText ( "doclet.Option_conflict" , "-overview" , "-nooverview" ) ) ; return false ; } if ( overview == true ) { reporter . printError ( getText ( "doclet.Option_reuse" , "-overview" ) ) ; return false ; } overview = true ; } else if ( opt . equals ( "-nooverview" ) ) { if ( overview == true ) { reporter . printError ( getText ( "doclet.Option_conflict" , "-nooverview" , "-overview" ) ) ; return false ; } nooverview = true ; } else if ( opt . equals ( "-splitindex" ) ) { if ( noindex == true ) { reporter . printError ( getText ( "doclet.Option_conflict" , "-splitindex" , "-noindex" ) ) ; return false ; } splitindex = true ; } else if ( opt . equals ( "-noindex" ) ) { if ( splitindex == true ) { reporter . printError ( getText ( "doclet.Option_conflict" , "-noindex" , "-splitindex" ) ) ; return false ; } noindex = true ; } else if ( opt . equals ( "-systemname" ) ) { hasSystemName = true ; } else if ( opt . equals ( "-branchname" ) ) { hasBranchName = true ; } else if ( opt . equals ( "-codeurl" ) ) { hasBranchName = true ; } } if ( ! hasBranchName ) { reporter . printError ( "No branchName specified!" ) ; return false ; } if ( ! hasSystemName ) { reporter . printError ( "No systemName specified!" ) ; return false ; } return true ;
public class PropertiesUtil {
    /**
     * Returns a filtered view of the specified properties that first looks for
     * a property with the given prefix, then for the unprefixed property. For
     * example, with prefix "alt" and properties:
     * <pre>
     * alt.texture = sand.png
     * lighting = off
     * </pre>
     * "texture" resolves to "sand.png" and "lighting" to "off".
     *
     * Unlike {@link #getSubProperties}, the returned object references (does
     * not copy) the underlying properties, and only the
     * {@link Properties#getProperty} methods (plus {@code propertyNames}) are
     * guaranteed to work correctly on it.
     */
    public static Properties getFilteredProperties(final Properties source, String prefix) {
        final String dottedPrefix = prefix + ".";
        return new Properties() {
            @Override
            public String getProperty(String key) {
                return getProperty(key, null);
            }

            @Override
            public String getProperty(String key, String defaultValue) {
                // Prefixed value wins; fall back to the unprefixed one, then the default.
                return source.getProperty(dottedPrefix + key, source.getProperty(key, defaultValue));
            }

            @Override
            public Enumeration<?> propertyNames() {
                // Lazily strips the prefix and suppresses duplicates: a prefixed
                // name is only reported (unprefixed) when no bare name shadows it.
                return new Enumeration<Object>() {
                    protected Enumeration<?> sourceNames = source.propertyNames();
                    protected Object next = findNext(); // must init after sourceNames

                    public boolean hasMoreElements() {
                        return next != null;
                    }

                    public Object nextElement() {
                        Object current = next;
                        next = findNext();
                        return current;
                    }

                    protected Object findNext() {
                        while (sourceNames.hasMoreElements()) {
                            String name = (String) sourceNames.nextElement();
                            if (!name.startsWith(dottedPrefix)) {
                                return name;
                            }
                            name = name.substring(dottedPrefix.length());
                            if (!source.containsKey(name)) {
                                return name;
                            }
                        }
                        return null;
                    }
                };
            }
        };
    }
}
public class CompletionStageFactory { /** * Returns a new CompletionStage that is asynchronously completed * by a task running in the given executor with the value obtained * by calling the given Supplier . Subsequent completion stages will * use defaultAsyncExecutor as their default executor . * @ param supplier a function returning the value to be used * to complete the returned CompletionStage * @ param executor the executor to use for asynchronous execution * @ param < U > the function ' s return type * @ return the new CompletionStage */ public final < U > CompletionStage < U > supplyAsync ( Supplier < U > supplier , Executor executor ) { } }
Objects . requireNonNull ( supplier , "supplier must not be null" ) ; return completedStage ( null ) . thenApplyAsync ( ( ignored ) -> supplier . get ( ) , executor ) ;
public class OpusFile {
    /**
     * In Reading mode, will close the underlying ogg
     * file and free its resources.
     * In Writing mode, will write out the Info and
     * Tags objects, and then the audio data.
     */
    public void close() throws IOException {
        // Reading mode: drop the reader and close the underlying Ogg file.
        if (r != null) {
            r = null;
            ogg.close();
            ogg = null;
        }
        // Writing mode: emit the header packets, then every buffered audio packet.
        if (w != null) {
            // Info goes out first, on its own page; tags follow.
            w.bufferPacket(info.write(), true);
            w.bufferPacket(tags.write(), false);
            // The Granule Position on each Ogg Page needs to be
            // the total number of PCM samples, including the last
            // full Opus Packet in the page.
            // See https://wiki.xiph.org/OggOpus#Granule_Position
            final List<OpusAudioData> packets = writtenPackets;
            final int packetsSize = packets.size();
            final int maxPacketsPerPage = this.maxPacketsPerPage;
            OpusAudioData packet;
            int pageSize = 0;        // packets placed on the current page so far
            int pageSamples = 0;     // PCM samples accumulated on the current page
            long lastGranule = 0;    // granule position as of the last flushed page
            boolean doneFlush = false;
            boolean flushAfter = false;
            for (int i = 0; i < packetsSize; i++) {
                packet = packets.get(i);
                flushAfter = false;
                pageSize++;
                // Should we flush before this packet?
                if (maxPacketsPerPage == -1) {
                    // User is handling granule positions
                    // Do we need to flush for them?
                    if (packet.getGranulePosition() >= 0 && lastGranule != packet.getGranulePosition()) {
                        w.flush();
                        lastGranule = packet.getGranulePosition();
                        w.setGranulePosition(lastGranule);
                        doneFlush = true;
                    }
                } else {
                    // We are doing the granule position
                    // Will we need to flush after this packet?
                    if (pageSize >= maxPacketsPerPage) {
                        flushAfter = true;
                    }
                    // Calculate the packet granule
                    pageSamples += packet.getNumberOfSamples();
                    packet.setGranulePosition(lastGranule + pageSamples);
                }
                // Write the data, flushing if needed
                w.bufferPacket(packet.write());
                if (flushAfter || w.getSizePendingFlush() > 16384) {
                    lastGranule = packet.getGranulePosition();
                    w.setGranulePosition(lastGranule);
                    // The very last packet is left for w.close() to flush.
                    if (i != packetsSize - 1) {
                        w.flush();
                        doneFlush = true;
                    }
                }
                if (doneFlush) {
                    // NOTE(review): doneFlush is never reset to false inside the loop, so
                    // once any flush has happened these counters are cleared on every
                    // subsequent iteration, which prevents pageSize from ever reaching
                    // maxPacketsPerPage again — confirm this is the intended paging
                    // behaviour before relying on maxPacketsPerPage.
                    pageSize = 0;
                    pageSamples = 0;
                }
            }
            w.close();
            w = null;
            ogg.close();
            ogg = null;
        }
    }
}
public class BezierCurve { /** * Create second derivative function for fixed control points . * @ param controlPoints * @ return */ private ParameterizedOperator secondDerivative ( double ... controlPoints ) { } }
if ( controlPoints . length != 2 * length ) { throw new IllegalArgumentException ( "control-points length not " + length ) ; } return secondDerivative ( controlPoints , 0 ) ;
public class NetworkConfig {
    /**
     * Returns a new NetworkConfig instance populated from the specified JSON object.
     *
     * @param jsonConfig The JSON object containing the config details
     * @return A populated NetworkConfig instance
     * @throws InvalidArgumentException if {@code jsonConfig} is null
     * @throws NetworkConfigurationException if the configuration is invalid
     */
    private static NetworkConfig load(JsonObject jsonConfig) throws InvalidArgumentException, NetworkConfigurationException {
        // Fail fast on a missing configuration rather than deferring a NPE downstream.
        if (null == jsonConfig) {
            throw new InvalidArgumentException("config must be specified");
        }
        return new NetworkConfig(jsonConfig);
    }
}
public class XParserRegistry { /** * / * ( non - Javadoc ) * @ see org . deckfour . xes . util . XRegistry # areEqual ( java . lang . Object , java . lang . Object ) */ @ Override protected boolean areEqual ( XParser a , XParser b ) { } }
return a . getClass ( ) . equals ( b . getClass ( ) ) ;
public class CodedOutputStream { /** * Write a { @ code uint64 } field , including tag , to the stream . */ public void writeUInt64 ( final int fieldNumber , final long value ) throws IOException { } }
writeTag ( fieldNumber , WireFormat . WIRETYPE_VARINT ) ; writeUInt64NoTag ( value ) ;
public class AbstractHttp2ConnectionHandlerBuilder { /** * Create a new { @ link Http2ConnectionHandler } . */ protected T build ( ) { } }
if ( encoder != null ) { assert decoder != null ; return buildFromCodec ( decoder , encoder ) ; } Http2Connection connection = this . connection ; if ( connection == null ) { connection = new DefaultHttp2Connection ( isServer ( ) , maxReservedStreams ( ) ) ; } return buildFromConnection ( connection ) ;
public class BaseSessionHolder { /** * If this is a session , convert to a proxy session and return , if object , convert and return . * @ param out The return output stream . * @ param strReturn The string to return . */ public void setReturnSessionOrObject ( PrintWriter out , Object objReturn ) { } }
String strID = null ; String strSessionClass = null ; if ( objReturn instanceof RemoteTable ) { strSessionClass = REMOTE_TABLE ; strID = this . add ( new TableHolder ( this , ( RemoteTable ) objReturn ) ) ; } else if ( objReturn instanceof RemoteSession ) { strSessionClass = REMOTE_SESSION ; strID = this . add ( new SessionHolder ( this , ( RemoteSession ) objReturn ) ) ; } else if ( objReturn instanceof RemoteBaseSession ) { strSessionClass = REMOTE_BASE_SESSION ; strID = this . add ( new BaseSessionHolder ( this , ( RemoteBaseSession ) objReturn ) ) ; } if ( strID != null ) this . setReturnString ( out , strSessionClass + CLASS_SEPARATOR + strID ) ; else this . setReturnObject ( out , objReturn ) ;
public class RemovePreferenceHeaderDialogBuilder { /** * Creates and returns a listener , which allows to notify the registered listener , when the user * closes the dialog confirmatively . * @ return The listener , which has been created , as an instance of the type { @ link * OnClickListener } */ private OnClickListener createRemovePreferenceHeaderClickListener ( ) { } }
return new OnClickListener ( ) { @ Override public void onClick ( final DialogInterface dialog , final int which ) { int position = spinner . getSelectedItemPosition ( ) ; listener . onRemovePreferenceHeader ( position ) ; } } ;
public class DescribeAssessmentTemplatesResult { /** * Information about the assessment templates . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setAssessmentTemplates ( java . util . Collection ) } or { @ link # withAssessmentTemplates ( java . util . Collection ) } * if you want to override the existing values . * @ param assessmentTemplates * Information about the assessment templates . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeAssessmentTemplatesResult withAssessmentTemplates ( AssessmentTemplate ... assessmentTemplates ) { } }
if ( this . assessmentTemplates == null ) { setAssessmentTemplates ( new java . util . ArrayList < AssessmentTemplate > ( assessmentTemplates . length ) ) ; } for ( AssessmentTemplate ele : assessmentTemplates ) { this . assessmentTemplates . add ( ele ) ; } return this ;
public class CPAttachmentFileEntryPersistenceImpl { /** * Returns a range of all the cp attachment file entries where classNameId = & # 63 ; and classPK = & # 63 ; and displayDate & lt ; & # 63 ; and status = & # 63 ; . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CPAttachmentFileEntryModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param classNameId the class name ID * @ param classPK the class pk * @ param displayDate the display date * @ param status the status * @ param start the lower bound of the range of cp attachment file entries * @ param end the upper bound of the range of cp attachment file entries ( not inclusive ) * @ return the range of matching cp attachment file entries */ @ Override public List < CPAttachmentFileEntry > findByC_C_LtD_S ( long classNameId , long classPK , Date displayDate , int status , int start , int end ) { } }
return findByC_C_LtD_S ( classNameId , classPK , displayDate , status , start , end , null ) ;
public class Broker { /** * create a broker with given broker info * @ param id broker id * @ param brokerInfoString broker info format : < b > creatorId : host : port : autocreated < / b > * @ return broker instance with connection config * @ see # getZKString ( ) */ public static Broker createBroker ( int id , String brokerInfoString ) { } }
String [ ] brokerInfo = brokerInfoString . split ( ":" ) ; String creator = brokerInfo [ 0 ] . replace ( '#' , ':' ) ; String hostname = brokerInfo [ 1 ] . replace ( '#' , ':' ) ; String port = brokerInfo [ 2 ] ; boolean autocreated = Boolean . valueOf ( brokerInfo . length > 3 ? brokerInfo [ 3 ] : "true" ) ; return new Broker ( id , creator , hostname , Integer . parseInt ( port ) , autocreated ) ;
public class Validator {
    /**
     * Validate an injection point.
     *
     * @param ij the injection point to validate
     * @param beanManager the bean manager
     */
    public void validateInjectionPoint(InjectionPoint ij, BeanManagerImpl beanManager) {
        // Definition errors are checked first, against the bean owning the injection point.
        validateInjectionPointForDefinitionErrors(ij, ij.getBean(), beanManager);
        // InjectionPoint/Bean metadata may only be injected into beans.
        validateMetadataInjectionPoint(ij, ij.getBean(), ValidatorLogger.INJECTION_INTO_NON_BEAN);
        validateEventMetadataInjectionPoint(ij);
        // Deployment problems are reported last, after definition-level checks pass.
        validateInjectionPointForDeploymentProblems(ij, ij.getBean(), beanManager);
    }
}
public class A_CmsSerialDateValue { /** * Set the week of the month the events should occur . * @ param weekOfMonth the week of month to set ( first to fifth , where fifth means last ) . */ public final void setWeekOfMonth ( WeekOfMonth weekOfMonth ) { } }
SortedSet < WeekOfMonth > woms = new TreeSet < > ( ) ; if ( null != weekOfMonth ) { woms . add ( weekOfMonth ) ; } setWeeksOfMonth ( woms ) ;
public class ExecutorTemplate { /** * 调整一下线程池 */ public void adjustPoolSize ( int newPoolSize ) { } }
if ( newPoolSize != poolSize ) { poolSize = newPoolSize ; if ( executor instanceof ThreadPoolExecutor ) { ThreadPoolExecutor pool = ( ThreadPoolExecutor ) executor ; pool . setCorePoolSize ( newPoolSize ) ; pool . setMaximumPoolSize ( newPoolSize ) ; } }
public class ThreadedServer { /** * Set the server InetAddress and port . * @ param address The Address to listen on , or 0.0.0.0 : port for all interfaces . */ public synchronized void setInetAddrPort ( InetAddrPort address ) { } }
if ( _address != null && _address . equals ( address ) ) return ; if ( isStarted ( ) ) log . warn ( this + " is started" ) ; _address = address ;
public class PropertyKey { /** * / * package */ Object extractValue ( Annotation annot ) { } }
try { return getMethod ( ) . invoke ( annot , new Object [ ] { } ) ; } // TODO - - cleanup exception handling , property defining a PropertyException catch ( RuntimeException re ) { throw re ; } catch ( Exception e ) { throw new RuntimeException ( "Unable to extract value for " + _propertyName , e ) ; }
public class BuilderMolecule { /** * method to build a molecule from a Peptide or RNA component * @ param id name of the molecule * @ param validMonomers all valid monomers of the component * @ return generated molecule * @ throws BuilderMoleculeException if the molecule can ' t be built * @ throws ChemistryException if the Chemistry Engine can not be initialized */ private static RgroupStructure buildMoleculefromPeptideOrRNA ( final String id , final List < Monomer > validMonomers ) throws BuilderMoleculeException , ChemistryException { } }
try { String input = null ; AbstractMolecule currentMolecule = null ; input = getInput ( validMonomers . get ( 0 ) ) ; AbstractMolecule prevMolecule = Chemistry . getInstance ( ) . getManipulator ( ) . getMolecule ( input , generateAttachmentList ( validMonomers . get ( 0 ) . getAttachmentList ( ) ) ) ; AbstractMolecule firstMolecule = null ; Monomer prevMonomer = null ; RgroupStructure first = new RgroupStructure ( ) ; RgroupStructure current = new RgroupStructure ( ) ; int prev = 1 ; if ( validMonomers . size ( ) == 0 || validMonomers == null ) { LOG . error ( "Polymer (Peptide/RNA) has no contents" ) ; throw new BuilderMoleculeException ( "Polymer (Peptide/RNA) has no contents" ) ; } int i = 0 ; /* First catch all IAtomBases */ for ( Monomer currentMonomer : validMonomers ) { LOG . debug ( "Monomer " + currentMonomer . getAlternateId ( ) ) ; i ++ ; if ( prevMonomer != null ) { input = getInput ( currentMonomer ) ; currentMolecule = Chemistry . getInstance ( ) . getManipulator ( ) . getMolecule ( input , generateAttachmentList ( currentMonomer . getAttachmentList ( ) ) ) ; current . setMolecule ( currentMolecule ) ; current . setRgroupMap ( generateRgroupMap ( id + ":" + String . valueOf ( i ) , currentMolecule ) ) ; /* Backbone Connection */ if ( currentMonomer . getMonomerType ( ) . equals ( Monomer . BACKBONE_MOMONER_TYPE ) ) { prevMolecule = Chemistry . getInstance ( ) . getManipulator ( ) . merge ( first . getMolecule ( ) , first . getRgroupMap ( ) . get ( id + ":" + prev + ":R2" ) , current . getMolecule ( ) , current . getRgroupMap ( ) . get ( id + ":" + i + ":R1" ) ) ; first . getRgroupMap ( ) . remove ( id + ":" + prev + ":R2" ) ; current . getRgroupMap ( ) . remove ( id + ":" + i + ":R1" ) ; first . setMolecule ( prevMolecule ) ; Map < String , IAtomBase > map = new HashMap < String , IAtomBase > ( ) ; map . putAll ( first . getRgroupMap ( ) ) ; map . putAll ( current . getRgroupMap ( ) ) ; first . 
setRgroupMap ( map ) ; prev = i ; } /* Backbone to Branch Connection */ else if ( currentMonomer . getMonomerType ( ) . equals ( Monomer . BRANCH_MOMONER_TYPE ) ) { prevMolecule = Chemistry . getInstance ( ) . getManipulator ( ) . merge ( first . getMolecule ( ) , first . getRgroupMap ( ) . get ( id + ":" + prev + ":R3" ) , current . getMolecule ( ) , current . getRgroupMap ( ) . get ( id + ":" + i + ":R1" ) ) ; first . getRgroupMap ( ) . remove ( id + ":" + prev + ":R3" ) ; current . getRgroupMap ( ) . remove ( id + ":" + i + ":R1" ) ; first . setMolecule ( prevMolecule ) ; Map < String , IAtomBase > map = new HashMap < String , IAtomBase > ( ) ; map . putAll ( first . getRgroupMap ( ) ) ; map . putAll ( current . getRgroupMap ( ) ) ; first . setRgroupMap ( map ) ; } /* Unknown connection */ else { LOG . error ( "Intra connection is unknown" ) ; throw new BuilderMoleculeException ( "Intra connection is unknown" ) ; } } /* first Monomer ! */ else { prevMonomer = currentMonomer ; input = getInput ( prevMonomer ) ; prevMolecule = Chemistry . getInstance ( ) . getManipulator ( ) . getMolecule ( input , generateAttachmentList ( prevMonomer . getAttachmentList ( ) ) ) ; firstMolecule = prevMolecule ; first . setMolecule ( firstMolecule ) ; first . setRgroupMap ( generateRgroupMap ( id + ":" + String . valueOf ( i ) , firstMolecule ) ) ; } } LOG . debug ( first . getRgroupMap ( ) . keySet ( ) . toString ( ) ) ; return first ; } catch ( IOException | CTKException e ) { LOG . error ( "Polymer(Peptide/RNA) molecule can't be built " + e . getMessage ( ) ) ; throw new BuilderMoleculeException ( "Polymer(Peptide/RNA) molecule can't be built " + e . getMessage ( ) ) ; }
public class FxFlowableTransformers { /** * Performs an action on onComplete with the provided emission count * @ param onComplete * @ param < T > */ public static < T > FlowableTransformer < T , T > doOnCompleteCount ( Consumer < Integer > onComplete ) { } }
return obs -> obs . lift ( new FlowableEmissionCounter < > ( new CountObserver ( null , onComplete , null ) ) ) ;
public class OffsetTime { /** * Obtains an instance of { @ code OffsetTime } from an { @ code Instant } and zone ID . * This creates an offset time with the same instant as that specified . * Finding the offset from UTC / Greenwich is simple as there is only one valid * offset for each instant . * The date component of the instant is dropped during the conversion . * This means that the conversion can never fail due to the instant being * out of the valid range of dates . * @ param instant the instant to create the time from , not null * @ param zone the time - zone , which may be an offset , not null * @ return the offset time , not null */ public static OffsetTime ofInstant ( Instant instant , ZoneId zone ) { } }
Objects . requireNonNull ( instant , "instant" ) ; Objects . requireNonNull ( zone , "zone" ) ; ZoneRules rules = zone . getRules ( ) ; ZoneOffset offset = rules . getOffset ( instant ) ; long localSecond = instant . getEpochSecond ( ) + offset . getTotalSeconds ( ) ; // overflow caught later int secsOfDay = ( int ) Math . floorMod ( localSecond , SECONDS_PER_DAY ) ; LocalTime time = LocalTime . ofNanoOfDay ( secsOfDay * NANOS_PER_SECOND + instant . getNano ( ) ) ; return new OffsetTime ( time , offset ) ;
public class DeploymentInfoHandler { /** * / * ( non - Javadoc ) * @ see org . jboss . as . cli . OperationCommand # buildRequest ( org . jboss . as . cli . CommandContext ) */ @ Override public ModelNode buildRequestWithoutHeaders ( CommandContext ctx ) throws CommandFormatException { } }
final ParsedCommandLine parsedCmd = ctx . getParsedCommandLine ( ) ; ic . deploymentName = null ; ic . serverGroup = null ; String deploymentName = name . getValue ( parsedCmd ) ; if ( name != null ) { ic . deploymentName = deploymentName ; } ic . serverGroup = serverGroup . getValue ( parsedCmd ) ; return ic . buildRequest ( ctx ) ;
public class ImgCompressUtils { /** * 根据指定宽高和压缩质量进行压缩 , 当isForceWh为false时 , 如果指定宽或者高大于源图片则按照源图片大小宽高压缩 , * 当isForceWh为true时 , 不论怎样均按照指定宽高压缩 * @ param srcImg 指定原图片对象 * @ param width 指定压缩宽 * @ param height 指定压缩高 * @ param quality 指定压缩质量 , 范围 [ 0.0,1.0 ] , 如果指定为null则按照默认值 * @ param isForceWh 指定是否强制使用指定宽高进行压缩 , true代表强制 , false反之 */ public static BufferedImage imgCompressByWH ( Image srcImg , int width , int height , Float quality , boolean isForceWh ) { } }
if ( ! isForceWh && ( srcImg . getHeight ( null ) < height || srcImg . getWidth ( null ) < width ) ) { width = srcImg . getWidth ( null ) ; height = srcImg . getHeight ( null ) ; } // 指定目标图片 BufferedImage desImg = new BufferedImage ( width , height , BufferedImage . TYPE_INT_RGB ) ; // 根据源图片绘制目标图片 desImg . getGraphics ( ) . drawImage ( srcImg , 0 , 0 , width , height , null ) ; return ImgCompressUtils . encodeImg ( desImg , quality ) ;
public class HdfsStreamWriter { /** * ファイルに対してテキストを追記し 、 改行する 。 * @ param outputLine 出力行 * @ throws IOException 追記失敗時 */ public void appendLine ( String outputLine ) throws IOException { } }
this . delegateStream . writeChars ( outputLine ) ; this . delegateStream . writeChars ( System . getProperty ( "line.separator" ) ) ; if ( this . isFileSyncEachTime ) { sync ( ) ; }
public class MatchingStrategy { /** * Exact match is best , * then path match with longest URI matched , * finally extension match . */ protected CollectionMatch selectBestCollectionMatch ( CollectionMatch previousMatch , CollectionMatch currentMatch ) { } }
CollectionMatch bestMatch = null ; if ( previousMatch == null ) { bestMatch = currentMatch ; } else if ( previousMatch . isDenyMatchByOmission ( ) ) { bestMatch = previousMatch ; } else if ( currentMatch . isDenyMatchByOmission ( ) ) { bestMatch = currentMatch ; } else if ( previousMatch . isDenyMatch ( ) ) { bestMatch = previousMatch ; } else if ( currentMatch . isDenyMatch ( ) ) { bestMatch = currentMatch ; } else if ( previousMatch . isPermitMatch ( ) ) { bestMatch = previousMatch ; } else if ( currentMatch . isPermitMatch ( ) ) { bestMatch = currentMatch ; } else if ( previousMatch . isExactMatch ( ) ) { bestMatch = previousMatch ; } else if ( currentMatch . isExactMatch ( ) ) { bestMatch = currentMatch ; } else if ( previousMatch . isPathMatch ( ) && previousMatch . getUrlPattern ( ) . length ( ) >= currentMatch . getUrlPattern ( ) . length ( ) ) { bestMatch = previousMatch ; } else { bestMatch = currentMatch ; } return bestMatch ;
public class InstanceClient { /** * Gets the access control policy for a resource . May be empty if no such policy or resource * exists . * < p > Sample code : * < pre > < code > * try ( InstanceClient instanceClient = InstanceClient . create ( ) ) { * ProjectZoneInstanceResourceName resource = ProjectZoneInstanceResourceName . of ( " [ PROJECT ] " , " [ ZONE ] " , " [ RESOURCE ] " ) ; * Policy response = instanceClient . getIamPolicyInstance ( resource . toString ( ) ) ; * < / code > < / pre > * @ param resource Name or id of the resource for this request . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final Policy getIamPolicyInstance ( String resource ) { } }
GetIamPolicyInstanceHttpRequest request = GetIamPolicyInstanceHttpRequest . newBuilder ( ) . setResource ( resource ) . build ( ) ; return getIamPolicyInstance ( request ) ;
public class hqlLexer {
    /**
     * $ANTLR start "ESCAPE"
     *
     * <p>ANTLR-generated lexer rule: matches the literal keyword {@code escape}
     * (grammar hql.g, line 25) and emits an ESCAPE token on the default channel.
     * Do not edit by hand; regenerate from the grammar instead.
     */
    public final void mESCAPE() throws RecognitionException {
        try {
            int _type = ESCAPE;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // hql.g:25:8: ( 'escape' )
            // hql.g:25:10: 'escape'
            {
                match("escape");
                // Backtracking/speculation mode: bail out silently on mismatch.
                if (state.failed) return;
            }
            state.type = _type;
            state.channel = _channel;
        } finally {
            // do for sure before leaving
        }
    }
}
public class HystrixSampleSseServlet { /** * Handle incoming GETs */ @ Override protected void doGet ( HttpServletRequest request , HttpServletResponse response ) throws ServletException , IOException { } }
if ( isDestroyed ) { response . sendError ( 503 , "Service has been shut down." ) ; } else { handleRequest ( request , response ) ; }
public class ServerStatisticsServlet { /** * < pre > * fill the result set with the CPU usage . * Note : As the ' Top ' bash call doesn ' t yield accurate result for the system load , * the implementation has been changed to load from the " proc / loadavg " which keeps * the moving average of the system load , we are pulling the average for the recent 1 min . * < / pre > * @ param stats reference to the result container which contains all the results , this specific * method will only work on the property " cpuUsage " . */ protected void fillCpuUsage ( final ExecutorInfo stats ) { } }
if ( exists_Bash && exists_Cat && exists_LoadAvg ) { try { final ArrayList < String > output = Utils . runProcess ( "/bin/bash" , "-c" , "/bin/cat /proc/loadavg" ) ; // process the output from bash call . if ( output . size ( ) > 0 ) { final String [ ] splitedresult = output . get ( 0 ) . split ( "\\s+" ) ; double cpuUsage = 0.0 ; try { cpuUsage = Double . parseDouble ( splitedresult [ 0 ] ) ; } catch ( final NumberFormatException e ) { logger . error ( "yielding 0.0 for CPU usage as output is invalid -" + output . get ( 0 ) ) ; } logger . info ( "System load : " + cpuUsage ) ; stats . setCpuUpsage ( cpuUsage ) ; } } catch ( final Exception ex ) { logger . error ( "failed fetch system load info " + "as exception is captured when fetching result from bash call. Ex -" + ex . getMessage ( ) ) ; } } else { logger . error ( "failed fetch system load info, one or more files from the following list are missing - " + "'/bin/bash'," + "'/bin/cat'," + "'/proc/loadavg'" ) ; }
public class ValueEnforcer { /** * Check that the passed value is not < code > null < / code > and not equal to the * provided value . * @ param < T > * Type to be checked and returned * @ param aValue * The value to check . May not be < code > null < / code > . * @ param aName * The name of the value ( e . g . the parameter name ) * @ param aUnexpectedValue * The value that may not be equal to aValue . May not be * < code > null < / code > . * @ return The passed value . */ public static < T > T notNullNotEquals ( final T aValue , @ Nonnull final Supplier < ? extends String > aName , @ Nonnull final T aUnexpectedValue ) { } }
notNull ( aValue , aName ) ; notNull ( aUnexpectedValue , "UnexpectedValue" ) ; if ( isEnabled ( ) ) if ( aValue . equals ( aUnexpectedValue ) ) throw new IllegalArgumentException ( "The value of '" + aName . get ( ) + "' may not be equal to " + aUnexpectedValue + "!" ) ; return aValue ;
public class ExpandableRecyclerAdapter { /** * Notify any registered observers that the parent at { @ code parentPosition } has * { @ code itemCount } children starting at { @ code childPositionStart } that have changed . * This is an item change event , not a structural change event . It indicates that any * The parent at { @ code childPositionStart } retains the same identity . * reflection of the set of { @ code itemCount } children starting at { @ code childPositionStart } * are out of date and should be updated . * @ param parentPosition Position of the parent who has a child that has changed * @ param childPositionStart Position of the first child that has changed * @ param itemCount number of children changed */ @ UiThread public void notifyChildRangeChanged ( int parentPosition , int childPositionStart , int itemCount ) { } }
P parent = mParentList . get ( parentPosition ) ; int flatParentPosition = getFlatParentPosition ( parentPosition ) ; ExpandableWrapper < P , C > parentWrapper = mFlatItemList . get ( flatParentPosition ) ; parentWrapper . setParent ( parent ) ; if ( parentWrapper . isExpanded ( ) ) { int flatChildPosition = flatParentPosition + childPositionStart + 1 ; for ( int i = 0 ; i < itemCount ; i ++ ) { ExpandableWrapper < P , C > child = parentWrapper . getWrappedChildList ( ) . get ( childPositionStart + i ) ; mFlatItemList . set ( flatChildPosition + i , child ) ; } notifyItemRangeChanged ( flatChildPosition , itemCount ) ; }