idx int64 0 41.2k | question stringlengths 74 4.04k | target stringlengths 7 750 |
|---|---|---|
30,100 | public void setSigma ( double sigma ) { if ( sigma <= 0 ) throw new IllegalArgumentException ( "Sigma must be a positive constant, not " + sigma ) ; this . sigma = sigma ; this . sigmaSqrd2Inv = 0.5 / ( sigma * sigma ) ; } | Sets the sigma parameter which must be a positive value |
30,101 | public void setMean ( double mean ) { if ( Double . isInfinite ( mean ) || Double . isNaN ( mean ) ) throw new ArithmeticException ( "Mean must be a real number, not " + mean ) ; ( ( Normal ) getDistribution ( ) ) . setMean ( mean ) ; } | Sets the mean value used for the normal distribution |
30,102 | public void setStandardDeviations ( double devs ) { if ( devs <= 0 || Double . isInfinite ( devs ) || Double . isNaN ( devs ) ) throw new ArithmeticException ( "The stnd devs must be a positive value" ) ; ( ( Normal ) getDistribution ( ) ) . setStndDev ( devs ) ; } | Sets the standard deviations used for the normal distribution |
30,103 | protected void registerCurrencies ( ) throws Exception { parseCurrencies ( loadFromFile ( "/org/joda/money/CurrencyData.csv" ) ) ; parseCountries ( loadFromFile ( "/org/joda/money/CountryData.csv" ) ) ; parseCurrencies ( loadFromFiles ( "META-INF/org/joda/money/CurrencyDataExtension.csv" ) ) ; parseCountries ( loadFromFiles ( "META-INF/org/joda/money/CountryDataExtension.csv" ) ) ; } | Registers all the currencies known by this provider . |
30,104 | private void parseCurrencies ( List < String > content ) throws Exception { for ( String line : content ) { Matcher matcher = CURRENCY_REGEX_LINE . matcher ( line ) ; if ( matcher . matches ( ) ) { String currencyCode = matcher . group ( 1 ) ; int numericCode = Integer . parseInt ( matcher . group ( 2 ) ) ; int digits = Integer . parseInt ( matcher . group ( 3 ) ) ; registerCurrency ( currencyCode , numericCode , digits ) ; } } } | parse the currencies |
30,105 | private void parseCountries ( List < String > content ) throws Exception { for ( String line : content ) { Matcher matcher = COUNTRY_REGEX_LINE . matcher ( line ) ; if ( matcher . matches ( ) ) { String countryCode = matcher . group ( 1 ) ; String currencyCode = matcher . group ( 2 ) ; registerCountry ( countryCode , currencyCode ) ; } } } | parse the countries |
30,106 | public MoneyAmountStyle withGroupingSize ( Integer groupingSize ) { int sizeVal = ( groupingSize == null ? - 1 : groupingSize ) ; if ( groupingSize != null && sizeVal <= 0 ) { throw new IllegalArgumentException ( "Grouping size must be greater than zero" ) ; } if ( sizeVal == this . groupingSize ) { return this ; } return new MoneyAmountStyle ( zeroCharacter , positiveCharacter , negativeCharacter , decimalPointCharacter , groupingStyle , groupingCharacter , sizeVal , extendedGroupingSize , forceDecimalPoint , absValue ) ; } | Returns a copy of this style with the specified grouping size . |
30,107 | public MoneyAmountStyle withExtendedGroupingSize ( Integer extendedGroupingSize ) { int sizeVal = ( extendedGroupingSize == null ? - 1 : extendedGroupingSize ) ; if ( extendedGroupingSize != null && sizeVal < 0 ) { throw new IllegalArgumentException ( "Extended grouping size must not be negative" ) ; } if ( sizeVal == this . extendedGroupingSize ) { return this ; } return new MoneyAmountStyle ( zeroCharacter , positiveCharacter , negativeCharacter , decimalPointCharacter , groupingStyle , groupingCharacter , groupingSize , sizeVal , forceDecimalPoint , absValue ) ; } | Returns a copy of this style with the specified extended grouping size . |
30,108 | public MoneyAmountStyle withGroupingStyle ( GroupingStyle groupingStyle ) { MoneyFormatter . checkNotNull ( groupingStyle , "groupingStyle" ) ; if ( this . groupingStyle == groupingStyle ) { return this ; } return new MoneyAmountStyle ( zeroCharacter , positiveCharacter , negativeCharacter , decimalPointCharacter , groupingStyle , groupingCharacter , groupingSize , extendedGroupingSize , forceDecimalPoint , absValue ) ; } | Returns a copy of this style with the specified grouping setting . |
30,109 | public MoneyAmountStyle withForcedDecimalPoint ( boolean forceDecimalPoint ) { if ( this . forceDecimalPoint == forceDecimalPoint ) { return this ; } return new MoneyAmountStyle ( zeroCharacter , positiveCharacter , negativeCharacter , decimalPointCharacter , groupingStyle , groupingCharacter , groupingSize , extendedGroupingSize , forceDecimalPoint , absValue ) ; } | Returns a copy of this style with the specified decimal point setting . |
30,110 | private BigMoney checkCurrencyEqual ( BigMoneyProvider moneyProvider ) { BigMoney money = of ( moneyProvider ) ; if ( isSameCurrency ( money ) == false ) { throw new CurrencyMismatchException ( getCurrencyUnit ( ) , money . getCurrencyUnit ( ) ) ; } return money ; } | Validates that the currency of this money and the specified money match . |
30,111 | public int compareTo ( BigMoneyProvider other ) { BigMoney otherMoney = of ( other ) ; if ( currency . equals ( otherMoney . currency ) == false ) { throw new CurrencyMismatchException ( getCurrencyUnit ( ) , otherMoney . getCurrencyUnit ( ) ) ; } return amount . compareTo ( otherMoney . amount ) ; } | Compares this monetary value to another . The compared values must be in the same currency . |
30,112 | void mergeChild ( MoneyParseContext child ) { setLocale ( child . getLocale ( ) ) ; setText ( child . getText ( ) ) ; setIndex ( child . getIndex ( ) ) ; setErrorIndex ( child . getErrorIndex ( ) ) ; setCurrency ( child . getCurrency ( ) ) ; setAmount ( child . getAmount ( ) ) ; } | Merges the child context back into this instance . |
30,113 | public ParsePosition toParsePosition ( ) { ParsePosition pp = new ParsePosition ( textIndex ) ; pp . setErrorIndex ( textErrorIndex ) ; return pp ; } | Converts the indexes to a parse position . |
30,114 | public Iterator < Entry < K , V > > iterator ( ) { checkClosed ( ) ; final Iterator < K > _keyIterator = cache . keys ( ) . iterator ( ) ; return new Iterator < Entry < K , V > > ( ) { CacheEntry < K , V > entry ; public boolean hasNext ( ) { while ( _keyIterator . hasNext ( ) ) { entry = cache . getEntry ( _keyIterator . next ( ) ) ; if ( entry . getException ( ) == null ) { return true ; } } entry = null ; return false ; } public Entry < K , V > next ( ) { if ( entry == null && ! hasNext ( ) ) { throw new NoSuchElementException ( ) ; } return new Entry < K , V > ( ) { public K getKey ( ) { return entry . getKey ( ) ; } public V getValue ( ) { return entry . getValue ( ) ; } @ SuppressWarnings ( "unchecked" ) public < T > T unwrap ( Class < T > _class ) { if ( CacheEntry . class . equals ( _class ) ) { return ( T ) entry ; } return null ; } } ; } public void remove ( ) { if ( entry == null ) { throw new IllegalStateException ( "hasNext() / next() not called or end of iteration reached" ) ; } cache . remove ( entry . getKey ( ) ) ; } } ; } | Iterate with the help of cache2k key iterator . |
30,115 | public static CacheManager getInstance ( ) { ClassLoader _defaultClassLoader = PROVIDER . getDefaultClassLoader ( ) ; return PROVIDER . getManager ( _defaultClassLoader , PROVIDER . getDefaultManagerName ( _defaultClassLoader ) ) ; } | Get the default cache manager for the default class loader . The default class loader is the class loader used to load the cache2k implementation classes . |
30,116 | public static CacheManager getInstance ( ClassLoader cl ) { return PROVIDER . getManager ( cl , PROVIDER . getDefaultManagerName ( cl ) ) ; } | Get the default cache manager for the specified class loader . |
30,117 | public static CacheManager getInstance ( ClassLoader cl , String managerName ) { return PROVIDER . getManager ( cl , managerName ) ; } | Retrieve a cache manager with the specified name using the specified classloader . If not existing a manager with that name is created . Different cache managers are created for different class loaders . Manager names should be unique within one VM instance . |
30,118 | public static < K , T > Cache2kBuilder < K , T > of ( Class < K > _keyType , Class < T > _valueType ) { return new Cache2kBuilder < K , T > ( CacheTypeCapture . of ( _keyType ) , CacheTypeCapture . of ( _valueType ) ) ; } | Create a new cache builder for key and value types of classes with no generic parameters . |
30,119 | public static < K , T > Cache2kBuilder < K , T > of ( Cache2kConfiguration < K , T > c ) { Cache2kBuilder < K , T > cb = new Cache2kBuilder < K , T > ( c ) ; return cb ; } | Create a builder from the configuration . |
30,120 | public final Cache2kBuilder < K , V > manager ( CacheManager manager ) { if ( this . manager != null ) { throw new IllegalStateException ( "manager() must be first operation on builder." ) ; } this . manager = manager ; return this ; } | The manager the created cache will belong to . If this is set it must be the first method called . |
30,121 | @ SuppressWarnings ( "unchecked" ) public final < T2 > Cache2kBuilder < K , T2 > valueType ( CacheType < T2 > t ) { Cache2kBuilder < K , T2 > me = ( Cache2kBuilder < K , T2 > ) this ; me . config ( ) . setValueType ( t ) ; return me ; } | Sets the value type to use . Arrays are not supported . |
30,122 | public final Cache2kBuilder < K , V > name ( Class < ? > _class ) { config ( ) . setName ( _class . getName ( ) ) ; return this ; } | Sets a cache name from the fully qualified class name . |
30,123 | private static < T > CustomizationReferenceSupplier < T > wrapCustomizationInstance ( T obj ) { if ( obj == null ) { return null ; } return new CustomizationReferenceSupplier < T > ( obj ) ; } | Wraps to factory but passes on nulls . |
30,124 | @ SuppressWarnings ( "unchecked" ) public final Cache2kBuilder < K , V > wrappingLoader ( AdvancedCacheLoader < K , LoadDetail < V > > l ) { config ( ) . setAdvancedLoader ( ( CustomizationSupplier < AdvancedCacheLoader < K , V > > ) ( Object ) wrapCustomizationInstance ( l ) ) ; return this ; } | Enables read through operation and sets a cache loader |
30,125 | public final Cache2kBuilder < K , V > writer ( CacheWriter < K , V > w ) { config ( ) . setWriter ( wrapCustomizationInstance ( w ) ) ; return this ; } | Enables write through operation and sets a writer customization that gets called synchronously upon cache mutations . By default write through is not enabled . |
30,126 | public final Cache2kBuilder < K , V > addCacheClosedListener ( CacheClosedListener listener ) { config ( ) . getCacheClosedListeners ( ) . add ( wrapCustomizationInstance ( listener ) ) ; return this ; } | Listener that is called after a cache is closed . This is mainly used for the JCache integration . |
30,127 | public final Cache2kBuilder < K , V > addListener ( CacheEntryOperationListener < K , V > listener ) { config ( ) . getListeners ( ) . add ( wrapCustomizationInstance ( listener ) ) ; return this ; } | Add a listener . The listeners will be executed in a synchronous mode meaning further processing for an entry will stall until a registered listener is executed . The expiry will be always executed asynchronously . |
30,128 | public final Cache2kBuilder < K , V > addAsyncListener ( CacheEntryOperationListener < K , V > listener ) { config ( ) . getAsyncListeners ( ) . add ( wrapCustomizationInstance ( listener ) ) ; return this ; } | A set of listeners . Listeners added in this collection will be executed in a asynchronous mode . |
30,129 | public final Cache2kBuilder < K , V > expiryPolicy ( ExpiryPolicy < K , V > c ) { config ( ) . setExpiryPolicy ( wrapCustomizationInstance ( c ) ) ; return this ; } | Set expiry policy to use . |
30,130 | public final Cache2kBuilder < K , V > maxRetryInterval ( long v , TimeUnit u ) { config ( ) . setMaxRetryInterval ( u . toMillis ( v ) ) ; return this ; } | If a loader exception happens this is the maximum time interval after a retry attempt is made . For retries an exponential backoff algorithm is used . It starts with the retry time and then increases the time to the maximum according to an exponential pattern . |
30,131 | public final Cache2kBuilder < K , V > with ( ConfigurationSectionBuilder < ? extends ConfigurationSection > ... sectionBuilders ) { for ( ConfigurationSectionBuilder < ? extends ConfigurationSection > b : sectionBuilders ) { config ( ) . getSections ( ) . add ( b . buildConfigurationSection ( ) ) ; } return this ; } | Add a new configuration sub section . |
30,132 | public final Cache2kBuilder < K , V > asyncListenerExecutor ( Executor v ) { config ( ) . setAsyncListenerExecutor ( new CustomizationReferenceSupplier < Executor > ( v ) ) ; return this ; } | Executor for asynchronous listeners . If no executor is specified an internal executor is used that has unbounded thread capacity . |
30,133 | public final Cache2kBuilder < K , V > timeReference ( TimeReference v ) { config ( ) . setTimeReference ( new CustomizationReferenceSupplier < TimeReference > ( v ) ) ; return this ; } | Clock to be used by the cache as time reference . |
30,134 | static long limitExpiryToMaxLinger ( long now , long _maxLinger , long _requestedExpiryTime , boolean _sharpExpiryEnabled ) { if ( _sharpExpiryEnabled && _requestedExpiryTime > ExpiryPolicy . REFRESH && _requestedExpiryTime < ExpiryPolicy . ETERNAL ) { _requestedExpiryTime = - _requestedExpiryTime ; } return Expiry . mixTimeSpanAndPointInTime ( now , _maxLinger , _requestedExpiryTime ) ; } | Ignore the value of the expiry policy if later then the maximum expiry time . If max linger takes over we do not request sharp expiry . |
30,135 | public String getDefaultManagerName ( ClassLoader cl ) { ConfigurationContext ctx = classLoader2config . get ( cl ) ; if ( ctx == null ) { ctx = createContext ( cl , null , DEFAULT_CONFIGURATION_FILE ) ; Map < ClassLoader , ConfigurationContext > m2 = new HashMap < ClassLoader , ConfigurationContext > ( classLoader2config ) ; m2 . put ( cl , ctx ) ; classLoader2config = m2 ; } return ctx . getManagerConfiguration ( ) . getDefaultManagerName ( ) ; } | The name of the default manager may be changed in the configuration file . Load the default configuration file and save the loaded context for the respective classloader so we do not load the context twice when we create the first cache . |
30,136 | private ConfigurationContext getManagerContext ( final CacheManager mgr ) { ConfigurationContext ctx = manager2defaultConfig . get ( mgr ) ; if ( ctx != null ) { return ctx ; } synchronized ( this ) { ctx = manager2defaultConfig . get ( mgr ) ; if ( ctx != null ) { return ctx ; } if ( mgr . isDefaultManager ( ) ) { ctx = classLoader2config . get ( mgr . getClassLoader ( ) ) ; } if ( ctx == null ) { ctx = createContext ( mgr . getClassLoader ( ) , mgr . getName ( ) , getFileName ( mgr ) ) ; } Map < CacheManager , ConfigurationContext > m2 = new HashMap < CacheManager , ConfigurationContext > ( manager2defaultConfig ) ; m2 . put ( mgr , ctx ) ; manager2defaultConfig = m2 ; return ctx ; } } | Hold the cache default configuration of a manager in a hash table . This is reused for all caches of one manager . |
30,137 | void apply ( final ConfigurationContext ctx , final ParsedConfiguration _parsedCfg , final Object cfg ) { ParsedConfiguration _templates = ctx . getTemplates ( ) ; ConfigurationTokenizer . Property _include = _parsedCfg . getPropertyMap ( ) . get ( "include" ) ; if ( _include != null ) { for ( String _template : _include . getValue ( ) . split ( "," ) ) { ParsedConfiguration c2 = null ; if ( _templates != null ) { c2 = _templates . getSection ( _template ) ; } if ( c2 == null ) { throw new ConfigurationException ( "Template not found \'" + _template + "\'" , _include ) ; } apply ( ctx , c2 , cfg ) ; } } applyPropertyValues ( _parsedCfg , cfg ) ; if ( ! ( cfg instanceof ConfigurationWithSections ) ) { return ; } ConfigurationWithSections _configurationWithSections = ( ConfigurationWithSections ) cfg ; for ( ParsedConfiguration _parsedSection : _parsedCfg . getSections ( ) ) { String _sectionType = ctx . getPredefinedSectionTypes ( ) . get ( _parsedSection . getName ( ) ) ; if ( _sectionType == null ) { _sectionType = _parsedSection . getType ( ) ; } if ( _sectionType == null ) { throw new ConfigurationException ( "type missing or unknown" , _parsedSection ) ; } Class < ? > _type ; try { _type = Class . forName ( _sectionType ) ; } catch ( ClassNotFoundException ex ) { throw new ConfigurationException ( "class not found '" + _sectionType + "'" , _parsedSection ) ; } if ( ! handleSection ( ctx , _type , _configurationWithSections , _parsedSection ) && ! handleCollection ( ctx , _type , cfg , _parsedSection ) && ! handleBean ( ctx , _type , cfg , _parsedSection ) ) { throw new ConfigurationException ( "Unknown property '" + _parsedSection . getContainer ( ) + "'" , _parsedSection ) ; } } } | Set properties in configuration bean based on the parsed configuration . Called by unit test . |
30,138 | private boolean handleBean ( final ConfigurationContext ctx , final Class < ? > _type , final Object cfg , final ParsedConfiguration _parsedCfg ) { String _containerName = _parsedCfg . getContainer ( ) ; BeanPropertyMutator m = provideMutator ( cfg . getClass ( ) ) ; Class < ? > _targetType = m . getType ( _containerName ) ; if ( _targetType == null ) { return false ; } if ( ! _targetType . isAssignableFrom ( _type ) ) { throw new ConfigurationException ( "Type mismatch, expected: '" + _targetType . getName ( ) + "'" , _parsedCfg ) ; } Object _bean = createBeanAndApplyConfiguration ( ctx , _type , _parsedCfg ) ; mutateAndCatch ( cfg , m , _containerName , _bean , _parsedCfg , _bean ) ; return true ; } | Create the bean apply configuration to it and set it . |
30,139 | private boolean handleSection ( final ConfigurationContext ctx , final Class < ? > _type , final ConfigurationWithSections cfg , final ParsedConfiguration sc ) { String _containerName = sc . getContainer ( ) ; if ( ! "sections" . equals ( _containerName ) ) { return false ; } @ SuppressWarnings ( "unchecked" ) ConfigurationSection _sectionBean = cfg . getSections ( ) . getSection ( ( Class < ConfigurationSection > ) _type ) ; if ( ! ( _sectionBean instanceof SingletonConfigurationSection ) ) { try { _sectionBean = ( ConfigurationSection ) _type . newInstance ( ) ; } catch ( Exception ex ) { throw new ConfigurationException ( "Cannot instantiate section class: " + ex , sc ) ; } cfg . getSections ( ) . add ( _sectionBean ) ; } apply ( ctx , sc , _sectionBean ) ; return true ; } | Create a new configuration section or reuse an existing section if it is a singleton . |
30,140 | public void checkKeepOrRemove ( ) { boolean _hasKeepAfterExpired = heapCache . isKeepAfterExpired ( ) ; if ( expiry != 0 || remove || _hasKeepAfterExpired ) { mutationUpdateHeap ( ) ; return ; } if ( _hasKeepAfterExpired ) { expiredImmediatelyKeepData ( ) ; return ; } expiredImmediatelyAndRemove ( ) ; } | In case we have a expiry of 0 this means that the entry should not be cached . If there is a valid entry we remove it if we do not keep the data . |
30,141 | public void asyncOperationStarted ( ) { if ( syncThread == Thread . currentThread ( ) ) { synchronized ( entry ) { while ( entry . isProcessing ( ) ) { try { entry . wait ( ) ; } catch ( InterruptedException ex ) { Thread . currentThread ( ) . interrupt ( ) ; } } } } else { entryLocked = false ; } } | If thread is a synchronous call wait until operation is complete . There is a little chance that the call back completes before we get here as well as some other operation changing the entry again . |
30,142 | private static void initializeLogFactory ( ) { ServiceLoader < LogFactory > loader = ServiceLoader . load ( LogFactory . class ) ; for ( LogFactory lf : loader ) { logFactory = lf ; log ( "New instance, using: " + logFactory . getClass ( ) . getName ( ) ) ; return ; } try { final org . slf4j . ILoggerFactory lf = org . slf4j . LoggerFactory . getILoggerFactory ( ) ; logFactory = new LogFactory ( ) { public Log getLog ( String s ) { return new Slf4jLogger ( lf . getLogger ( s ) ) ; } } ; log ( "New instance, using SLF4J logging" ) ; return ; } catch ( NoClassDefFoundError ignore ) { } try { final org . apache . commons . logging . LogFactory cl = org . apache . commons . logging . LogFactory . getFactory ( ) ; logFactory = new LogFactory ( ) { public Log getLog ( String s ) { return new CommonsLogger ( cl . getInstance ( s ) ) ; } } ; log ( "New instance, using commons logging" ) ; return ; } catch ( NoClassDefFoundError ignore ) { } logFactory = new LogFactory ( ) { public Log getLog ( String s ) { return new JdkLogger ( Logger . getLogger ( s ) ) ; } } ; log ( "New instance, using JDK logging" ) ; } | Finds a logger we can use . First we start with looking for a registered service provider . Then apache commons logging . As a fallback we use JDK logging . |
30,143 | private static String readFile ( String _name ) throws IOException { InputStream in = SingleProviderResolver . class . getClassLoader ( ) . getResourceAsStream ( _name ) ; if ( in == null ) { return null ; } try { LineNumberReader r = new LineNumberReader ( new InputStreamReader ( in ) ) ; String l = r . readLine ( ) ; while ( l != null ) { if ( ! l . startsWith ( "#" ) ) { return l ; } l = r . readLine ( ) ; } } finally { in . close ( ) ; } return null ; } | Read the first line of a file in the classpath into a string . |
30,144 | @ SuppressWarnings ( "unchecked" ) private static < S > Iterable < S > constructAllServiceImplementations ( Class < S > _service ) { ClassLoader cl = CacheManagerImpl . class . getClassLoader ( ) ; ArrayList < S > li = new ArrayList < S > ( ) ; Iterator < S > it = ServiceLoader . load ( _service , cl ) . iterator ( ) ; while ( it . hasNext ( ) ) { try { li . add ( it . next ( ) ) ; } catch ( ServiceConfigurationError ex ) { Log . getLog ( CacheManager . class . getName ( ) ) . debug ( "Error loading service '" + _service + "'" , ex ) ; } } final S [ ] a = ( S [ ] ) Array . newInstance ( _service , li . size ( ) ) ; li . toArray ( a ) ; return new Iterable < S > ( ) { public Iterator < S > iterator ( ) { return new Iterator < S > ( ) { private int pos = 0 ; public boolean hasNext ( ) { return pos < a . length ; } public S next ( ) { return a [ pos ++ ] ; } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; } } ; } | The service loader works lazy however we want to have all implementations constructed . Retrieve all implementations from the service loader and return an read - only iterable backed by an array . |
30,145 | public static void checkName ( String s ) { for ( char c : s . toCharArray ( ) ) { if ( c == '.' || c == '-' || c == '~' || c == ',' || c == '@' || c == ' ' || c == '(' || c == ')' || c == '+' || c == '!' || c == '\'' || c == '%' || c == '#' ) { continue ; } if ( c < 32 || c >= 127 || ! Character . isJavaIdentifierPart ( c ) ) { throw new IllegalArgumentException ( "Cache name contains illegal character: '" + c + "', name=\"" + s + "\"" ) ; } } } | Don t accept a cache or manager names with too weird characters . |
30,146 | public void close ( ) { if ( isDefaultManager ( ) && getClass ( ) . getClassLoader ( ) == classLoader ) { log . info ( "Closing default CacheManager" ) ; } Iterable < Cache > _caches ; synchronized ( lock ) { if ( closing ) { return ; } _caches = cachesCopy ( ) ; closing = true ; } logPhase ( "close" ) ; List < Throwable > _suppressedExceptions = new ArrayList < Throwable > ( ) ; for ( Cache c : _caches ) { ( ( InternalCache ) c ) . cancelTimerJobs ( ) ; } for ( Cache c : _caches ) { try { c . close ( ) ; } catch ( Throwable t ) { _suppressedExceptions . add ( t ) ; } } try { for ( CacheManagerLifeCycleListener lc : cacheManagerLifeCycleListeners ) { lc . managerDestroyed ( this ) ; } } catch ( Throwable t ) { _suppressedExceptions . add ( t ) ; } ( ( Cache2kCoreProviderImpl ) PROVIDER ) . removeManager ( this ) ; synchronized ( lock ) { for ( Cache c : cacheNames . values ( ) ) { log . warn ( "unable to close cache: " + c . getName ( ) ) ; } } eventuallyThrowException ( _suppressedExceptions ) ; cacheNames = null ; } | The shutdown takes place in two phases . First all caches are notified to cancel their scheduled timer jobs after that the shutdown is done . Cancelling the timer jobs first is needed because there may be cache stacking and a timer job of one cache may call an already closed cache . |
30,147 | static void eventuallyThrowException ( List < Throwable > _suppressedExceptions ) { if ( _suppressedExceptions . isEmpty ( ) ) { return ; } Throwable _error = null ; for ( Throwable t : _suppressedExceptions ) { if ( t instanceof Error ) { _error = t ; break ; } if ( t instanceof ExecutionException && t . getCause ( ) instanceof Error ) { _error = t . getCause ( ) ; break ; } } String _text = "Exception(s) during shutdown" ; if ( _suppressedExceptions . size ( ) > 1 ) { _text = " (" + ( _suppressedExceptions . size ( ) - 1 ) + " more suppressed exceptions)" ; } if ( _error != null ) { throw new CacheInternalError ( _text , _error ) ; } throw new CacheException ( _text , _suppressedExceptions . get ( 0 ) ) ; } | During the shutdown of the cache manager multiple exceptions can happen from various caches . The list of exceptions gets examined to throw one exception . |
30,148 | private String getManagerId ( ) { return "name='" + name + "', objectId=" + Integer . toString ( System . identityHashCode ( this ) , 36 ) + ", classloaderId=" + Integer . toString ( System . identityHashCode ( classLoader ) , 36 ) + ", default=" + defaultManager ; } | Relevant information to id a manager . Since there could be multiple cache managers for each class loader better |
30,149 | public boolean checkAndSwitchProcessingState ( int ps0 , int ps ) { long rt = refreshTimeAndState ; long _expect = withState ( rt , ps0 ) ; long _update = withState ( rt , ps ) ; return STATE_UPDATER . compareAndSet ( this , _expect , _update ) ; } | Check and switch the processing state atomically . |
30,150 | private PiggyBack existingPiggyBackForInserting ( ) { Object _misc = misc ; if ( _misc instanceof SimpleTimerTask ) { return new TaskPiggyBack ( ( SimpleTimerTask ) _misc , null ) ; } return ( PiggyBack ) _misc ; } | We want to add a new piggy back . Check for timer task and convert it to piggy back . |
30,151 | public void resetSuppressedLoadExceptionInformation ( ) { LoadExceptionPiggyBack inf = getPiggyBack ( LoadExceptionPiggyBack . class ) ; if ( inf != null ) { inf . info = null ; } } | If the entry carries information about a suppressed exception clear it . |
30,152 | public Entry < K , V > lookup ( K key , int _hash , int _keyValue ) { OptimisticLock [ ] _locks = locks ; int si = _hash & LOCK_MASK ; OptimisticLock l = _locks [ si ] ; long _stamp = l . tryOptimisticRead ( ) ; Entry < K , V > [ ] tab = entries ; if ( tab == null ) { throw new CacheClosedException ( cache ) ; } Entry < K , V > e ; int n = tab . length ; int _mask = n - 1 ; int idx = _hash & ( _mask ) ; e = tab [ idx ] ; while ( e != null ) { if ( e . hashCode == _keyValue && keyObjIsEqual ( key , e ) ) { return e ; } e = e . another ; } if ( l . validate ( _stamp ) ) { return null ; } _stamp = l . readLock ( ) ; try { tab = entries ; if ( tab == null ) { throw new CacheClosedException ( cache ) ; } n = tab . length ; _mask = n - 1 ; idx = _hash & ( _mask ) ; e = tab [ idx ] ; while ( e != null ) { if ( e . hashCode == _keyValue && ( keyObjIsEqual ( key , e ) ) ) { return e ; } e = e . another ; } return null ; } finally { l . unlockRead ( _stamp ) ; } } | Lookup the entry in the hash table and return it . First tries an optimistic read . |
30,153 | public Entry < K , V > insertWithinLock ( Entry < K , V > e , int _hash , int _keyValue ) { K key = e . getKeyObj ( ) ; int si = _hash & LOCK_MASK ; Entry < K , V > f ; Object ek ; Entry < K , V > [ ] tab = entries ; if ( tab == null ) { throw new CacheClosedException ( cache ) ; } int n = tab . length , _mask = n - 1 , idx = _hash & ( _mask ) ; f = tab [ idx ] ; while ( f != null ) { if ( f . hashCode == _keyValue && ( ( ek = f . getKeyObj ( ) ) == key || ( ek . equals ( key ) ) ) ) { return f ; } f = f . another ; } e . another = tab [ idx ] ; tab [ idx ] = e ; segmentSize [ si ] . incrementAndGet ( ) ; return e ; } | Insert an entry . Checks if an entry already exists . |
30,154 | public boolean remove ( Entry < K , V > e ) { int _hash = modifiedHashCode ( e . hashCode ) ; OptimisticLock [ ] _locks = locks ; int si = _hash & LOCK_MASK ; OptimisticLock l = _locks [ si ] ; long _stamp = l . writeLock ( ) ; try { Entry < K , V > f ; Entry < K , V > [ ] tab = entries ; if ( tab == null ) { throw new CacheClosedException ( cache ) ; } int n = tab . length , _mask = n - 1 , idx = _hash & ( _mask ) ; f = tab [ idx ] ; if ( f == e ) { tab [ idx ] = f . another ; segmentSize [ si ] . decrementAndGet ( ) ; return true ; } while ( f != null ) { Entry < K , V > _another = f . another ; if ( _another == e ) { f . another = _another . another ; segmentSize [ si ] . decrementAndGet ( ) ; return true ; } f = _another ; } } finally { l . unlockWrite ( _stamp ) ; } return false ; } | Remove existing entry from the hash . |
30,155 | private void eventuallyExpand ( int _segmentIndex ) { long [ ] _stamps = lockAll ( ) ; try { long _size = segmentSize [ _segmentIndex ] . get ( ) ; if ( _size <= segmentMaxFill ) { return ; } rehash ( ) ; } finally { unlockAll ( _stamps ) ; } } | Acquire all segment locks and rehash if really needed . |
30,156 | private long [ ] lockAll ( ) { OptimisticLock [ ] _locks = locks ; int sn = _locks . length ; long [ ] _stamps = new long [ locks . length ] ; for ( int i = 0 ; i < sn ; i ++ ) { OptimisticLock l = _locks [ i ] ; _stamps [ i ] = l . writeLock ( ) ; } return _stamps ; } | Acquire all segment locks and return an array with the lock stamps . |
30,157 | private void unlockAll ( long [ ] _stamps ) { OptimisticLock [ ] _locks = locks ; int sn = _locks . length ; for ( int i = 0 ; i < sn ; i ++ ) { _locks [ i ] . unlockWrite ( _stamps [ i ] ) ; } } | Release the all segment locks . |
30,158 | @ SuppressWarnings ( "unchecked" ) void rehash ( ) { Entry < K , V > [ ] src = entries ; if ( src == null ) { throw new CacheClosedException ( cache ) ; } int i , sl = src . length , n = sl * 2 , _mask = n - 1 , idx ; Entry < K , V > [ ] tab = new Entry [ n ] ; long _count = 0 ; Entry _next , e ; for ( i = 0 ; i < sl ; i ++ ) { e = src [ i ] ; while ( e != null ) { _count ++ ; _next = e . another ; idx = modifiedHashCode ( e . hashCode ) & _mask ; e . another = tab [ idx ] ; tab [ idx ] = e ; e = _next ; } } entries = tab ; calcMaxFill ( ) ; } | Double the hash table size and rehash the entries . Assumes total lock . |
30,159 | public < T > T runTotalLocked ( Job < T > j ) { long [ ] _stamps = lockAll ( ) ; try { return j . call ( ) ; } finally { unlockAll ( _stamps ) ; } } | Lock all segments and run the job . |
30,160 | public long calcEntryCount ( ) { long _count = 0 ; for ( Entry e : entries ) { while ( e != null ) { _count ++ ; e = e . another ; } } return _count ; } | Count the entries in the hash table by scanning through the hash table . This is used for integrity checks . |
30,161 | private static JCacheJmxSupport findJCacheJmxSupportInstance ( ) { for ( CacheLifeCycleListener l : CacheManagerImpl . getCacheLifeCycleListeners ( ) ) { if ( l instanceof JCacheJmxSupport ) { return ( JCacheJmxSupport ) l ; } } throw new LinkageError ( "JCacheJmxSupport not loaded" ) ; } | The JMX support is already created via the serviceloader |
30,162 | public Cache resolveCacheWrapper ( org . cache2k . Cache _c2kCache ) { synchronized ( getLockObject ( ) ) { return c2k2jCache . get ( _c2kCache ) ; } } | Return the JCache wrapper for a c2k cache . |
30,163 | protected void removeFromReplacementList ( Entry e ) { if ( e . isHot ( ) ) { hotHits += e . hitCnt ; handHot = Entry . removeFromCyclicList ( handHot , e ) ; hotSize -- ; } else { coldHits += e . hitCnt ; handCold = Entry . removeFromCyclicList ( handCold , e ) ; coldSize -- ; } } | Remove expire or eviction of an entry happens . Remove the entry from the replacement list data structure . |
/**
 * Runs the cold hand (and in turn the hot hand) of the clock to find the
 * next eviction candidate.
 */
protected Entry findEvictionCandidate ( Entry _previous ) {
  coldRunCnt ++ ;
  Entry _hand = handCold ;
  int _scanCnt = 1 ;
  if ( _hand == null ) {
    // cold list is empty: move entries over from the hot list
    _hand = refillFromHot ( _hand ) ;
  }
  if ( _hand . hitCnt > 0 ) {
    _hand = refillFromHot ( _hand ) ;
    do {
      _scanCnt ++ ;
      // the entry was hit while cold: promote it to the hot list and reset its count
      coldHits += _hand . hitCnt ;
      _hand . hitCnt = 0 ;
      Entry e = _hand ;
      _hand = Entry . removeFromCyclicList ( e ) ;
      coldSize -- ;
      e . setHot ( true ) ;
      hotSize ++ ;
      handHot = Entry . insertIntoTailCyclicList ( handHot , e ) ;
    } while ( _hand != null && _hand . hitCnt > 0 ) ;
  }
  if ( _hand == null ) {
    // everything was promoted; refill the cold list again
    _hand = refillFromHot ( _hand ) ;
  }
  coldScanCnt += _scanCnt ;
  // advance the hand past the candidate for the next run
  handCold = _hand . next ;
  return _hand ;
}
/**
 * Creates the modified hash table implementation. Rehash needs to recompute
 * the modified hash code, so the spreading function is delegated back to
 * this cache instance.
 */
public Hash2 < Integer , V > createHashTable ( ) {
  return new Hash2 < Integer , V > ( this ) {
    protected int modifiedHashCode ( final int hc ) {
      return IntHeapCache . this . modifiedHash ( hc ) ;
    }
    protected boolean keyObjIsEqual ( final Integer key , final Entry e ) {
      // integer keys are fully identified by their hash code, no extra compare needed
      return true ;
    }
  } ;
}
30,166 | @ SuppressWarnings ( "unchecked" ) public static < V > LoadDetail < V > wrapRefreshedTime ( V value , long refreshedTimeInMillis ) { return new RefreshedTimeWrapper < V > ( value , refreshedTimeInMillis ) ; } | Wraps a loaded value to add the refreshed value . |
30,167 | private void registerExtensions ( ) { Iterator < Cache2kExtensionProvider > it = ServiceLoader . load ( Cache2kExtensionProvider . class , CacheManager . class . getClassLoader ( ) ) . iterator ( ) ; while ( it . hasNext ( ) ) { try { it . next ( ) . registerCache2kExtension ( ) ; } catch ( ServiceConfigurationError ex ) { Log . getLog ( CacheManager . class . getName ( ) ) . debug ( "Error loading cache2k extension" , ex ) ; } } } | ignore load errors so we can remove the serverSide or the xmlConfiguration code and cache2k core still works |
30,168 | void removeManager ( CacheManager cm ) { synchronized ( getLockObject ( ) ) { Map < String , CacheManager > _name2managers = loader2name2manager . get ( cm . getClassLoader ( ) ) ; _name2managers = new HashMap < String , CacheManager > ( _name2managers ) ; Object _removed = _name2managers . remove ( cm . getName ( ) ) ; Map < ClassLoader , Map < String , CacheManager > > _copy = new WeakHashMap < ClassLoader , Map < String , CacheManager > > ( loader2name2manager ) ; _copy . put ( cm . getClassLoader ( ) , _name2managers ) ; loader2name2manager = _copy ; if ( cm . isDefaultManager ( ) ) { Map < ClassLoader , String > _defaultNameCopy = new WeakHashMap < ClassLoader , String > ( loader2defaultName ) ; _defaultNameCopy . remove ( cm . getClassLoader ( ) ) ; loader2defaultName = _defaultNameCopy ; } } } | Called from the manager after a close . Removes the manager from the known managers . |
30,169 | private void loadAllWithAsyncLoader ( final CacheOperationCompletionListener _listener , final Set < K > _keysToLoad ) { final AtomicInteger _countDown = new AtomicInteger ( _keysToLoad . size ( ) ) ; EntryAction . ActionCompletedCallback cb = new EntryAction . ActionCompletedCallback ( ) { public void entryActionCompleted ( final EntryAction ea ) { int v = _countDown . decrementAndGet ( ) ; if ( v == 0 ) { _listener . onCompleted ( ) ; return ; } } } ; for ( K k : _keysToLoad ) { final K key = k ; executeAsync ( key , null , SPEC . GET , cb ) ; } } | Load the keys into the cache via the async path . The key set must always be non empty . The completion listener is called when all keys are loaded . |
30,170 | public Map < K , V > peekAll ( final Iterable < ? extends K > keys ) { Map < K , CacheEntry < K , V > > map = new HashMap < K , CacheEntry < K , V > > ( ) ; for ( K k : keys ) { CacheEntry < K , V > e = execute ( k , SPEC . peekEntry ( k ) ) ; if ( e != null ) { map . put ( k , e ) ; } } return heapCache . convertCacheEntry2ValueMap ( map ) ; } | We need to deal with possible null values and exceptions . This is a simple placeholder implementation that covers it all by working on the entry . |
30,171 | public void onEvictionFromHeap ( final Entry < K , V > e ) { CacheEntry < K , V > _currentEntry = heapCache . returnCacheEntry ( e ) ; if ( syncEntryEvictedListeners != null ) { for ( CacheEntryEvictedListener < K , V > l : syncEntryEvictedListeners ) { l . onEntryEvicted ( this , _currentEntry ) ; } } } | Nothing done here . Will notify the storage about eviction in some future version . |
30,172 | public static long mixTimeSpanAndPointInTime ( long loadTime , long refreshAfter , long pointInTime ) { long _refreshTime = loadTime + refreshAfter ; if ( _refreshTime < 0 ) { _refreshTime = ETERNAL ; } if ( pointInTime == ETERNAL ) { return _refreshTime ; } if ( pointInTime > _refreshTime ) { return _refreshTime ; } long _absPointInTime = Math . abs ( pointInTime ) ; if ( _absPointInTime <= _refreshTime ) { return pointInTime ; } long _pointInTimeMinusDelta = _absPointInTime - refreshAfter ; if ( _pointInTimeMinusDelta < _refreshTime ) { return _pointInTimeMinusDelta ; } return _refreshTime ; } | Combine a refresh time span and an expiry at a specified point in time . |
30,173 | private static Object getLockObject ( Object key ) { int hc = key . hashCode ( ) ; return KEY_LOCKS [ hc & KEY_LOCKS_MASK ] ; } | Simulate locking by key use the hash code to spread and avoid lock contention . The additional locking we introduce here is currently run synchronously inside the entry mutation operation . |
/**
 * Immediately executes an event with the provided executor. If an event for
 * the identical key is already executing, the event is queued instead and
 * later executed in FIFO order, preserving arrival order per key.
 */
public void queue ( final AsyncEvent < K > _event ) {
  final K key = _event . getKey ( ) ;
  synchronized ( getLockObject ( key ) ) {
    Queue < AsyncEvent < K > > q = keyQueue . get ( key ) ;
    if ( q != null ) {
      // a runner is active for this key; enqueue and let it pick the event up
      q . add ( _event ) ;
      return ;
    }
    // presence of a (possibly empty) queue in keyQueue marks an active runner
    q = new LinkedList < AsyncEvent < K > > ( ) ;
    keyQueue . put ( key , q ) ;
  }
  Runnable r = new Runnable ( ) {
    public void run ( ) {
      runMoreOrStop ( _event ) ;
    }
  } ;
  executor . execute ( r ) ;
}
/**
 * Executes the event, then keeps running queued events for the same key
 * until the queue drains. Exceptions from an event are logged and do not
 * stop processing of subsequent events.
 */
public void runMoreOrStop ( AsyncEvent < K > _event ) {
  for ( ; ; ) {
    try {
      _event . execute ( ) ;
    } catch ( Throwable t ) {
      cache . getLog ( ) . warn ( "Async event exception" , t ) ;
    }
    final K key = _event . getKey ( ) ;
    synchronized ( getLockObject ( key ) ) {
      Queue < AsyncEvent < K > > q = keyQueue . get ( key ) ;
      if ( q . isEmpty ( ) ) {
        // no more queued events: remove the marker so queue() starts a fresh runner
        keyQueue . remove ( key ) ;
        return ;
      }
      _event = q . remove ( ) ;
    }
  }
}
30,176 | public Iterable < K > keys ( ) { return new Iterable < K > ( ) { public Iterator < K > iterator ( ) { final Iterator < CacheEntry < K , V > > it = BaseCache . this . iterator ( ) ; return new Iterator < K > ( ) { public boolean hasNext ( ) { return it . hasNext ( ) ; } public K next ( ) { return it . next ( ) . getKey ( ) ; } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; } } ; } | Key iteration on top of normal iterator . |
/**
 * Applies the cache configuration; called from the cache builder. Must only
 * be called once — a second call fails because the name is already set.
 */
public void setCacheConfig ( final Cache2kConfiguration c ) {
  valueType = c . getValueType ( ) ;
  keyType = c . getKeyType ( ) ;
  if ( name != null ) {
    throw new IllegalStateException ( "already configured" ) ;
  }
  setName ( c . getName ( ) ) ;
  setFeatureBit ( KEEP_AFTER_EXPIRED , c . isKeepDataAfterExpired ( ) ) ;
  setFeatureBit ( REJECT_NULL_VALUES , ! c . isPermitNullValues ( ) ) ;
  setFeatureBit ( BACKGROUND_REFRESH , c . isRefreshAhead ( ) ) ;
  // NOTE(review): both bits below are driven by isRecordRefreshedTime() —
  // confirm UPDATE_TIME_NEEDED is not meant to depend on other settings too
  setFeatureBit ( UPDATE_TIME_NEEDED , c . isRecordRefreshedTime ( ) ) ;
  setFeatureBit ( RECORD_REFRESH_TIME , c . isRecordRefreshedTime ( ) ) ;
  metrics = TUNABLE . commonMetricsFactory . create ( new CommonMetricsFactory . Parameters ( ) {
    public boolean isDisabled ( ) {
      return c . isDisableStatistics ( ) ;
    }
    public boolean isPrecise ( ) {
      return false ;
    }
  } ) ;
  if ( c . getLoaderExecutor ( ) != null ) {
    loaderExecutor = createCustomization ( ( CustomizationSupplier < Executor > ) c . getLoaderExecutor ( ) ) ;
  } else {
    // no custom executor configured: fall back to a default pool when a thread count is set
    if ( c . getLoaderThreadCount ( ) > 0 ) {
      loaderExecutor = provideDefaultLoaderExecutor ( c . getLoaderThreadCount ( ) ) ;
    }
  }
  if ( c . getPrefetchExecutor ( ) != null ) {
    prefetchExecutor = createCustomization ( ( CustomizationSupplier < Executor > ) c . getPrefetchExecutor ( ) ) ;
  }
}
30,178 | public void setName ( String n ) { if ( n == null ) { n = this . getClass ( ) . getSimpleName ( ) + "#" + cacheCnt ++ ; } name = n ; } | Set the name and configure a logging used within cache construction . |
30,179 | protected CacheEntry < K , V > returnEntry ( final ExaminationEntry < K , V > e ) { if ( e == null ) { return null ; } return returnCacheEntry ( e ) ; } | Wrap entry in a separate object instance . We can return the entry directly however we lock on the entry object . |
30,180 | protected final void putValue ( final Entry e , final V _value ) { if ( ! isUpdateTimeNeeded ( ) ) { insertOrUpdateAndCalculateExpiry ( e , _value , 0 , 0 , 0 , INSERT_STAT_PUT ) ; } else { long t = clock . millis ( ) ; insertOrUpdateAndCalculateExpiry ( e , _value , t , t , t , INSERT_STAT_PUT ) ; } } | Update the value directly within entry lock . Since we did not start entry processing we do not need to notify any waiting threads . |
/**
 * Replaces the entry value, optionally only when the present value equals
 * the expected old value. Returns false when no fresh entry exists or the
 * compare fails.
 */
protected boolean replace ( final K key , final boolean _compare , final V _oldValue , final V _newValue ) {
  Entry e = lookupEntry ( key ) ;
  if ( e == null ) {
    metrics . peekMiss ( ) ;
    return false ;
  }
  synchronized ( e ) {
    // block until any concurrent processing of this entry finished
    e . waitForProcessing ( ) ;
    if ( e . isGone ( ) || ! e . hasFreshData ( clock ) ) {
      return false ;
    }
    if ( _compare && ! e . equalsValue ( _oldValue ) ) {
      return false ;
    }
    putValue ( e , _newValue ) ;
  }
  return true ;
}
30,182 | final protected Entry < K , V > peekEntryInternal ( K key ) { int hc = modifiedHash ( key . hashCode ( ) ) ; return peekEntryInternal ( key , hc , extractIntKeyValue ( key , hc ) ) ; } | Return the entry if it is in the cache without invoking the cache source . |
30,183 | public boolean removeIfEquals ( K key , V _value ) { Entry e = lookupEntry ( key ) ; if ( e == null ) { metrics . peekMiss ( ) ; return false ; } synchronized ( e ) { e . waitForProcessing ( ) ; if ( e . isGone ( ) ) { metrics . peekMiss ( ) ; return false ; } boolean f = e . hasFreshData ( clock ) ; if ( f ) { if ( ! e . equalsValue ( _value ) ) { return false ; } } else { metrics . peekHitNotFresh ( ) ; return false ; } removeEntry ( e ) ; return f ; } } | Remove the object from the cache . |
/**
 * Always fetches the value from the loader, replacing any present value.
 * This is a copy of getEntryInternal without the freshness checks.
 */
protected void loadAndReplace ( K key ) {
  Entry e ;
  for ( ; ; ) {
    e = lookupOrNewEntry ( key ) ;
    synchronized ( e ) {
      e . waitForProcessing ( ) ;
      if ( e . isGone ( ) ) {
        // entry was removed concurrently; spin and look it up again
        metrics . goneSpin ( ) ;
        continue ;
      }
      e . startProcessing ( Entry . ProcessingState . LOAD ) ;
      break ;
    }
  }
  boolean _finished = false ;
  try {
    load ( e ) ;
    _finished = true ;
  } finally {
    // release the processing state if the load threw
    e . ensureAbort ( _finished ) ;
  }
}
30,185 | private void checkForHashCodeChange ( Entry < K , V > e ) { K key = extractKeyObj ( e ) ; if ( extractIntKeyValue ( key , modifiedHash ( key . hashCode ( ) ) ) != e . hashCode ) { if ( keyMutationCnt == 0 ) { getLog ( ) . warn ( "Key mismatch! Key hashcode changed! keyClass=" + e . getKey ( ) . getClass ( ) . getName ( ) ) ; String s ; try { s = e . getKey ( ) . toString ( ) ; if ( s != null ) { getLog ( ) . warn ( "Key mismatch! key.toString(): " + s ) ; } } catch ( Throwable t ) { getLog ( ) . warn ( "Key mismatch! key.toString() threw exception" , t ) ; } } keyMutationCnt ++ ; } } | Check whether the key was modified during the stay of the entry in the cache . We only need to check this when the entry is removed since we expect that if the key has changed the stored hash code in the cache will not match any more and the item is evicted very fast . |
30,186 | private boolean entryInRefreshProbationAccessed ( final Entry < K , V > e , final long now ) { long nrt = e . getRefreshProbationNextRefreshTime ( ) ; if ( nrt > now ) { reviveRefreshedEntry ( e , nrt ) ; return true ; } return false ; } | Entry was refreshed before reset timer and make entry visible again . |
30,187 | private void resiliencePolicyException ( final Entry < K , V > e , final long t0 , final long t , Throwable _exception ) { ExceptionWrapper < K > _value = new ExceptionWrapper ( extractKeyObj ( e ) , _exception , t0 , e ) ; insert ( e , ( V ) _value , t0 , t , t0 , INSERT_STAT_LOAD , 0 ) ; } | Exception from the resilience policy . We have two exceptions now . One from the loader or the expiry policy one from the resilience policy . We propagate the more severe one from the resilience policy . |
/**
 * Executed in the loader thread: loads the entry again. After the load the
 * entry is copied to the refresh hash and expired in the main hash. The
 * entry needs to stay in the main hash during the load to block out
 * concurrent reads.
 */
private void refreshEntry ( final Entry < K , V > e ) {
  synchronized ( e ) {
    e . waitForProcessing ( ) ;
    if ( e . isGone ( ) ) {
      return ;
    }
    e . startProcessing ( Entry . ProcessingState . REFRESH ) ;
  }
  boolean _finished = false ;
  try {
    load ( e ) ;
    _finished = true ;
  } catch ( CacheClosedException ignore ) {
    // cache shut down during the refresh; nothing more to do
  } catch ( Throwable ex ) {
    logAndCountInternalException ( "Refresh exception" , ex ) ;
    try {
      synchronized ( e ) {
        // refresh failed: expire the entry instead of keeping stale data
        expireEntry ( e ) ;
      }
    } catch ( CacheClosedException ignore ) {
    }
  } finally {
    // release the processing state if the refresh did not finish regularly
    e . ensureAbort ( _finished ) ;
  }
}
30,189 | private void expireAndRemoveEventually ( final Entry e ) { if ( isKeepAfterExpired ( ) || e . isProcessing ( ) ) { metrics . expiredKept ( ) ; } else { removeEntry ( e ) ; } } | Remove expired from heap and increment statistics . The entry is not removed when there is processing going on in parallel . |
30,190 | public Map < K , V > getAll ( final Iterable < ? extends K > _inputKeys ) { Map < K , ExaminationEntry < K , V > > map = new HashMap < K , ExaminationEntry < K , V > > ( ) ; for ( K k : _inputKeys ) { Entry < K , V > e = getEntryInternal ( k ) ; if ( e != null ) { map . put ( extractKeyObj ( e ) , ReadOnlyCacheEntry . of ( e ) ) ; } } return convertValueMap ( map ) ; } | JSR107 bulk interface . The behaviour is compatible to the JSR107 TCK . We also need to be compatible to the exception handling policy which says that the exception is to be thrown when most specific . So exceptions are only thrown when the value which has produced an exception is requested from the map . |
30,191 | public final void checkIntegrity ( ) { executeWithGlobalLock ( new Job < Void > ( ) { public Void call ( ) { IntegrityState is = getIntegrityState ( ) ; if ( is . getStateFlags ( ) > 0 ) { throw new Error ( "cache2k integrity error: " + is . getStateDescriptor ( ) + ", " + is . getFailingChecks ( ) + ", " + generateInfoUnderLock ( HeapCache . this , clock . millis ( ) ) . toString ( ) ) ; } return null ; } } ) ; } | Check internal data structures and throw and exception if something is wrong used for unit testing |
30,192 | public boolean add ( final CustomizationSupplier < T > entry ) { if ( list . contains ( entry ) ) { throw new IllegalArgumentException ( "duplicate entry" ) ; } return list . add ( entry ) ; } | Adds a customization to the collection . |
/**
 * Returns a new optimistic lock instance, choosing the implementation based
 * on platform support (the implementation class is initialized lazily).
 */
public static OptimisticLock newOptimistic ( ) {
  if ( optimisticLockImplementation == null ) {
    // presumably a benign race: initialization looks idempotent — TODO confirm
    initializeOptimisticLock ( ) ;
  }
  try {
    // NOTE(review): Class.newInstance() is deprecated since Java 9; consider
    // getDeclaredConstructor().newInstance() if the supported Java level allows
    return optimisticLockImplementation . newInstance ( ) ;
  } catch ( Exception ex ) {
    throw new Error ( ex ) ;
  }
}
/**
 * Records the access for expiry handling and hands the entry back to the
 * caller.
 */
private Entry < K , V > returnEntry ( final Entry < K , V > e ) {
  touchEntry ( e . getKey ( ) ) ;
  return e ;
}
30,195 | private V returnValue ( K key , V _value ) { if ( _value != null ) { Duration d = expiryPolicy . getExpiryForAccess ( ) ; if ( d != null ) { c2kCache . expireAt ( key , calculateExpiry ( d ) ) ; } return _value ; } return null ; } | Entry was accessed update expiry if value is non null . |
/**
 * Bulk load is not supported by this implementation.
 *
 * @throws UnsupportedOperationException always
 */
public Map < K , V > loadAll ( Iterable < ? extends K > keys , Executor executor ) throws Exception {
  throw new UnsupportedOperationException ( ) ;
}
/**
 * Determines key and value types: a provided cache2k configuration wins;
 * types it leaves unset are filled in from the JCache configuration. A type
 * mismatch between the two configurations is rejected.
 */
private void setupTypes ( ) {
  if ( ! cache2kConfigurationWasProvided ) {
    cache2kConfiguration . setKeyType ( config . getKeyType ( ) ) ;
    cache2kConfiguration . setValueType ( config . getValueType ( ) ) ;
  } else {
    // only fill in types the provided cache2k configuration left unset
    if ( cache2kConfiguration . getKeyType ( ) == null ) {
      cache2kConfiguration . setKeyType ( config . getKeyType ( ) ) ;
    }
    if ( cache2kConfiguration . getValueType ( ) == null ) {
      cache2kConfiguration . setValueType ( config . getValueType ( ) ) ;
    }
  }
  keyType = cache2kConfiguration . getKeyType ( ) ;
  valueType = cache2kConfiguration . getValueType ( ) ;
  // Object.class in the JCache configuration means "unspecified" and is not checked
  if ( ! config . getKeyType ( ) . equals ( Object . class ) && ! config . getKeyType ( ) . equals ( keyType . getType ( ) ) ) {
    throw new IllegalArgumentException ( "Key type mismatch between JCache and cache2k configuration" ) ;
  }
  if ( ! config . getValueType ( ) . equals ( Object . class ) && ! config . getValueType ( ) . equals ( valueType . getType ( ) ) ) {
    throw new IllegalArgumentException ( "Value type mismatch between JCache and cache2k configuration" ) ;
  }
}
30,198 | private void setupExceptionPropagator ( ) { if ( cache2kConfiguration . getExceptionPropagator ( ) != null ) { return ; } cache2kConfiguration . setExceptionPropagator ( new CustomizationReferenceSupplier < ExceptionPropagator < K > > ( new ExceptionPropagator < K > ( ) { public RuntimeException propagateException ( Object key , final ExceptionInformation exceptionInformation ) { return new CacheLoaderException ( "propagate previous loader exception" , exceptionInformation . getException ( ) ) ; } } ) ) ; } | If an exception propagator is configured take this one otherwise go with default that is providing JCache compatible behavior . |
/**
 * Wires the JCache loader and writer factories into the cache2k
 * configuration, wrapping the delegates so that close() is forwarded when
 * they implement Closeable.
 */
private void setupCacheThrough ( ) {
  if ( config . getCacheLoaderFactory ( ) != null ) {
    final CacheLoader < K , V > clf = config . getCacheLoaderFactory ( ) . create ( ) ;
    cache2kConfiguration . setAdvancedLoader ( new CustomizationReferenceSupplier < AdvancedCacheLoader < K , V > > ( new CloseableLoader ( ) {
      public void close ( ) throws IOException {
        // forward close to the JCache loader when it supports it
        if ( clf instanceof Closeable ) {
          ( ( Closeable ) clf ) . close ( ) ;
        }
      }
      public V load ( final K key , final long currentTime , final CacheEntry < K , V > currentEntry ) {
        return clf . load ( key ) ;
      }
    } ) ) ;
  }
  if ( config . getCacheWriterFactory ( ) != null ) {
    final javax . cache . integration . CacheWriter < ? super K , ? super V > cw = config . getCacheWriterFactory ( ) . create ( ) ;
    cache2kConfiguration . setWriter ( new CustomizationReferenceSupplier < CacheWriter < K , V > > ( new CloseableWriter ( ) {
      public void write ( final K key , final V value ) {
        // adapt key/value to a JCache entry view for the JCache writer
        Cache . Entry < K , V > ce = new Cache . Entry < K , V > ( ) {
          public K getKey ( ) {
            return key ;
          }
          public V getValue ( ) {
            return value ;
          }
          public < T > T unwrap ( Class < T > clazz ) {
            throw new UnsupportedOperationException ( "unwrap entry not supported" ) ;
          }
        } ;
        cw . write ( ce ) ;
      }
      public void delete ( final Object key ) {
        cw . delete ( key ) ;
      }
      public void close ( ) throws IOException {
        if ( cw instanceof Closeable ) {
          ( ( Closeable ) cw ) . close ( ) ;
        }
      }
    } ) ) ;
  }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.