idx
int64 0
165k
| question
stringlengths 73
4.15k
| target
stringlengths 5
918
| len_question
int64 21
890
| len_target
int64 3
255
|
|---|---|---|---|---|
159,300
|
@ Trivial public static String getRequestStringForTrace ( HttpServletRequest request , String [ ] secretStrings ) { if ( request == null || request . getRequestURL ( ) == null ) { return "[]" ; } StringBuffer sb = new StringBuffer ( "[" + stripSecretsFromUrl ( request . getRequestURL ( ) . toString ( ) , secretStrings ) + "]" ) ; String query = request . getQueryString ( ) ; if ( query != null ) { String queryString = stripSecretsFromUrl ( query , secretStrings ) ; if ( queryString != null ) { sb . append ( ", queryString[" + queryString + "]" ) ; } } else { Map < String , String [ ] > pMap = request . getParameterMap ( ) ; String paramString = stripSecretsFromParameters ( pMap , secretStrings ) ; if ( paramString != null ) { sb . append ( ", parameters[" + paramString + "]" ) ; } } return sb . toString ( ) ; }
|
information and returns a string for tracing
| 228
| 7
|
159,301
|
public void deleteAbstractAliasDestinationHandler ( AbstractAliasDestinationHandler abstractAliasDestinationHandler ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "deleteAbstractAliasDestinationHandler" ) ; //The destination is an alias or a foreign destination, so its not persisted //It is removed immediately from the appropriate index if ( abstractAliasDestinationHandler instanceof BusHandler ) { _foreignBusIndex . remove ( abstractAliasDestinationHandler ) ; } else { _destinationIndex . remove ( abstractAliasDestinationHandler ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "deleteAbstractAliasDestinationHandler" ) ; }
|
Delete the abstract alias destination handler
| 169
| 6
|
159,302
|
public int add ( Object dependency , ValueSet valueSet , Object entry ) { int returnCode = HTODDynacache . NO_EXCEPTION ; dependencyNotUpdatedTable . remove ( dependency ) ; valueSet . add ( entry ) ; if ( valueSet . size ( ) > this . delayOffloadEntriesLimit ) { if ( this . type == DEP_ID_TABLE ) { returnCode = this . htod . writeValueSet ( HTODDynacache . DEP_ID_DATA , dependency , valueSet , HTODDynacache . ALL ) ; // valueSet may be empty after writeValueSet this . htod . cache . getCacheStatisticsListener ( ) . depIdsOffloadedToDisk ( dependency ) ; if ( tc . isDebugEnabled ( ) ) Tr . debug ( tc , "***** add dependency id=" + dependency + " size=" + valueSet . size ( ) ) ; } else { returnCode = this . htod . writeValueSet ( HTODDynacache . TEMPLATE_ID_DATA , dependency , valueSet , HTODDynacache . ALL ) ; // valueSet may be empty after writeValueSet this . htod . cache . getCacheStatisticsListener ( ) . templatesOffloadedToDisk ( dependency ) ; if ( tc . isDebugEnabled ( ) ) Tr . debug ( tc , "***** add dependency id=" + dependency + " size=" + valueSet . size ( ) ) ; } dependencyToEntryTable . remove ( dependency ) ; if ( returnCode == HTODDynacache . DISK_SIZE_OVER_LIMIT_EXCEPTION && valueSet . size ( ) > 0 ) { this . htod . delCacheEntry ( valueSet , CachePerf . DISK_OVERFLOW , CachePerf . LOCAL , ! Cache . FROM_DEPID_TEMPLATE_INVALIDATION , HTODInvalidationBuffer . FIRE_EVENT ) ; returnCode = HTODDynacache . NO_EXCEPTION ; } } return returnCode ; }
|
This adds a entry to the ValueSet for the specified dependency . The dependency is found in the dependencyToEntryTable .
| 449
| 24
|
159,303
|
public int add ( Object dependency , ValueSet valueSet ) { int returnCode = HTODDynacache . NO_EXCEPTION ; if ( dependencyToEntryTable . size ( ) >= this . maxSize ) { returnCode = reduceTableSize ( ) ; } dependencyNotUpdatedTable . put ( dependency , valueSet ) ; dependencyToEntryTable . put ( dependency , valueSet ) ; return returnCode ; }
|
This adds a new dependency with its valueSet to the DependencyToEntryTable .
| 88
| 17
|
159,304
|
public int replace ( Object dependency , ValueSet valueSet ) { int returnCode = HTODDynacache . NO_EXCEPTION ; dependencyNotUpdatedTable . remove ( dependency ) ; if ( valueSet != null && valueSet . size ( ) > this . delayOffloadEntriesLimit ) { dependencyToEntryTable . remove ( dependency ) ; if ( this . type == DEP_ID_TABLE ) { returnCode = this . htod . writeValueSet ( HTODDynacache . DEP_ID_DATA , dependency , valueSet , HTODDynacache . ALL ) ; // valueSet may be empty after writeValueSet this . htod . cache . getCacheStatisticsListener ( ) . depIdsOffloadedToDisk ( dependency ) ; //System.out.println("***** replace dependency id=" + dependency + " size=" + valueSet.size()); } else { returnCode = this . htod . writeValueSet ( HTODDynacache . TEMPLATE_ID_DATA , dependency , valueSet , HTODDynacache . ALL ) ; // valueSet may be empty after writeValueSet this . htod . cache . getCacheStatisticsListener ( ) . templatesOffloadedToDisk ( dependency ) ; //System.out.println("***** replace template id=" + dependency + " size=" + valueSet.size()); } } else { if ( valueSet . size ( ) > 0 ) { dependencyToEntryTable . put ( dependency , valueSet ) ; } else { dependencyToEntryTable . remove ( dependency ) ; } } if ( returnCode == HTODDynacache . DISK_SIZE_OVER_LIMIT_EXCEPTION && valueSet . size ( ) > 0 ) { this . htod . delCacheEntry ( valueSet , CachePerf . DISK_OVERFLOW , CachePerf . LOCAL , ! Cache . FROM_DEPID_TEMPLATE_INVALIDATION , HTODInvalidationBuffer . FIRE_EVENT ) ; returnCode = HTODDynacache . NO_EXCEPTION ; } return returnCode ; }
|
This replaces the existing dependency with new valueSet in DependencyToEntryTable .
| 459
| 16
|
159,305
|
public Result removeEntry ( Object dependency , Object entry ) { Result result = this . htod . getFromResultPool ( ) ; ValueSet valueSet = ( ValueSet ) dependencyToEntryTable . get ( dependency ) ; if ( valueSet == null ) { return result ; } result . bExist = HTODDynacache . EXIST ; valueSet . remove ( entry ) ; dependencyNotUpdatedTable . remove ( dependency ) ; if ( valueSet . isEmpty ( ) ) { dependencyToEntryTable . remove ( dependency ) ; if ( this . type == DEP_ID_TABLE ) { result . returnCode = this . htod . delValueSet ( HTODDynacache . DEP_ID_DATA , dependency ) ; } else { result . returnCode = this . htod . delValueSet ( HTODDynacache . TEMPLATE_ID_DATA , dependency ) ; } } return result ; }
|
This removes the specified entry from the specified dependency .
| 203
| 10
|
159,306
|
public ValueSet getEntries ( Object dependency ) { ValueSet valueSet = ( ValueSet ) dependencyToEntryTable . get ( dependency ) ; return valueSet ; }
|
This returns the ValueSet for the specified dependency from the DependencyToEntryTable .
| 35
| 17
|
159,307
|
private int reduceTableSize ( ) { int returnCode = HTODDynacache . NO_EXCEPTION ; int count = this . entryRemove ; if ( count > 0 ) { int removeSize = 5 ; while ( count > 0 ) { int minSize = Integer . MAX_VALUE ; Iterator < Map . Entry < Object , Set < Object > > > e = dependencyToEntryTable . entrySet ( ) . iterator ( ) ; while ( e . hasNext ( ) ) { Map . Entry entry = ( Map . Entry ) e . next ( ) ; Object id = entry . getKey ( ) ; ValueSet vs = ( ValueSet ) entry . getValue ( ) ; int vsSize = vs . size ( ) ; if ( vsSize < removeSize ) { if ( this . type == DEP_ID_TABLE ) { returnCode = this . htod . writeValueSet ( HTODDynacache . DEP_ID_DATA , id , vs , HTODDynacache . ALL ) ; // valueSet may be empty after writeValueSet this . htod . cache . getCacheStatisticsListener ( ) . depIdsOffloadedToDisk ( id ) ; Tr . debug ( tc , " reduceTableSize dependency id=" + id + " vs=" + vs . size ( ) + " returnCode=" + returnCode ) ; } else { returnCode = this . htod . writeValueSet ( HTODDynacache . TEMPLATE_ID_DATA , id , vs , HTODDynacache . ALL ) ; // valueSet may be empty after writeValueSet this . htod . cache . getCacheStatisticsListener ( ) . templatesOffloadedToDisk ( id ) ; Tr . debug ( tc , "reduceTableSize template id=" + id + " vs=" + vs . size ( ) + " returnCode=" + returnCode ) ; } dependencyToEntryTable . remove ( id ) ; dependencyNotUpdatedTable . remove ( id ) ; count -- ; if ( returnCode == HTODDynacache . DISK_EXCEPTION ) { return returnCode ; } else if ( returnCode == HTODDynacache . DISK_SIZE_OVER_LIMIT_EXCEPTION ) { this . htod . delCacheEntry ( vs , CachePerf . DISK_OVERFLOW , CachePerf . LOCAL , ! Cache . FROM_DEPID_TEMPLATE_INVALIDATION , HTODInvalidationBuffer . FIRE_EVENT ) ; returnCode = HTODDynacache . NO_EXCEPTION ; return returnCode ; } } else { minSize = vsSize < minSize ? vsSize : minSize ; } if ( count == 0 ) { break ; } } removeSize = minSize ; removeSize += 3 ; } } return returnCode ; }
|
This reduces the DependencyToEntryTable size by offloading some dependencies to the disk .
| 613
| 18
|
159,308
|
private static String getClassName ( String implClassName ) { implClassName = implClassName . substring ( 0 , implClassName . length ( ) - 4 ) ; return implClassName + "ComponentImpl" ; }
|
The model interface type has a name ending in Type . For the moment modify it here .
| 48
| 18
|
159,309
|
public void registerThread ( StoppableThread thread ) { if ( tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "registerThread" , thread ) ; synchronized ( this ) { _threadCache . add ( thread ) ; } if ( tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "registerThread" ) ; }
|
Registers a new thread for stopping
| 77
| 7
|
159,310
|
public void deregisterThread ( StoppableThread thread ) { if ( tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "deregisterThread" , thread ) ; synchronized ( this ) { _threadCache . remove ( thread ) ; } if ( tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "deregisterThread" ) ; }
|
Deregisters a thread for stopping
| 84
| 8
|
159,311
|
public void stopAllThreads ( ) { if ( tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "stopAllThreads" ) ; synchronized ( this ) { Iterator iterator = ( ( ArrayList ) _threadCache . clone ( ) ) . iterator ( ) ; while ( iterator . hasNext ( ) ) { StoppableThread thread = ( StoppableThread ) iterator . next ( ) ; if ( tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "Attempting to stop thread " + thread ) ; // Stop the thread thread . stopThread ( this ) ; // Remove from the iterator iterator . remove ( ) ; // Remove from the cache _threadCache . remove ( thread ) ; } } if ( tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "stopAllThreads" ) ; }
|
Stops all the stoppable threads that haven t already been stopped
| 183
| 13
|
159,312
|
public ArrayList getThreads ( ) { if ( tc . isEntryEnabled ( ) ) { SibTr . entry ( tc , "getThreads" ) ; SibTr . exit ( tc , "getThreads" , _threadCache ) ; } return _threadCache ; }
|
Unit test hook to check on connected threads
| 61
| 8
|
159,313
|
private int getAndUpdateTail ( ) { int retMe ; do { retMe = tailIndex . get ( ) ; } while ( tailIndex . compareAndSet ( retMe , getNext ( retMe ) ) == false ) ; return retMe ; }
|
Atomically update and return the tailIndex .
| 55
| 10
|
159,314
|
public boolean put ( ExpirableReference expirable ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "put" , "ObjId=" + expirable . getID ( ) + " ET=" + expirable . getExpiryTime ( ) ) ; boolean reply = tree . insert ( expirable ) ; if ( reply ) { size ++ ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "put" , "reply=" + reply ) ; return reply ; }
|
Add an ExpirableReference to the expiry index .
| 142
| 12
|
159,315
|
protected static Map < String , ProductInfo > getAllProductInfo ( File wlpInstallationDirectory ) throws VersionParsingException { File versionPropertyDirectory = new File ( wlpInstallationDirectory , ProductInfo . VERSION_PROPERTY_DIRECTORY ) ; if ( ! versionPropertyDirectory . exists ( ) ) { throw new VersionParsingException ( CommandUtils . getMessage ( "ERROR_NO_PROPERTIES_DIRECTORY" , versionPropertyDirectory . getAbsoluteFile ( ) ) ) ; } if ( ! versionPropertyDirectory . isDirectory ( ) ) { throw new VersionParsingException ( CommandUtils . getMessage ( "ERROR_NOT_PROPERTIES_DIRECTORY" , versionPropertyDirectory . getAbsoluteFile ( ) ) ) ; } if ( ! versionPropertyDirectory . canRead ( ) ) { throw new VersionParsingException ( CommandUtils . getMessage ( "ERROR_UNABLE_READ_PROPERTIES_DIRECTORY" , versionPropertyDirectory . getAbsoluteFile ( ) ) ) ; } Map < String , ProductInfo > productIdToVersionPropertiesMap ; try { productIdToVersionPropertiesMap = ProductInfo . getAllProductInfo ( wlpInstallationDirectory ) ; } catch ( IllegalArgumentException e ) { throw new VersionParsingException ( CommandUtils . getMessage ( "ERROR_UNABLE_READ_PROPERTIES_DIRECTORY" , versionPropertyDirectory . getAbsoluteFile ( ) ) ) ; } catch ( ProductInfoParseException e ) { String missingKey = e . getMissingKey ( ) ; if ( missingKey != null ) { throw new VersionParsingException ( CommandUtils . getMessage ( "version.missing.key" , missingKey , e . getFile ( ) . getAbsoluteFile ( ) ) ) ; } throw new VersionParsingException ( CommandUtils . getMessage ( "ERROR_UNABLE_READ_FILE" , e . getFile ( ) . getAbsoluteFile ( ) , e . getCause ( ) . getMessage ( ) ) ) ; } catch ( DuplicateProductInfoException e ) { throw new VersionParsingException ( CommandUtils . getMessage ( "version.duplicated.productId" , ProductInfo . COM_IBM_WEBSPHERE_PRODUCTID_KEY , e . getProductInfo1 ( ) . getFile ( ) . getAbsoluteFile ( ) , e . getProductInfo2 ( ) . getFile ( ) . 
getAbsoluteFile ( ) ) ) ; } catch ( ProductInfoReplaceException e ) { ProductInfo productInfo = e . getProductInfo ( ) ; String replacesId = productInfo . getReplacesId ( ) ; if ( replacesId . equals ( productInfo . getId ( ) ) ) { throw new VersionParsingException ( CommandUtils . getMessage ( "version.replaced.product.can.not.itself" , productInfo . getFile ( ) . getAbsoluteFile ( ) ) ) ; } throw new VersionParsingException ( CommandUtils . getMessage ( "version.replaced.product.not.exist" , replacesId , productInfo . getFile ( ) . getAbsoluteFile ( ) ) ) ; } if ( productIdToVersionPropertiesMap . isEmpty ( ) ) { throw new VersionParsingException ( CommandUtils . getMessage ( "ERROR_NO_PROPERTIES_FILE" , versionPropertyDirectory . getAbsoluteFile ( ) ) ) ; } return productIdToVersionPropertiesMap ; }
|
This method will create a map of product ID to the VersionProperties for that product .
| 776
| 18
|
159,316
|
public boolean remove ( V value ) { // peek at what is in the map K key = value . getKey ( ) ; Ref < V > ref = map . get ( key ) ; // only try to remove the mapping if it matches the provided class loader return ( ref != null && ref . get ( ) == value ) ? map . remove ( key , ref ) : false ; }
|
Remove any mapping for the provided id
| 80
| 7
|
159,317
|
public V retrieveOrCreate ( K key , Factory < V > factory ) { // Clean up stale entries on every put. // This should avoid a slow memory leak of reference objects. this . cleanUpStaleEntries ( ) ; return retrieveOrCreate ( key , factory , new FutureRef < V > ( ) ) ; }
|
Create a value for the given key iff one has not already been stored . This method is safe to be called concurrently from multiple threads . It will ensure that only one thread succeeds to create the value for the given key .
| 68
| 45
|
159,318
|
void cleanUpStaleEntries ( ) { for ( KeyedRef < K , V > ref = q . poll ( ) ; ref != null ; ref = q . poll ( ) ) { map . remove ( ref . getKey ( ) , ref ) ; // CONCURRENT remove() operation } }
|
clean up stale entries
| 63
| 4
|
159,319
|
public Object nextElement ( ) { if ( _array == null ) { return null ; } else { synchronized ( this ) { if ( _index < _array . length ) { Object obj = _array [ _index ] ; _index ++ ; return obj ; } else { return null ; } } } }
|
nextElement method comment .
| 64
| 5
|
159,320
|
public int execute ( String [ ] args ) { Map < String , LevelDetails > levels = readLevels ( System . getProperty ( "logviewer.custom.levels" ) ) ; String [ ] header = readHeader ( System . getProperty ( "logviewer.custom.header" ) ) ; return execute ( args , levels , header ) ; }
|
Runs LogViewer using values in System Properties to find custom levels and header .
| 76
| 17
|
159,321
|
public int execute ( String [ ] args , Map < String , LevelDetails > levels , String [ ] header ) { levelString = getLevelsString ( levels ) ; RepositoryReaderImpl logRepository ; try { // Parse the command line arguments and validate arguments if ( parseCmdLineArgs ( args ) || validateSettings ( ) ) { return 0 ; } // Setup custom header here since parseCmdLineArgs may alter the formatter. if ( header != null ) { theFormatter . setCustomHeader ( header ) ; } // Call HPEL repository API to get log entries logRepository = new RepositoryReaderImpl ( binaryRepositoryDir ) ; if ( mainInstanceId != null ) { // Verify requested instance ID. ServerInstanceLogRecordList silrl = logRepository . getLogListForServerInstance ( mainInstanceId ) ; if ( silrl == null || silrl . getStartTime ( ) == null || ! silrl . getStartTime ( ) . equals ( mainInstanceId ) || ( subInstanceId != null && ! subInstanceId . isEmpty ( ) && ! silrl . getChildren ( ) . containsKey ( subInstanceId ) ) ) { throw new IllegalArgumentException ( getLocalizedString ( "LVM_ERROR_INSTANCEID" ) ) ; } } // Create the output stream (either an output file or Console) PrintStream outps = createOutputStream ( ) ; /* * Create a filter object with our search criteria, passing null for startDate and stopDate as we will * be using the API to search by date for efficiency. */ LogViewerFilter searchCriteria = new LogViewerFilter ( startDate , stopDate , minLevel , maxLevel , includeLoggers , excludeLoggers , hexThreadID , message , excludeMessages , extensions ) ; //Determine if we just display instances or start displaying records based on the -listInstances option if ( listInstances ) { Iterable < ServerInstanceLogRecordList > results = logRepository . getLogLists ( ) ; displayInstances ( outps , results ) ; } else { Properties initialProps = logRepository . getLogListForCurrentServerInstance ( ) . getHeader ( ) ; if ( initialProps != null ) { boolean isZOS = "Y" . equalsIgnoreCase ( initialProps . 
getProperty ( ServerInstanceLogRecordList . HEADER_ISZOS ) ) ; //instanceId is required for z/OS. If it was not provided, then list the possible instances if ( isZOS && ! latestInstance && mainInstanceId == null ) { Iterable < ServerInstanceLogRecordList > results = logRepository . getLogLists ( ) ; displayInstances ( outps , results ) ; } else { displayRecords ( outps , searchCriteria , logRepository , mainInstanceId , subInstanceId ) ; } } } } catch ( IllegalArgumentException e ) { System . err . println ( e . getMessage ( ) ) ; return 1 ; } return 0 ; }
|
Runs LogViewer .
| 643
| 6
|
159,322
|
private long collectAllKids ( ArrayList < DisplayInstance > result , ServerInstanceLogRecordList list ) { long timestamp = - 1 ; RepositoryLogRecord first = list . iterator ( ) . next ( ) ; if ( first != null ) { timestamp = first . getMillis ( ) ; } for ( Entry < String , ServerInstanceLogRecordList > kid : list . getChildren ( ) . entrySet ( ) ) { ArrayList < DisplayInstance > kidResult = new ArrayList < DisplayInstance > ( ) ; long curTimestamp = collectAllKids ( kidResult , kid . getValue ( ) ) ; // Add this kid only if there's a record among its descendants if ( curTimestamp > 0 ) { result . add ( new DisplayInstance ( kid . getKey ( ) , Long . toString ( list . getStartTime ( ) . getTime ( ) ) , curTimestamp , kidResult ) ) ; if ( timestamp < curTimestamp ) { timestamp = curTimestamp ; } } } return timestamp ; }
|
collects all descendant instances into an array and calculates largest timestamp of their first records .
| 215
| 17
|
159,323
|
private Level createLevelByString ( String levelString ) throws IllegalArgumentException { try { return Level . parse ( levelString . toUpperCase ( ) ) ; //return WsLevel.parse(levelString.toUpperCase()); } catch ( Exception npe ) { throw new IllegalArgumentException ( getLocalizedParmString ( "CWTRA0013E" , new Object [ ] { levelString } ) ) ; } }
|
This method creates a java . util . Level object from a string with the level name .
| 95
| 18
|
159,324
|
void setInstanceId ( String instanceId ) throws IllegalArgumentException { if ( instanceId != null && ! "" . equals ( instanceId ) ) { subInstanceId = getSubProcessInstanceId ( instanceId ) ; try { long id = getProcessInstanceId ( instanceId ) ; mainInstanceId = id < 0 ? null : new Date ( id ) ; } catch ( NumberFormatException nfe ) { throw new IllegalArgumentException ( getLocalizedString ( "LVM_ERROR_INSTANCEID" ) , nfe ) ; } } }
|
Parses the instanceId into the requested main process instanceId and the subprocess instanceid . The main process instanceId must be a long value as the main instance Id is a timestamp .
| 117
| 39
|
159,325
|
protected File [ ] listRepositoryChoices ( ) { // check current location String currentDir = System . getProperty ( "log.repository.root" ) ; if ( currentDir == null ) currentDir = System . getProperty ( "user.dir" ) ; File logDir = new File ( currentDir ) ; if ( logDir . isDirectory ( ) ) { File [ ] result = RepositoryReaderImpl . listRepositories ( logDir ) ; if ( result . length == 0 && ( RepositoryReaderImpl . containsLogFiles ( logDir ) || tailInterval > 0 ) ) { return new File [ ] { logDir } ; } else { return result ; } } else { return new File [ ] { } ; } }
|
Lists directories containing HPEL repositories . It is called when repository is not specified explicitly in the arguments
| 158
| 20
|
159,326
|
private int skipPast ( byte [ ] data , int pos , byte target ) { int index = pos ; while ( index < data . length ) { if ( target == data [ index ++ ] ) { return index ; } } return index ; }
|
Skip until it runs out of input data or finds the target byte .
| 51
| 14
|
159,327
|
private int parseTrailer ( byte [ ] input , int inOffset , List < WsByteBuffer > list ) throws DataFormatException { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Parsing trailer, offset=" + this . parseOffset + " val=" + this . parseInt ) ; } int offset = inOffset ; long val = 0L ; // bytes are in lowest order first while ( 8 > this . parseOffset && offset < input . length ) { switch ( this . parseOffset ) { // even bytes are just going to save the first byte of an int case 0 : case 2 : case 4 : case 6 : this . parseFirstByte = input [ offset ] & 0xff ; break ; // bytes 1 and 5 are the 2nd byte of that int case 1 : case 5 : this . parseInt = ( ( input [ offset ] & 0xff ) << 8 ) | this . parseFirstByte ; break ; // 3 and 7 mark the final bytes of the 2 int values case 3 : // reached the end of the checksum int val = ( ( input [ offset ] & 0xff ) << 8 ) | this . parseFirstByte ; val = ( val << 16 ) | this . parseInt ; if ( this . checksum . getValue ( ) != val ) { String msg = "Checksum does not match; crc=" + this . checksum . getValue ( ) + " trailer=" + val ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , msg ) ; } release ( list ) ; throw new DataFormatException ( msg ) ; } break ; case 7 : // reached the end of the "num bytes" int val = ( ( input [ offset ] & 0xff ) << 8 ) | this . parseFirstByte ; val = ( val << 16 ) | this . parseInt ; if ( this . inflater . getBytesWritten ( ) != val ) { String msg = "BytesWritten does not match; inflater=" + this . inflater . getBytesWritten ( ) + " trailer=" + val ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , msg ) ; } release ( list ) ; throw new DataFormatException ( msg ) ; } // having fully parsed the trailer, if we are going to re-enter decompression, then we need to reset this . resetNeededToProceed = true ; break ; default : break ; } offset ++ ; this . 
parseOffset ++ ; } return offset ; }
|
Parse past the GZIP trailer information . This is the two ints for the CRC32 checksum validation .
| 566
| 24
|
159,328
|
public void logClosedException ( Exception e ) { // Note: this may be a normal occurance so don't log error, just debug. if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Connection closed with exception: " + e . getMessage ( ) ) ; } }
|
This logs the error if the connection was closed by a higher level channel with an error .
| 76
| 18
|
159,329
|
void outOfScope ( ) { final String methodName = "outOfScope" ; if ( TRACE . isEntryEnabled ( ) ) { SibTr . entry ( this , TRACE , methodName ) ; } _outOfScope = true ; try { // Close cloned connection so that it, and any resource created // from it, also throw SIObjectClosedException _connectionClone . close ( ) ; } catch ( final SIException exception ) { FFDCFilter . processException ( exception , "com.ibm.ws.sib.ra.inbound.impl.SibRaAbstractConsumerSession.outOfScope" , FFDC_PROBE_1 , this ) ; if ( TRACE . isEventEnabled ( ) ) { SibTr . exception ( this , TRACE , exception ) ; } // Swallow exception } catch ( final SIErrorException exception ) { FFDCFilter . processException ( exception , "com.ibm.ws.sib.ra.inbound.impl.SibRaAbstractConsumerSession.outOfScope" , FFDC_PROBE_2 , this ) ; if ( TRACE . isEventEnabled ( ) ) { SibTr . exception ( this , TRACE , exception ) ; } // Swallow exception } if ( TRACE . isEntryEnabled ( ) ) { SibTr . exit ( this , TRACE , methodName ) ; } }
|
Called to indicate that this session is now out of scope .
| 301
| 13
|
159,330
|
public void createAppToSecurityRolesMapping ( String appName , Collection < SecurityRole > securityRoles ) { //only add it if we don't have a cached copy appToSecurityRolesMap . putIfAbsent ( appName , securityRoles ) ; }
|
Creates the application to security roles mapping for a given application .
| 58
| 13
|
159,331
|
public void removeRoleToRunAsMapping ( String appName ) { Map < String , RunAs > roleToRunAsMap = roleToRunAsMappingPerApp . get ( appName ) ; if ( roleToRunAsMap != null ) { roleToRunAsMap . clear ( ) ; } appToSecurityRolesMap . remove ( appName ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Updated the appToSecurityRolesMap: " + appToSecurityRolesMap . toString ( ) ) ; } removeRoleToWarningMapping ( appName ) ; }
|
Removes the role to RunAs mappings for a given application .
| 144
| 14
|
159,332
|
public void removeRoleToWarningMapping ( String appName ) { Map < String , Boolean > roleToWarningMap = roleToWarningMappingPerApp . get ( appName ) ; if ( roleToWarningMap != null ) { roleToWarningMap . clear ( ) ; } roleToWarningMappingPerApp . remove ( appName ) ; }
|
Removes the role to warning mappings for a given application .
| 74
| 13
|
159,333
|
private static void determineHandlers ( ) { /* * find the handlers that we are dispatching to */ if ( textHandler != null && ! logRepositoryConfiguration . isTextEnabled ( ) ) textHandler = null ; // Don't do this work if only text and text is not enabled (if so, it will always be null) 666241.1 if ( binaryHandler != null && ! logRepositoryConfiguration . isTextEnabled ( ) ) return ; Handler [ ] handlers = Logger . getLogger ( "" ) . getHandlers ( ) ; for ( Handler handler : handlers ) { String name = handler . getClass ( ) . getName ( ) ; if ( BINLOGGER_HANDLER_NAME . equals ( name ) ) { binaryHandler = ( LogRecordHandler ) handler ; } else if ( TEXTLOGGER_HANDLER_NAME . equals ( name ) ) { textHandler = ( LogRecordTextHandler ) handler ; } } }
|
determine two handlers needed by the repository
| 201
| 9
|
159,334
|
public void close ( ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "close" ) ; Enumeration streams = null ; synchronized ( this ) { synchronized ( streamTable ) { closed = true ; closeBrowserSessionsInternal ( ) ; streams = streamTable . elements ( ) ; } } // since already set closed to true, no more AOStreams will be created and added to the streamTable, // even if there is an // asynchronous stream creation in progress (the itemStream will be created, but no AOStream will // be added to the streamTable). while ( streams . hasMoreElements ( ) ) { StreamInfo sinfo = ( StreamInfo ) streams . nextElement ( ) ; if ( sinfo . stream != null ) { sinfo . stream . close ( ) ; } } synchronized ( streamTable ) { streamTable . clear ( ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "close" ) ; }
|
Cleans up the non - persistent state . No methods on the ControlHandler interface should be called after this is called .
| 237
| 24
|
159,335
|
public boolean cleanup ( boolean flushStreams , boolean redriveDeletionThread ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "cleanup" , new Object [ ] { Boolean . valueOf ( flushStreams ) , Boolean . valueOf ( redriveDeletionThread ) } ) ; boolean retvalue = false ; // first check if already finishedCloseAndFlush, since this call can be redriven synchronized ( streamTable ) { if ( finishedCloseAndFlush ) { retvalue = true ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "cleanup" , Boolean . valueOf ( retvalue ) ) ; return retvalue ; } } if ( ! flushStreams ) { // Simply cleanup the non-persistent state. The persistent state will get cleaned up when // the caller deletes everything from the AOContainerItemStream close ( ) ; retvalue = true ; } else { // have to flush all the streams closeAndFlush ( redriveDeletionThread ) ; synchronized ( streamTable ) { retvalue = finishedCloseAndFlush ; } } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "cleanup" , Boolean . valueOf ( retvalue ) ) ; return retvalue ; }
|
Cleanup the state in this AnycastOutputHandler . Called when this localisation is being deleted .
| 314
| 20
|
159,336
|
public final AOStream getAOStream ( String streamKey , SIBUuid12 streamId ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "getAOStream" , new Object [ ] { streamKey , streamId } ) ; StreamInfo streamInfo = getStreamInfo ( streamKey , streamId ) ; if ( streamInfo != null ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "getAOStream" , streamInfo . stream ) ; return streamInfo . stream ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "getAOStream" , null ) ; return null ; }
|
for unit testing
| 188
| 3
|
159,337
|
private final void handleControlBrowseStatus ( SIBUuid8 remoteME , SIBUuid12 gatheringTargetDestUuid , long browseId , int status ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "handleControlBrowseStatus" , new Object [ ] { remoteME , gatheringTargetDestUuid , Long . valueOf ( browseId ) , Integer . valueOf ( status ) } ) ; // first we see if there is an existing AOBrowseSession AOBrowserSessionKey key = new AOBrowserSessionKey ( remoteME , gatheringTargetDestUuid , browseId ) ; AOBrowserSession session = ( AOBrowserSession ) browserSessionTable . get ( key ) ; if ( session != null ) { if ( status == SIMPConstants . BROWSE_CLOSE ) { session . close ( ) ; browserSessionTable . remove ( key ) ; } else if ( status == SIMPConstants . BROWSE_ALIVE ) { session . keepAlive ( ) ; } } else { // session == null. ignore the status message } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "handleControlBrowseStatus" ) ; }
|
Method to handle a ControlBrowseStatus message from an RME
| 290
| 13
|
159,338
|
/**
 * Remove an AOBrowserSession that is already closed from the session table.
 *
 * @param key the key of the closed browser session
 */
public final void removeBrowserSession(AOBrowserSessionKey key) {
    final boolean traceOn = TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled();
    if (traceOn)
        SibTr.entry(tc, "removeBrowserSession", key);

    browserSessionTable.remove(key);

    if (traceOn)
        SibTr.exit(tc, "removeBrowserSession");
}
|
Remove an AOBrowserSession that is already closed
| 100
| 11
|
159,339
|
/**
 * Helper used to locate the StreamInfo for a stream key, accepting it only when
 * the stored stream identity matches the one supplied. Handles its own
 * synchronization.
 *
 * @param streamKey key under which the stream info is registered
 * @param streamId  expected stream identity
 * @return the matching StreamInfo, or null if absent or identity mismatch
 */
private final StreamInfo getStreamInfo(String streamKey, SIBUuid12 streamId) {
    final boolean traceOn = TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled();
    if (traceOn)
        SibTr.entry(tc, "getStreamInfo", new Object[] { streamKey, streamId });

    final StreamInfo candidate = streamTable.get(streamKey);
    final StreamInfo result =
        (candidate != null && candidate.streamId.equals(streamId)) ? candidate : null;

    if (traceOn)
        SibTr.exit(tc, "getStreamInfo", result);
    return result;
}
|
Helper method used to dispatch a message received for a particular stream . Handles its own synchronization
| 191
| 18
|
159,340
|
/**
 * Callback from a stream that it has been flushed. Schedules asynchronous
 * removal of the stream's persistent data, but only if the stream table still
 * maps this stream's key to the same stream identity.
 *
 * @param stream the AOStream that has completed its flush
 */
public final void streamIsFlushed(AOStream stream) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "streamIsFlushed", stream);

    // we schedule an asynchronous removal of the persistent data
    synchronized (streamTable) {
        String key = SIMPUtils.getRemoteGetKey(stream.getRemoteMEUuid(), stream.getGatheringTargetDestUuid());
        StreamInfo sinfo = streamTable.get(key);
        // only act when the table entry still refers to this exact stream
        if ((sinfo != null) && sinfo.streamId.equals(stream.streamId)) {
            RemovePersistentStream update = null;
            synchronized (sinfo) { // synchronized since reading sinfo.item
                update = new RemovePersistentStream(key, sinfo.streamId, sinfo.itemStream, sinfo.item);
            }
            doEnqueueWork(update);
        }
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "streamIsFlushed");
}
|
Callback from a stream that it has been flushed
| 241
| 9
|
159,341
|
/**
 * Helper called by the AOStream to persistently lock a message and create a
 * persistent tick in the protocol stream, both under the supplied transaction.
 *
 * @param t             the transaction to enlist the work in
 * @param stream        the stream whose item stream receives the new tick
 * @param tick          the tick value for the new AOValue
 * @param msg           the message to persistently lock
 * @param storagePolicy storage policy recorded in the AOValue
 * @param waitTime      wait time recorded in the AOValue
 * @param prevTick      previous tick recorded in the AOValue
 * @return the AOValue item added to the stream's item stream
 * @throws Exception any failure from the lock or addItem; rethrown to the caller
 */
public final AOValue persistLockAndTick(TransactionCommon t, AOStream stream, long tick, SIMPMessage msg, int storagePolicy, long waitTime, long prevTick) throws Exception {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "persistLockAndTick", new Object[] { t, stream, Long.valueOf(tick), msg, Integer.valueOf(storagePolicy), Long.valueOf(waitTime), Long.valueOf(prevTick) });

    AOValue retvalue = null;
    try {
        Transaction msTran = mp.resolveAndEnlistMsgStoreTransaction(t);
        // lock first so the lock id can be captured in the AOValue
        msg.persistLock(msTran);
        long plockId = msg.getLockID();
        retvalue = new AOValue(tick, msg, msg.getID(), storagePolicy, plockId, waitTime, prevTick);
        stream.itemStream.addItem(retvalue, msTran);
    } catch (Exception e) {
        // No FFDC code needed
        retvalue = null;
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "persistLockAndTick", e);
        throw e;
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "persistLockAndTick", retvalue);
    return retvalue;
}
|
Helper method called by the AOStream to persistently lock a message and create a persistent tick in the protocol stream
| 342
| 24
|
159,342
|
/**
 * Helper called by the AOStream when persistent ticks representing persistently
 * locked messages should be removed, because we are flushing or cleaning up
 * state. For each stored tick, the owning ConsumerDispatcher is located, the
 * message (if still present) is unlocked, and the tick item is removed, all
 * under the supplied transaction.
 *
 * @param sinfo      stream info owning the ticks (traced only)
 * @param t          transaction under which unlock/removal are performed
 * @param valueTicks list of AOValue ticks to clean up
 * @throws MessageStoreException on message store failure; rethrown to the caller
 * @throws SIResourceException on resource failure
 */
public final void cleanupTicks(StreamInfo sinfo, TransactionCommon t, ArrayList valueTicks) throws MessageStoreException, SIResourceException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "cleanupTicks", new Object[] { sinfo, t, valueTicks });

    try {
        int length = valueTicks.size();
        for (int i = 0; i < length; i++) {
            AOValue storedTick = (AOValue) valueTicks.get(i);
            // If we are here then we do not know which consumerDispatcher originally
            // persistently locked the message. We therefore have to use the meUuid in
            // the AOValue to find/reconstitute the consumerDispatcher associated with it. This
            // potentially involves creating AIHs which is not ideal.
            ConsumerDispatcher cd = null;
            if (storedTick.getSourceMEUuid() == null || storedTick.getSourceMEUuid().equals(getMessageProcessor().getMessagingEngineUuid())) {
                cd = (ConsumerDispatcher) destinationHandler.getLocalPtoPConsumerManager();
            } else {
                AnycastInputHandler aih = destinationHandler.getAnycastInputHandler(storedTick.getSourceMEUuid(), null, true);
                cd = aih.getRCD();
            }
            SIMPMessage msg = null;
            synchronized (storedTick) {
                msg = (SIMPMessage) cd.getMessageByValue(storedTick);
                // no message backing this tick: mark the tick to be flushed instead
                if (msg == null) {
                    storedTick.setToBeFlushed();
                }
            }
            Transaction msTran = mp.resolveAndEnlistMsgStoreTransaction(t);
            // only unlock when the message is still held under this tick's lock id
            if (msg != null && msg.getLockID() == storedTick.getPLockId())
                msg.unlockMsg(storedTick.getPLockId(), msTran, true);
            storedTick.lockItemIfAvailable(controlItemLockID); // should always be successful
            storedTick.remove(msTran, controlItemLockID);
        }
    } catch (MessageStoreException e) {
        // No FFDC code needed
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "cleanupTicks", e);
        throw e;
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "cleanupTicks");
}
|
Helper method called by the AOStream when a persistent tick representing a persistently locked message should be removed since we are flushing or cleaning up state .
| 579
| 31
|
159,343
|
/**
 * Helper used by AOStream to persistently record that flush has been started.
 * Adds an AOStartedFlushItem to the container item stream under the supplied
 * transaction. Failure to find a matching stream entry is an internal error.
 *
 * @param t      transaction under which the item is added
 * @param stream stream whose flush is being recorded
 * @return the AOStartedFlushItem that was added
 * @throws Exception SIErrorException if the stream table has no matching entry,
 *         or any failure from addItem
 */
public final Item writeStartedFlush(TransactionCommon t, AOStream stream) throws Exception {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "writeStartedFlush");

    String key = SIMPUtils.getRemoteGetKey(stream.getRemoteMEUuid(), stream.getGatheringTargetDestUuid());
    StreamInfo sinfo = streamTable.get(key);
    // only write the marker when the table still maps this key to this stream
    if ((sinfo != null) && sinfo.streamId.equals(stream.streamId)) {
        AOStartedFlushItem item = new AOStartedFlushItem(key, stream.streamId);
        Transaction msTran = mp.resolveAndEnlistMsgStoreTransaction(t);
        this.containerItemStream.addItem(item, msTran);
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "writeStartedFlush", item);
        return item;
    }

    // this should not occur
    // log error and throw exception
    SIErrorException e = new SIErrorException(nls.getFormattedMessage("INTERNAL_MESSAGING_ERROR_CWSIP0001", new Object[] { "com.ibm.ws.sib.processor.impl.AnycastOutputHandler", "1:2810:1.89.4.1" }, null));
    // FFDC
    FFDCFilter.processException(e, "com.ibm.ws.sib.processor.impl.AnycastOutputHandler.writeStartedFlush", "1:2817:1.89.4.1", this);
    SibTr.exception(tc, e);
    SibTr.error(tc, "INTERNAL_MESSAGING_ERROR_CWSIP0001", new Object[] { "com.ibm.ws.sib.processor.impl.AnycastOutputHandler", "1:2822:1.89.4.1" });
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "writeStartedFlush", e);
    throw e;
}
|
Helper method used by AOStream to persistently record that flush has been started
| 512
| 16
|
159,344
|
/**
 * Callback when the Item that records that flush has been started has been
 * committed to persistent storage. Stores the committed item in the matching
 * StreamInfo. Failure to find a matching stream entry is an internal error.
 *
 * @param stream           stream whose started-flush item was committed
 * @param startedFlushItem the committed AOStartedFlushItem
 * @throws SIErrorException if the stream table has no matching entry
 */
public final void writtenStartedFlush(AOStream stream, Item startedFlushItem) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "writtenStartedFlush");

    String key = SIMPUtils.getRemoteGetKey(stream.getRemoteMEUuid(), stream.getGatheringTargetDestUuid());
    StreamInfo sinfo = streamTable.get(key);
    if ((sinfo != null) && sinfo.streamId.equals(stream.streamId)) {
        // writers of sinfo.item synchronize on sinfo (see streamIsFlushed)
        synchronized (sinfo) {
            sinfo.item = (AOStartedFlushItem) startedFlushItem;
        }
    } else {
        // this should not occur
        // log error and throw exception
        SIErrorException e = new SIErrorException(nls.getFormattedMessage("INTERNAL_MESSAGING_ERROR_CWSIP0001", new Object[] { "com.ibm.ws.sib.processor.impl.AnycastOutputHandler", "1:2858:1.89.4.1" }, null));
        // FFDC
        FFDCFilter.processException(e, "com.ibm.ws.sib.processor.impl.AnycastOutputHandler.writtenStartedFlush", "1:2865:1.89.4.1", this);
        SibTr.exception(tc, e);
        SibTr.error(tc, "INTERNAL_MESSAGING_ERROR_CWSIP0001", new Object[] { "com.ibm.ws.sib.processor.impl.AnycastOutputHandler", "1:2872:1.89.4.1" });
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "writtenStartedFlush", e);
        throw e;
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "writtenStartedFlush");
}
|
Callback when the Item that records that flush has been started has been committed to persistent storage
| 476
| 17
|
159,345
|
/**
 * Return the SIMPItemStream associated with this AOH. This method is needed for
 * proper cleanup of remote durable subscriptions since the usual item stream
 * owned by the DestinationHandler is not used.
 *
 * @return the container item stream cast to SIMPItemStream
 */
public SIMPItemStream getItemStream() {
    final boolean traceOn = TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled();
    if (traceOn) {
        SibTr.entry(tc, "getItemStream");
        SibTr.exit(tc, "getItemStream", containerItemStream);
    }
    return (SIMPItemStream) containerItemStream;
}
|
Return the SIMPItemStream associated with this AOH . This method is needed for proper cleanup of remote durable subscriptions since the usual item stream owned by the DestinationHandler is not used .
| 99
| 37
|
159,346
|
/**
 * Return the destination name which this AOH is associated with.
 *
 * @return the destination name
 */
public final String getDestName() {
    final boolean traceOn = TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled();
    if (traceOn) {
        SibTr.entry(tc, "getDestName");
        SibTr.exit(tc, "getDestName", destName);
    }
    return destName;
}
|
Return the destination name which this AOH is associated with .
| 89
| 12
|
159,347
|
/**
 * Return the destination UUID which this AOH is associated with. This method is
 * needed for proper cleanup of remote durable subscriptions since pseudo
 * destinations are used rather than the destination normally associated with
 * the DestinationHandler.
 *
 * @return the destination uuid
 */
public final SIBUuid12 getDestUUID() {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.entry(tc, "getDestUUID");
        // Fix: trace the value actually returned (destUuid); the previous code
        // traced destName here, misreporting the return value in trace output.
        SibTr.exit(tc, "getDestUUID", destUuid);
    }
    return destUuid;
}
|
Return the destination UUID which this AOH is associated with . This method is needed for proper cleanup of remote durable subscriptions since pseudo destinations are used rather than the destination normally associated with the DestinationHandler .
| 78
| 40
|
159,348
|
/**
 * Remove the persistent ticks and the itemstream and started-flush item for a
 * stream. All work (tick cleanup, item stream removal, started-flush item
 * removal) is performed in a single local transaction that is committed at the
 * end.
 *
 * @param sinfo      stream info whose persistent state is being removed
 * @param valueTicks AOValue ticks to clean up before removing the item stream
 * @throws MessageStoreException on message store failure
 * @throws SIRollbackException, SIConnectionLostException, SIIncorrectCallException,
 *         SIResourceException, SIErrorException from the transaction machinery
 */
private void deleteAndUnlockPersistentStream(StreamInfo sinfo, ArrayList valueTicks) throws MessageStoreException, SIRollbackException, SIConnectionLostException, SIIncorrectCallException, SIResourceException, SIErrorException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "deleteAndUnlockPersistentStream", new Object[] { sinfo, valueTicks });

    // create a transaction, and delete all AOValue ticks, unlock the persistent locks,
    // and then remove sinfo.itemStream, and sinfo.item
    LocalTransaction tran = getLocalTransaction();
    cleanupTicks(sinfo, tran, valueTicks);
    Transaction msTran = mp.resolveAndEnlistMsgStoreTransaction(tran);
    sinfo.itemStream.lockItemIfAvailable(controlItemLockID); // will always be available
    sinfo.itemStream.remove(msTran, controlItemLockID);
    // sinfo.item is null if flush was never started for this stream
    if (sinfo.item != null) {
        sinfo.item.lockItemIfAvailable(controlItemLockID); // will always be available
        sinfo.item.remove(msTran, controlItemLockID);
    }
    tran.commit();

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "deleteAndUnlockPersistentStream");
}
|
remove the persistent ticks and the itemstream and started - flush item
| 324
| 13
|
159,349
|
protected void init ( boolean useDirect , int outSize , int inSize , int cacheSize ) { this . useDirectBuffer = useDirect ; this . outgoingHdrBufferSize = outSize ; this . incomingBufferSize = inSize ; // if cache size has increased, then allocate the larger bytecache // array, but don't change to a smaller array if ( cacheSize > this . byteCacheSize ) { this . byteCacheSize = cacheSize ; this . byteCache = new byte [ cacheSize ] ; } }
|
Initialize this class instance with the chosen parse configuration options .
| 109
| 12
|
159,350
|
/**
 * Save a reference to a new buffer with header parse information. This is not
 * part of the created list and will not be released by this message class.
 * Lazily allocates the tracking arrays and grows them when full; unused slots
 * of the start-position array are initialised to HeaderStorage.NOTSET.
 *
 * @param buffer the parse buffer to track
 */
public void addParseBuffer(WsByteBuffer buffer) {
    // increment where we're about to put the new buffer in
    int index = ++this.parseIndex;
    if (null == this.parseBuffers) {
        // first parse buffer to track
        this.parseBuffers = new WsByteBuffer[BUFFERS_INITIAL_SIZE];
        this.parseBuffersStartPos = new int[BUFFERS_INITIAL_SIZE];
        for (int i = 0; i < BUFFERS_INITIAL_SIZE; i++) {
            this.parseBuffersStartPos[i] = HeaderStorage.NOTSET;
        }
    } else if (index == this.parseBuffers.length) {
        // grow the array
        int size = index + BUFFERS_MIN_GROWTH;
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "Increasing parse buffer array size to " + size);
        }
        WsByteBuffer[] tempNew = new WsByteBuffer[size];
        System.arraycopy(this.parseBuffers, 0, tempNew, 0, index);
        this.parseBuffers = tempNew;
        int[] posNew = new int[size];
        System.arraycopy(this.parseBuffersStartPos, 0, posNew, 0, index);
        // mark the newly-added slots as unset
        for (int i = index; i < size; i++) {
            posNew[i] = HeaderStorage.NOTSET;
        }
        this.parseBuffersStartPos = posNew;
    }
    this.parseBuffers[index] = buffer;
}
|
Save a reference to a new buffer with header parse information . This is not part of the created list and will not be released by this message class .
| 349
| 30
|
159,351
|
public void addToCreatedBuffer ( WsByteBuffer buffer ) { // increment where we're about to put the new buffer in int index = ++ this . createdIndex ; if ( null == this . myCreatedBuffers ) { // first allocation this . myCreatedBuffers = new WsByteBuffer [ BUFFERS_INITIAL_SIZE ] ; } else if ( index == this . myCreatedBuffers . length ) { // grow the array int size = index + BUFFERS_MIN_GROWTH ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Increasing created buffer array size to " + size ) ; } WsByteBuffer [ ] tempNew = new WsByteBuffer [ size ] ; System . arraycopy ( this . myCreatedBuffers , 0 , tempNew , 0 , index ) ; this . myCreatedBuffers = tempNew ; } this . myCreatedBuffers [ index ] = buffer ; }
|
Add a buffer on the list that will be manually released later .
| 215
| 13
|
159,352
|
/**
 * Clear out information on this object so that it can be re-used. Removes all
 * headers, resets the parse state machine, releases/forgets buffers, and
 * resets the forwarded-header and header-change bookkeeping.
 */
public void clear() {
    final boolean bTrace = TraceComponent.isAnyTracingEnabled();
    if (bTrace && tc.isEntryEnabled()) {
        Tr.entry(tc, "clear");
    }
    clearAllHeaders();
    this.eohPosition = HeaderStorage.NOTSET;
    this.currentElem = null;
    // reset the parse state machine back to its initial states
    this.stateOfParsing = PARSING_CRLF;
    this.binaryParsingState = GenericConstants.PARSING_HDR_FLAG;
    this.parsedToken = null;
    this.parsedTokenLength = 0;
    this.bytePosition = 0;
    this.byteLimit = 0;
    this.currentReadBB = null;
    clearBuffers();
    this.debugContext = this;
    this.numCRLFs = 0;
    this.bIsMultiLine = false;
    this.lastCRLFBufferIndex = HeaderStorage.NOTSET;
    this.lastCRLFPosition = HeaderStorage.NOTSET;
    this.lastCRLFisCR = false;
    // reset header-change tracking used for parse-buffer reuse
    this.headerChangeCount = 0;
    this.headerAddCount = 0;
    this.bOverChangeLimit = false;
    this.compactHeaderFlag = false;
    this.table = null;
    this.isPushPromise = false;
    // reset forwarded/x-forwarded header processing state
    this.processedXForwardedHeader = false;
    this.processedForwardedHeader = false;
    this.forwardHeaderErrorState = false;
    this.forwardedByList = null;
    this.forwardedForList = null;
    this.forwardedHost = null;
    this.forwardedPort = null;
    this.forwardedProto = null;
    if (bTrace && tc.isEntryEnabled()) {
        Tr.exit(tc, "clear");
    }
}
|
Clear out information on this object so that it can be re - used .
| 364
| 15
|
159,353
|
/**
 * Clear the array of buffers used during the parsing or marshalling of
 * headers. Parse buffers are only de-referenced (they are owned elsewhere),
 * while created buffers are released back to the pool. Both indices are reset
 * to the NOTSET sentinel.
 */
private void clearBuffers() {
    // simply null out the parse buffers list, then release all the created buffers
    final boolean bTrace = TraceComponent.isAnyTracingEnabled();
    for (int i = 0; i <= this.parseIndex; i++) {
        if (bTrace && tc.isDebugEnabled()) {
            Tr.debug(tc, "Removing reference to parse buffer: " + this.parseBuffers[i]);
        }
        this.parseBuffers[i] = null;
        this.parseBuffersStartPos[i] = HeaderStorage.NOTSET;
    }
    this.parseIndex = HeaderStorage.NOTSET;
    for (int i = 0; i <= this.createdIndex; i++) {
        if (bTrace && tc.isDebugEnabled()) {
            Tr.debug(tc, "Releasing marshall buffer: " + this.myCreatedBuffers[i]);
        }
        // created buffers are owned here, so they must be released to the pool
        this.myCreatedBuffers[i].release();
        this.myCreatedBuffers[i] = null;
    }
    this.createdIndex = HeaderStorage.NOTSET;
}
|
Clear the array of buffers used during the parsing or marshalling of headers .
| 239
| 15
|
159,354
|
/**
 * Print debug information on the headers to the RAS tracing log. No output is
 * produced unless debug tracing is enabled.
 */
public void debug() {
    if (!(TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())) {
        return;
    }
    Tr.debug(tc, "*** Begin Header Debug ***");
    for (HeaderElement cursor = this.hdrSequence; cursor != null; cursor = cursor.nextSequence) {
        Tr.debug(tc, cursor.getName() + ": " + cursor.getDebugValue());
    }
    Tr.debug(tc, "*** End Header Debug ***");
}
|
Print debug information on the headers to the RAS tracing log .
| 119
| 13
|
159,355
|
protected void destroy ( ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Destroying these headers: " + this ) ; } // if we have headers present, or reference parse buffers (i.e. // the first header parsed threw an error perhaps), then clear // the message now if ( null != this . hdrSequence || HeaderStorage . NOTSET != this . parseIndex ) { clear ( ) ; } this . byteCacheSize = DEFAULT_CACHESIZE ; this . incomingBufferSize = DEFAULT_BUFFERSIZE ; this . outgoingHdrBufferSize = DEFAULT_BUFFERSIZE ; this . useDirectBuffer = true ; this . limitNumHeaders = DEFAULT_LIMIT_NUMHEADERS ; this . limitTokenSize = DEFAULT_LIMIT_TOKENSIZE ; this . headerChangeLimit = HeaderStorage . NOTSET ; }
|
Completely clear out all the information on this object when it is no longer used .
| 206
| 17
|
159,356
|
/**
 * Write information for the input data to the output stream. If the input data
 * is null or empty this will write a -1 length marker; otherwise the length is
 * written followed by the bytes themselves.
 *
 * @param output stream to write to
 * @param data   bytes to serialize, possibly null or empty
 * @throws IOException on any write failure
 */
protected void writeByteArray(ObjectOutput output, byte[] data) throws IOException {
    final boolean isEmpty = (data == null || data.length == 0);
    if (isEmpty) {
        // -1 marks "no data" so the reader can distinguish it from a zero-length array
        output.writeInt(-1);
    } else {
        output.writeInt(data.length);
        output.write(data);
    }
}
|
Write information for the input data to the output stream . If the input data is null or empty this will write a - 1 length marker .
| 63
| 28
|
159,357
|
/**
 * Overlay whitespace into the input buffer using the provided starting and
 * stopping positions. When the buffer has a backing array it is written
 * directly; otherwise a cached whitespace array is put into the buffer in
 * chunks.
 *
 * @param buffer buffer to overwrite
 * @param start  first position (inclusive) to overwrite
 * @param stop   last position (exclusive) to overwrite
 */
private void scribbleWhiteSpace(WsByteBuffer buffer, int start, int stop) {
    if (buffer.hasArray()) {
        // buffer has a backing array so directly update that
        final byte[] data = buffer.array();
        final int offset = buffer.arrayOffset();
        int myStart = start + offset;
        int myStop = stop + offset;
        for (int i = myStart; i < myStop; i++) {
            data[i] = BNFHeaders.SPACE;
        }
    } else {
        // overlay whitespace into the buffer
        byte[] localWhitespace = whitespace;
        if (null == localWhitespace) {
            localWhitespace = getWhiteSpace();
        }
        buffer.position(start);
        int len = stop - start;
        // put the whitespace array repeatedly until the span is covered
        while (len > 0) {
            if (localWhitespace.length >= len) {
                buffer.put(localWhitespace, 0, len);
                break; // out of while
            }
            int partial = localWhitespace.length;
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                Tr.debug(tc, "Scribbling " + partial + " bytes of whitespace");
            }
            buffer.put(localWhitespace, 0, partial);
            len -= partial;
        }
    }
}
|
Overlay whitespace into the input buffer using the provided starting and stopping positions .
| 282
| 16
|
159,358
|
/**
 * Method to completely erase the input header from the parse buffers. The span
 * to erase runs from this header's trailing CRLF to the next header's CRLF (or
 * the message's last CRLF if there is no following parsed header); buffers
 * fully inside the span are emptied and the final buffer is overwritten with
 * whitespace.
 *
 * @param elem header element to erase from the parse buffers
 */
private void eraseValue(HeaderElement elem) {
    // wipe out the removed value
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Erasing existing header: " + elem.getName());
    }
    // default the end of the span to the message's last CRLF
    int next_index = this.lastCRLFBufferIndex;
    int next_pos = this.lastCRLFPosition;
    if (null != elem.nextSequence && !elem.nextSequence.wasAdded()) {
        // a following parsed header bounds the span instead
        next_index = elem.nextSequence.getLastCRLFBufferIndex();
        next_pos = elem.nextSequence.getLastCRLFPosition();
    }
    int start = elem.getLastCRLFPosition();
    // if it's only in one buffer, this for loop does nothing
    for (int x = elem.getLastCRLFBufferIndex(); x < next_index; x++) {
        // wiping out this buffer from start to limit
        this.parseBuffers[x].position(start);
        this.parseBuffers[x].limit(start);
        start = 0;
    }
    // last buffer, scribble from start until next_pos
    scribbleWhiteSpace(this.parseBuffers[next_index], start, next_pos);
}
|
Method to completely erase the input header from the parse buffers .
| 292
| 12
|
159,359
|
/**
 * Utility method to overlay the input bytes into the parse buffers starting at
 * the input index and moving forward as needed. The current position of each
 * buffer is used; subsequent buffers are rewound to position 0 before writing.
 *
 * @param data     bytes to write
 * @param inOffset offset into data of the first byte to write
 * @param inLength number of bytes to write, or -1 meaning data.length
 * @param inIndex  parse buffer index to start writing at
 * @return the index of the parse buffer the final byte was written into
 */
private int overlayBytes(byte[] data, int inOffset, int inLength, int inIndex) {
    int length = inLength;
    int offset = inOffset;
    int index = inIndex;
    WsByteBuffer buffer = this.parseBuffers[index];
    // -1 is a shorthand for "the whole array"
    if (-1 == length) {
        length = data.length;
    }
    while (index <= this.parseIndex) {
        int remaining = buffer.remaining();
        if (remaining >= length) {
            // it all fits now
            buffer.put(data, offset, length);
            return index;
        }
        // put what we can, loop through the next buffer
        buffer.put(data, offset, remaining);
        offset += remaining;
        length -= remaining;
        buffer = this.parseBuffers[++index];
        buffer.position(0);
    }
    return index;
}
|
Utility method to overlay the input bytes into the parse buffers starting at the input index and moving forward as needed .
| 171
| 23
|
159,360
|
/**
 * Method to overlay the new header value onto the older value in the parse
 * buffers. Writes the marshalled header name followed by the value, handling
 * the case where the header straddles multiple buffers, and pads any leftover
 * space up to the next header's CRLF with whitespace.
 *
 * @param elem header element whose current value overwrites the original bytes
 */
private void overlayValue(HeaderElement elem) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Overlaying existing header: " + elem.getName());
    }
    // default the end of the span to the message's last CRLF
    int next_index = this.lastCRLFBufferIndex;
    int next_pos = this.lastCRLFPosition;
    if (null != elem.nextSequence && !elem.nextSequence.wasAdded()) {
        // a following parsed header bounds the span instead
        next_index = elem.nextSequence.getLastCRLFBufferIndex();
        next_pos = elem.nextSequence.getLastCRLFPosition();
    }
    WsByteBuffer buffer = this.parseBuffers[elem.getLastCRLFBufferIndex()];
    // skip past the CRLF (2 bytes for CR LF, 1 for a bare LF)
    buffer.position(elem.getLastCRLFPosition() + (elem.isLastCRLFaCR() ? 2 : 1));
    if (next_index == elem.getLastCRLFBufferIndex()) {
        // all in one buffer
        buffer.put(elem.getKey().getMarshalledByteArray(foundCompactHeader()));
        buffer.put(elem.asRawBytes(), elem.getOffset(), elem.getValueLength());
    } else {
        // header straddles buffers
        int index = elem.getLastCRLFBufferIndex();
        index = overlayBytes(elem.getKey().getMarshalledByteArray(foundCompactHeader()), 0, -1, index);
        index = overlayBytes(elem.asRawBytes(), elem.getOffset(), elem.getValueLength(), index);
        buffer = this.parseBuffers[index];
    }
    // pad trailing whitespace if we need it
    int start = buffer.position();
    if (start < next_pos) {
        scribbleWhiteSpace(buffer, start, next_pos);
    }
}
|
Method to overlay the new header value onto the older value in the parse buffers .
| 445
| 16
|
159,361
|
/**
 * Marshall the newly added headers from the sequence list to the output
 * buffers starting at the input index on the list, then append the final EOL,
 * flush the byte cache and flip the last buffer ready for writing.
 *
 * @param inBuffers output buffer list to append into
 * @param index     slot in the list where a fresh buffer is allocated
 * @return the (possibly grown) output buffer list
 */
private WsByteBuffer[] marshallAddedHeaders(WsByteBuffer[] inBuffers, int index) {
    WsByteBuffer[] buffers = inBuffers;
    buffers[index] = allocateBuffer(this.outgoingHdrBufferSize);
    // only headers flagged as added are marshalled; existing ones are untouched
    for (HeaderElement elem = this.hdrSequence; null != elem; elem = elem.nextSequence) {
        if (elem.wasAdded()) {
            buffers = marshallHeader(buffers, elem);
        }
    }
    // add second EOL
    buffers = putBytes(BNFHeaders.EOL, buffers);
    buffers = flushCache(buffers);
    // flip the last buffer now that we're done
    buffers[buffers.length - 1].flip();
    return buffers;
}
|
Marshall the newly added headers from the sequence list to the output buffers starting at the input index on the list .
| 167
| 23
|
159,362
|
/**
 * Clear all traces of the headers from storage. Walks the sequence list,
 * removing the storage entry (and running the remove filter) on the first
 * instance of each key, destroying every element, and finally resetting the
 * sequence pointers and header count.
 */
private void clearAllHeaders() {
    final boolean bTrace = TraceComponent.isAnyTracingEnabled();
    if (bTrace && tc.isEntryEnabled()) {
        Tr.entry(tc, "clearAllHeaders()");
    }
    HeaderElement elem = this.hdrSequence;
    while (null != elem) {
        // capture the next link before destroy() invalidates this element
        final HeaderElement next = elem.nextSequence;
        final HeaderKeys key = elem.getKey();
        final int ord = key.getOrdinal();
        if (storage.containsKey(ord)) {
            // first instance being removed
            if (key.useFilters()) {
                filterRemove(key, null);
            }
            storage.remove(ord);
        }
        elem.destroy();
        elem = next;
    }
    this.hdrSequence = null;
    this.lastHdrInSequence = null;
    this.numberOfHeaders = 0;
    if (bTrace && tc.isEntryEnabled()) {
        Tr.exit(tc, "clearAllHeaders()");
    }
}
|
Clear all traces of the headers from storage .
| 232
| 9
|
159,363
|
/**
 * Remove all instances of a special header that does not require the headerkey
 * filterRemove method to be called.
 *
 * @param key the header key whose instances are removed
 */
public void removeSpecialHeader(HeaderKeys key) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "removeSpecialHeader(h): " + key.getName());
    }
    // FILTER_NO skips the filterRemove callback for this key
    final HeaderElement root = findHeader(key);
    removeHdrInstances(root, FILTER_NO);
}
|
Remove all instances of a special header that does not require the headerkey filterRemove method to be called .
| 76
| 21
|
159,364
|
/**
 * Method to remove the current parsing buffer from this object's ownership so
 * it can be used by others.
 *
 * @return the current parse buffer, or null when no buffer is being tracked
 */
public WsByteBuffer returnCurrentBuffer() {
    if (this.parseIndex == HeaderStorage.NOTSET) {
        return null;
    }
    final WsByteBuffer current = this.parseBuffers[this.parseIndex];
    // step back so this buffer is no longer owned by this object
    this.parseIndex--;
    return current;
}
|
Method to remove the current parsing buffer from this object's ownership so it can be used by others
| 58
| 20
|
159,365
|
/**
 * Utility method to create a single header instance with the given
 * information. If elements already exist this will delete secondary ones and
 * overlay the value on the first element. When parse-buffer reuse is enabled
 * (headerChangeLimit set) the existing element is only reused if the new value
 * is not longer than the old one; otherwise it is removed and re-created.
 *
 * @param key    header key to set
 * @param value  byte array containing the new value
 * @param offset offset into value of the first byte
 * @param length number of value bytes
 */
private void createSingleHeader(HeaderKeys key, byte[] value, int offset, int length) {
    HeaderElement elem = findHeader(key);
    if (null != elem) {
        // delete all secondary instances first
        if (null != elem.nextInstance) {
            HeaderElement temp = elem.nextInstance;
            while (null != temp) {
                temp.remove();
                temp = temp.nextInstance;
            }
        }
        if (HeaderStorage.NOTSET != this.headerChangeLimit) {
            // parse buffer reuse is enabled, see if we can use existing obj
            if (length <= elem.getValueLength()) {
                // new value fits over the old one: count the change and reuse
                this.headerChangeCount++;
                elem.setByteArrayValue(value, offset, length);
            } else {
                elem.remove();
                elem = null;
            }
        } else {
            // parse buffer reuse is disabled
            elem.setByteArrayValue(value, offset, length);
        }
    }
    if (null == elem) {
        // either it didn't exist or we chose not to re-use the object
        elem = getElement(key);
        elem.setByteArrayValue(value, offset, length);
        addHeader(elem, FILTER_NO);
    } else if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Replacing header " + key.getName() + " [" + elem.getDebugValue() + "]");
    }
}
|
Utility method to create a single header instance with the given information . If elements already exist this will delete secondary ones and overlay the value on the first element .
| 318
| 32
|
159,366
|
/**
 * Add this new instance of a header to storage. Forwarded/X-Forwarded headers
 * are processed first when remote-IP handling is on; add filters and the
 * WAS-private-header trust check may silently reject the header; otherwise the
 * element is linked into the instance chain and, if it is the first instance,
 * stored under the key's ordinal.
 *
 * @param elem    the header element to add
 * @param bFilter whether the key's add filters should be consulted
 */
private void addHeader(HeaderElement elem, boolean bFilter) {
    final HeaderKeys key = elem.getKey();
    if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
        Tr.event(tc, "Adding header [" + key.getName() + "] with value [" + elem.getDebugValue() + "]");
    }
    // note: "x-forwarded" must be checked before the "forwarded" prefix
    if (getRemoteIp() && key.getName().toLowerCase().startsWith("x-forwarded") && !forwardHeaderErrorState) {
        processForwardedHeader(elem, true);
    } else if (getRemoteIp() && key.getName().toLowerCase().startsWith("forwarded") && !forwardHeaderErrorState) {
        processForwardedHeader(elem, false);
    }
    if (bFilter) {
        // a rejecting add filter means the header is silently dropped
        if (key.useFilters() && !filterAdd(key, elem.asBytes())) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                Tr.debug(tc, "filter disallowed: " + elem.getDebugValue());
            }
            return;
        }
    }
    if (HttpHeaderKeys.isWasPrivateHeader(key.getName())) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "checking to see if private header is allowed: " + key.getName());
        }
        // untrusted private headers are silently dropped
        if (!filterAdd(key, elem.asBytes())) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                Tr.debug(tc, key.getName() + " is not trusted for this host; not adding header");
            }
            return;
        }
    }
    incrementHeaderCounter();
    HeaderElement root = findHeader(key);
    boolean rc = addInstanceOfElement(root, elem);
    // did we change the root node?
    if (rc) {
        final int ord = key.getOrdinal();
        storage.put(ord, elem);
    }
}
|
Add this new instance of a header to storage .
| 482
| 10
|
159,367
|
/**
 * Find the specific instance of this header in storage. Keys whose ordinal is
 * beyond the capped range are located by a linear name search through storage;
 * normal keys are looked up directly by ordinal. Removed instances are skipped
 * when counting toward the requested instance number.
 *
 * @param key      header key to locate
 * @param instance zero-based instance number among the non-removed instances
 * @return the matching element, or null if not present
 */
private HeaderElement findHeader(HeaderKeys key, int instance) {
    final int ord = key.getOrdinal();
    // quick miss for normal (uncapped) ordinals with no storage entry
    if (!storage.containsKey(ord) && ord <= HttpHeaderKeys.ORD_MAX) {
        return null;
    }
    HeaderElement elem = null;
    //If the ordinal created for this key is larger than 1024, the header key
    //storage has been capped. As such, search the internal header storage
    //to see if we have a header with this name already added.
    if (ord > HttpHeaderKeys.ORD_MAX) {
        for (HeaderElement header : storage.values()) {
            if (header.getKey().getName().equals(key.getName())) {
                elem = header;
                break;
            }
        }
    } else {
        elem = storage.get(ord);
    }
    // walk the instance chain counting only non-removed elements
    int i = -1;
    while (null != elem) {
        if (!elem.wasRemoved()) {
            if (++i == instance) {
                return elem;
            }
        }
        elem = elem.nextInstance;
    }
    return null;
}
|
Find the specific instance of this header in storage .
| 236
| 10
|
159,368
|
/**
 * Remove this single instance of a header, notifying the key's remove filter
 * when the key uses filters. A null input is a no-op.
 *
 * @param elem the header element instance to remove, possibly null
 */
private void removeHdr(HeaderElement elem) {
    if (elem == null) {
        return;
    }
    final HeaderKeys headerKey = elem.getKey();
    elem.remove();
    if (headerKey.useFilters()) {
        filterRemove(headerKey, elem.asBytes());
    }
}
|
Remove this single instance of a header .
| 69
| 8
|
159,369
|
/**
 * Remove all instances of this header. The remove filter is notified once
 * (with a null value) when requested and the key uses filters.
 *
 * @param root    first instance of the header, possibly null
 * @param bFilter whether the key's remove filter should be invoked
 */
private void removeHdrInstances(HeaderElement root, boolean bFilter) {
    if (root == null) {
        return;
    }
    final HeaderKeys headerKey = root.getKey();
    if (bFilter && headerKey.useFilters()) {
        filterRemove(headerKey, null);
    }
    // remove every instance along the chain
    for (HeaderElement cursor = root; cursor != null; cursor = cursor.nextInstance) {
        cursor.remove();
    }
}
|
Remove all instances of this header .
| 94
| 7
|
159,370
|
/**
 * Set one of the special headers that does not require the headerkey filterX
 * methods to be called. Any existing instances are removed first, then a
 * single new instance with the given value is added.
 *
 * @param key   the header key to set
 * @param value the raw byte value for the header
 */
protected void setSpecialHeader(HeaderKeys key, byte[] value) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "setSpecialHeader(h,b[]): " + key.getName());
    }
    // wipe existing instances, skipping the filter callbacks
    removeHdrInstances(findHeader(key), FILTER_NO);
    final HeaderElement newElem = getElement(key);
    newElem.setByteArrayValue(value);
    addHeader(newElem, FILTER_NO);
}
|
Set one of the special headers that does not require the headerkey filterX methods to be called .
| 118
| 20
|
159,371
|
/**
 * Set the limit on the number of allowed header changes before this message
 * must be remarshalled. Also resets the over-limit flag.
 *
 * @param limit the new header change limit
 */
public void setHeaderChangeLimit(int limit) {
    this.headerChangeLimit = limit;
    // a new limit invalidates any previous over-limit determination
    this.bOverChangeLimit = false;
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Setting header change limit to " + limit);
    }
}
|
Set the limit on the number of allowed header changes before this message must be remarshalled .
| 69
| 19
|
159,372
|
/**
 * Allocate a buffer according to the requested input size, using direct or
 * indirect allocation per this object's configuration, and track it on the
 * created-buffer list for later release.
 *
 * @param size requested buffer size in bytes
 * @return the newly allocated buffer
 */
public WsByteBuffer allocateBuffer(int size) {
    final WsByteBufferPoolManager poolMgr = HttpDispatcher.getBufferManager();
    final WsByteBuffer allocated;
    if (this.useDirectBuffer) {
        allocated = poolMgr.allocateDirect(size);
    } else {
        allocated = poolMgr.allocate(size);
    }
    // remember it so clearBuffers() releases it back to the pool
    addToCreatedBuffer(allocated);
    return allocated;
}
|
Allocate a buffer according to the requested input size .
| 82
| 11
|
159,373
|
/**
 * Allow the debug context object to be set to the input Object for more
 * specialized debugging. A null input object will be ignored.
 *
 * @param o the new debug context, or null to leave the current one in place
 */
@Override
public void setDebugContext(Object o) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "debugContext set to " + o + " for " + this);
    }
    if (o != null) {
        this.debugContext = o;
    }
}
|
Allow the debug context object to be set to the input Object for more specialized debugging . A null input object will be ignored .
| 74
| 25
|
159,374
|
private void incrementHeaderCounter ( ) { this . numberOfHeaders ++ ; this . headerAddCount ++ ; if ( this . limitNumHeaders < this . numberOfHeaders ) { String msg = "Too many headers in storage: " + this . numberOfHeaders ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , msg ) ; } throw new IllegalArgumentException ( msg ) ; } }
|
Increment the number of headers in storage counter by one . If this puts it over the limit for the message then an exception is thrown .
| 103
| 28
|
159,375
|
private void checkHeaderValue ( byte [ ] data , int offset , int length ) { // if the last character is a CR or LF, then this fails int index = ( offset + length ) - 1 ; if ( index < 0 ) { // empty data, quit now with success return ; } String error = null ; if ( BNFHeaders . LF == data [ index ] || BNFHeaders . CR == data [ index ] ) { error = "Illegal trailing EOL" ; } // scan through the data now for invalid CRLF presence. Note that CRLFs // may be followed by whitespace for valid multiline headers. for ( int i = offset ; null == error && i < index ; i ++ ) { if ( BNFHeaders . CR == data [ i ] ) { // next char must be an LF if ( BNFHeaders . LF != data [ i + 1 ] ) { error = "Invalid CR not followed by LF" ; } else if ( getCharacterValidation ( ) ) { data [ i ] = BNFHeaders . SPACE ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Found a CR replacing it with a SP" ) ; } } } else if ( BNFHeaders . LF == data [ i ] ) { // if it is not followed by whitespace then this value is bad if ( BNFHeaders . TAB != data [ i + 1 ] && BNFHeaders . SPACE != data [ i + 1 ] ) { error = "Invalid LF not followed by whitespace" ; } else if ( getCharacterValidation ( ) ) { data [ i ] = BNFHeaders . SPACE ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Found a LF replacing it with a SP" ) ; } } } } // if we found an error, throw the exception now if ( null != error ) { IllegalArgumentException iae = new IllegalArgumentException ( error ) ; FFDCFilter . processException ( iae , getClass ( ) . getName ( ) + ".checkHeaderValue(byte[])" , "1" , this ) ; throw iae ; } }
|
Check the input header value for validity starting at the offset and continuing for the input length of characters .
| 491
| 20
|
159,376
|
private void checkHeaderValue ( String data ) { // if the last character is a CR or LF, then this fails int index = data . length ( ) - 1 ; if ( index < 0 ) { // empty string, quit now with success return ; } String error = null ; char c = data . charAt ( index ) ; if ( BNFHeaders . LF == c || BNFHeaders . CR == c ) { error = "Illegal trailing EOL" ; } // scan through the data now for invalid CRLF presence. Note that CRLFs // may be followed by whitespace for valid multiline headers. for ( int i = 0 ; null == error && i < index ; i ++ ) { c = data . charAt ( i ) ; if ( BNFHeaders . CR == c ) { // next char must be an LF if ( BNFHeaders . LF != data . charAt ( i + 1 ) ) { error = "Invalid CR not followed by LF" ; } } else if ( BNFHeaders . LF == c ) { c = data . charAt ( ++ i ) ; // if it is not followed by whitespace then this value is bad if ( BNFHeaders . TAB != c && BNFHeaders . SPACE != c ) { error = "Invalid LF not followed by whitespace" ; } } } // if we found an error, throw the exception now if ( null != error ) { IllegalArgumentException iae = new IllegalArgumentException ( error ) ; FFDCFilter . processException ( iae , getClass ( ) . getName ( ) + ".checkHeaderValue(String)" , "1" , this ) ; throw iae ; } }
|
Check the input header value for validity .
| 364
| 8
|
159,377
|
private int countInstances ( HeaderElement root ) { int count = 0 ; HeaderElement elem = root ; while ( null != elem ) { if ( ! elem . wasRemoved ( ) ) { count ++ ; } elem = elem . nextInstance ; } return count ; }
|
Count the number of instances of this header starting at the given element .
| 61
| 14
|
159,378
|
private boolean skipWhiteSpace ( WsByteBuffer buff ) { // keep reading until we hit the end of the buffer or a non-space char byte b ; do { if ( this . bytePosition >= this . byteLimit ) { if ( ! fillByteCache ( buff ) ) { // not filled return false ; } } b = this . byteCache [ this . bytePosition ++ ] ; } while ( BNFHeaders . SPACE == b || BNFHeaders . TAB == b ) ; // move byte position back one. this . bytePosition -- ; return true ; }
|
Skip any whitespace that might be at the start of this buffer .
| 121
| 14
|
159,379
|
private boolean addInstanceOfElement ( HeaderElement root , HeaderElement elem ) { // first add to the overall sequence list if ( null == this . hdrSequence ) { this . hdrSequence = elem ; this . lastHdrInSequence = elem ; } else { // find the end of the list and append this new element this . lastHdrInSequence . nextSequence = elem ; elem . prevSequence = this . lastHdrInSequence ; this . lastHdrInSequence = elem ; } if ( null == root ) { return true ; } HeaderElement prev = root ; while ( null != prev . nextInstance ) { prev = prev . nextInstance ; } prev . nextInstance = elem ; return false ; }
|
Helper method to add a new instance of a HeaderElement to root s internal list . This might be the first instance or an additional instance in which case it will be added at the end of the list .
| 166
| 41
|
159,380
|
protected WsByteBuffer [ ] putInt ( int data , WsByteBuffer [ ] buffers ) { return putBytes ( GenericUtils . asBytes ( data ) , buffers ) ; }
|
Place the input int value into the outgoing cache . This will return the buffer array as it may have changed if the cache need to be flushed .
| 40
| 29
|
159,381
|
protected WsByteBuffer [ ] flushCache ( WsByteBuffer [ ] buffers ) { // PK13351 - use the offset/length version to write only what we need // to and avoid the extra memory allocation int pos = this . bytePosition ; if ( 0 == pos ) { // nothing to write return buffers ; } this . bytePosition = 0 ; return GenericUtils . putByteArray ( buffers , this . byteCache , 0 , pos , this ) ; }
|
Method to flush whatever is in the cache into the input buffers . These buffers are then returned to the caller as the flush may have needed to expand the list .
| 98
| 32
|
159,382
|
final protected void decrementBytePositionIgnoringLFs ( ) { // PK15898 - added for just LF after first line this . bytePosition -- ; if ( BNFHeaders . LF == this . byteCache [ this . bytePosition ] ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "decrementILF found an LF character" ) ; } this . bytePosition ++ ; } }
|
Decrement the byte position unless it points to an LF character in which case just leave the byte position alone .
| 103
| 22
|
159,383
|
final protected void resetCacheToken ( int len ) { if ( null == this . parsedToken || len != this . parsedToken . length ) { this . parsedToken = new byte [ len ] ; } this . parsedTokenLength = 0 ; }
|
Reset the parse byte token based on the input length . If the existing array is the same size then this is a simple reset . This is intended to only be used when the contents have already been extracted and can be overwritten with new data .
| 51
| 50
|
159,384
|
final protected boolean fillCacheToken ( WsByteBuffer buff ) { // figure out how much we have left to copy out, append to any existing // parsed token (multiple passes through here). int curr_len = this . parsedTokenLength ; int need_len = this . parsedToken . length - curr_len ; int copy_len = need_len ; // keep going until we have all we need or we run out of buffer data while ( 0 < need_len ) { if ( this . bytePosition >= this . byteLimit ) { if ( ! fillByteCache ( buff ) ) { // save a reference to how much we've pulled so far this . parsedTokenLength = curr_len ; return false ; } } // byte cache is now prepped int available = this . byteLimit - this . bytePosition ; if ( available < need_len ) { // copy what we can from the current cache copy_len = available ; } else { copy_len = need_len ; } // copy new data into the existing space System . arraycopy ( this . byteCache , this . bytePosition , this . parsedToken , curr_len , copy_len ) ; need_len -= copy_len ; curr_len += copy_len ; this . bytePosition += copy_len ; } return true ; }
|
Method to fill the parse token from the given input buffer . The token array must have been created prior to this attempt to fill it .
| 279
| 27
|
159,385
|
protected boolean fillByteCache ( WsByteBuffer buff ) { if ( this . bytePosition < this . byteLimit ) { return false ; } int size = buff . remaining ( ) ; if ( size > this . byteCacheSize ) { // truncate to just fill up the cache size = this . byteCacheSize ; } this . bytePosition = 0 ; this . byteLimit = size ; if ( 0 == this . byteLimit ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "fillByteCache: no data" ) ; } return false ; } if ( HeaderStorage . NOTSET != this . headerChangeLimit && - 1 != this . parseIndex && - 1 == this . parseBuffersStartPos [ this . parseIndex ] ) { // first occurrance of this buffer and we're keeping track of changes this . parseBuffersStartPos [ this . parseIndex ] = buff . position ( ) ; } buff . get ( this . byteCache , this . bytePosition , this . byteLimit ) ; return true ; }
|
Fills the byte cache .
| 234
| 6
|
159,386
|
protected TokenCodes findCRLFTokenLength ( WsByteBuffer buff ) throws MalformedMessageException { TokenCodes rc = TokenCodes . TOKEN_RC_MOREDATA ; if ( null == buff ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Null buffer provided" ) ; } return rc ; } // start with any pre-existing data int length = this . parsedTokenLength ; byte b ; while ( true ) { if ( this . bytePosition >= this . byteLimit ) { if ( ! fillByteCache ( buff ) ) { // no more data break ; } } b = this . byteCache [ this . bytePosition ++ ] ; // check for a CRLF if ( BNFHeaders . CR == b ) { rc = TokenCodes . TOKEN_RC_DELIM ; if ( HeaderStorage . NOTSET != this . headerChangeLimit ) { this . lastCRLFPosition = findCurrentBufferPosition ( buff ) - 1 ; this . lastCRLFBufferIndex = this . parseIndex ; this . lastCRLFisCR = true ; } break ; // out of while } else if ( BNFHeaders . LF == b ) { // update counter if linefeed found rc = TokenCodes . TOKEN_RC_DELIM ; this . numCRLFs = 1 ; if ( HeaderStorage . NOTSET != this . headerChangeLimit ) { this . lastCRLFPosition = findCurrentBufferPosition ( buff ) - 1 ; this . lastCRLFBufferIndex = this . parseIndex ; this . lastCRLFisCR = false ; } break ; // out of while } length ++ ; // check the limit on a token size if ( length > this . limitTokenSize ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "findCRLFTokenLength: length is too big: " + length ) ; } throw new MalformedMessageException ( "Token length: " + length ) ; } } // end of the while this . parsedTokenLength = length ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "findCRLFTokenLength returning " + rc . getName ( ) + "; len=" + length ) ; } return rc ; }
|
Parse a CRLF delimited token and return the length of the token .
| 527
| 17
|
159,387
|
protected TokenCodes skipCRLFs ( WsByteBuffer buffer ) { int maxCRLFs = 33 ; // limit is the max number of CRLFs to skip if ( this . bytePosition >= this . byteLimit ) { if ( ! fillByteCache ( buffer ) ) { // no more data return TokenCodes . TOKEN_RC_MOREDATA ; } } byte b = this . byteCache [ this . bytePosition ++ ] ; for ( int i = 0 ; i < maxCRLFs ; i ++ ) { if ( - 1 == b ) { // ran out of data return TokenCodes . TOKEN_RC_MOREDATA ; } if ( BNFHeaders . CR != b && BNFHeaders . LF != b ) { // stopped on non-CRLF character, reset position this . bytePosition -- ; return TokenCodes . TOKEN_RC_DELIM ; } // keep going otherwise if ( this . bytePosition >= this . byteLimit ) { return TokenCodes . TOKEN_RC_MOREDATA ; } b = this . byteCache [ this . bytePosition ++ ] ; } // found too many CRLFs... invalid if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Too many leading CRLFs found" ) ; } return TokenCodes . TOKEN_RC_CRLF ; }
|
This method is used to skip leading CRLF characters . It will stop when it finds a non - CRLF character runs out of data or finds too many CRLFs
| 306
| 36
|
159,388
|
protected TokenCodes findHeaderLength ( WsByteBuffer buff ) throws MalformedMessageException { TokenCodes rc = TokenCodes . TOKEN_RC_MOREDATA ; if ( null == buff ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "findHeaderLength: null buffer provided" ) ; } return rc ; } byte b ; int numSpaces = 0 ; // start with any pre-existing data int length = this . parsedTokenLength ; while ( true ) { if ( this . bytePosition >= this . byteLimit ) { if ( ! fillByteCache ( buff ) ) { // no more data break ; } } b = this . byteCache [ this . bytePosition ++ ] ; // look for the colon marking the end if ( BNFHeaders . COLON == b ) { length -= numSpaces ; // remove any "trailing" white space if ( numSpaces > 0 ) { //PI13987 //found trailing whitespace this . foundTrailingWhitespace = true ; } rc = TokenCodes . TOKEN_RC_DELIM ; break ; } // if we hit whitespace, then keep track of the number of spaces so // that we can easily trim that off at the end. This will end up // ignoring whitespace that is inside the header name if that does // happen if ( BNFHeaders . SPACE == b || BNFHeaders . TAB == b ) { numSpaces ++ ; } else { // reset the counter on any non-space or colon numSpaces = 0 ; } // check for possible CRLF if ( BNFHeaders . CR == b || BNFHeaders . LF == b ) { // Note: would be nice to print the failing data but would need // to keep track of where we started inside here, then what about // data straddling bytecaches, etc? throw new MalformedMessageException ( "Invalid CRLF found in header name" ) ; } length ++ ; // check the limit on a token size if ( length > this . limitTokenSize ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "findTokenLength: length is too big: " + length ) ; } throw new MalformedMessageException ( "Token length: " + length ) ; } } // end of the while this . parsedTokenLength = length ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . 
isDebugEnabled ( ) ) { Tr . debug ( tc , "findHeaderLength: " + rc . getName ( ) + "; len=" + length ) ; } return rc ; }
|
Parse a byte delimited token and return the length of the token .
| 585
| 15
|
159,389
|
private boolean parseHeaderName ( WsByteBuffer buff ) throws MalformedMessageException { // if we're just starting, then skip leading white space characters // otherwise ignore them (i.e we might be in the middle of // "Mozilla/5.0 (Win" if ( null == this . parsedToken ) { if ( ! skipWhiteSpace ( buff ) ) { return false ; } } int start = findCurrentBufferPosition ( buff ) ; int cachestart = this . bytePosition ; TokenCodes rc = findHeaderLength ( buff ) ; if ( TokenCodes . TOKEN_RC_MOREDATA . equals ( rc ) ) { // ran out of data saveParsedToken ( buff , start , false , LOG_FULL ) ; return false ; } // could be in one single bytecache, otherwise we have to extract from // buffer byte [ ] data ; int length = this . parsedTokenLength ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "length=" + length + " pos=" + this . bytePosition + ", cachestart=" + cachestart + ", start=" + start + ", trailingWhitespace=" + this . foundTrailingWhitespace ) ; } //PI13987 - Added the first argument to the if statement if ( ! this . foundTrailingWhitespace && null == this . parsedToken && length < this . bytePosition ) { // it's all in the bytecache if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { //PI13987 - Modified the message being printed as we now print the same thing above Tr . debug ( tc , "Using bytecache" ) ; } data = this . byteCache ; start = cachestart ; } else { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { //PI13987 Tr . debug ( tc , "Using bytebuffer" ) ; } saveParsedToken ( buff , start , true , LOG_FULL ) ; data = this . parsedToken ; start = 0 ; length = data . length ; } // otherwise we found the entire length of the name this . currentElem = getElement ( findKey ( data , start , length ) ) ; // Reset all the global variables once HeaderElement has been instantiated if ( HeaderStorage . NOTSET != this . headerChangeLimit ) { this . currentElem . 
updateLastCRLFInfo ( this . lastCRLFBufferIndex , this . lastCRLFPosition , this . lastCRLFisCR ) ; } this . stateOfParsing = PARSING_VALUE ; this . parsedToken = null ; this . parsedTokenLength = 0 ; this . foundTrailingWhitespace = false ; //PI13987 return true ; }
|
Utility method to parse the header name from the input buffer .
| 614
| 13
|
159,390
|
private boolean parseHeaderValueExtract ( WsByteBuffer buff ) throws MalformedMessageException { // 295178 - don't log sensitive information // log value contents based on the header key (if known) int log = LOG_FULL ; HeaderKeys key = this . currentElem . getKey ( ) ; if ( null != key && ! key . shouldLogValue ( ) ) { // this header key wants to block the entire thing log = LOG_NONE ; } TokenCodes tcRC = parseCRLFTokenExtract ( buff , log ) ; if ( ! tcRC . equals ( TokenCodes . TOKEN_RC_MOREDATA ) ) { setHeaderValue ( ) ; this . parsedToken = null ; this . currentElem = null ; this . stateOfParsing = PARSING_CRLF ; return true ; } // otherwise we need more data in order to read the value return false ; }
|
Utility method for parsing a header value out of the input buffer .
| 198
| 14
|
159,391
|
protected int parseTokenNonExtract ( WsByteBuffer buff , byte bDelimiter , boolean bApproveCRLF ) throws MalformedMessageException { TokenCodes rc = findTokenLength ( buff , bDelimiter , bApproveCRLF ) ; return ( TokenCodes . TOKEN_RC_MOREDATA . equals ( rc ) ) ? - 1 : this . parsedTokenLength ; }
|
Standard parsing of a token ; however instead of saving the data into the global parsedToken variable this merely returns the length of the token . Used for occasions where we just need to find the length of the token .
| 91
| 42
|
159,392
|
private void saveParsedToken ( WsByteBuffer buff , int start , boolean delim , int log ) { final boolean bTrace = TraceComponent . isAnyTracingEnabled ( ) ; // local copy of the length int length = this . parsedTokenLength ; this . parsedTokenLength = 0 ; if ( 0 > length ) { throw new IllegalArgumentException ( "Negative token length: " + length ) ; } if ( bTrace && tc . isDebugEnabled ( ) ) { // 295178 - don't log sensitive information String value = GenericUtils . getEnglishString ( this . parsedToken ) ; if ( null != value ) { if ( LOG_PARTIAL == log ) { value = GenericUtils . nullOutPasswords ( value , LF ) ; } else if ( LOG_NONE == log ) { value = GenericUtils . blockContents ( value ) ; } } Tr . debug ( tc , "Saving token: " + value + " len:" + length + " start:" + start + " pos:" + this . bytePosition + " delim:" + delim ) ; } byte [ ] temp ; int offset ; if ( null != this . parsedToken ) { // concat to the existing value offset = this . parsedToken . length ; temp = new byte [ offset + length ] ; System . arraycopy ( this . parsedToken , 0 , temp , 0 , offset ) ; } else { offset = 0 ; temp = new byte [ length ] ; } //PI13987 - Added the first argument if ( ! this . foundTrailingWhitespace && this . bytePosition > length ) { // pull from the bytecache if ( bTrace && tc . isDebugEnabled ( ) ) { //PI13987 - Print out this new trace message Tr . debug ( tc , "savedParsedToken - using bytecache" ) ; } int cacheStart = this . bytePosition - length ; if ( delim ) { cacheStart -- ; } System . arraycopy ( this . byteCache , cacheStart , temp , offset , length ) ; } else { // must pull from the buffer if ( bTrace && tc . isDebugEnabled ( ) ) { //PI13987 - Print this new trace message Tr . debug ( tc , "savedParsedToken - pulling from buffer" ) ; } int orig = buff . position ( ) ; buff . position ( start ) ; buff . get ( temp , offset , length ) ; buff . position ( orig ) ; } this . parsedToken = temp ; if ( bTrace && tc . 
isDebugEnabled ( ) ) { // 295178 - don't log sensitive information String value = GenericUtils . getEnglishString ( this . parsedToken ) ; if ( LOG_PARTIAL == log ) { value = GenericUtils . nullOutPasswords ( value , LF ) ; } else if ( LOG_NONE == log ) { value = GenericUtils . blockContents ( value ) ; } Tr . debug ( tc , "Saved token [" + value + "]" ) ; } }
|
Sets the temporary parse token from the input buffer .
| 645
| 11
|
159,393
|
public void parsedCompactHeader ( boolean flag ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "parsedCompactHeader: " + flag ) ; } this . compactHeaderFlag = flag ; }
|
Sets the flag indicating that a SIP compact header has been parsed .
| 62
| 15
|
159,394
|
void finishAlarmThread ( ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "finishAlarmThread" ) ; //wake up the thread so that it will exit it's main loop and end synchronized ( wakeupLock ) { //flag this alarm thread as finished finished = true ; wakeupLock . notify ( ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "finishAlarmThread" ) ; }
|
Terminate this alarm thread . This is final the thread should not be restarted .
| 128
| 17
|
159,395
|
public void run ( ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "run" ) ; try { //loop until finished while ( ! finished ) { //what time is it now long now = System . currentTimeMillis ( ) ; boolean fire = false ; //synchronize on the wake up lock synchronized ( wakeupLock ) { //if not suspended and we've reached or passed the target wakeup time if ( running ) { fire = ( now >= nextWakeup ) ; } } if ( fire ) { //call the internal alarm method which should return the //time for the next wakeup ... or SUSPEND if the thread should be suspended manager . fireInternalAlarm ( ) ; synchronized ( wakeupLock ) { setNextWakeup ( manager . getNextWakeup ( ) ) ; } } synchronized ( wakeupLock ) { //if we are still not suspended (another thread could have got in before // the re-lock and changed things) if ( running ) { //if there is still time until the next wakeup if ( wakeupDelta > 0 ) { try { if ( ! finished ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "Thread wait : " + wakeupDelta ) ; //wait until the next target wakeup time long start = System . currentTimeMillis ( ) ; wakeupLock . wait ( wakeupDelta + 10 ) ; long end = System . currentTimeMillis ( ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "Thread slept for " + ( end - start ) ) ; if ( end < nextWakeup ) setNextWakeup ( nextWakeup ) ; } } catch ( InterruptedException e ) { // No FFDC code needed // swallow InterruptedException ... we'll just loop round and try again } } } else { try { if ( ! finished ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "Thread wait : Inifinite" ) ; wakeupLock . wait ( ) ; } } catch ( InterruptedException e ) { // No FFDC code needed // swallow InterruptedException ... 
we'll just loop round and try again } } } // synchronized } } catch ( RuntimeException e ) { // FFDC FFDCFilter . processException ( e , "com.ibm.ws.sib.processor.utils.am.MPAlarmThread.run" , "1:284:1.8.1.7" , this ) ; SibTr . error ( tc , nls . getFormattedMessage ( "INTERNAL_MESSAGING_ERROR_CWSIP0002" , new Object [ ] { "com.ibm.ws.sib.processor.utils.am.MPAlarmThread" , "1:290:1.8.1.7" , e } , null ) ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { SibTr . exception ( tc , e ) ; SibTr . exit ( tc , "run" , e ) ; } throw e ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "run" ) ; }
|
The main loop for the MPAlarmThread . Loops until the alarm thread is marked as finished . If the alarm thread is suspended it will wait forever . Otherwise the it will wait inside the loop until a specified time and then call the MPAlarmManager . fireInternalAlarm method .
| 766
| 59
|
159,396
|
public boolean startInactivityTimer ( ) { final boolean traceOn = TraceComponent . isAnyTracingEnabled ( ) ; if ( traceOn && tc . isEntryEnabled ( ) ) Tr . entry ( tc , "startInactivityTimer" ) ; if ( _inactivityTimeout > 0 && _status . getState ( ) == TransactionState . STATE_ACTIVE && ! _inactivityTimerActive ) { EmbeddableTimeoutManager . setTimeout ( this , EmbeddableTimeoutManager . INACTIVITY_TIMEOUT , _inactivityTimeout ) ; _inactivityTimerActive = true ; _mostRecentThread . pop ( ) ; } if ( traceOn && tc . isEntryEnabled ( ) ) Tr . exit ( tc , "startInactivityTimer" , _inactivityTimerActive ) ; return _inactivityTimerActive ; }
|
Start an inactivity timer and call alarm method of parameter when timeout expires .
| 175
| 15
|
159,397
|
public void rollbackResources ( ) { final boolean traceOn = TraceComponent . isAnyTracingEnabled ( ) ; if ( traceOn && tc . isEntryEnabled ( ) ) Tr . entry ( tc , "rollbackResources" ) ; try { final Transaction t = ( ( EmbeddableTranManagerSet ) TransactionManagerFactory . getTransactionManager ( ) ) . suspend ( ) ; getResources ( ) . rollbackResources ( ) ; if ( t != null ) ( ( EmbeddableTranManagerSet ) TransactionManagerFactory . getTransactionManager ( ) ) . resume ( t ) ; } catch ( Exception ex ) { FFDCFilter . processException ( ex , "com.ibm.tx.jta.impl.EmbeddableTransactionImpl.rollbackResources" , "104" , this ) ; if ( traceOn && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "Exception caught from rollbackResources()" , ex ) ; } if ( traceOn && tc . isEntryEnabled ( ) ) Tr . exit ( tc , "rollbackResources" ) ; }
|
Rollback all resources but do not drive state changes . Used when transaction HAS TIMED OUT .
| 231
| 19
|
159,398
|
public synchronized void stopInactivityTimer ( ) { final boolean traceOn = TraceComponent . isAnyTracingEnabled ( ) ; if ( traceOn && tc . isEntryEnabled ( ) ) Tr . entry ( tc , "stopInactivityTimer" ) ; if ( _inactivityTimerActive ) { _inactivityTimerActive = false ; EmbeddableTimeoutManager . setTimeout ( this , EmbeddableTimeoutManager . INACTIVITY_TIMEOUT , 0 ) ; } // The inactivity timer's being stopped so the transaction is // back on-server. Push the thread that it's running on onto // the stack. _mostRecentThread . push ( Thread . currentThread ( ) ) ; if ( traceOn && tc . isEntryEnabled ( ) ) Tr . exit ( tc , "stopInactivityTimer" ) ; }
|
Stop inactivity timer associated with transaction . This method needs to be synchronized to serialize with inactivity timeout . If the timeout runs after this method then there will be no _inactivityTimer to call and the context will be on_server . If the timeout runs before then a subsequent resume will fail as the transaction will be rolled back .
| 173
| 68
|
159,399
|
@ Override public void resumeAssociation ( ) { final boolean traceOn = TraceComponent . isAnyTracingEnabled ( ) ; if ( traceOn && tc . isEntryEnabled ( ) ) Tr . entry ( tc , "resumeAssociation" ) ; resumeAssociation ( true ) ; if ( traceOn && tc . isEntryEnabled ( ) ) Tr . exit ( tc , "resumeAssociation" ) ; }
|
Called by interceptor when incoming reply arrives . This polices the single threaded operation of the transaction .
| 89
| 21
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.