idx int64 0 41.2k | question stringlengths 83 4.15k | target stringlengths 5 715 |
|---|---|---|
15,900 | public static void setConfigException ( Throwable e ) { ClassLoader loader = Thread . currentThread ( ) . getContextClassLoader ( ) ; for ( ; loader != null ; loader = loader . getParent ( ) ) { if ( loader instanceof EnvironmentClassLoader ) { EnvironmentClassLoader envLoader = ( EnvironmentClassLoader ) loader ; envLoader . setConfigException ( e ) ; return ; } } } | Sets a configuration exception . |
15,901 | public static Throwable getConfigException ( ) { ClassLoader loader = Thread . currentThread ( ) . getContextClassLoader ( ) ; for ( ; loader != null ; loader = loader . getParent ( ) ) { if ( loader instanceof EnvironmentClassLoader ) { EnvironmentClassLoader envLoader = ( EnvironmentClassLoader ) loader ; if ( envLoader . getConfigException ( ) != null ) return envLoader . getConfigException ( ) ; } } return null ; } | Returns any configuration exception . |
15,902 | public static String getLocalClassPath ( ClassLoader loader ) { for ( ; loader != null ; loader = loader . getParent ( ) ) { if ( loader instanceof EnvironmentClassLoader ) { return ( ( EnvironmentClassLoader ) loader ) . getLocalClassPath ( ) ; } } return DynamicClassLoader . getSystemClassPath ( ) ; } | Returns the classpath for the environment level . |
15,903 | public static void closeGlobal ( ) { ArrayList < EnvLoaderListener > listeners ; listeners = new ArrayList < > ( ) ; listeners . addAll ( _globalLoaderListeners ) ; _globalLoaderListeners . clear ( ) ; for ( int i = 0 ; i < listeners . size ( ) ; i ++ ) { EnvLoaderListener listener = listeners . get ( i ) ; listener . classLoaderDestroy ( null ) ; } } | destroys the current environment . |
15,904 | public static synchronized void initializeEnvironment ( ) { if ( _isStaticInit ) return ; _isStaticInit = true ; ClassLoader systemLoader = ClassLoader . getSystemClassLoader ( ) ; Thread thread = Thread . currentThread ( ) ; ClassLoader oldLoader = thread . getContextClassLoader ( ) ; try { thread . setContextClassLoader ( systemLoader ) ; if ( "1.8." . compareTo ( System . getProperty ( "java.runtime.version" ) ) > 0 ) throw new ConfigException ( "Baratine requires JDK 1.8 or later" ) ; Properties props = System . getProperties ( ) ; ClassLoader envClassLoader = EnvironmentClassLoader . class . getClassLoader ( ) ; } catch ( Throwable e ) { log ( ) . log ( Level . FINE , e . toString ( ) , e ) ; } finally { thread . setContextClassLoader ( oldLoader ) ; _isInitComplete = true ; } } | Initializes the environment |
15,905 | private MonitoringRule buildRelatedRule ( IGuaranteeTerm guarantee ) { MonitoringRule result = new MonitoringRule ( ) ; result . setId ( guarantee . getName ( ) ) ; String violation = extractOutputMetric ( guarantee ) ; Parameter param = new Parameter ( ) ; param . setName ( QosModels . METRIC_PARAM_NAME ) ; param . setValue ( violation ) ; Action action = new Action ( ) ; action . setName ( QosModels . OUTPUT_METRIC_ACTION ) ; action . getParameters ( ) . add ( param ) ; Actions actions = new Actions ( ) ; actions . getActions ( ) . add ( action ) ; result . setActions ( actions ) ; return result ; } | Build a MonitoringRule given a guarantee term |
15,906 | public void debug ( WriteStream out , Path path , byte [ ] tableKey ) throws IOException { SegmentKelpBuilder builder = new SegmentKelpBuilder ( ) ; builder . path ( path ) ; builder . create ( false ) ; builder . services ( ServicesAmp . newManager ( ) . get ( ) ) ; SegmentServiceImpl segmentService = builder . build ( ) ; for ( SegmentExtent extent : segmentService . getSegmentExtents ( ) ) { debugSegment ( out , segmentService , extent , tableKey ) ; } } | Debug with a table key |
15,907 | private void debugSegment ( WriteStream out , SegmentServiceImpl segmentService , SegmentExtent extent , byte [ ] debugTableKey ) throws IOException { int length = extent . length ( ) ; try ( InSegment in = segmentService . openRead ( extent ) ) { ReadStream is = new ReadStream ( in ) ; is . position ( length - BLOCK_SIZE ) ; long seq = BitsUtil . readLong ( is ) ; if ( seq <= 0 ) { return ; } byte [ ] tableKey = new byte [ 32 ] ; is . readAll ( tableKey , 0 , tableKey . length ) ; TableEntry table = segmentService . findTable ( tableKey ) ; if ( table == null ) { return ; } if ( debugTableKey != null && ! Arrays . equals ( debugTableKey , tableKey ) ) { return ; } out . println ( ) ; StringBuilder sb = new StringBuilder ( ) ; Base64Util . encode ( sb , seq ) ; long time = _idGen . time ( seq ) ; out . println ( "Segment: " + extent . getId ( ) + " (seq: " + sb + ", table: " + Hex . toShortHex ( tableKey ) + ", addr: 0x" + Long . toHexString ( extent . address ( ) ) + ", len: 0x" + Integer . toHexString ( length ) + ", time: " + LocalDateTime . ofEpochSecond ( time / 1000 , 0 , ZoneOffset . UTC ) + ")" ) ; debugSegmentEntries ( out , is , extent , table ) ; } } | Trace through a segment displaying its sequence table and extent . |
15,908 | private void debugSegmentEntries ( WriteStream out , ReadStream is , SegmentExtent extent , TableEntry table ) throws IOException { TempBuffer tBuf = TempBuffer . create ( ) ; byte [ ] buffer = tBuf . buffer ( ) ; for ( long ptr = extent . length ( ) - BLOCK_SIZE ; ptr > 0 ; ptr -= BLOCK_SIZE ) { is . position ( ptr ) ; is . readAll ( buffer , 0 , BLOCK_SIZE ) ; long seq = BitsUtil . readLong ( buffer , 0 ) ; int head = 8 ; byte [ ] tableKey = new byte [ 32 ] ; System . arraycopy ( buffer , head , tableKey , 0 , tableKey . length ) ; is . readAll ( tableKey , 0 , tableKey . length ) ; head += tableKey . length ; int offset = BLOCK_SIZE - 8 ; int tail = BitsUtil . readInt16 ( buffer , offset ) ; offset += 2 ; boolean isCont = buffer [ offset ] == 1 ; if ( seq <= 0 || tail <= 0 ) { return ; } while ( ( head = debugSegmentIndex ( out , is , buffer , extent . address ( ) , ptr , head , table ) ) < tail ) { } if ( ! isCont ) { break ; } } } | Trace through the segment entries . |
15,909 | private int debugSegmentIndex ( WriteStream out , ReadStream is , byte [ ] buffer , long segmentAddress , long ptr , int head , TableEntry table ) throws IOException { int sublen = 1 + 4 * 4 ; int typeCode = buffer [ head ] & 0xff ; head ++ ; if ( typeCode <= 0 ) { return 0 ; } Type type = Type . valueOf ( typeCode ) ; int pid = BitsUtil . readInt ( buffer , head ) ; head += 4 ; int nextPid = BitsUtil . readInt ( buffer , head ) ; head += 4 ; int offset = BitsUtil . readInt ( buffer , head ) ; head += 4 ; int length = BitsUtil . readInt ( buffer , head ) ; head += 4 ; switch ( type ) { case LEAF : out . print ( " " + type ) ; debugLeaf ( out , is , segmentAddress , offset , table ) ; break ; case LEAF_DELTA : out . print ( " " + type ) ; break ; case BLOB : case BLOB_FREE : out . print ( " " + type ) ; break ; default : out . print ( " unk(" + type + ")" ) ; break ; } out . println ( " pid:" + pid + " next:" + nextPid + " offset:" + offset + " length:" + length ) ; return head ; } | Debug a single segment index entry . |
15,910 | public void addMergePath ( PathImpl path ) { if ( ! ( path instanceof MergePath ) ) { ArrayList < PathImpl > pathList = ( ( MergePath ) _root ) . _pathList ; if ( ! pathList . contains ( path ) ) pathList . add ( path ) ; } else if ( ( ( MergePath ) path ) . _root == _root ) return ; else { MergePath mergePath = ( MergePath ) path ; ArrayList < PathImpl > subPaths = mergePath . getMergePaths ( ) ; String pathName = "./" + mergePath . _pathname + "/" ; for ( int i = 0 ; i < subPaths . size ( ) ; i ++ ) { PathImpl subPath = subPaths . get ( i ) ; addMergePath ( subPath . lookup ( pathName ) ) ; } } } | Adds a new path to the end of the merge path . |
15,911 | public PathImpl fsWalk ( String userPath , Map < String , Object > attributes , String path ) { ArrayList < PathImpl > pathList = getMergePaths ( ) ; if ( ! userPath . startsWith ( "/" ) || pathList . size ( ) == 0 ) return new MergePath ( ( MergePath ) _root , userPath , attributes , path ) ; String bestPrefix = null ; for ( int i = 0 ; i < pathList . size ( ) ; i ++ ) { PathImpl subPath = pathList . get ( i ) ; String prefix = subPath . getPath ( ) ; if ( path . startsWith ( prefix ) && ( bestPrefix == null || bestPrefix . length ( ) < prefix . length ( ) ) ) { bestPrefix = prefix ; } } if ( bestPrefix != null ) { path = path . substring ( bestPrefix . length ( ) ) ; if ( ! path . startsWith ( "/" ) ) path = "/" + path ; return new MergePath ( ( MergePath ) _root , userPath , attributes , path ) ; } return pathList . get ( 0 ) . lookup ( userPath , attributes ) ; } | Walking down the path just extends the path . It won t be evaluated until opening . |
15,912 | public ArrayList < PathImpl > getResources ( String pathName ) { ArrayList < PathImpl > list = new ArrayList < PathImpl > ( ) ; String pathname = _pathname ; if ( pathname . startsWith ( "/" ) ) pathname = "." + pathname ; ArrayList < PathImpl > pathList = ( ( MergePath ) _root ) . _pathList ; for ( int i = 0 ; i < pathList . size ( ) ; i ++ ) { PathImpl path = pathList . get ( i ) ; path = path . lookup ( pathname ) ; ArrayList < PathImpl > subResources = path . getResources ( pathName ) ; for ( int j = 0 ; j < subResources . size ( ) ; j ++ ) { PathImpl newPath = subResources . get ( j ) ; if ( ! list . contains ( newPath ) ) list . add ( newPath ) ; } } return list ; } | Returns all the resources matching the path . |
15,913 | public String [ ] list ( ) throws IOException { ArrayList < String > list = new ArrayList < String > ( ) ; String pathname = _pathname ; if ( pathname . startsWith ( "/" ) ) pathname = "." + pathname ; ArrayList < PathImpl > pathList = ( ( MergePath ) _root ) . _pathList ; for ( int i = 0 ; i < pathList . size ( ) ; i ++ ) { PathImpl path = pathList . get ( i ) ; path = path . lookup ( pathname ) ; if ( path . isDirectory ( ) ) { String [ ] subList = path . list ( ) ; for ( int j = 0 ; subList != null && j < subList . length ; j ++ ) { if ( ! list . contains ( subList [ j ] ) ) list . add ( subList [ j ] ) ; } } } return ( String [ ] ) list . toArray ( new String [ list . size ( ) ] ) ; } | List the merged directories . |
15,914 | public PersistentDependency createDepend ( ) { ArrayList < PathImpl > pathList = ( ( MergePath ) _root ) . _pathList ; if ( pathList . size ( ) == 1 ) return ( PersistentDependency ) pathList . get ( 0 ) . createDepend ( ) ; DependencyList dependList = new DependencyList ( ) ; for ( int i = 0 ; i < pathList . size ( ) ; i ++ ) { PathImpl path = pathList . get ( i ) ; PathImpl realPath = path . lookup ( _pathname ) ; dependList . add ( ( PersistentDependency ) realPath . createDepend ( ) ) ; } return dependList ; } | Creates a dependency . |
15,915 | public JType getGenericType ( ) { SignatureAttribute sigAttr = ( SignatureAttribute ) getAttribute ( "Signature" ) ; if ( sigAttr != null ) { return getClassLoader ( ) . parseParameterizedType ( sigAttr . getSignature ( ) ) ; } return getType ( ) ; } | Gets the typename . |
15,916 | public JAnnotation [ ] getDeclaredAnnotations ( ) { if ( _annotations == null ) { Attribute attr = getAttribute ( "RuntimeVisibleAnnotations" ) ; if ( attr instanceof OpaqueAttribute ) { byte [ ] buffer = ( ( OpaqueAttribute ) attr ) . getValue ( ) ; try { ByteArrayInputStream is = new ByteArrayInputStream ( buffer ) ; ConstantPool cp = _jClass . getConstantPool ( ) ; _annotations = JavaAnnotation . parseAnnotations ( is , cp , getClassLoader ( ) ) ; } catch ( IOException e ) { log . log ( Level . FINER , e . toString ( ) , e ) ; } } if ( _annotations == null ) { _annotations = new JavaAnnotation [ 0 ] ; } } return _annotations ; } | Returns the declared annotations . |
15,917 | public JavaField export ( JavaClass cl , JavaClass target ) { JavaField field = new JavaField ( ) ; field . setName ( _name ) ; field . setDescriptor ( _descriptor ) ; field . setAccessFlags ( _accessFlags ) ; target . getConstantPool ( ) . addUTF8 ( _name ) ; target . getConstantPool ( ) . addUTF8 ( _descriptor ) ; for ( int i = 0 ; i < _attributes . size ( ) ; i ++ ) { Attribute attr = _attributes . get ( i ) ; field . addAttribute ( attr . export ( cl , target ) ) ; } return field ; } | exports the field |
15,918 | public void write ( Buffer buffer , boolean isEnd ) throws IOException { if ( _s == null ) { buffer . free ( ) ; return ; } try { _needsFlush = true ; if ( buffer . isDirect ( ) ) { _totalWriteBytes += buffer . length ( ) ; _s . write ( buffer . direct ( ) ) ; return ; } _totalWriteBytes += buffer . length ( ) ; while ( buffer . length ( ) > 0 ) { _writeBuffer . clear ( ) ; buffer . read ( _writeBuffer ) ; _writeBuffer . flip ( ) ; _s . write ( _writeBuffer ) ; } } catch ( IOException e ) { IOException exn = ClientDisconnectException . create ( this + ":" + e , e ) ; try { close ( ) ; } catch ( IOException e1 ) { } throw exn ; } finally { buffer . free ( ) ; } } | Writes an nio buffer to the socket . |
15,919 | private boolean accept ( SocketBar socket ) { PortTcp port = port ( ) ; try { while ( ! port ( ) . isClosed ( ) ) { if ( _serverSocket . accept ( socket ) ) { if ( port . isClosed ( ) ) { socket . close ( ) ; return false ; } else if ( isThrottle ( ) ) { socket . close ( ) ; } else { return true ; } } } } catch ( Throwable e ) { if ( port . isActive ( ) && log . isLoggable ( Level . FINER ) ) { log . log ( Level . FINER , e . toString ( ) , e ) ; } } return false ; } | Accepts a new connection . |
15,920 | private int writeShort ( String value , int offset , int end ) throws IOException { int ch ; OutputStreamWithBuffer os = _os ; byte [ ] buffer = os . buffer ( ) ; int bOffset = os . offset ( ) ; end = Math . min ( end , offset + buffer . length - bOffset ) ; for ( ; offset < end && ( ch = value . charAt ( offset ) ) < 0x80 ; offset ++ ) { buffer [ bOffset ++ ] = ( byte ) ch ; } os . offset ( bOffset ) ; return offset ; } | Writes a short string . |
15,921 | public boolean write ( byte [ ] buf1 , int off1 , int len1 , byte [ ] buf2 , int off2 , int len2 , boolean isEnd ) throws IOException { if ( len1 == 0 ) { write ( buf2 , off2 , len2 , isEnd ) ; return true ; } else return false ; } | Writes a pair of buffer to the underlying stream . |
15,922 | public String removeAgreement ( String agreementId ) { Invocation invocation = getJerseyClient ( ) . target ( getEndpoint ( ) + "/agreements/" + agreementId ) . request ( ) . header ( "Accept" , MediaType . APPLICATION_JSON ) . header ( "Content-Type" , MediaType . APPLICATION_JSON ) . buildDelete ( ) ; return invocation . invoke ( ) . readEntity ( String . class ) ; } | Creates proxied HTTP DELETE request to SeaClouds SLA core which removes the SLA from the SLA Core |
15,923 | public String notifyRulesReady ( Agreement slaAgreement ) { Entity content = Entity . entity ( "" , MediaType . TEXT_PLAIN ) ; Invocation invocation = getJerseyClient ( ) . target ( getEndpoint ( ) + "/seaclouds/commands/rulesready?agreementId=" + slaAgreement . getAgreementId ( ) ) . request ( ) . header ( "Accept" , MediaType . APPLICATION_JSON ) . header ( "Content-Type" , MediaType . APPLICATION_JSON ) . buildPost ( content ) ; return invocation . invoke ( ) . readEntity ( String . class ) ; } | Creates proxied HTTP POST request to SeaClouds SLA core which notifies that the Monitoring Rules were installed in Tower4Clouds . |
15,924 | public Agreement getAgreement ( String agreementId ) { return getJerseyClient ( ) . target ( getEndpoint ( ) + "/agreements/" + agreementId ) . request ( ) . header ( "Accept" , MediaType . APPLICATION_JSON ) . header ( "Content-Type" , MediaType . APPLICATION_JSON ) . buildGet ( ) . invoke ( ) . readEntity ( Agreement . class ) ; } | Creates proxied HTTP GET request to SeaClouds SLA core which retrieves the Agreement details |
15,925 | public Agreement getAgreementByTemplateId ( String slaAgreementTemplateId ) { return getJerseyClient ( ) . target ( getEndpoint ( ) + "/seaclouds/commands/fromtemplate?templateId=" + slaAgreementTemplateId ) . request ( ) . header ( "Accept" , MediaType . APPLICATION_JSON ) . header ( "Content-Type" , MediaType . APPLICATION_JSON ) . buildGet ( ) . invoke ( ) . readEntity ( Agreement . class ) ; } | Creates proxied HTTP GET request to SeaClouds SLA which returns the Agreement according to the template id |
15,926 | public GuaranteeTermsStatus getAgreementStatus ( String agreementId ) { return getJerseyClient ( ) . target ( getEndpoint ( ) + "/agreements/" + agreementId + "/guaranteestatus" ) . request ( ) . header ( "Accept" , MediaType . APPLICATION_JSON ) . header ( "Content-Type" , MediaType . APPLICATION_JSON ) . buildGet ( ) . invoke ( ) . readEntity ( GuaranteeTermsStatus . class ) ; } | Creates proxied HTTP GET request to SeaClouds SLA core which retrieves the Agreement Status |
15,927 | public List < Violation > getGuaranteeTermViolations ( Agreement agreement , GuaranteeTerm guaranteeTerm ) { String json = getJerseyClient ( ) . target ( getEndpoint ( ) + "/violations?agreementId=" + agreement . getAgreementId ( ) + "&guaranteeTerm=" + guaranteeTerm . getName ( ) ) . request ( ) . header ( "Accept" , MediaType . APPLICATION_JSON ) . header ( "Content-Type" , MediaType . APPLICATION_JSON ) . buildGet ( ) . invoke ( ) . readEntity ( String . class ) ; try { return mapper . readValue ( json , new TypeReference < List < Violation > > ( ) { } ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Creates proxied HTTP GET request to SeaClouds SLA core which retrieves the Agreement Term Violations |
15,928 | public T readObject ( InRawH3 is , InH3Amp in ) { T bean = newInstance ( ) ; in . ref ( bean ) ; FieldSerBase [ ] fields = _fields ; int size = fields . length ; for ( int i = 0 ; i < size ; i ++ ) { fields [ i ] . read ( bean , is , in ) ; } return bean ; } | Reads the bean from the stream . |
15,929 | public InvocationRouter < InvocationBaratine > buildRouter ( WebApp webApp ) { InjectorAmp inject = webApp . inject ( ) ; buildViews ( inject ) ; ArrayList < RouteMap > mapList = new ArrayList < > ( ) ; ServicesAmp manager = webApp . services ( ) ; ServiceRefAmp serviceRef = manager . newService ( new RouteService ( ) ) . ref ( ) ; while ( _routes . size ( ) > 0 ) { ArrayList < RouteWebApp > routes = new ArrayList < > ( _routes ) ; _routes . clear ( ) ; for ( RouteWebApp route : routes ) { mapList . addAll ( route . toMap ( inject , serviceRef ) ) ; } } RouteMap [ ] routeArray = new RouteMap [ mapList . size ( ) ] ; mapList . toArray ( routeArray ) ; return new InvocationRouterWebApp ( webApp , routeArray ) ; } | Builds the web - app s router |
15,930 | public void splitQueryAndUnescape ( I invocation , byte [ ] rawURIBytes , int uriLength ) throws IOException { for ( int i = 0 ; i < uriLength ; i ++ ) { if ( rawURIBytes [ i ] == '?' ) { i ++ ; String queryString = byteToChar ( rawURIBytes , i , uriLength - i , "ISO-8859-1" ) ; invocation . setQueryString ( queryString ) ; uriLength = i - 1 ; break ; } } String rawURIString = byteToChar ( rawURIBytes , 0 , uriLength , "ISO-8859-1" ) ; invocation . setRawURI ( rawURIString ) ; String decodedURI = normalizeUriEscape ( rawURIBytes , 0 , uriLength , _encoding ) ; decodedURI = decodeURI ( rawURIString , decodedURI , invocation ) ; String uri = normalizeUri ( decodedURI ) ; invocation . setURI ( uri ) ; } | Splits out the query string and unescape the value . |
15,931 | public void splitQuery ( I invocation , String rawURI ) throws IOException { int p = rawURI . indexOf ( '?' ) ; if ( p > 0 ) { invocation . setQueryString ( rawURI . substring ( p + 1 ) ) ; rawURI = rawURI . substring ( 0 , p ) ; } invocation . setRawURI ( rawURI ) ; String uri = normalizeUri ( rawURI ) ; invocation . setURI ( uri ) ; } | Splits out the query string and normalizes the URI assuming nothing needs unescaping . |
15,932 | public void normalizeURI ( I invocation , String rawURI ) throws IOException { invocation . setRawURI ( rawURI ) ; String uri = normalizeUri ( rawURI ) ; invocation . setURI ( uri ) ; } | Just normalize the URI . |
15,933 | private static String normalizeUriEscape ( byte [ ] rawUri , int i , int len , String encoding ) throws IOException { ByteToChar converter = allocateConverter ( ) ; if ( encoding == null ) { encoding = "utf-8" ; } try { converter . setEncoding ( encoding ) ; } catch ( UnsupportedEncodingException e ) { log . log ( Level . FINE , e . toString ( ) , e ) ; } try { while ( i < len ) { int ch = rawUri [ i ++ ] & 0xff ; if ( ch == '%' ) i = scanUriEscape ( converter , rawUri , i , len ) ; else converter . addByte ( ch ) ; } String result = converter . getConvertedString ( ) ; freeConverter ( converter ) ; return result ; } catch ( Exception e ) { throw new BadRequestException ( L . l ( "The URL contains escaped bytes unsupported by the {0} encoding." , encoding ) ) ; } } | Converts the escaped URI to a string . |
15,934 | private static int scanUriEscape ( ByteToChar converter , byte [ ] rawUri , int i , int len ) throws IOException { int ch1 = i < len ? ( rawUri [ i ++ ] & 0xff ) : - 1 ; if ( ch1 == 'u' ) { ch1 = i < len ? ( rawUri [ i ++ ] & 0xff ) : - 1 ; int ch2 = i < len ? ( rawUri [ i ++ ] & 0xff ) : - 1 ; int ch3 = i < len ? ( rawUri [ i ++ ] & 0xff ) : - 1 ; int ch4 = i < len ? ( rawUri [ i ++ ] & 0xff ) : - 1 ; converter . addChar ( ( char ) ( ( toHex ( ch1 ) << 12 ) + ( toHex ( ch2 ) << 8 ) + ( toHex ( ch3 ) << 4 ) + ( toHex ( ch4 ) ) ) ) ; } else { int ch2 = i < len ? ( rawUri [ i ++ ] & 0xff ) : - 1 ; int b = ( toHex ( ch1 ) << 4 ) + toHex ( ch2 ) ; ; converter . addByte ( b ) ; } return i ; } | Scans the next character from URI adding it to the converter . |
15,935 | private static int toHex ( int ch ) { if ( ch >= '0' && ch <= '9' ) return ch - '0' ; else if ( ch >= 'a' && ch <= 'f' ) return ch - 'a' + 10 ; else if ( ch >= 'A' && ch <= 'F' ) return ch - 'A' + 10 ; else return - 1 ; } | Convert a character to hex |
15,936 | public static PathImpl getLocalWorkDir ( ClassLoader loader ) { PathImpl path = _localWorkDir . get ( loader ) ; if ( path != null ) return path ; path = getTmpWorkDir ( ) ; _localWorkDir . setGlobal ( path ) ; try { path . mkdirs ( ) ; } catch ( java . io . IOException e ) { } return path ; } | Returns the local work directory . |
15,937 | public static void setLocalWorkDir ( PathImpl path , ClassLoader loader ) { try { if ( path instanceof MergePath ) path = ( ( MergePath ) path ) . getWritePath ( ) ; if ( path instanceof MemoryPath ) { String pathName = path . getPath ( ) ; path = WorkDir . getTmpWorkDir ( ) . lookup ( "qa/" + pathName ) ; } } catch ( Exception e ) { throw new RuntimeException ( e ) ; } _localWorkDir . set ( path , loader ) ; } | Sets the work dir . |
15,938 | public < S , T > Convert < S , T > converter ( Class < S > source , Class < T > target ) { ConvertFrom < S > convertType = getOrCreate ( source ) ; return convertType . converter ( target ) ; } | Returns the converter for a given source type and target type . |
15,939 | private < S > ConvertManagerTypeImpl < S > getOrCreate ( Class < S > sourceType ) { ConvertManagerTypeImpl < S > convertType = ( ConvertManagerTypeImpl < S > ) _convertMap . get ( sourceType ) ; if ( convertType != null ) { return convertType ; } convertType = new ConvertManagerTypeImpl < > ( sourceType ) ; _convertMap . putIfAbsent ( sourceType , convertType ) ; return ( ConvertManagerTypeImpl < S > ) _convertMap . get ( sourceType ) ; } | Returns the ConvertManagerTypeImpl for a given source type . |
15,940 | protected int nextOffset ( ) { int opcode = getCode ( ) [ _offset ] & 0xff ; int length = OP_LEN [ opcode ] ; switch ( opcode ) { case GOTO : case GOTO_W : case RET : case IRETURN : case LRETURN : case FRETURN : case DRETURN : case ARETURN : case RETURN : case ATHROW : return - 1 ; case TABLESWITCH : { int arg = _offset + 1 ; arg += ( 4 - arg % 4 ) % 4 ; int low = getInt ( arg + 4 ) ; int high = getInt ( arg + 8 ) ; return arg + 12 + ( high - low + 1 ) * 4 ; } case LOOKUPSWITCH : { return - 1 ; } case WIDE : { int op2 = getCode ( ) [ _offset + 1 ] & 0xff ; if ( op2 == IINC ) length = 5 ; else length = 3 ; break ; } } if ( length < 0 || length > 0x10 ) throw new UnsupportedOperationException ( L . l ( "{0}: can't handle opcode {1}" , "" + _offset , "" + getOpcode ( ) ) ) ; return _offset + length + 1 ; } | Goes to the next opcode . |
15,941 | public boolean isBranch ( ) { switch ( getOpcode ( ) ) { case IFNULL : case IFNONNULL : case IFNE : case IFEQ : case IFLT : case IFGE : case IFGT : case IFLE : case IF_ICMPEQ : case IF_ICMPNE : case IF_ICMPLT : case IF_ICMPGE : case IF_ICMPGT : case IF_ICMPLE : case IF_ACMPEQ : case IF_ACMPNE : case JSR : case JSR_W : case GOTO : case GOTO_W : return true ; } return false ; } | Returns true for a simple branch i . e . a branch with a simple target . |
15,942 | public ConstantPoolEntry getConstantArg ( ) { switch ( getOpcode ( ) ) { case LDC : return _javaClass . getConstantPool ( ) . getEntry ( getByteArg ( ) ) ; case LDC_W : return _javaClass . getConstantPool ( ) . getEntry ( getShortArg ( ) ) ; default : throw new UnsupportedOperationException ( ) ; } } | Returns a constant pool item . |
15,943 | public int getShort ( int offset ) { int b0 = getCode ( ) [ offset + 0 ] & 0xff ; int b1 = getCode ( ) [ offset + 1 ] & 0xff ; return ( short ) ( ( b0 << 8 ) + b1 ) ; } | Reads a short value . |
15,944 | public int getInt ( int offset ) { byte [ ] code = getCode ( ) ; int b0 = code [ offset + 0 ] & 0xff ; int b1 = code [ offset + 1 ] & 0xff ; int b2 = code [ offset + 2 ] & 0xff ; int b3 = code [ offset + 3 ] & 0xff ; return ( b0 << 24 ) + ( b1 << 16 ) + ( b2 << 8 ) + b3 ; } | Reads an int argument . |
15,945 | private void analyze ( Analyzer analyzer , boolean allowFlow , IntArray pendingTargets , IntArray completedTargets ) throws Exception { pending : while ( pendingTargets . size ( ) > 0 ) { int pc = pendingTargets . pop ( ) ; if ( allowFlow ) { if ( completedTargets . contains ( pc ) ) continue pending ; completedTargets . add ( pc ) ; } setOffset ( pc ) ; flow : do { pc = getOffset ( ) ; if ( pc < 0 ) throw new IllegalStateException ( ) ; if ( ! allowFlow ) { if ( completedTargets . contains ( pc ) ) break flow ; completedTargets . add ( pc ) ; } if ( isBranch ( ) ) { int targetPC = getBranchTarget ( ) ; if ( ! pendingTargets . contains ( targetPC ) ) pendingTargets . add ( targetPC ) ; } else if ( isSwitch ( ) ) { int [ ] switchTargets = getSwitchTargets ( ) ; for ( int i = 0 ; i < switchTargets . length ; i ++ ) { if ( ! pendingTargets . contains ( switchTargets [ i ] ) ) pendingTargets . add ( switchTargets [ i ] ) ; } } analyzer . analyze ( this ) ; } while ( next ( ) ) ; } } | Analyzes the code for a basic block . |
15,946 | public static ServerBartender currentSelfServer ( ) { NetworkSystem clusterService = current ( ) ; if ( clusterService == null ) throw new IllegalStateException ( L . l ( "{0} is not available in this context" , NetworkSystem . class . getSimpleName ( ) ) ) ; return clusterService . selfServer ( ) ; } | Returns the current network service . |
15,947 | public ConnectionTcp findConnectionByThreadId ( long threadId ) { for ( PortTcp listener : getPorts ( ) ) { ConnectionTcp conn = listener . findConnectionByThreadId ( threadId ) ; if ( conn != null ) return conn ; } return null ; } | Finds the TcpConnection given the threadId |
15,948 | private void updateClusterRoot ( ) { ArrayList < ServerHeartbeat > serverList = new ArrayList < > ( ) ; for ( ServerHeartbeat server : _serverSelf . getCluster ( ) . getServers ( ) ) { serverList . add ( server ) ; } Collections . sort ( serverList , ( x , y ) -> compareClusterRoot ( x , y ) ) ; UpdatePodBuilder builder = new UpdatePodBuilder ( ) ; builder . name ( "cluster_root" ) ; builder . cluster ( _serverSelf . getCluster ( ) ) ; builder . type ( PodType . solo ) ; builder . depth ( 16 ) ; for ( ServerHeartbeat server : serverList ) { builder . server ( server . getAddress ( ) , server . port ( ) ) ; } long sequence = CurrentTime . currentTime ( ) ; sequence = Math . max ( sequence , _clusterRoot . getSequence ( ) + 1 ) ; builder . sequence ( sequence ) ; UpdatePod update = builder . build ( ) ; updatePodProxy ( update ) ; } | Compare and merge the cluster_root with an update . |
15,949 | void onJoinStart ( ) { ArrayList < UpdatePod > updatePods = new ArrayList < > ( ) ; updateClusterRoot ( ) ; updatePods . addAll ( _podUpdateMap . values ( ) ) ; for ( UpdatePod updatePod : updatePods ) { updatePod ( updatePod ) ; } } | Update the sequence for all init pods after the join has completed . |
15,950 | private int keyColumnStart ( ) { int offset = 0 ; for ( int i = 0 ; i < _columns . length ; i ++ ) { if ( offset == _keyStart ) { return i ; } offset += _columns [ i ] . length ( ) ; } throw new IllegalStateException ( ) ; } | First key column index . |
15,951 | private int keyColumnEnd ( ) { int offset = 0 ; for ( int i = 0 ; i < _columns . length ; i ++ ) { if ( offset == _keyStart + _keyLength ) { return i ; } offset += _columns [ i ] . length ( ) ; } if ( offset == _keyStart + _keyLength ) { return _columns . length ; } throw new IllegalStateException ( ) ; } | End key column index the index after the final key . |
15,952 | public void autoFill ( RowCursor cursor ) { for ( Column col : columns ( ) ) { col . autoFill ( cursor . buffer ( ) , 0 ) ; } } | autofill generated values |
15,953 | void readStream ( InputStream is , byte [ ] buffer , int offset , RowCursor cursor ) throws IOException { for ( Column column : columns ( ) ) { column . readStream ( is , buffer , offset , cursor ) ; } for ( Column column : blobs ( ) ) { column . readStreamBlob ( is , buffer , offset , cursor ) ; } } | Fills a cursor given an input stream |
15,954 | int readCheckpoint ( ReadStream is , byte [ ] blockBuffer , int rowOffset , int blobTail ) throws IOException { int rowLength = length ( ) ; if ( rowOffset < blobTail ) { return - 1 ; } for ( Column column : columns ( ) ) { blobTail = column . readCheckpoint ( is , blockBuffer , rowOffset , rowLength , blobTail ) ; } return blobTail ; } | Reads column - specific data like blobs from the checkpoint . |
15,955 | public void validate ( byte [ ] buffer , int rowOffset , int rowHead , int blobTail ) { for ( Column column : columns ( ) ) { column . validate ( buffer , rowOffset , rowHead , blobTail ) ; } } | Validates the row checking for corruption . |
15,956 | static void incrementKey ( byte [ ] key ) { for ( int i = key . length - 1 ; i >= 0 ; i -- ) { int v = key [ i ] & 0xff ; if ( v < 0xff ) { key [ i ] = ( byte ) ( v + 1 ) ; return ; } key [ i ] = 0 ; } } | Increment a key . |
15,957 | static void decrementKey ( byte [ ] key ) { for ( int i = key . length - 1 ; i >= 0 ; i -- ) { int v = key [ i ] & 0xff ; if ( v > 0 ) { key [ i ] = ( byte ) ( v - 1 ) ; return ; } key [ i ] = ( byte ) 0xff ; } } | Decrement a key . |
15,958 | public void setThreadMax ( int max ) { if ( max == _threadMax ) { return ; } if ( max <= 0 ) { max = DEFAULT_THREAD_MAX ; } if ( max < _idleMin ) throw new ConfigException ( L . l ( "IdleMin ({0}) must be less than ThreadMax ({1})" , _idleMin , max ) ) ; if ( max < 1 ) throw new ConfigException ( L . l ( "ThreadMax ({0}) must be greater than zero" , max ) ) ; _threadMax = max ; update ( ) ; } | Sets the maximum number of threads . |
15,959 | public void setIdleMax ( int max ) { if ( max == _idleMax ) { return ; } if ( max <= 0 ) { max = DEFAULT_IDLE_MAX ; } if ( _threadMax < max ) throw new ConfigException ( L . l ( "IdleMax ({0}) must be less than ThreadMax ({1})" , max , _threadMax ) ) ; if ( max <= 0 ) throw new ConfigException ( L . l ( "IdleMax ({0}) must be greater than 0." , max ) ) ; _idleMax = max ; update ( ) ; } | Sets the maximum number of idle threads . |
15,960 | public boolean isIdleExpire ( ) { if ( ! _lifecycle . isActive ( ) ) return true ; long now = currentTimeActual ( ) ; long idleExpire = _threadIdleExpireTime . get ( ) ; int idleCount = _idleCount . get ( ) ; if ( _idleMin < idleCount ) { long nextIdleExpire = now + _idleTimeout ; if ( _idleMax < idleCount && _idleMin < _idleMax ) { _threadIdleExpireTime . compareAndSet ( idleExpire , nextIdleExpire ) ; return true ; } else if ( idleExpire < now && _threadIdleExpireTime . compareAndSet ( idleExpire , nextIdleExpire ) ) { return true ; } } return false ; } | Returns true if the thread should expire instead of going to the idle state . |
/**
 * Creates a new ReadStream reading bytes from the given string.
 */
public static ReadStreamOld open(String string)
{
  VfsStringReader ss = new VfsStringReader(string);
  ReadStreamOld rs = new ReadStreamOld(ss);

  try {
    rs.setEncoding("UTF-8");
  } catch (Exception e) {
    // ignored: UTF-8 is always available, so this should never fire
  }

  return rs;
}
15,962 | public static void clearFreeLists ( ) { while ( _freeStandard . allocate ( ) != null ) { } while ( _freeSmall . allocate ( ) != null ) { } while ( _freeLarge . allocate ( ) != null ) { } } | Free data for OOM . |
/**
 * Sends a write-request to the sequence writer for the page.  Writes a
 * delta when allowed (positive delta length, same segment sequence, and a
 * delta-capable stub); otherwise replaces this page with a compacted or
 * plain copy and writes it in full.
 */
@InService(PageServiceImpl.class)
void writeImpl(TableKelp table,
               PageServiceImpl pageServiceImpl,
               TableWriterService readWrite,
               SegmentStream sOut,
               long oldSequence,
               int saveLength,
               int saveTail)
{
  Objects.requireNonNull(sOut);

  if (saveLength <= 0
      || oldSequence != sOut.getSequence()
      || _stub == null
      || ! _stub.allowDelta()) {
    // full write path: build the replacement page
    PageLeafImpl newPage;

    if (! isDirty() && (_blocks.length == 0 || _blocks[0].isCompact())) {
      newPage = copy(getSequence());
    }
    else {
      newPage = compact(table);
    }

    int sequenceWrite = newPage.nextWriteSequence();

    if (! pageServiceImpl.compareAndSetLeaf(this, newPage)
        && ! pageServiceImpl.compareAndSetLeaf(_stub, newPage)) {
      // NOTE(review): leftover debug output for a failed page swap --
      // should probably be a log statement
      System.out.println("HMPH: " + pageServiceImpl.getPage(getId())
                         + " " + this + " " + _stub);
    }

    // the write length/tail now describe the freshly-built page
    saveLength = newPage.getDataLengthWritten();
    saveTail = newPage.getSaveTail();

    newPage.clearDirty();

    readWrite.writePage(newPage, sOut, oldSequence, saveLength, saveTail,
                        sequenceWrite,
                        Result.of(x -> newPage.afterDataFlush(pageServiceImpl,
                                                              sequenceWrite)));
  }
  else {
    // delta write path: this page instance stays current
    int sequenceWrite = nextWriteSequence();

    clearDirty();

    readWrite.writePage(this, sOut, oldSequence, saveLength, saveTail,
                        sequenceWrite,
                        Result.of(x -> afterDataFlush(pageServiceImpl,
                                                      sequenceWrite)));
  }
}
/**
 * Callback from the writer and gc to write the page.  Writes a delta when
 * the page stays in the same segment sequence and its stub allows it;
 * otherwise writes a full (optionally compressed) checkpoint and replaces
 * the stub with the new on-disk location.
 *
 * @return this page on success, or null when the segment lacks space or
 *         the target sequence is older than the scheduled one
 */
@InService(TableWriterService.class)
public Page writeCheckpoint(TableKelp table,
                            OutSegment sOut,
                            long oldSequence,
                            int saveLength,
                            int saveTail,
                            int saveSequence)
  throws IOException
{
  // NOTE(review): saveSequence is unused in this method -- confirm intent
  BlockLeaf[] blocks = _blocks;

  int size = BLOCK_SIZE * blocks.length;

  WriteStream os = sOut.out();

  int available = sOut.getAvailable();

  // bail out if the worst-case page size won't fit in this segment
  if (available < os.position() + size) {
    return null;
  }

  long newSequence = sOut.getSequence();

  // never write into an older sequence than the page was scheduled for
  if (newSequence < oldSequence) {
    return null;
  }

  compareAndSetSequence(oldSequence, newSequence);

  PageLeafStub stub = _stub;

  Type type;

  if (saveLength > 0 && oldSequence == newSequence
      && stub != null && stub.allowDelta()) {
    // same segment generation: append only the delta since the last save
    int offset = (int) os.position();

    type = writeDelta(table, sOut.out(), saveLength);

    int length = (int) (os.position() - offset);

    stub.addDelta(table, offset, length);
  }
  else {
    // full checkpoint, optionally through the compressing stream
    int offset = (int) os.position();

    if (sOut.isCompress()) {
      try (OutputStream zOut = sOut.outCompress()) {
        type = writeCheckpointFull(table, zOut, saveTail);
      }
    }
    else {
      type = writeCheckpointFull(table, sOut.out(), saveTail);
    }

    int length = (int) (os.position() - offset);

    // replace the stub so future reads resolve to the new location
    stub = new PageLeafStub(getId(), getNextId(), sOut.getSegment(),
                            offset, length);

    stub.setLeafRef(this);

    _stub = stub;
  }

  _writeType = type;

  return this;
}
/**
 * Compacts the leaf by rebuilding the delta entries and discarding
 * expired non-insert (e.g. removed) entries.  Returns a new, sorted,
 * validated page; this page is left untouched.
 */
private PageLeafImpl compact(TableKelp table)
{
  // expiration times are compared in seconds
  long now = CurrentTime.currentTime() / 1000;

  Set<PageLeafEntry> entries = fillEntries(table);

  ArrayList<BlockLeaf> blocks = new ArrayList<>();

  BlockLeaf block = new BlockLeaf(getId());
  blocks.add(block);

  Row row = table.row();

  for (PageLeafEntry entry : entries) {
    // drop expired non-insert entries entirely
    if (entry.getCode() != INSERT && entry.getExpires() <= now) {
      continue;
    }

    // open a fresh block whenever the current one is full
    while (! block.addEntry(row, entry)) {
      block = new BlockLeaf(getId());
      blocks.add(block);
    }
  }

  PageLeafImpl newPage
    = new PageLeafImpl(getId(), getNextId(), getSequence(),
                       _table, getMinKey(), getMaxKey(), blocks);

  newPage.validate(table);
  newPage.toSorted(table);

  // carry the dirty flag over so the compacted page is still written out
  if (isDirty()) {
    newPage.setDirty();
  }

  if (_stub != null) {
    _stub.copyToCompact(newPage);
  }

  return newPage;
}
/**
 * Writes the page to the output stream as a full checkpoint: min/max keys,
 * the unsaved block count, then the unsaved blocks in order.
 *
 * @param saveTail presumably encodes the already-saved prefix as
 *        (saved blocks * BLOCK_SIZE + row offset) -- TODO confirm
 */
@InService(SegmentServiceImpl.class)
private Type writeCheckpointFull(TableKelp table, OutputStream os, int saveTail)
  throws IOException
{
  // NOTE(review): the table parameter is unused here -- confirm intent
  os.write(getMinKey());
  os.write(getMaxKey());

  BlockLeaf[] blocks = _blocks;

  // index of the first block that still needs saving
  int index = blocks.length - (saveTail / BLOCK_SIZE);
  int rowFirst = saveTail % BLOCK_SIZE;

  BitsUtil.writeInt16(os, blocks.length - index);

  if (blocks.length <= index) {
    // nothing left to save
    return Type.LEAF;
  }

  // the first unsaved block may be partial; the rest are written whole
  blocks[index].writeCheckpointFull(os, rowFirst);

  for (int i = index + 1; i < blocks.length; i++) {
    blocks[i].writeCheckpointFull(os, 0);
  }

  return Type.LEAF;
}
/**
 * Reads a full checkpoint entry into a new, sorted, validated leaf page.
 */
@InService(PageServiceImpl.class)
static PageLeafImpl readCheckpointFull(TableKelp table,
                                       PageServiceImpl pageActor,
                                       InputStream is,
                                       int pid,
                                       int nextPid,
                                       long sequence)
  throws IOException
{
  // NOTE(review): pageActor is unused in this method -- confirm intent
  byte[] minKey = new byte[table.getKeyLength()];
  byte[] maxKey = new byte[table.getKeyLength()];

  int count = 0;
  BlockLeaf[] blocks;

  IoUtil.readAll(is, minKey, 0, minKey.length);
  IoUtil.readAll(is, maxKey, 0, maxKey.length);

  count = BitsUtil.readInt16(is);

  blocks = new BlockLeaf[count];

  for (int i = 0; i < count; i++) {
    blocks[i] = new BlockLeaf(pid);

    blocks[i].readCheckpointFull(is);
  }

  // an empty checkpoint still gets one empty block so the page is usable
  if (count == 0) {
    blocks = new BlockLeaf[] { new BlockLeaf(pid) };
  }

  PageLeafImpl page = new PageLeafImpl(pid, nextPid, sequence,
                                       table, minKey, maxKey, blocks);

  page.clearDirty();
  page.validate(table);
  page.toSorted(table);

  return page;
}
/**
 * Reads a delta entry from the checkpoint, replaying REMOVE and INSERT
 * records into the leaf blocks.  Rows grow downward from rowHead while
 * blob data grows upward to blobTail; a collision spills into a new block.
 */
void readCheckpointDelta(TableKelp table,
                         PageServiceImpl pageActor,
                         ReadStream is,
                         int length)
  throws IOException
{
  // NOTE(review): pageActor is unused in this method -- confirm intent
  Row row = table.row();
  int removeLength = row.removeLength();
  int rowLength = row.length();

  BlockLeaf block = _blocks[0];

  long endPosition = is.position() + length;

  int rowHead = block.rowHead();
  int blobTail = block.getBlobTail();

  long pos;

  while ((pos = is.position()) < endPosition) {
    // peek at the record code; the record reader re-reads the byte
    int code = is.read();
    is.unread();

    code = code & CODE_MASK;

    if (code == REMOVE) {
      rowHead -= removeLength;

      // spill into a fresh block when rows would collide with blob data
      if (rowHead < blobTail) {
        block = extendBlocks();

        rowHead = BLOCK_SIZE - removeLength;
        blobTail = 0;
      }

      is.readAll(block.getBuffer(), rowHead, removeLength);
    }
    else if (code == INSERT) {
      rowHead -= rowLength;

      // a negative blobTail means the row + blob didn't fit: rewind the
      // stream to the record start and retry in a new block
      while ((blobTail = row.readCheckpoint(is, block.getBuffer(),
                                            rowHead, blobTail)) < 0) {
        is.position(pos);

        block = extendBlocks();

        rowHead = BLOCK_SIZE - rowLength;
        blobTail = 0;
      }
    }
    else {
      throw new IllegalStateException(L.l("{0} Corrupted checkpoint at pos={1} with code {2}",
                                          this, pos, code));
    }

    block.rowHead(rowHead);
    block.setBlobTail(blobTail);
  }

  clearDirty();
  validate(table);
}
15,969 | void validate ( TableKelp table ) { if ( ! table . isValidate ( ) ) { return ; } Row row = table . row ( ) ; for ( BlockLeaf block : _blocks ) { block . validateBlock ( row ) ; } } | Validates the leaf blocks |
15,970 | void addDelta ( TableKelp db , int offset , int length ) { if ( _delta == null ) { _delta = new int [ 2 * db . getDeltaMax ( ) ] ; } _delta [ _deltaTail ++ ] = offset ; _delta [ _deltaTail ++ ] = length ; } | Adds a delta record to the leaf stub . |
15,971 | public String [ ] fetchAndPlan ( String aam ) throws ParsingException , IOException { String offerings = this . fetchOfferings ( ) ; return this . plan ( aam , offerings ) ; } | Fetches offerings from the Discoverer and makes plans |
15,972 | public String [ ] plan ( String aam , String uniqueOfferingsTosca ) throws ParsingException , IOException { log . info ( "Planning for aam: \n" + aam ) ; log . info ( "Getting Offeing Step: Start" ) ; Map < String , Pair < NodeTemplate , String > > offerings = parseOfferings ( uniqueOfferingsTosca ) ; log . info ( "Getting Offeing Step: Complete" ) ; log . info ( "\nNot deployable offering have been filtered!" ) ; log . info ( "\nDeployable offerings have location: " + deployableProviders ) ; log . info ( "Got " + offerings . size ( ) + " offerings from discoverer:" ) ; log . info ( "Matchmaking Step: Start" ) ; Matchmaker mm = new Matchmaker ( ) ; Map < String , HashSet < String > > matchingOfferings = mm . match ( ToscaSerializer . fromTOSCA ( aam ) , offerings ) ; log . info ( "Matchmaking Step: Complete" ) ; String mmOutput = "" ; try { mmOutput = generateMMOutput2 ( matchingOfferings , offerings ) ; } catch ( JsonProcessingException e ) { log . error ( "Error preparing matchmaker output for optimization" , e ) ; } for ( String s : matchingOfferings . keySet ( ) ) { log . info ( "Module " + s + "has matching offerings: " + matchingOfferings . get ( s ) ) ; } log . info ( "Optimization Step: Start" ) ; log . info ( "Calling optimizer with suitable offerings: \n" + mmOutput ) ; Optimizer optimizer = new Optimizer ( ) ; String [ ] outputPlans = optimizer . optimize ( aam , mmOutput ) ; log . info ( "Optimzer result: " + Arrays . asList ( outputPlans ) ) ; log . info ( "Optimization Step: Complete" ) ; return outputPlans ; } | Makes plans starting from the AAM and a String containing the TOSCA of the offerings available to generate the plans |
15,973 | public String [ ] fetchAndRePlan ( String aam , List < String > modulesToFilter ) throws ParsingException , IOException { String offerings = this . fetchOfferings ( ) ; return this . rePlan ( aam , offerings , modulesToFilter ) ; } | Fetches offerings from the Discoverer and replan |
15,974 | private String fetchOfferings ( ) { DiscovererFetchallResult allOfferings = null ; try { String discovererOutput = discovererClient . getRequest ( "fetch_all" , Collections . EMPTY_LIST ) ; ObjectMapper mapper = new ObjectMapper ( ) ; allOfferings = mapper . readValue ( discovererOutput , DiscovererFetchallResult . class ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } String offerings = allOfferings . offering ; return offerings ; } | Fetches the list offerings from the Discoverer |
15,975 | ModuleMarshal marshal ( Class < ? > sourceType , Class < ? > targetType , Class < ? > declaredTargetType ) { ImportKey key = new ImportKey ( sourceType , targetType ) ; ModuleMarshal marshal = _marshalMap . get ( key ) ; if ( marshal != null ) { return marshal ; } marshal = marshalImpl ( sourceType , targetType , declaredTargetType ) ; _marshalMap . putIfAbsent ( key , marshal ) ; return marshal ; } | Returns the marshal to convert from the sourceType to the targetType . |
15,976 | public static int getBiggestPrime ( long value ) { for ( int i = PRIMES . length - 1 ; i >= 0 ; i -- ) { if ( PRIMES [ i ] <= value ) { return PRIMES [ i ] ; } } return 2 ; } | Returns the largest prime less than the given bits . |
15,977 | public void setCertificateChainFile ( Path certificateChainFile ) { try { _certificateChainFile = certificateChainFile . toRealPath ( ) . toString ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Sets the certificateChainFile . |
15,978 | public void setCACertificatePath ( Path caCertificatePath ) { try { _caCertificatePath = caCertificatePath . toRealPath ( ) . toString ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Sets the caCertificatePath . |
15,979 | public void setCACertificateFile ( Path caCertificateFile ) { try { _caCertificateFile = caCertificateFile . toRealPath ( ) . toString ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Sets the caCertificateFile . |
15,980 | public void setCARevocationPath ( Path caRevocationPath ) { try { _caRevocationPath = caRevocationPath . toRealPath ( ) . toString ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Sets the caRevocationPath . |
15,981 | public void setCARevocationFile ( Path caRevocationFile ) { try { _caRevocationFile = caRevocationFile . toRealPath ( ) . toString ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Sets the caRevocationFile . |
15,982 | public void addEngineCommand ( String command ) { if ( command == null || command . length ( ) == 0 ) { return ; } int p = command . indexOf ( ':' ) ; String arg = "" ; if ( p > 0 ) { arg = command . substring ( p + 1 ) ; command = command . substring ( 0 , p ) ; } StringBuilder sb = new StringBuilder ( ) ; sb . append ( _engineCommands ) ; sb . append ( "\1" ) ; sb . append ( command ) ; sb . append ( "\1" ) ; sb . append ( arg ) ; _engineCommands = sb . toString ( ) ; } | Sets the engine - commands |
15,983 | public void nextProtocols ( String ... protocols ) { if ( protocols == null || protocols . length == 0 ) { _nextProtocols = null ; return ; } StringBuilder sb = new StringBuilder ( ) ; for ( String protocol : protocols ) { if ( protocol == null || "" . equals ( protocol ) ) { continue ; } sb . append ( ( char ) protocol . length ( ) ) ; sb . append ( protocol ) ; } _nextProtocols = sb . toString ( ) ; } | Sets the next protocols . |
15,984 | public void setVerifyClient ( String verifyClient ) throws ConfigException { if ( ! "optional_no_ca" . equals ( verifyClient ) && ! "optional-no-ca" . equals ( verifyClient ) && ! "optional" . equals ( verifyClient ) && ! "require" . equals ( verifyClient ) && ! "none" . equals ( verifyClient ) ) throw new ConfigException ( L . l ( "'{0}' is an unknown value for verify-client. Valid values are 'optional-no-ca', 'optional', and 'require'." , verifyClient ) ) ; if ( "none" . equals ( verifyClient ) ) _verifyClient = null ; else _verifyClient = verifyClient ; } | Sets the verifyClient . |
15,985 | public synchronized ByteBuffer allocateHeaderBuffer ( ) { Stack < ByteBuffer > pool = pools . get ( HEADER_SIZE ) ; if ( pool . isEmpty ( ) ) { return ByteBuffer . allocate ( HEADER_SIZE ) ; } else { return pool . pop ( ) ; } } | Returns a buffer that has the size of the Bitmessage network message header 24 bytes . |
/**
 * Returns true if the compile doesn't avoid the dependency, i.e. the
 * recompile check considers the source modified.  The check runs on a
 * worker thread with a 5-second cap; a timeout is treated as modified.
 */
public boolean compileIsModified()
{
  if (_compileIsModified)
    return true;

  CompileThread compileThread = new CompileThread();

  ThreadPool.current().start(compileThread);

  try {
    synchronized (compileThread) {
      // wait at most 5s for the compile check to finish
      if (! compileThread.isDone())
        compileThread.wait(5000);
    }

    if (_compileIsModified)
      return true;
    else if (compileThread.isDone()) {
      // compile finished clean; fall back to the reload check
      return reloadIsModified();
    }
    else
      return true;   // timed out: assume modified
  } catch (Throwable e) {
    // NOTE(review): all errors (including interrupts) are swallowed and
    // reported as unmodified -- confirm this is intended
  }

  return false;
}
15,987 | ServiceRefAmp lookup ( String path , PodRef podCaller ) { if ( _linkServiceRef . address ( ) . startsWith ( "local:" ) ) { int p = path . indexOf ( '/' , 1 ) ; if ( p > 0 ) { return ( ServiceRefAmp ) _rampManager . service ( path . substring ( p ) ) ; } else { return ( ServiceRefAmp ) _rampManager . service ( path ) ; } } else { ServiceRefAmp linkRef = getLinkServiceRef ( podCaller ) ; return linkRef . onLookup ( path ) ; } } | Lookup returns a ServiceRef for the foreign path and calling pod . |
/**
 * Returns the StdoutStream singleton.
 */
public static StdoutStream create()
{
  if (_stdout == null) {
    // NOTE(review): lazy init is not thread-safe; concurrent callers may
    // observe two instances -- confirm this only runs at startup
    _stdout = new StdoutStream();

    ConstPath path = new ConstPath(null, _stdout);
    path.setScheme("stdout");
    // the ConstPath local is not used further; presumably the constructor
    // or setScheme registers the "stdout" scheme -- verify
  }

  return _stdout;
}
15,989 | public void write ( byte [ ] buf , int offset , int length , boolean isEnd ) throws IOException { System . out . write ( buf , offset , length ) ; System . out . flush ( ) ; } | Writes the data to the System . out . |
15,990 | public static ThreadDump create ( ) { ThreadDump threadDump = _threadDumpRef . get ( ) ; if ( threadDump == null ) { threadDump = new ThreadDumpPro ( ) ; _threadDumpRef . compareAndSet ( null , threadDump ) ; threadDump = _threadDumpRef . get ( ) ; } return threadDump ; } | Returns the singleton instance creating if necessary . An instance of com . caucho . server . admin . ProThreadDump will be returned if available and licensed . ProThreadDump includes the URI of the request the thread is processing if applicable . |
/**
 * Returns a dump of all JVM threads, grouped by state with RUNNABLE
 * threads first.
 *
 * @param depth maximum stack depth per thread
 * @param onlyActive when true, only RUNNABLE threads are included
 */
public String getThreadDump(int depth, boolean onlyActive)
{
  ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();

  long[] ids = threadBean.getAllThreadIds();
  ThreadInfo[] info = threadBean.getThreadInfo(ids, depth);

  StringBuilder sb = new StringBuilder();

  sb.append("Thread Dump generated " + new Date(CurrentTime.currentTime()));

  Arrays.sort(info, new ThreadCompare());

  // RUNNABLE threads first; the boolean argument presumably selects a
  // sub-category inside buildThreads -- confirm against its definition
  buildThreads(sb, info, Thread.State.RUNNABLE, false);
  buildThreads(sb, info, Thread.State.RUNNABLE, true);

  if (! onlyActive) {
    buildThreads(sb, info, Thread.State.BLOCKED, false);
    buildThreads(sb, info, Thread.State.WAITING, false);
    buildThreads(sb, info, Thread.State.TIMED_WAITING, false);
    buildThreads(sb, info, null, false);   // any remaining states
  }

  return sb.toString();
}
/**
 * Reads an http/2 WINDOW_UPDATE frame, crediting the connection window
 * (stream 0) or the target stream's flow-control window.
 *
 * @return false on a malformed frame, true otherwise
 */
private boolean readFlow(ReadStream is, int length, int streamId)
  throws IOException
{
  // WINDOW_UPDATE payload is exactly one 4-byte window increment
  if (length != 4) {
    error("Invalid window update length {0}", length);

    return false;
  }

  int credit = BitsUtil.readInt(is);

  if (streamId == 0) {
    // stream 0 credits the connection-level window
    _conn.channelZero().addSendCredit(credit);

    return true;
  }

  ChannelHttp2 channel = _conn.getChannel(streamId);

  if (channel == null) {
    // unknown or already-closed stream: ignore the update
    return true;
  }

  channel.getOutChannel().addSendCredit(_conn.outHttp(), credit);

  return true;
}
/**
 * Reads an http/2 GOAWAY frame, marks the connection as going away, and
 * notifies the handler once no streams remain open.
 */
private boolean readGoAway(ReadStream is, int length)
  throws IOException
{
  int lastStream = BitsUtil.readInt(is);
  int errorCode = BitsUtil.readInt(is);

  // skip any debug data beyond the fixed 8-byte prefix
  is.skip(length - 8);

  // NOTE(review): lastStream and errorCode are read but never used --
  // confirm whether they should be logged or acted upon
  _isGoAway = true;

  _conn.onReadGoAway();

  if (onCloseStream() <= 0) {
    _inHandler.onGoAway();
  }

  return true;
}
/**
 * Reads the initial metadata for the store file as a whole: magic, nonce,
 * header key/value pairs, and the segment-size table, all folded into a
 * CRC that must match the stored value.
 *
 * @return true if the header is well-formed and the CRC matches
 */
private boolean readMetaHeader()
  throws IOException
{
  try (ReadStream is = openRead(0, META_SEGMENT_SIZE)) {
    int crc = 17;   // CRC seed

    long magic = BitsUtil.readLong(is);

    if (magic != KELP_MAGIC) {
      // NOTE(review): debug output to stdout -- should probably be a log
      System.out.println("WRONG_MAGIC: " + magic);

      return false;
    }

    crc = Crc32Caucho.generate(crc, magic);

    _nonce = BitsUtil.readInt(is);
    crc = Crc32Caucho.generateInt32(crc, _nonce);

    int headers = BitsUtil.readInt(is);
    crc = Crc32Caucho.generateInt32(crc, headers);

    // header key/value pairs are folded into the CRC but otherwise unused
    for (int i = 0; i < headers; i++) {
      int key = BitsUtil.readInt(is);
      crc = Crc32Caucho.generateInt32(crc, key);

      int value = BitsUtil.readInt(is);
      crc = Crc32Caucho.generateInt32(crc, value);
    }

    int count = BitsUtil.readInt(is);
    crc = Crc32Caucho.generateInt32(crc, count);

    // NOTE(review): segmentSizes is filled but not used after this method
    ArrayList<Integer> segmentSizes = new ArrayList<>();

    for (int i = 0; i < count; i++) {
      int size = BitsUtil.readInt(is);
      crc = Crc32Caucho.generateInt32(crc, size);

      segmentSizes.add(size);
    }

    int crcFile = BitsUtil.readInt(is);

    if (crc != crcFile) {
      System.out.println("MISMATCHED_CRC: " + crcFile);

      return false;
    }

    _metaSegment = new SegmentExtent10(0, 0, META_SEGMENT_SIZE);
    _segmentId = 1;

    // remember where the per-entry metadata begins
    _metaOffset = is.position();
  }

  return true;
}
15,995 | private boolean readMetaData ( ) throws IOException { SegmentExtent10 segment = _metaSegment ; try ( ReadStream is = openRead ( segment . address ( ) , segment . length ( ) ) ) { is . position ( META_OFFSET ) ; while ( readMetaEntry ( is ) ) { } } return true ; } | Reads the metadata entries for the tables and the segments . |
15,996 | private boolean readMetaTable ( ReadStream is , int crc ) throws IOException { byte [ ] key = new byte [ TABLE_KEY_SIZE ] ; is . read ( key , 0 , key . length ) ; crc = Crc32Caucho . generate ( crc , key ) ; int rowLength = BitsUtil . readInt16 ( is ) ; crc = Crc32Caucho . generateInt16 ( crc , rowLength ) ; int keyOffset = BitsUtil . readInt16 ( is ) ; crc = Crc32Caucho . generateInt16 ( crc , keyOffset ) ; int keyLength = BitsUtil . readInt16 ( is ) ; crc = Crc32Caucho . generateInt16 ( crc , keyLength ) ; int dataLength = BitsUtil . readInt16 ( is ) ; crc = Crc32Caucho . generateInt16 ( crc , dataLength ) ; byte [ ] data = new byte [ dataLength ] ; is . read ( data ) ; crc = Crc32Caucho . generate ( crc , data ) ; int crcFile = BitsUtil . readInt ( is ) ; if ( crcFile != crc ) { log . fine ( "meta-table crc mismatch" ) ; System . out . println ( "meta-table crc mismatch" ) ; return false ; } RowUpgrade row = new RowUpgrade10 ( keyOffset , keyLength ) . read ( data ) ; TableEntry10 table = new TableEntry10 ( key , rowLength , keyOffset , keyLength , row ) ; _tableList . add ( table ) ; return true ; } | Reads metadata for a table . |
/**
 * Reads the segment metadata -- the sequence and table key -- which is
 * stored in the final block of each segment extent.
 */
private void readSegments()
  throws IOException
{
  for (SegmentExtent10 extent : _segmentExtents) {
    try (ReadStream is = openRead(extent.address(), extent.length())) {
      // the trailer lives in the last BLOCK_SIZE bytes of the extent
      is.skip(extent.length() - BLOCK_SIZE);

      long sequence = BitsUtil.readLong(is);

      byte[] tableKey = new byte[TABLE_KEY_SIZE];
      is.readAll(tableKey, 0, tableKey.length);

      // a zero sequence marks an unused/free segment
      if (sequence > 0) {
        Segment10 segment = new Segment10(sequence, tableKey, extent);

        _segments.add(segment);
      }
    }
  }
}
15,998 | private void upgradeDatabase ( KelpUpgrade upgradeKelp ) throws IOException { Collections . sort ( _tableList , ( x , y ) -> x . row ( ) . name ( ) . compareTo ( y . row ( ) . name ( ) ) ) ; for ( TableEntry10 table : _tableList ) { TableUpgrade upgradeTable = upgradeKelp . table ( table . key ( ) , table . row ( ) ) ; upgradeTable ( table , upgradeTable ) ; } } | Upgrade the store |
/**
 * Upgrades rows from a table: rebuilds the page map from the table index,
 * then upgrades each leaf page and any delta records attached to it.
 */
private void upgradeTable(TableEntry10 table, TableUpgrade upgradeTable)
  throws IOException
{
  _pageMap = new TreeMap<>();

  readTableIndex(table);

  for (Page10 page : _pageMap.values()) {
    upgradeLeaf(table, upgradeTable, page);

    List<Delta10> deltas = page.deltas();

    if (deltas != null) {
      // deltas are applied in stored order on top of the base leaf
      for (Delta10 delta : deltas) {
        upgradeDelta(table, upgradeTable, page, delta);
      }
    }
  }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.