idx int64 0 165k | question stringlengths 73 4.15k | target stringlengths 5 918 | len_question int64 21 890 | len_target int64 3 255 |
|---|---|---|---|---|
12,000 | public boolean isAuthorizedForBucket ( AuthContext ctx , Bucket bucket ) { if ( ctx . getUsername ( ) . equals ( adminName ) ) { return ctx . getPassword ( ) . equals ( adminPass ) ; } if ( bucket . getName ( ) . equals ( ctx . getUsername ( ) ) ) { return bucket . getPassword ( ) . equals ( ctx . getPassword ( ) ) ; } return bucket . getPassword ( ) . isEmpty ( ) && ctx . getPassword ( ) . isEmpty ( ) ; } | Determine if the given credentials allow access to the bucket | 121 | 12 |
12,001 | public boolean isAdministrator ( AuthContext ctx ) { return ctx . getUsername ( ) != null && ctx . getUsername ( ) . equals ( adminName ) && ctx . getPassword ( ) != null && ctx . getPassword ( ) . equals ( adminPass ) ; } | Check if the given credentials allow administrative access | 64 | 8 |
12,002 | public static BinaryResponse create ( BinaryCommand command , MemcachedServer server , ErrorCode errOk , ErrorCode errNotSupp ) { if ( ! server . isCccpEnabled ( ) ) { return new BinaryResponse ( command , errNotSupp ) ; } String config = server . getBucket ( ) . getJSON ( ) ; config = config . replaceAll ( Pattern . quote ( server . getHostname ( ) ) , Matcher . quoteReplacement ( "$HOST" ) ) ; byte [ ] jsBytes = config . getBytes ( ) ; ByteBuffer buf = create ( command , errOk , Datatype . RAW . value ( ) , 0 , 0 , jsBytes . length , 0 ) ; buf . put ( jsBytes ) ; buf . rewind ( ) ; return new BinaryResponse ( buf ) ; } | Create a new response which contains a cluster configuration if supported | 175 | 11 |
12,003 | public void loadDocuments ( String docsFile ) throws IOException { ZipFile zipFile = new ZipFile ( docsFile ) ; Enumeration < ? extends ZipEntry > entries = zipFile . entries ( ) ; int numDocs = 0 ; int numDesigns = 0 ; while ( entries . hasMoreElements ( ) ) { ZipEntry ent = entries . nextElement ( ) ; String fName = ent . getName ( ) ; InputStream is = zipFile . getInputStream ( ent ) ; String contents = ReaderUtils . fromStream ( is ) ; Matcher mIsDoc = ptnDOCUMENT . matcher ( fName ) ; if ( mIsDoc . matches ( ) ) { String docId = mIsDoc . group ( 1 ) ; handleDocument ( docId , contents ) ; numDocs ++ ; continue ; } Matcher mIsDesign = ptnDESIGN . matcher ( fName ) ; if ( mIsDesign . matches ( ) ) { String designName = mIsDesign . group ( 1 ) ; handleDesign ( designName , contents ) ; numDesigns ++ ; } } System . err . printf ( "Loaded %d documents. %d design documents%n" , numDocs , numDesigns ) ; } | Load documents into the bucket | 268 | 5 |
12,004 | public static void main ( String [ ] args ) throws Exception { String input = args [ 0 ] ; File outputFile = new File ( input . replace ( ".zip" , "" ) + ".serialized.xz" ) ; // Get the base name FileOutputStream fos = new FileOutputStream ( outputFile ) ; LZMA2Options options = new LZMA2Options ( 9 ) ; XZOutputStream xzo = new XZOutputStream ( fos , options ) ; ObjectOutputStream oos = new ObjectOutputStream ( xzo ) ; BundleSerializer ml = new BundleSerializer ( ) ; ml . loadDocuments ( input ) ; oos . writeObject ( ml . toStore ) ; oos . flush ( ) ; oos . close ( ) ; } | Converts a zip file into a serialized compress resource . | 166 | 12 |
12,005 | public String processInput ( String input ) { JsonObject object ; try { object = gs . fromJson ( input , JsonObject . class ) ; } catch ( Throwable t ) { return "{ \"status\" : \"fail\", \"error\" : \"Failed to parse input\" }" ; } String command = object . get ( "command" ) . getAsString ( ) ; JsonObject payload ; if ( ! object . has ( "payload" ) ) { payload = new JsonObject ( ) ; } else { payload = object . get ( "payload" ) . getAsJsonObject ( ) ; } CommandStatus status ; try { status = dispatch ( command , payload ) ; } catch ( Throwable t ) { status = new CommandStatus ( ) ; status . fail ( t ) . setPayload ( payload ) ; } return status . toString ( ) ; } | Process the input sent from the client utilizing the mock server and return the response . | 191 | 16 |
12,006 | public static void main ( String [ ] args ) { try { VBucketInfo vbi [ ] = new VBucketInfo [ 1024 ] ; for ( int ii = 0 ; ii < vbi . length ; ++ ii ) { vbi [ ii ] = new VBucketInfo ( ) ; } MemcachedServer server = new MemcachedServer ( null , null , 11211 , vbi , false ) ; for ( VBucketInfo aVbi : vbi ) { aVbi . setOwner ( server ) ; } server . run ( ) ; } catch ( IOException e ) { Logger . getLogger ( MemcachedServer . class . getName ( ) ) . log ( Level . SEVERE , "Fatal error! failed to create socket: " , e ) ; } } | Program entry point that runs the memcached server as a standalone server just like any other memcached server ... | 173 | 23 |
12,007 | public static DesignDocument create ( String body , String name ) throws DesignParseException { DesignDocument doc = new DesignDocument ( body ) ; doc . id = "_design/" + name ; doc . load ( ) ; return doc ; } | Create a new design document | 49 | 5 |
12,008 | public List < Entry > parse ( String [ ] argv ) { optind = - 1 ; List < Entry > ret = new ArrayList < Entry > ( ) ; int idx = 0 ; while ( idx < argv . length ) { if ( argv [ idx ] . equals ( "--" ) ) { // End of options! ++ idx ; break ; } if ( argv [ idx ] . charAt ( 0 ) != ' ' ) { // End of options break ; } if ( argv [ idx ] . startsWith ( "--" ) ) { idx = parseLongOption ( argv , ret , idx ) ; } else if ( argv [ idx ] . startsWith ( "-" ) ) { idx = parseShortOption ( argv , ret , idx ) ; } else { break ; } ++ idx ; } if ( idx != argv . length ) { optind = idx ; } return ret ; } | Parse the given argument vector | 209 | 8 |
12,009 | public static Bucket create ( CouchbaseMock mock , BucketConfiguration config ) throws IOException { switch ( config . type ) { case MEMCACHED : return new MemcachedBucket ( mock , config ) ; case COUCHBASE : return new CouchbaseBucket ( mock , config ) ; default : throw new FileNotFoundException ( "I don't know about this type..." ) ; } } | Create a bucket . | 86 | 4 |
12,010 | protected Map < String , Object > getCommonConfig ( ) { Map < String , Object > mm = new HashMap < String , Object > ( ) ; mm . put ( "replicaNumber" , numReplicas ) ; Map < String , Object > ramQuota = new HashMap < String , Object > ( ) ; ramQuota . put ( "rawRAM" , 1024 * 1024 * 100 ) ; ramQuota . put ( "ram" , 1024 * 1024 * 100 ) ; mm . put ( "quota" , ramQuota ) ; return mm ; } | Returns configuration information common to both Couchbase and Memcached buckets | 121 | 13 |
12,011 | public void respawn ( int index ) { configurationRwLock . writeLock ( ) . lock ( ) ; try { if ( index >= 0 && index < servers . length ) { servers [ index ] . startup ( ) ; } rebalance ( ) ; } finally { Info . incrementConfigRevision ( ) ; configurationRwLock . writeLock ( ) . unlock ( ) ; } } | Re - Add a previously failed - over node | 81 | 9 |
12,012 | final void rebalance ( ) { // Let's start distribute the vbuckets across the servers configurationRwLock . writeLock ( ) . lock ( ) ; try { Info . incrementConfigRevision ( ) ; List < MemcachedServer > nodes = activeServers ( ) ; for ( int ii = 0 ; ii < numVBuckets ; ++ ii ) { Collections . shuffle ( nodes ) ; vbInfo [ ii ] . setOwner ( nodes . get ( 0 ) ) ; if ( nodes . size ( ) < 2 ) { continue ; } List < MemcachedServer > replicas = nodes . subList ( 1 , nodes . size ( ) ) ; if ( replicas . size ( ) > numReplicas ) { replicas = replicas . subList ( 0 , numReplicas ) ; } vbInfo [ ii ] . setReplicas ( replicas ) ; } } finally { Info . incrementConfigRevision ( ) ; configurationRwLock . writeLock ( ) . unlock ( ) ; } } | Issues a rebalance within the bucket . vBuckets which are mapped to failed - over nodes are relocated with their first replica being promoted to active . | 216 | 33 |
12,013 | public static JsonObject getJsonQuery ( URL url ) throws MalformedURLException { String query = url . getQuery ( ) ; JsonObject payload = new JsonObject ( ) ; JsonParser parser = new JsonParser ( ) ; if ( query == null ) { return null ; } for ( String kv : query . split ( "&" ) ) { String [ ] parts = kv . split ( "=" ) ; if ( parts . length != 2 ) { throw new MalformedURLException ( ) ; } String optName = parts [ 0 ] ; JsonElement optVal ; try { optVal = parser . parse ( URLDecoder . decode ( parts [ 1 ] , "UTF-8" ) ) ; } catch ( UnsupportedEncodingException e ) { throw new MalformedURLException ( ) ; } payload . add ( optName , optVal ) ; } return payload ; } | Parses a url - encoded query string and returns it as a JSON object | 199 | 10 |
12,014 | public static Map < String , String > getQueryParams ( String s ) throws MalformedURLException { Map < String , String > params = new HashMap < String , String > ( ) ; for ( String kv : s . split ( "&" ) ) { String [ ] parts = kv . split ( "=" ) ; if ( parts . length != 2 ) { throw new MalformedURLException ( ) ; } try { String k = URLDecoder . decode ( parts [ 0 ] , "UTF-8" ) ; String v = URLDecoder . decode ( parts [ 1 ] , "UTF-8" ) ; params . put ( k , v ) ; } catch ( UnsupportedEncodingException ex ) { throw new MalformedURLException ( ex . getMessage ( ) ) ; } } return params ; } | Get traditional query parameters as a Java map | 181 | 8 |
12,015 | public static void makeStringResponse ( HttpResponse response , String s ) { StringEntity entity = new StringEntity ( s , ContentType . TEXT_PLAIN ) ; entity . setContentEncoding ( "utf-8" ) ; response . setEntity ( entity ) ; } | Sets a string as the response | 58 | 7 |
12,016 | public static void makeResponse ( HttpResponse response , String msg , int status ) { response . setStatusCode ( status ) ; makeStringResponse ( response , msg ) ; } | Sets the response body and status | 37 | 7 |
12,017 | public static void make400Response ( HttpResponse response , String msg ) { makeResponse ( response , msg , HttpStatus . SC_BAD_REQUEST ) ; } | Sets a 400 bad request response with a message | 37 | 10 |
12,018 | public static void bailResponse ( HttpContext cx , HttpResponse response ) throws IOException , HttpException { HttpServerConnection conn = getConnection ( cx ) ; conn . sendResponseHeader ( response ) ; conn . sendResponseEntity ( response ) ; conn . flush ( ) ; } | Send and flush the response object over the current connection and close the connection | 61 | 14 |
12,019 | public static AuthContext getAuth ( HttpContext cx , HttpRequest req ) throws IOException { AuthContext auth = ( AuthContext ) cx . getAttribute ( HttpServer . CX_AUTH ) ; if ( auth == null ) { Header authHdr = req . getLastHeader ( HttpHeaders . AUTHORIZATION ) ; if ( authHdr == null ) { auth = new AuthContext ( ) ; } else { auth = new AuthContext ( authHdr . getValue ( ) ) ; } } return auth ; } | Get any authorization credentials supplied over the connection . If no credentials were provided in the request an empty AuthContext is returned | 115 | 24 |
12,020 | public static Reducer create ( String txt ) { Context cx = Context . enter ( ) ; try { return new Reducer ( txt , cx ) ; } finally { Context . exit ( ) ; } } | Create a new Reducer object | 44 | 6 |
12,021 | public void stopServer ( ) { shouldRun = false ; try { listener . close ( ) ; } catch ( IOException ex ) { // Don't care } while ( true ) { synchronized ( allWorkers ) { if ( allWorkers . isEmpty ( ) ) { break ; } for ( Worker w : allWorkers ) { w . stopSocket ( ) ; w . interrupt ( ) ; } } } try { listener . close ( ) ; } catch ( IOException ex ) { ex . printStackTrace ( ) ; } } | Shut down the HTTP server and all its workers and close the listener socket . | 113 | 15 |
12,022 | public static void defineDesignDocument ( CouchbaseMock mock , String designName , String contents , String bucketName ) throws IOException { URL url = getDesignURL ( mock , designName , bucketName ) ; HttpURLConnection conn = ( HttpURLConnection ) url . openConnection ( ) ; setAuthHeaders ( mock , bucketName , conn ) ; conn . setRequestMethod ( "PUT" ) ; conn . setRequestProperty ( "Content-Type" , "application/json" ) ; conn . setDoOutput ( true ) ; conn . setDoInput ( true ) ; OutputStreamWriter osw = new OutputStreamWriter ( conn . getOutputStream ( ) ) ; osw . write ( contents ) ; osw . flush ( ) ; osw . close ( ) ; try { conn . getInputStream ( ) . close ( ) ; } catch ( IOException ex ) { InputStream es = conn . getErrorStream ( ) ; if ( es != null ) { System . err . printf ( "Problem creating view: %s%n" , ReaderUtils . fromStream ( es ) ) ; } else { System . err . printf ( "Error stream is null!\n" ) ; } throw ex ; } } | Utility method to define a view | 261 | 7 |
12,023 | private static void sendHelpText ( HttpResponse response , int code ) throws IOException { HandlerUtil . makeStringResponse ( response , MockHelpCommandHandler . getIndentedHelp ( ) ) ; response . setStatusCode ( code ) ; } | Sends a help text with the provided code | 52 | 9 |
12,024 | public void write ( ByteBuffer bb , VBucketCoordinates coords ) { if ( ! enabled ) { return ; } bb . putLong ( 24 , coords . getUuid ( ) ) ; bb . putLong ( 32 , coords . getSeqno ( ) ) ; } | Write the appropriate mutation information into the output buffers . This method will do nothing if extra mutation information is not enabled . | 66 | 23 |
12,025 | public void startHarakiriMonitor ( InetSocketAddress address , boolean terminate ) throws IOException { if ( terminate ) { harakiriMonitor . setTemrinateAction ( new Callable ( ) { @ Override public Object call ( ) throws Exception { System . exit ( 1 ) ; return null ; } } ) ; } harakiriMonitor . connect ( address . getHostName ( ) , address . getPort ( ) ) ; harakiriMonitor . start ( ) ; } | Tell the harakiri monitor to connect to the given address . | 102 | 13 |
12,026 | private static BucketConfiguration createDefaultConfig ( String hostname , int numNodes , int bucketStartPort , int numVBuckets , int numReplicas ) { BucketConfiguration defaultConfig = new BucketConfiguration ( ) ; defaultConfig . type = BucketType . COUCHBASE ; defaultConfig . hostname = hostname ; defaultConfig . numNodes = numNodes ; if ( numReplicas > - 1 ) { defaultConfig . numReplicas = numReplicas ; } defaultConfig . bucketStartPort = bucketStartPort ; defaultConfig . numVBuckets = numVBuckets ; return defaultConfig ; } | Initializes the default configuration from the command line parameters . This is present in order to allow the super constructor to be the first statement | 130 | 26 |
12,027 | public int getCarrierPort ( String bucketName ) { Bucket bucket = buckets . get ( bucketName ) ; if ( null == bucket ) { // Buckets are created when the mock is started. Calling getCarrierPort() // before the mock has been started makes no sense. throw new RuntimeException ( "Bucket does not exist. Has the mock been started?" ) ; } return bucket . getCarrierPort ( ) ; } | Get the carrier port for a bucket . | 90 | 8 |
12,028 | public void createBucket ( BucketConfiguration config ) throws BucketAlreadyExistsException , IOException { if ( ! config . validate ( ) ) { throw new IllegalArgumentException ( "Invalid bucket configuration" ) ; } synchronized ( buckets ) { if ( buckets . containsKey ( config . name ) ) { throw new BucketAlreadyExistsException ( config . name ) ; } Bucket bucket = Bucket . create ( this , config ) ; BucketAdminServer adminServer = new BucketAdminServer ( bucket , httpServer , this ) ; adminServer . register ( ) ; bucket . setAdminServer ( adminServer ) ; HttpAuthVerifier verifier = new HttpAuthVerifier ( bucket , authenticator ) ; if ( config . type == BucketType . COUCHBASE ) { CAPIServer capi = new CAPIServer ( bucket , verifier ) ; capi . register ( httpServer ) ; bucket . setCAPIServer ( capi ) ; } buckets . put ( config . name , bucket ) ; bucket . start ( ) ; } } | Create a new bucket and start it . | 219 | 8 |
12,029 | public void removeBucket ( String name ) throws FileNotFoundException { Bucket bucket ; synchronized ( buckets ) { if ( ! buckets . containsKey ( name ) ) { throw new FileNotFoundException ( "No such bucket: " + name ) ; } bucket = buckets . remove ( name ) ; } CAPIServer capi = bucket . getCAPIServer ( ) ; if ( capi != null ) { capi . shutdown ( ) ; } BucketAdminServer adminServer = bucket . getAdminServer ( ) ; if ( adminServer != null ) { adminServer . shutdown ( ) ; } bucket . stop ( ) ; } | Destroy a bucket | 131 | 3 |
12,030 | private void start ( String docsFile , String monitorAddress , boolean useBeerSample ) throws IOException { try { if ( port == 0 ) { ServerSocketChannel ch = ServerSocketChannel . open ( ) ; ch . socket ( ) . bind ( new InetSocketAddress ( 0 ) ) ; port = ch . socket ( ) . getLocalPort ( ) ; if ( monitorAddress == null && debug ) { System . out . println ( "port=" + port ) ; } httpServer . bind ( ch ) ; } else { httpServer . bind ( new InetSocketAddress ( port ) ) ; } } catch ( IOException ex ) { Logger . getLogger ( CouchbaseMock . class . getName ( ) ) . log ( Level . SEVERE , null , ex ) ; System . exit ( - 1 ) ; } for ( BucketConfiguration config : initialConfigs . values ( ) ) { try { createBucket ( config ) ; } catch ( BucketAlreadyExistsException ex ) { throw new IOException ( ex ) ; } } httpServer . start ( ) ; // See if we need to load documents: if ( docsFile != null ) { DocumentLoader loader = new DocumentLoader ( this , "default" ) ; loader . loadDocuments ( docsFile ) ; } else if ( useBeerSample ) { RestAPIUtil . loadBeerSample ( this ) ; } if ( monitorAddress != null ) { startHarakiriMonitor ( monitorAddress , true ) ; } else if ( debug ) { StringBuilder wireshark = new StringBuilder ( "couchbase && (" ) ; System . out . println ( "\nConnection strings:" ) ; for ( Bucket bucket : getBuckets ( ) . values ( ) ) { System . out . println ( "couchbase://127.0.0.1:" + port + "=http/" + bucket . getName ( ) ) ; StringBuilder connstr = new StringBuilder ( "couchbase://" ) ; for ( MemcachedServer server : bucket . getServers ( ) ) { connstr . append ( server . getHostname ( ) ) . append ( ":" ) . append ( server . getPort ( ) ) . append ( "=mcd," ) ; wireshark . append ( "tcp.port == " ) . append ( server . getPort ( ) ) . append ( " || " ) ; } connstr . replace ( connstr . length ( ) - 1 , connstr . length ( ) , "" ) ; connstr . append ( "/" ) . append ( bucket . getName ( ) ) ; System . out . println ( connstr ) ; } wireshark . replace ( wireshark . 
length ( ) - 4 , wireshark . length ( ) , "" ) ; wireshark . append ( ")" ) ; System . out . println ( "\nWireshark filters:" ) ; System . out . println ( "http && tcp.port == " + port ) ; System . out . println ( wireshark ) ; } startupLatch . countDown ( ) ; } | Used for the command line this ensures that the CountDownLatch object is only set to 0 when all the command line parameters have been initialized ; so that when the monitor finally sends the port over the socket all the items will have already been initialized . | 654 | 50 |
12,031 | public void run ( ) throws Exception { // Send the initial command: client . sendRequest ( cmd ) ; long endTime = System . currentTimeMillis ( ) + spec . getMaxDuration ( ) ; // Wait until the 'after' time Thread . sleep ( spec . getAfter ( ) ) ; int numAttempts = 0 ; long now = System . currentTimeMillis ( ) ; while ( now < endTime ) { client . sendRequest ( cmd ) ; now = System . currentTimeMillis ( ) ; numAttempts ++ ; // See how to retry: long sleepTime = 0 ; if ( spec . isConstant ( ) ) { sleepTime = spec . getInterval ( ) ; } else if ( spec . isLinear ( ) ) { sleepTime = spec . getInterval ( ) * numAttempts ; } else if ( spec . isExponential ( ) ) { sleepTime = ( long ) Math . pow ( spec . getInterval ( ) , numAttempts ) ; } if ( spec . getCeil ( ) > 0 ) { sleepTime = Math . min ( spec . getCeil ( ) , sleepTime ) ; } if ( now + sleepTime > endTime ) { break ; } else { accuSleep ( sleepTime ) ; now = System . currentTimeMillis ( ) ; } } } | Runs until the retry duration is reached | 283 | 9 |
12,032 | public void step ( ) throws IOException { if ( closed ) { throw new ClosedChannelException ( ) ; } if ( input . position ( ) == header . length ) { if ( command == null ) { command = CommandFactory . create ( input ) ; } if ( command . complete ( ) ) { command . process ( ) ; protocolHandler . execute ( command , this ) ; command = null ; input . rewind ( ) ; } } } | Attempt to process a single command from the input buffer . Note this does not actually read from the socket . | 93 | 21 |
12,033 | boolean hasOutput ( ) { if ( pending == null ) { return false ; } if ( pending . isEmpty ( ) ) { return false ; } if ( ! pending . get ( 0 ) . hasRemaining ( ) ) { return false ; } return true ; } | Determines whether this connection has pending responses to be sent | 57 | 12 |
12,034 | public void returnOutputContext ( OutputContext ctx ) { List < ByteBuffer > remaining = ctx . releaseRemaining ( ) ; if ( pending == null ) { pending = remaining ; } else { List < ByteBuffer > tmp = pending ; pending = remaining ; pending . addAll ( tmp ) ; } } | Re - transfer ownership of a given output buffer to the connection | 65 | 12 |
12,035 | void setSupportedFeatures ( boolean [ ] input ) { if ( input . length != supportedFeatures . length ) { throw new IllegalArgumentException ( "Bad features length!" ) ; } // Scan through all other features and disable them unless they are supported for ( int i = 0 ; i < input . length ; i ++ ) { BinaryHelloCommand . Feature feature = BinaryHelloCommand . Feature . valueOf ( i ) ; if ( feature == null ) { supportedFeatures [ i ] = false ; continue ; } switch ( feature ) { case MUTATION_SEQNO : case XERROR : case XATTR : case SELECT_BUCKET : case TRACING : supportedFeatures [ i ] = input [ i ] ; break ; case SNAPPY : supportedFeatures [ i ] = input [ i ] && server . getCompression ( ) != CompressionMode . DISABLED ; break ; default : supportedFeatures [ i ] = false ; break ; } } // Post-processing if ( supportedFeatures [ BinaryHelloCommand . Feature . MUTATION_SEQNO . getValue ( ) ] ) { miw . setEnabled ( true ) ; } else { miw . setEnabled ( false ) ; } } | Sets the supported features from a HELLO command . | 254 | 11 |
12,036 | public ByteBuffer [ ] getIov ( ) { if ( buffers . size ( ) == 1 ) { singleArray [ 0 ] = buffers . get ( 0 ) ; return singleArray ; } return buffers . toArray ( new ByteBuffer [ buffers . size ( ) ] ) ; } | Get an array of buffers representing all the active chunks | 59 | 10 |
12,037 | public OutputContext getSlice ( int limit ) { List < ByteBuffer > newBufs = new LinkedList < ByteBuffer > ( ) ; ByteBuffer buf = ByteBuffer . allocate ( limit ) ; Iterator < ByteBuffer > iter = buffers . iterator ( ) ; while ( iter . hasNext ( ) && buf . position ( ) < buf . limit ( ) ) { ByteBuffer cur = iter . next ( ) ; int diff = buf . limit ( ) - buf . position ( ) ; if ( diff > cur . limit ( ) ) { buf . put ( cur ) ; iter . remove ( ) ; } else { ByteBuffer slice = cur . duplicate ( ) ; slice . limit ( diff ) ; buf . put ( slice ) ; } } return new OutputContext ( newBufs ) ; } | Get an OutputBuffer containing a subset of the current one | 170 | 11 |
12,038 | public void updateBytesSent ( long num ) { Iterator < ByteBuffer > iter = buffers . iterator ( ) ; while ( iter . hasNext ( ) ) { ByteBuffer cur = iter . next ( ) ; if ( cur . hasRemaining ( ) ) { break ; } iter . remove ( ) ; } } | Indicate that some data has been flushed to the network | 66 | 11 |
12,039 | public List < ByteBuffer > releaseRemaining ( ) { List < ByteBuffer > ret = buffers ; buffers = null ; return ret ; } | Truncate the output . This will empty the list of chunks | 29 | 13 |
12,040 | private MutationStatus incrCoords ( KeySpec ks ) { final StorageVBucketCoordinates curCoord ; synchronized ( vbCoords ) { curCoord = vbCoords [ ks . vbId ] ; } long seq = curCoord . incrSeqno ( ) ; long uuid = curCoord . getUuid ( ) ; VBucketCoordinates coord = new BasicVBucketCoordinates ( uuid , seq ) ; return new MutationStatus ( coord ) ; } | Increments the current coordinates for a new mutation . | 113 | 10 |
12,041 | void forceStorageMutation ( Item itm , VBucketCoordinates coords ) { forceMutation ( itm . getKeySpec ( ) . vbId , itm , coords , false ) ; } | Force a storage of an item to the cache . | 47 | 10 |
12,042 | void forceDeleteMutation ( Item itm , VBucketCoordinates coords ) { forceMutation ( itm . getKeySpec ( ) . vbId , itm , coords , true ) ; } | Forces the deletion of an item from the cache . | 47 | 11 |
12,043 | public static int convertExpiryTime ( int original ) { if ( original == 0 ) { return original ; } else if ( original > THIRTY_DAYS ) { return original + ( int ) Info . getClockOffset ( ) ; } return ( int ) ( ( new Date ( ) . getTime ( ) / 1000 ) + original + Info . getClockOffset ( ) ) ; } | Converts an expiration value to an absolute Unix timestamp . | 83 | 11 |
12,044 | public static < T > T decode ( String json , Class < T > cls ) { return GSON . fromJson ( json , cls ) ; } | Attempt to decode a JSON string as a Java object | 34 | 10 |
12,045 | @ SuppressWarnings ( "unchecked" ) public static Map < String , Object > decodeAsMap ( String json ) { return decode ( json , HashMap . class ) ; } | Decode a JSON string as Java map . The string must represent a JSON Object | 40 | 16 |
12,046 | public Map < String , Object > rowAt ( int ix ) { return ( Map < String , Object > ) rows . get ( ix ) ; } | Get the raw JSON row at a given index | 33 | 9 |
12,047 | public String executeRaw ( Iterable < Item > items , Configuration config ) throws QueryExecutionException { if ( config == null ) { config = new Configuration ( ) ; } Context cx = Context . enter ( ) ; Scriptable scope = cx . initStandardObjects ( ) ; NativeObject configObject = config . toNativeObject ( ) ; Scriptable redFunc = null ; if ( reducer != null ) { redFunc = reducer . getFunction ( ) ; } try { // long indexStart = System.currentTimeMillis(); indexer . run ( items , cx ) ; // long indexEnd = System.currentTimeMillis(); // System.err.printf("Indexing took %d ms%n", indexEnd-indexStart); Scriptable indexResults = indexer . getLastResults ( ) ; Scriptable resultObject ; try { // long filterStart = System.currentTimeMillis(); resultObject = jsRun . execute ( configObject , indexResults , redFunc , cx ) ; // long filterEnd = System.currentTimeMillis(); // System.err.printf("Filtering took %d ms%n", filterEnd-filterStart); } catch ( JavaScriptException ex ) { Object thrownObject = ex . getValue ( ) ; String jsonException ; try { jsonException = ( String ) NativeJSON . stringify ( cx , scope , thrownObject , null , null ) ; throw new QueryExecutionException ( jsonException ) ; } catch ( EcmaError ex2 ) { throw new QueryExecutionException ( ex2 . getErrorMessage ( ) ) ; } } catch ( EcmaError parseErr ) { throw new QueryExecutionException ( parseErr . getErrorMessage ( ) ) ; } NativeArray rows = ( NativeArray ) resultObject . get ( "rows" , resultObject ) ; resultObject . delete ( "rows" ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "{" ) ; for ( Object id : ( ( NativeObject ) resultObject ) . getAllIds ( ) ) { if ( ! ( id instanceof String ) ) { throw new RuntimeException ( "ARGH: " + id ) ; } sb . append ( ' ' ) . append ( id ) . append ( "\":" ) ; sb . append ( ( String ) NativeJSON . stringify ( cx , scope , resultObject . get ( ( String ) id , resultObject ) , null , null ) ) ; sb . append ( "," ) ; } sb . 
append ( "\"rows\":[\n" ) ; for ( int i = 0 ; i < rows . size ( ) ; i ++ ) { Object o = rows . get ( i , rows ) ; sb . append ( ( String ) NativeJSON . stringify ( cx , scope , o , null , null ) ) ; if ( i < rows . size ( ) - 1 ) { sb . append ( "," ) ; } sb . append ( "\n" ) ; } sb . append ( "]\n" ) ; sb . append ( "}\n" ) ; return sb . toString ( ) ; } finally { Context . exit ( ) ; } } | Executes the view query with the given parameters . | 680 | 10 |
12,048 | public synchronized ThriftClient getThriftClient ( ) { if ( mode . api != ConnectionAPI . THRIFT_SMART ) return getSimpleThriftClient ( ) ; if ( tclient == null ) tclient = getSmartThriftClient ( ) ; return tclient ; } | Thrift client connection | 59 | 4 |
12,049 | public String [ ] getEndpointInfo ( InetAddress endpoint ) { String [ ] rawEndpointInfo = getRawEndpointInfo ( endpoint ) ; if ( rawEndpointInfo == null ) throw new RuntimeException ( "Unknown host " + endpoint + " with no default configured" ) ; return rawEndpointInfo ; } | Get the raw information about an end point | 68 | 8 |
12,050 | public String getDatacenter ( InetAddress endpoint ) { String [ ] info = getEndpointInfo ( endpoint ) ; assert info != null : "No location defined for endpoint " + endpoint ; return info [ 0 ] ; } | Return the data center for which an endpoint resides in | 48 | 10 |
12,051 | public String getRack ( InetAddress endpoint ) { String [ ] info = getEndpointInfo ( endpoint ) ; assert info != null : "No location defined for endpoint " + endpoint ; return info [ 1 ] ; } | Return the rack for which an endpoint resides in | 47 | 9 |
12,052 | public void setPartitionFilter ( Expression partitionFilter ) throws IOException { UDFContext context = UDFContext . getUDFContext ( ) ; Properties property = context . getUDFProperties ( AbstractCassandraStorage . class ) ; property . setProperty ( PARTITION_FILTER_SIGNATURE , indexExpressionsToString ( filterToIndexExpressions ( partitionFilter ) ) ) ; } | set partition filter | 83 | 3 |
12,053 | public void putNext ( Tuple t ) throws IOException { /* We support two cases for output: First, the original output: (key, (name, value), (name,value), {(name,value)}) (tuples or bag is optional) For supers, we only accept the original output. */ if ( t . size ( ) < 1 ) { // simply nothing here, we can't even delete without a key logger . warn ( "Empty output skipped, filter empty tuples to suppress this warning" ) ; return ; } ByteBuffer key = objToBB ( t . get ( 0 ) ) ; if ( t . getType ( 1 ) == DataType . TUPLE ) writeColumnsFromTuple ( key , t , 1 ) ; else if ( t . getType ( 1 ) == DataType . BAG ) { if ( t . size ( ) > 2 ) throw new IOException ( "No arguments allowed after bag" ) ; writeColumnsFromBag ( key , ( DataBag ) t . get ( 1 ) ) ; } else throw new IOException ( "Second argument in output must be a tuple or bag" ) ; } | write next row | 246 | 3 |
12,054 | private void writeColumnsFromTuple ( ByteBuffer key , Tuple t , int offset ) throws IOException { ArrayList < Mutation > mutationList = new ArrayList < Mutation > ( ) ; for ( int i = offset ; i < t . size ( ) ; i ++ ) { if ( t . getType ( i ) == DataType . BAG ) writeColumnsFromBag ( key , ( DataBag ) t . get ( i ) ) ; else if ( t . getType ( i ) == DataType . TUPLE ) { Tuple inner = ( Tuple ) t . get ( i ) ; if ( inner . size ( ) > 0 ) // may be empty, for an indexed column that wasn't present mutationList . add ( mutationFromTuple ( inner ) ) ; } else if ( ! usePartitionFilter ) { throw new IOException ( "Output type was not a bag or a tuple" ) ; } } if ( mutationList . size ( ) > 0 ) writeMutations ( key , mutationList ) ; } | write tuple data to cassandra | 224 | 6 |
12,055 | private Mutation mutationFromTuple ( Tuple t ) throws IOException { Mutation mutation = new Mutation ( ) ; if ( t . get ( 1 ) == null ) { if ( allow_deletes ) { mutation . deletion = new Deletion ( ) ; mutation . deletion . predicate = new org . apache . cassandra . thrift . SlicePredicate ( ) ; mutation . deletion . predicate . column_names = Arrays . asList ( objToBB ( t . get ( 0 ) ) ) ; mutation . deletion . setTimestamp ( FBUtilities . timestampMicros ( ) ) ; } else throw new IOException ( "null found but deletes are disabled, set " + PIG_ALLOW_DELETES + "=true in environment or allow_deletes=true in URL to enable" ) ; } else { org . apache . cassandra . thrift . Column column = new org . apache . cassandra . thrift . Column ( ) ; column . setName ( objToBB ( t . get ( 0 ) ) ) ; column . setValue ( objToBB ( t . get ( 1 ) ) ) ; column . setTimestamp ( FBUtilities . timestampMicros ( ) ) ; mutation . column_or_supercolumn = new ColumnOrSuperColumn ( ) ; mutation . column_or_supercolumn . column = column ; } return mutation ; } | compose Cassandra mutation from tuple | 299 | 6 |
12,056 | private void writeColumnsFromBag ( ByteBuffer key , DataBag bag ) throws IOException { List < Mutation > mutationList = new ArrayList < Mutation > ( ) ; for ( Tuple pair : bag ) { Mutation mutation = new Mutation ( ) ; if ( DataType . findType ( pair . get ( 1 ) ) == DataType . BAG ) // supercolumn { SuperColumn sc = new SuperColumn ( ) ; sc . setName ( objToBB ( pair . get ( 0 ) ) ) ; List < org . apache . cassandra . thrift . Column > columns = new ArrayList < org . apache . cassandra . thrift . Column > ( ) ; for ( Tuple subcol : ( DataBag ) pair . get ( 1 ) ) { org . apache . cassandra . thrift . Column column = new org . apache . cassandra . thrift . Column ( ) ; column . setName ( objToBB ( subcol . get ( 0 ) ) ) ; column . setValue ( objToBB ( subcol . get ( 1 ) ) ) ; column . setTimestamp ( FBUtilities . timestampMicros ( ) ) ; columns . add ( column ) ; } if ( columns . isEmpty ( ) ) { if ( allow_deletes ) { mutation . deletion = new Deletion ( ) ; mutation . deletion . super_column = objToBB ( pair . get ( 0 ) ) ; mutation . deletion . setTimestamp ( FBUtilities . timestampMicros ( ) ) ; } else throw new IOException ( "SuperColumn deletion attempted with empty bag, but deletes are disabled, set " + PIG_ALLOW_DELETES + "=true in environment or allow_deletes=true in URL to enable" ) ; } else { sc . columns = columns ; mutation . column_or_supercolumn = new ColumnOrSuperColumn ( ) ; mutation . column_or_supercolumn . super_column = sc ; } } else mutation = mutationFromTuple ( pair ) ; mutationList . add ( mutation ) ; // for wide rows, we need to limit the amount of mutations we write at once if ( mutationList . size ( ) >= 10 ) // arbitrary, CFOF will re-batch this up, and BOF won't care { writeMutations ( key , mutationList ) ; mutationList . clear ( ) ; } } // write the last batch if ( mutationList . size ( ) > 0 ) writeMutations ( key , mutationList ) ; } | write bag data to Cassandra | 541 | 5 |
12,057 | private void writeMutations ( ByteBuffer key , List < Mutation > mutations ) throws IOException { try { writer . write ( key , mutations ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } | write mutation to Cassandra | 51 | 4 |
12,058 | private List < IndexExpression > filterToIndexExpressions ( Expression expression ) throws IOException { List < IndexExpression > indexExpressions = new ArrayList < IndexExpression > ( ) ; Expression . BinaryExpression be = ( Expression . BinaryExpression ) expression ; ByteBuffer name = ByteBuffer . wrap ( be . getLhs ( ) . toString ( ) . getBytes ( ) ) ; ByteBuffer value = ByteBuffer . wrap ( be . getRhs ( ) . toString ( ) . getBytes ( ) ) ; switch ( expression . getOpType ( ) ) { case OP_EQ : indexExpressions . add ( new IndexExpression ( name , IndexOperator . EQ , value ) ) ; break ; case OP_GE : indexExpressions . add ( new IndexExpression ( name , IndexOperator . GTE , value ) ) ; break ; case OP_GT : indexExpressions . add ( new IndexExpression ( name , IndexOperator . GT , value ) ) ; break ; case OP_LE : indexExpressions . add ( new IndexExpression ( name , IndexOperator . LTE , value ) ) ; break ; case OP_LT : indexExpressions . add ( new IndexExpression ( name , IndexOperator . LT , value ) ) ; break ; case OP_AND : indexExpressions . addAll ( filterToIndexExpressions ( be . getLhs ( ) ) ) ; indexExpressions . addAll ( filterToIndexExpressions ( be . getRhs ( ) ) ) ; break ; default : throw new IOException ( "Unsupported expression type: " + expression . getOpType ( ) . name ( ) ) ; } return indexExpressions ; } | get a list of Cassandra IndexExpression from Pig expression | 364 | 11 |
12,059 | private static String indexExpressionsToString ( List < IndexExpression > indexExpressions ) throws IOException { assert indexExpressions != null ; // oh, you thought cfdefToString was awful? IndexClause indexClause = new IndexClause ( ) ; indexClause . setExpressions ( indexExpressions ) ; indexClause . setStart_key ( "" . getBytes ( ) ) ; TSerializer serializer = new TSerializer ( new TBinaryProtocol . Factory ( ) ) ; try { return Hex . bytesToHex ( serializer . serialize ( indexClause ) ) ; } catch ( TException e ) { throw new IOException ( e ) ; } } | convert a list of index expression to string | 148 | 9 |
12,060 | private static List < IndexExpression > indexExpressionsFromString ( String ie ) throws IOException { assert ie != null ; TDeserializer deserializer = new TDeserializer ( new TBinaryProtocol . Factory ( ) ) ; IndexClause indexClause = new IndexClause ( ) ; try { deserializer . deserialize ( indexClause , Hex . hexToBytes ( ie ) ) ; } catch ( TException e ) { throw new IOException ( e ) ; } return indexClause . getExpressions ( ) ; } | convert string to a list of index expression | 118 | 9 |
12,061 | private List < IndexExpression > getIndexExpressions ( ) throws IOException { UDFContext context = UDFContext . getUDFContext ( ) ; Properties property = context . getUDFProperties ( AbstractCassandraStorage . class ) ; if ( property . getProperty ( PARTITION_FILTER_SIGNATURE ) != null ) return indexExpressionsFromString ( property . getProperty ( PARTITION_FILTER_SIGNATURE ) ) ; else return null ; } | get a list of index expression | 99 | 6 |
12,062 | protected List < ColumnDef > getColumnMetadata ( Cassandra . Client client ) throws TException , CharacterCodingException , InvalidRequestException , ConfigurationException { return getColumnMeta ( client , true , true ) ; } | get a list of column for the column family | 45 | 9 |
12,063 | private Tuple keyToTuple ( ByteBuffer key , CfDef cfDef , AbstractType comparator ) throws IOException { Tuple tuple = TupleFactory . getInstance ( ) . newTuple ( 1 ) ; addKeyToTuple ( tuple , key , cfDef , comparator ) ; return tuple ; } | convert key to a tuple | 67 | 6 |
12,064 | private void addKeyToTuple ( Tuple tuple , ByteBuffer key , CfDef cfDef , AbstractType comparator ) throws IOException { if ( comparator instanceof AbstractCompositeType ) { setTupleValue ( tuple , 0 , composeComposite ( ( AbstractCompositeType ) comparator , key ) ) ; } else { setTupleValue ( tuple , 0 , cassandraToObj ( getDefaultMarshallers ( cfDef ) . get ( MarshallerType . KEY_VALIDATOR ) , key ) ) ; } } | add key to a tuple | 117 | 5 |
12,065 | public Iterator < RangeTombstone > rangeIterator ( ) { return ranges == null ? Iterators . < RangeTombstone > emptyIterator ( ) : ranges . iterator ( ) ; } | Use sparingly not the most efficient thing | 40 | 8 |
12,066 | public BooleanConditionBuilder must ( ConditionBuilder ... conditionBuilders ) { if ( must == null ) { must = new ArrayList <> ( conditionBuilders . length ) ; } for ( ConditionBuilder conditionBuilder : conditionBuilders ) { must . add ( conditionBuilder . build ( ) ) ; } return this ; } | Returns this builder with the specified mandatory conditions . | 66 | 9 |
12,067 | public BooleanConditionBuilder should ( ConditionBuilder ... conditionBuilders ) { if ( should == null ) { should = new ArrayList <> ( conditionBuilders . length ) ; } for ( ConditionBuilder conditionBuilder : conditionBuilders ) { should . add ( conditionBuilder . build ( ) ) ; } return this ; } | Returns this builder with the specified optional conditions . | 66 | 9 |
12,068 | public BooleanConditionBuilder not ( ConditionBuilder ... conditionBuilders ) { if ( not == null ) { not = new ArrayList <> ( conditionBuilders . length ) ; } for ( ConditionBuilder conditionBuilder : conditionBuilders ) { not . add ( conditionBuilder . build ( ) ) ; } return this ; } | Returns this builder with the specified mandatory not conditions . | 66 | 10 |
12,069 | public Schema load ( KSMetaData keyspaceDef ) { for ( CFMetaData cfm : keyspaceDef . cfMetaData ( ) . values ( ) ) load ( cfm ) ; setKeyspaceDefinition ( keyspaceDef ) ; return this ; } | Load specific keyspace into Schema | 55 | 7 |
12,070 | public void storeKeyspaceInstance ( Keyspace keyspace ) { if ( keyspaceInstances . containsKey ( keyspace . getName ( ) ) ) throw new IllegalArgumentException ( String . format ( "Keyspace %s was already initialized." , keyspace . getName ( ) ) ) ; keyspaceInstances . put ( keyspace . getName ( ) , keyspace ) ; } | Store given Keyspace instance to the schema | 83 | 8 |
12,071 | public CFMetaData getCFMetaData ( String keyspaceName , String cfName ) { assert keyspaceName != null ; KSMetaData ksm = keyspaces . get ( keyspaceName ) ; return ( ksm == null ) ? null : ksm . cfMetaData ( ) . get ( cfName ) ; } | Given a keyspace name & column family name get the column family meta data . If the keyspace name or column family name is not valid this function returns null . | 68 | 33 |
12,072 | public CFMetaData getCFMetaData ( UUID cfId ) { Pair < String , String > cf = getCF ( cfId ) ; return ( cf == null ) ? null : getCFMetaData ( cf . left , cf . right ) ; } | Get ColumnFamily metadata by its identifier | 54 | 7 |
12,073 | public Map < String , CFMetaData > getKeyspaceMetaData ( String keyspaceName ) { assert keyspaceName != null ; KSMetaData ksm = keyspaces . get ( keyspaceName ) ; assert ksm != null ; return ksm . cfMetaData ( ) ; } | Get metadata about keyspace inner ColumnFamilies | 61 | 10 |
12,074 | public void purge ( CFMetaData cfm ) { cfIdMap . remove ( Pair . create ( cfm . ksName , cfm . cfName ) ) ; cfm . markPurged ( ) ; } | Used for ColumnFamily data eviction out from the schema | 46 | 10 |
12,075 | public void updateVersion ( ) { try { MessageDigest versionDigest = MessageDigest . getInstance ( "MD5" ) ; for ( Row row : SystemKeyspace . serializedSchema ( ) ) { if ( invalidSchemaRow ( row ) || ignoredSchemaRow ( row ) ) continue ; // we want to digest only live columns ColumnFamilyStore . removeDeletedColumnsOnly ( row . cf , Integer . MAX_VALUE , SecondaryIndexManager . nullUpdater ) ; row . cf . purgeTombstones ( Integer . MAX_VALUE ) ; row . cf . updateDigest ( versionDigest ) ; } version = UUID . nameUUIDFromBytes ( versionDigest . digest ( ) ) ; SystemKeyspace . updateSchemaVersion ( version ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Read schema from system keyspace and calculate MD5 digest of every row resulting digest will be converted into UUID which would act as content - based version of the schema . | 184 | 34 |
12,076 | public boolean signal ( ) { if ( ! hasWaiters ( ) ) return false ; while ( true ) { RegisteredSignal s = queue . poll ( ) ; if ( s == null || s . signal ( ) != null ) return s != null ; } } | Signal one waiting thread | 55 | 5 |
12,077 | public void signalAll ( ) { if ( ! hasWaiters ( ) ) return ; // to avoid a race where the condition is not met and the woken thread managed to wait on the queue before // we finish signalling it all, we pick a random thread we have woken-up and hold onto it, so that if we encounter // it again we know we're looping. We reselect a random thread periodically, progressively less often. // the "correct" solution to this problem is to use a queue that permits snapshot iteration, but this solution is sufficient int i = 0 , s = 5 ; Thread randomThread = null ; Iterator < RegisteredSignal > iter = queue . iterator ( ) ; while ( iter . hasNext ( ) ) { RegisteredSignal signal = iter . next ( ) ; Thread signalled = signal . signal ( ) ; if ( signalled != null ) { if ( signalled == randomThread ) break ; if ( ++ i == s ) { randomThread = signalled ; s <<= 1 ; } } iter . remove ( ) ; } } | Signal all waiting threads | 223 | 5 |
12,078 | public int getWaiting ( ) { if ( ! hasWaiters ( ) ) return 0 ; Iterator < RegisteredSignal > iter = queue . iterator ( ) ; int count = 0 ; while ( iter . hasNext ( ) ) { Signal next = iter . next ( ) ; if ( ! next . isCancelled ( ) ) count ++ ; } return count ; } | Return how many threads are waiting | 79 | 6 |
12,079 | public boolean releaseIfHolds ( T referenced ) { Ref ref = references . remove ( referenced ) ; if ( ref != null ) ref . release ( ) ; return ref != null ; } | Release the retained Ref to the provided object if held return false otherwise | 39 | 13 |
12,080 | public void release ( Collection < T > release ) { List < Ref < T >> refs = new ArrayList <> ( ) ; List < T > notPresent = null ; for ( T obj : release ) { Ref < T > ref = references . remove ( obj ) ; if ( ref == null ) { if ( notPresent == null ) notPresent = new ArrayList <> ( ) ; notPresent . add ( obj ) ; } else { refs . add ( ref ) ; } } IllegalStateException notPresentFail = null ; if ( notPresent != null ) { notPresentFail = new IllegalStateException ( "Could not release references to " + notPresent + " as references to these objects were not held" ) ; notPresentFail . fillInStackTrace ( ) ; } try { release ( refs ) ; } catch ( Throwable t ) { if ( notPresentFail != null ) t . addSuppressed ( notPresentFail ) ; } if ( notPresentFail != null ) throw notPresentFail ; } | Release a retained Ref to all of the provided objects ; if any is not held an exception will be thrown | 216 | 21 |
12,081 | public boolean tryRef ( T t ) { Ref < T > ref = t . tryRef ( ) ; if ( ref == null ) return false ; ref = references . put ( t , ref ) ; if ( ref != null ) ref . release ( ) ; // release dup return true ; } | Attempt to take a reference to the provided object ; if it has already been released null will be returned | 61 | 20 |
12,082 | public Refs < T > addAll ( Refs < T > add ) { List < Ref < T >> overlap = new ArrayList <> ( ) ; for ( Map . Entry < T , Ref < T > > e : add . references . entrySet ( ) ) { if ( this . references . containsKey ( e . getKey ( ) ) ) overlap . add ( e . getValue ( ) ) ; else this . references . put ( e . getKey ( ) , e . getValue ( ) ) ; } add . references . clear ( ) ; release ( overlap ) ; return this ; } | Merge two sets of references ensuring only one reference is retained between the two sets | 127 | 16 |
12,083 | public static < T extends RefCounted < T > > Refs < T > tryRef ( Iterable < T > reference ) { HashMap < T , Ref < T > > refs = new HashMap <> ( ) ; for ( T rc : reference ) { Ref < T > ref = rc . tryRef ( ) ; if ( ref == null ) { release ( refs . values ( ) ) ; return null ; } refs . put ( rc , ref ) ; } return new Refs < T > ( refs ) ; } | Acquire a reference to all of the provided objects or none | 115 | 12 |
12,084 | public Allocation allocate ( Mutation mutation , int size ) { CommitLogSegment segment = allocatingFrom ( ) ; Allocation alloc ; while ( null == ( alloc = segment . allocate ( mutation , size ) ) ) { // failed to allocate, so move to a new segment with enough room advanceAllocatingFrom ( segment ) ; segment = allocatingFrom ; } return alloc ; } | Reserve space in the current segment for the provided mutation or if there isn t space available create a new segment . | 80 | 23 |
12,085 | CommitLogSegment allocatingFrom ( ) { CommitLogSegment r = allocatingFrom ; if ( r == null ) { advanceAllocatingFrom ( null ) ; r = allocatingFrom ; } return r ; } | simple wrapper to ensure non - null value for allocatingFrom ; only necessary on first call | 47 | 18 |
12,086 | private void advanceAllocatingFrom ( CommitLogSegment old ) { while ( true ) { CommitLogSegment next ; synchronized ( this ) { // do this in a critical section so we can atomically remove from availableSegments and add to allocatingFrom/activeSegments // see https://issues.apache.org/jira/browse/CASSANDRA-6557?focusedCommentId=13874432&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13874432 if ( allocatingFrom != old ) return ; next = availableSegments . poll ( ) ; if ( next != null ) { allocatingFrom = next ; activeSegments . add ( next ) ; } } if ( next != null ) { if ( old != null ) { // Now we can run the user defined command just after switching to the new commit log. // (Do this here instead of in the recycle call so we can get a head start on the archive.) CommitLog . instance . archiver . maybeArchive ( old ) ; // ensure we don't continue to use the old file; not strictly necessary, but cleaner to enforce it old . discardUnusedTail ( ) ; } // request that the CL be synced out-of-band, as we've finished a segment CommitLog . instance . requestExtraSync ( ) ; return ; } // no more segments, so register to receive a signal when not empty WaitQueue . Signal signal = hasAvailableSegments . register ( CommitLog . instance . metrics . waitingOnSegmentAllocation . time ( ) ) ; // trigger the management thread; this must occur after registering // the signal to ensure we are woken by any new segment creation wakeManager ( ) ; // check if the queue has already been added to before waiting on the signal, to catch modifications // that happened prior to registering the signal; *then* check to see if we've been beaten to making the change if ( ! availableSegments . isEmpty ( ) || allocatingFrom != old ) { signal . 
cancel ( ) ; // if we've been beaten, just stop immediately if ( allocatingFrom != old ) return ; // otherwise try again, as there should be an available segment continue ; } // can only reach here if the queue hasn't been inserted into // before we registered the signal, as we only remove items from the queue // after updating allocatingFrom. Can safely block until we are signalled // by the allocator that new segments have been published signal . awaitUninterruptibly ( ) ; } } | Fetches a new segment from the queue creating a new one if necessary and activates it | 546 | 18 |
12,087 | void forceRecycleAll ( Iterable < UUID > droppedCfs ) { List < CommitLogSegment > segmentsToRecycle = new ArrayList <> ( activeSegments ) ; CommitLogSegment last = segmentsToRecycle . get ( segmentsToRecycle . size ( ) - 1 ) ; advanceAllocatingFrom ( last ) ; // wait for the commit log modifications last . waitForModifications ( ) ; // make sure the writes have materialized inside of the memtables by waiting for all outstanding writes // on the relevant keyspaces to complete Set < Keyspace > keyspaces = new HashSet <> ( ) ; for ( UUID cfId : last . getDirtyCFIDs ( ) ) { ColumnFamilyStore cfs = Schema . instance . getColumnFamilyStoreInstance ( cfId ) ; if ( cfs != null ) keyspaces . add ( cfs . keyspace ) ; } for ( Keyspace keyspace : keyspaces ) keyspace . writeOrder . awaitNewBarrier ( ) ; // flush and wait for all CFs that are dirty in segments up-to and including 'last' Future < ? > future = flushDataFrom ( segmentsToRecycle , true ) ; try { future . get ( ) ; for ( CommitLogSegment segment : activeSegments ) for ( UUID cfId : droppedCfs ) segment . markClean ( cfId , segment . getContext ( ) ) ; // now recycle segments that are unused, as we may not have triggered a discardCompletedSegments() // if the previous active segment was the only one to recycle (since an active segment isn't // necessarily dirty, and we only call dCS after a flush). for ( CommitLogSegment segment : activeSegments ) if ( segment . isUnused ( ) ) recycleSegment ( segment ) ; CommitLogSegment first ; if ( ( first = activeSegments . peek ( ) ) != null && first . id <= last . id ) logger . error ( "Failed to force-recycle all segments; at least one segment is still in use with dirty CFs." ) ; } catch ( Throwable t ) { // for now just log the error and return false, indicating that we failed logger . error ( "Failed waiting for a forced recycle of in-use commit log segments" , t ) ; } } | Switch to a new segment regardless of how much is left in the current one . | 492 | 16 |
12,088 | void recycleSegment ( final CommitLogSegment segment ) { boolean archiveSuccess = CommitLog . instance . archiver . maybeWaitForArchiving ( segment . getName ( ) ) ; activeSegments . remove ( segment ) ; if ( ! archiveSuccess ) { // if archiving (command) was not successful then leave the file alone. don't delete or recycle. discardSegment ( segment , false ) ; return ; } if ( isCapExceeded ( ) ) { discardSegment ( segment , true ) ; return ; } logger . debug ( "Recycling {}" , segment ) ; segmentManagementTasks . add ( new Callable < CommitLogSegment > ( ) { public CommitLogSegment call ( ) { return segment . recycle ( ) ; } } ) ; } | Indicates that a segment is no longer in use and that it should be recycled . | 166 | 17 |
12,089 | void recycleSegment ( final File file ) { if ( isCapExceeded ( ) || CommitLogDescriptor . fromFileName ( file . getName ( ) ) . getMessagingVersion ( ) != MessagingService . current_version ) { // (don't decrease managed size, since this was never a "live" segment) logger . debug ( "(Unopened) segment {} is no longer needed and will be deleted now" , file ) ; FileUtils . deleteWithConfirm ( file ) ; return ; } logger . debug ( "Recycling {}" , file ) ; // this wasn't previously a live segment, so add it to the managed size when we make it live size . addAndGet ( DatabaseDescriptor . getCommitLogSegmentSize ( ) ) ; segmentManagementTasks . add ( new Callable < CommitLogSegment > ( ) { public CommitLogSegment call ( ) { return new CommitLogSegment ( file . getPath ( ) ) ; } } ) ; } | Differs from the above because it can work on any file instead of just existing commit log segments managed by this manager . | 216 | 24 |
12,090 | private void discardSegment ( final CommitLogSegment segment , final boolean deleteFile ) { logger . debug ( "Segment {} is no longer active and will be deleted {}" , segment , deleteFile ? "now" : "by the archive script" ) ; size . addAndGet ( - DatabaseDescriptor . getCommitLogSegmentSize ( ) ) ; segmentManagementTasks . add ( new Callable < CommitLogSegment > ( ) { public CommitLogSegment call ( ) { segment . close ( ) ; if ( deleteFile ) segment . delete ( ) ; return null ; } } ) ; } | Indicates that a segment file should be deleted . | 131 | 10 |
12,091 | public void resetUnsafe ( ) { logger . debug ( "Closing and clearing existing commit log segments..." ) ; while ( ! segmentManagementTasks . isEmpty ( ) ) Thread . yield ( ) ; for ( CommitLogSegment segment : activeSegments ) segment . close ( ) ; activeSegments . clear ( ) ; for ( CommitLogSegment segment : availableSegments ) segment . close ( ) ; availableSegments . clear ( ) ; allocatingFrom = null ; } | Resets all the segments for testing purposes . DO NOT USE THIS OUTSIDE OF TESTS . | 102 | 21 |
12,092 | public static BloomSpecification computeBloomSpec ( int bucketsPerElement ) { assert bucketsPerElement >= 1 ; assert bucketsPerElement <= probs . length - 1 ; return new BloomSpecification ( optKPerBuckets [ bucketsPerElement ] , bucketsPerElement ) ; } | Given the number of buckets that can be used per element return a specification that minimizes the false positive rate . | 58 | 22 |
12,093 | public static int maxBucketsPerElement ( long numElements ) { numElements = Math . max ( 1 , numElements ) ; double v = ( Long . MAX_VALUE - EXCESS ) / ( double ) numElements ; if ( v < 1.0 ) { throw new UnsupportedOperationException ( "Cannot compute probabilities for " + numElements + " elements." ) ; } return Math . min ( BloomCalculations . probs . length - 1 , ( int ) v ) ; } | Calculates the maximum number of buckets per element that this implementation can support . Crucially it will lower the bucket count if necessary to meet BitSet s size restrictions . | 111 | 34 |
12,094 | public Boolean getBoolean ( String key , Boolean defaultValue ) throws SyntaxException { String value = getSimple ( key ) ; return ( value == null ) ? defaultValue : value . toLowerCase ( ) . matches ( "(1|true|yes)" ) ; } | Return a property value typed as a Boolean | 57 | 8 |
12,095 | public Double getDouble ( String key , Double defaultValue ) throws SyntaxException { String value = getSimple ( key ) ; if ( value == null ) { return defaultValue ; } else { try { return Double . valueOf ( value ) ; } catch ( NumberFormatException e ) { throw new SyntaxException ( String . format ( "Invalid double value %s for '%s'" , value , key ) ) ; } } } | Return a property value typed as a Double | 91 | 8 |
12,096 | public Integer getInt ( String key , Integer defaultValue ) throws SyntaxException { String value = getSimple ( key ) ; return toInt ( key , value , defaultValue ) ; } | Return a property value typed as an Integer | 39 | 8 |
12,097 | public static ReplayPosition getReplayPosition ( Iterable < ? extends SSTableReader > sstables ) { if ( Iterables . isEmpty ( sstables ) ) return NONE ; Function < SSTableReader , ReplayPosition > f = new Function < SSTableReader , ReplayPosition > ( ) { public ReplayPosition apply ( SSTableReader sstable ) { return sstable . getReplayPosition ( ) ; } } ; Ordering < ReplayPosition > ordering = Ordering . from ( ReplayPosition . comparator ) ; return ordering . max ( Iterables . transform ( sstables , f ) ) ; } | Convenience method to compute the replay position for a group of SSTables . | 133 | 17 |
12,098 | public void setDiscarding ( ) { state = state . transition ( LifeCycle . DISCARDING ) ; // mark the memory owned by this allocator as reclaiming onHeap . markAllReclaiming ( ) ; offHeap . markAllReclaiming ( ) ; } | Mark this allocator reclaiming ; this will permit any outstanding allocations to temporarily overshoot the maximum memory limit so that flushing can begin immediately | 61 | 28 |
12,099 | private < T extends Number > Gauge < Long > createKeyspaceGauge ( String name , final MetricValue extractor ) { allMetrics . add ( name ) ; return Metrics . newGauge ( factory . createMetricName ( name ) , new Gauge < Long > ( ) { public Long value ( ) { long sum = 0 ; for ( ColumnFamilyStore cf : keyspace . getColumnFamilyStores ( ) ) { sum += extractor . getValue ( cf . metric ) ; } return sum ; } } ) ; } | Creates a gauge that will sum the current value of a metric for all column families in this keyspace | 118 | 21 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.