idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
11,600
public boolean hasCategory ( final String conceptURI ) { return Iterables . any ( getCategories ( ) , new Predicate < TopicAnnotation > ( ) { @ Override public boolean apply ( TopicAnnotation ta ) { return ta . getTopicReference ( ) . equals ( conceptURI ) ; } } ) ; }
Returns true if the contents has been categorized with a category identified by the URI passed by parameter
67
18
11,601
public static SQLiteDatabase create ( CursorFactory factory ) { // This is a magic string with special meaning for SQLite. return openDatabase ( com . couchbase . lite . internal . database . sqlite . SQLiteDatabaseConfiguration . MEMORY_DB_PATH , factory , CREATE_IF_NECESSARY ) ; }
Create a memory backed SQLite database . Its contents will be destroyed when the database is closed .
71
19
11,602
public long replace ( String table , String nullColumnHack , ContentValues initialValues ) { try { return insertWithOnConflict ( table , nullColumnHack , initialValues , CONFLICT_REPLACE ) ; } catch ( SQLException e ) { DLog . e ( TAG , "Error inserting " + initialValues , e ) ; return - 1 ; } }
Convenience method for replacing a row in the database .
79
12
11,603
private static byte [ ] decodeBase64Digest ( String base64Digest ) { String expectedPrefix = "sha1-" ; if ( ! base64Digest . startsWith ( expectedPrefix ) ) { throw new IllegalArgumentException ( base64Digest + " did not start with " + expectedPrefix ) ; } base64Digest = base64Digest . replaceFirst ( expectedPrefix , "" ) ; byte [ ] bytes = new byte [ 0 ] ; try { bytes = Base64 . decode ( base64Digest ) ; } catch ( IOException e ) { throw new IllegalArgumentException ( e ) ; } return bytes ; }
Decode a base64-encoded digest into a byte array that is suitable for use as a blob key .
139
22
11,604
protected void fireTrigger ( final ReplicationTrigger trigger ) { Log . d ( Log . TAG_SYNC , "%s [fireTrigger()] => " + trigger , this ) ; // All state machine triggers need to happen on the replicator thread synchronized ( executor ) { if ( ! executor . isShutdown ( ) ) { executor . submit ( new Runnable ( ) { @ Override public void run ( ) { try { Log . d ( Log . TAG_SYNC , "firing trigger: %s" , trigger ) ; stateMachine . fire ( trigger ) ; } catch ( Exception e ) { Log . i ( Log . TAG_SYNC , "Error in StateMachine.fire(trigger): %s" , e . getMessage ( ) ) ; throw new RuntimeException ( e ) ; } } } ) ; } } }
Fire a trigger to the state machine
180
7
11,605
protected void start ( ) { try { if ( ! db . isOpen ( ) ) { String msg = String . format ( Locale . ENGLISH , "Db: %s is not open, abort replication" , db ) ; parentReplication . setLastError ( new Exception ( msg ) ) ; fireTrigger ( ReplicationTrigger . STOP_IMMEDIATE ) ; return ; } db . addActiveReplication ( parentReplication ) ; this . authenticating = false ; initSessionId ( ) ; // initialize batcher initBatcher ( ) ; // initialize authorizer / authenticator initAuthorizer ( ) ; // initialize request workers initializeRequestWorkers ( ) ; // reset lastSequence // https://github.com/couchbase/couchbase-lite-java-core/issues/1623 this . lastSequence = null ; // single-shot replication if ( ! isContinuous ( ) ) goOnline ( ) ; // continuous mode else { if ( isNetworkReachable ( ) ) goOnline ( ) ; else triggerGoOffline ( ) ; startNetworkReachabilityManager ( ) ; } } catch ( Exception e ) { Log . e ( Log . TAG_SYNC , "%s: Exception in start()" , e , this ) ; } }
Start the replication process .
268
5
11,606
protected void close ( ) { this . authenticating = false ; // cancel pending futures for ( Future future : pendingFutures ) { future . cancel ( false ) ; CancellableRunnable runnable = cancellables . get ( future ) ; if ( runnable != null ) { runnable . cancel ( ) ; cancellables . remove ( future ) ; } } // shutdown ScheduledExecutorService. Without shutdown, cause thread leak if ( remoteRequestExecutor != null && ! remoteRequestExecutor . isShutdown ( ) ) { // Note: Time to wait is set 60 sec because RemoteRequest's socket timeout is set 60 seconds. Utils . shutdownAndAwaitTermination ( remoteRequestExecutor , Replication . DEFAULT_MAX_TIMEOUT_FOR_SHUTDOWN , Replication . DEFAULT_MAX_TIMEOUT_FOR_SHUTDOWN ) ; } // Close and remove all idle connections in the pool, clientFactory . evictAllConnectionsInPool ( ) ; }
Close all resources associated with this replicator .
212
9
11,607
@ InterfaceAudience . Private protected void checkSession ( ) { if ( getAuthenticator ( ) != null ) { Authorizer auth = ( Authorizer ) getAuthenticator ( ) ; auth . setRemoteURL ( remote ) ; auth . setLocalUUID ( db . publicUUID ( ) ) ; } if ( getAuthenticator ( ) != null && getAuthenticator ( ) instanceof SessionCookieAuthorizer ) { // Sync Gateway session API is at /db/_session; try that first checkSessionAtPath ( "_session" ) ; } else { login ( ) ; } }
Before doing anything else determine whether we have an active login session .
122
13
11,608
@ InterfaceAudience . Private private void refreshRemoteCheckpointDoc ( ) { Log . i ( Log . TAG_SYNC , "%s: Refreshing remote checkpoint to get its _rev..." , this ) ; Future future = sendAsyncRequest ( "GET" , "_local/" + remoteCheckpointDocID ( ) , null , new RemoteRequestCompletion ( ) { @ Override public void onCompletion ( RemoteRequest remoteRequest , Response httpResponse , Object result , Throwable e ) { if ( db == null ) { Log . w ( Log . TAG_SYNC , "%s: db == null while refreshing remote checkpoint. aborting" , this ) ; return ; } if ( e != null && Utils . getStatusFromError ( e ) != Status . NOT_FOUND ) { Log . e ( Log . TAG_SYNC , "%s: Error refreshing remote checkpoint" , e , this ) ; } else { Log . d ( Log . TAG_SYNC , "%s: Refreshed remote checkpoint: %s" , this , result ) ; remoteCheckpoint = ( Map < String , Object > ) result ; lastSequenceChanged = true ; saveLastSequence ( ) ; // try saving again } } } ) ; pendingFutures . add ( future ) ; }
Variant of fetchRemoteCheckpointDoc that is used while replication is running to reload the checkpoint to get its current revision number if there was an error saving it .
273
34
11,609
protected void stop ( ) { this . authenticating = false ; // clear batcher batcher . clear ( ) ; // set non-continuous setLifecycle ( Replication . Lifecycle . ONESHOT ) ; // cancel if middle of retry cancelRetryFuture ( ) ; // cancel all pending future tasks. while ( ! pendingFutures . isEmpty ( ) ) { Future future = pendingFutures . poll ( ) ; if ( future != null && ! future . isCancelled ( ) && ! future . isDone ( ) ) { future . cancel ( true ) ; CancellableRunnable runnable = cancellables . get ( future ) ; if ( runnable != null ) { runnable . cancel ( ) ; cancellables . remove ( future ) ; } } } }
Actual work of stopping the replication process .
172
9
11,610
private void notifyChangeListeners ( final Replication . ChangeEvent changeEvent ) { if ( changeListenerNotifyStyle == ChangeListenerNotifyStyle . SYNC ) { for ( ChangeListener changeListener : changeListeners ) { try { changeListener . changed ( changeEvent ) ; } catch ( Exception e ) { Log . e ( Log . TAG_SYNC , "Unknown Error in changeListener.changed(changeEvent)" , e ) ; } } } else { // ASYNC synchronized ( executor ) { if ( ! executor . isShutdown ( ) ) { executor . submit ( new Runnable ( ) { @ Override public void run ( ) { try { for ( ChangeListener changeListener : changeListeners ) changeListener . changed ( changeEvent ) ; } catch ( Exception e ) { Log . e ( Log . TAG_SYNC , "Exception notifying replication listener: %s" , e , this ) ; throw new RuntimeException ( e ) ; } } } ) ; } } } }
Notify all change listeners of a ChangeEvent
214
9
11,611
private void scheduleRetryFuture ( ) { Log . v ( Log . TAG_SYNC , "%s: Failed to xfer; will retry in %d sec" , this , RETRY_DELAY_SECONDS ) ; synchronized ( executor ) { if ( ! executor . isShutdown ( ) ) { this . retryFuture = executor . schedule ( new Runnable ( ) { public void run ( ) { retryIfReady ( ) ; } } , RETRY_DELAY_SECONDS , TimeUnit . SECONDS ) ; } } }
helper function to schedule retry future . not in iOS code .
126
14
11,612
private void cancelRetryFuture ( ) { if ( retryFuture != null && ! retryFuture . isDone ( ) ) { retryFuture . cancel ( true ) ; } retryFuture = null ; }
helper function to cancel retry future . not in iOS code .
45
14
11,613
protected void retryReplicationIfError ( ) { Log . d ( TAG , "retryReplicationIfError() state=" + stateMachine . getState ( ) + ", error=" + this . error + ", isContinuous()=" + isContinuous ( ) + ", isTransientError()=" + Utils . isTransientError ( this . error ) ) ; // Make sure if state is IDLE, this method should be called when state becomes IDLE if ( ! stateMachine . getState ( ) . equals ( ReplicationState . IDLE ) ) return ; if ( this . error != null ) { // IDLE_ERROR if ( isContinuous ( ) ) { // 12/16/2014 - only retry if error is transient error 50x http error // It may need to retry for any kind of errors if ( Utils . isTransientError ( this . error ) ) { onBeforeScheduleRetry ( ) ; cancelRetryFuture ( ) ; scheduleRetryFuture ( ) ; } } } }
Retry replication if previous attempt ends with error
218
9
11,614
void dumpUnsafe ( Printer printer , boolean verbose ) { printer . println ( "Connection #" + mConnectionId + ":" ) ; if ( verbose ) { printer . println ( " connectionPtr: 0x" + Long . toHexString ( mConnectionPtr ) ) ; } printer . println ( " isPrimaryConnection: " + mIsPrimaryConnection ) ; printer . println ( " onlyAllowReadOnlyOperations: " + mOnlyAllowReadOnlyOperations ) ; mRecentOperations . dump ( printer , verbose ) ; }
Dumps debugging information about this connection in the case where the caller might not actually own the connection .
117
20
11,615
void collectDbStatsUnsafe ( ArrayList < com . couchbase . lite . internal . database . sqlite . SQLiteDebug . DbStats > dbStatsList ) { dbStatsList . add ( getMainDbStatsUnsafe ( 0 , 0 , 0 ) ) ; }
Collects statistics about database connection memory usage in the case where the caller might not actually own the connection .
60
21
11,616
protected void execute ( ) { Log . v ( Log . TAG_SYNC , "%s: RemoteRequest execute() called, url: %s" , this , url ) ; executeRequest ( factory . getOkHttpClient ( ) , request ( ) ) ; Log . v ( Log . TAG_SYNC , "%s: RemoteRequest execute() finished, url: %s" , this , url ) ; }
Execute remote request
86
4
11,617
protected RequestBody setCompressedBody ( byte [ ] bodyBytes ) { if ( bodyBytes . length < MIN_JSON_LENGTH_TO_COMPRESS ) return null ; byte [ ] encodedBytes = Utils . compressByGzip ( bodyBytes ) ; if ( encodedBytes == null || encodedBytes . length >= bodyBytes . length ) return null ; return RequestBody . create ( JSON , encodedBytes ) ; }
Generate gzipped body
88
6
11,618
int indexOf ( byte [ ] data , int dataLength , byte [ ] pattern , int dataOffset ) { int [ ] failure = computeFailure ( pattern ) ; int j = 0 ; if ( data . length == 0 ) return - 1 ; //final int dataLength = data.length; final int patternLength = pattern . length ; for ( int i = dataOffset ; i < dataLength ; i ++ ) { while ( j > 0 && pattern [ j ] != data [ i ] ) j = failure [ j - 1 ] ; if ( pattern [ j ] == data [ i ] ) j ++ ; if ( j == patternLength ) return i - patternLength + 1 ; } return - 1 ; }
Finds the first occurrence of the pattern in the text .
148
12
11,619
private static int [ ] computeFailure ( byte [ ] pattern ) { int [ ] failure = new int [ pattern . length ] ; int j = 0 ; for ( int i = 1 ; i < pattern . length ; i ++ ) { while ( j > 0 && pattern [ j ] != pattern [ i ] ) j = failure [ j - 1 ] ; if ( pattern [ j ] == pattern [ i ] ) j ++ ; failure [ i ] = j ; } return failure ; }
Computes the failure function using a boot - strapping process where the pattern is matched against itself .
101
20
11,620
@ InterfaceAudience . Public public List < String > getAllDatabaseNames ( ) { String [ ] databaseFiles = directoryFile . list ( new FilenameFilter ( ) { @ Override public boolean accept ( File dir , String filename ) { if ( filename . endsWith ( Manager . kDBExtension ) ) { return true ; } return false ; } } ) ; List < String > result = new ArrayList < String > ( ) ; for ( String databaseFile : databaseFiles ) { String trimmed = databaseFile . substring ( 0 , databaseFile . length ( ) - Manager . kDBExtension . length ( ) ) ; String replaced = trimmed . replace ( ' ' , ' ' ) ; result . add ( replaced ) ; } Collections . sort ( result ) ; return Collections . unmodifiableList ( result ) ; }
An array of the names of all existing databases .
173
10
11,621
@ InterfaceAudience . Public public void close ( ) { synchronized ( lockDatabases ) { Log . d ( Database . TAG , "Closing " + this ) ; // Close all database: // Snapshot of the current open database to avoid concurrent modification as // the database will be forgotten (removed from the databases map) when it is closed: Database [ ] openDbs = databases . values ( ) . toArray ( new Database [ databases . size ( ) ] ) ; for ( Database database : openDbs ) database . close ( ) ; databases . clear ( ) ; // Stop reachability: context . getNetworkReachabilityManager ( ) . stopListening ( ) ; // Shutdown ScheduledExecutorService: if ( workExecutor != null && ! workExecutor . isShutdown ( ) ) Utils . shutdownAndAwaitTermination ( workExecutor ) ; Log . d ( Database . TAG , "Closed " + this ) ; } }
Releases all resources used by the Manager instance and closes all its databases .
202
15
11,622
@ InterfaceAudience . Public public boolean replaceDatabase ( String databaseName , String databaseDir ) { Database db = getDatabase ( databaseName , false ) ; if ( db == null ) return false ; File dir = new File ( databaseDir ) ; if ( ! dir . exists ( ) ) { Log . w ( Database . TAG , "Database file doesn't exist at path : %s" , databaseDir ) ; return false ; } if ( ! dir . isDirectory ( ) ) { Log . w ( Database . TAG , "Database file is not a directory. " + "Use -replaceDatabaseNamed:withDatabaseFilewithAttachments:error: instead." ) ; return false ; } File destDir = new File ( db . getPath ( ) ) ; File srcDir = new File ( databaseDir ) ; if ( destDir . exists ( ) ) { if ( ! FileDirUtils . deleteRecursive ( destDir ) ) { Log . w ( Database . TAG , "Failed to delete file/directly: " + destDir ) ; return false ; } } try { FileDirUtils . copyFolder ( srcDir , destDir ) ; } catch ( IOException e ) { Log . w ( Database . TAG , "Failed to copy directly from " + srcDir + " to " + destDir , e ) ; return false ; } try { db . open ( ) ; } catch ( CouchbaseLiteException e ) { Log . w ( Database . TAG , "Failed to open database" , e ) ; return false ; } /* TODO: Currently Java implementation is different from iOS, needs to catch up. if(!db.saveLocalUUIDInLocalCheckpointDocument()){ Log.w(Database.TAG, "Failed to replace UUIDs"); return false; } */ if ( ! db . replaceUUIDs ( ) ) { Log . w ( Database . TAG , "Failed to replace UUIDs" ) ; db . close ( ) ; return false ; } // close so app can (re)open db with its preferred options: db . close ( ) ; return true ; }
Replaces or installs a database from a file .
449
10
11,623
@ InterfaceAudience . Private public Future runAsync ( String databaseName , final AsyncTask function ) throws CouchbaseLiteException { final Database database = getDatabase ( databaseName ) ; return runAsync ( new Runnable ( ) { @ Override public void run ( ) { function . run ( database ) ; } } ) ; }
Asynchronously dispatches a callback to run on a background thread . The callback will be passed Database instance . There is not currently a known reason to use it it may not make sense on the Android API but it was added for the purpose of having a consistent API with iOS .
71
56
11,624
@ Override public void startedPart ( Map headers ) { if ( _docReader != null ) throw new IllegalStateException ( "_docReader is already defined" ) ; Log . v ( TAG , "%s: Starting new document; headers =%s" , this , headers ) ; _docReader = new MultipartDocumentReader ( db ) ; _docReader . setHeaders ( headers ) ; _docReader . startedPart ( headers ) ; }
This method is called when a part's headers have been parsed before its data is parsed .
95
18
11,625
@ Override public void finishedPart ( ) { if ( _docReader == null ) throw new IllegalStateException ( "_docReader is not defined" ) ; _docReader . finish ( ) ; _onDocument . onDocument ( _docReader . getDocumentProperties ( ) , _docReader . getDocumentSize ( ) ) ; _docReader = null ; Log . v ( TAG , "%s: Finished document" , this ) ; }
This method is called when a part is complete .
93
10
11,626
@ InterfaceAudience . Public public Document getDocument ( ) { if ( getDocumentId ( ) == null ) { return null ; } assert ( database != null ) ; Document document = database . getDocument ( getDocumentId ( ) ) ; document . loadCurrentRevisionFrom ( this ) ; return document ; }
The document this row was mapped from . This will be nil if a grouping was enabled in the query because then the result rows don't correspond to individual documents .
65
32
11,627
@ InterfaceAudience . Public public String getDocumentId ( ) { // Get the doc id from either the embedded document contents, or the '_id' value key. // Failing that, there's no document linking, so use the regular old _sourceDocID String docID = null ; if ( documentRevision != null ) docID = documentRevision . getDocID ( ) ; if ( docID == null ) { if ( value != null ) { if ( value instanceof Map ) { Map < String , Object > props = ( Map < String , Object > ) value ; docID = ( String ) props . get ( "_id" ) ; } } } if ( docID == null ) docID = sourceDocID ; return docID ; }
The ID of the document described by this view row . This is not necessarily the same as the document that caused this row to be emitted ; see the discussion of the . sourceDocumentID property for details .
160
41
11,628
@ InterfaceAudience . Public public String getDocumentRevisionId ( ) { // Get the revision id from either the embedded document contents, // or the '_rev' or 'rev' value key: String rev = null ; if ( documentRevision != null ) rev = documentRevision . getRevID ( ) ; if ( rev == null ) { if ( value instanceof Map ) { Map < String , Object > mapValue = ( Map < String , Object > ) value ; rev = ( String ) mapValue . get ( "_rev" ) ; if ( rev == null ) { rev = ( String ) mapValue . get ( "rev" ) ; } } } return rev ; }
The revision ID of the document this row was mapped from .
145
12
11,629
public void add ( final AtomicAction action ) { if ( action instanceof Action ) { Action a = ( Action ) action ; peforms . addAll ( a . peforms ) ; backouts . addAll ( a . backouts ) ; cleanUps . addAll ( a . cleanUps ) ; } else { add ( new ActionBlock ( ) { @ Override public void execute ( ) throws ActionException { action . perform ( ) ; } } , new ActionBlock ( ) { @ Override public void execute ( ) throws ActionException { action . backout ( ) ; } } , new ActionBlock ( ) { @ Override public void execute ( ) throws ActionException { action . cleanup ( ) ; } } ) ; } }
Adds an action as a step of this one .
154
11
11,630
public void add ( ActionBlock perform , ActionBlock backout , ActionBlock cleanup ) { peforms . add ( perform != null ? perform : nullAction ) ; backouts . add ( backout != null ? backout : nullAction ) ; cleanUps . add ( cleanup != null ? cleanup : nullAction ) ; }
Adds an action as a step of this one . The action has three components each optional .
67
18
11,631
public void run ( ) throws ActionException { try { perform ( ) ; try { cleanup ( ) ; // Ignore exception } catch ( ActionException e ) { } lastError = null ; } catch ( ActionException e ) { // (perform: has already backed out whatever it did) lastError = e ; throw e ; } }
Performs all the actions in order . If any action fails backs out the previously performed actions in reverse order . If the actions succeeded cleans them up in reverse order . The lastError property is set to the exception thrown by the failed perform block . The failedStep property is set to the index of the failed perform block .
69
64
11,632
private void doAction ( List < ActionBlock > actions ) throws ActionException { try { actions . get ( nextStep ) . execute ( ) ; } catch ( ActionException e ) { throw e ; } catch ( Exception e ) { throw new ActionException ( "Exception raised by step: " + nextStep , e ) ; } }
Subroutine that calls an action block from either performs backOuts or cleanUps .
69
19
11,633
@ Override public boolean setVersion ( String version ) { // Update the version column in the database. This is a little weird looking because we want // to avoid modifying the database if the version didn't change, and because the row might // not exist yet. SQLiteStorageEngine storage = store . getStorageEngine ( ) ; boolean hasView ; Cursor cursor = null ; try { String sql = "SELECT name, version FROM views WHERE name=?" ; String [ ] args = { name } ; cursor = storage . rawQuery ( sql , args ) ; hasView = cursor . moveToNext ( ) ; } catch ( SQLException e ) { Log . e ( Log . TAG_VIEW , "Error querying existing view name " + name , e ) ; return false ; } finally { if ( cursor != null ) cursor . close ( ) ; } if ( ! hasView ) { // no such record, so insert ContentValues insertValues = new ContentValues ( ) ; insertValues . put ( "name" , name ) ; insertValues . put ( "version" , version ) ; insertValues . put ( "total_docs" , 0 ) ; storage . insert ( "views" , null , insertValues ) ; createIndex ( ) ; return true ; // created new view } ContentValues updateValues = new ContentValues ( ) ; updateValues . put ( "version" , version ) ; updateValues . put ( "lastSequence" , 0 ) ; updateValues . put ( "total_docs" , 0 ) ; String [ ] whereArgs = { name , version } ; int rowsAffected = storage . update ( "views" , updateValues , "name=? AND version!=?" , whereArgs ) ; return ( rowsAffected > 0 ) ; }
Updates the version of the view . A change in version means the delegate's map block has changed its semantics so the _index should be deleted .
374
30
11,634
@ Override public long getLastSequenceIndexed ( ) { String sql = "SELECT lastSequence FROM views WHERE name=?" ; String [ ] args = { name } ; Cursor cursor = null ; long result = - 1 ; try { cursor = store . getStorageEngine ( ) . rawQuery ( sql , args ) ; if ( cursor . moveToNext ( ) ) { result = cursor . getLong ( 0 ) ; } } catch ( Exception e ) { Log . e ( Log . TAG_VIEW , "Error getting last sequence indexed" , e ) ; } finally { if ( cursor != null ) { cursor . close ( ) ; } } return result ; }
The last sequence number that has been indexed .
142
9
11,635
private static boolean groupTogether ( Object key1 , Object key2 , int groupLevel ) { if ( groupLevel == 0 || ! ( key1 instanceof List ) || ! ( key2 instanceof List ) ) { return key1 . equals ( key2 ) ; } @ SuppressWarnings ( "unchecked" ) List < Object > key1List = ( List < Object > ) key1 ; @ SuppressWarnings ( "unchecked" ) List < Object > key2List = ( List < Object > ) key2 ; // if either key list is smaller than groupLevel and the key lists are different // sizes, they cannot be equal. if ( ( key1List . size ( ) < groupLevel || key2List . size ( ) < groupLevel ) && key1List . size ( ) != key2List . size ( ) ) { return false ; } int end = Math . min ( groupLevel , Math . min ( key1List . size ( ) , key2List . size ( ) ) ) ; for ( int i = 0 ; i < end ; ++ i ) { if ( key1List . get ( i ) != null && ! key1List . get ( i ) . equals ( key2List . get ( i ) ) ) return false ; else if ( key1List . get ( i ) == null && key2List . get ( i ) != null ) return false ; } return true ; }
Are key1 and key2 grouped together at this groupLevel?
303
13
11,636
public static Object groupKey ( Object key , int groupLevel ) { if ( groupLevel > 0 && ( key instanceof List ) && ( ( ( List < Object > ) key ) . size ( ) > groupLevel ) ) { return ( ( List < Object > ) key ) . subList ( 0 , groupLevel ) ; } else { return key ; } }
Returns the prefix of the key to use in the result row at this groupLevel
76
16
11,637
@ InterfaceAudience . Public public int getTotalRows ( ) { try { updateIndex ( ) ; } catch ( CouchbaseLiteException e ) { Log . e ( Log . TAG_VIEW , "Update index failed when getting the total rows" , e ) ; } return getCurrentTotalRows ( ) ; }
Get total number of rows in the view . The view's index will be updated if needed before returning the value .
68
23
11,638
@ InterfaceAudience . Public public static double totalValues ( List < Object > values ) { double total = 0 ; for ( Object object : values ) { if ( object instanceof Number ) { Number number = ( Number ) object ; total += number . doubleValue ( ) ; } else { Log . w ( Log . TAG_VIEW , "Warning non-numeric value found in totalValues: %s" , object ) ; } } return total ; }
Utility function to use in reduce blocks . Totals an array of Numbers .
95
16
11,639
@ InterfaceAudience . Private protected Status updateIndexes ( List < View > views ) throws CouchbaseLiteException { List < ViewStore > storages = new ArrayList < ViewStore > ( ) ; for ( View view : views ) { storages . add ( view . viewStore ) ; } return viewStore . updateIndexes ( storages ) ; }
Update multiple view indexes at once .
79
7
11,640
@ InterfaceAudience . Private public List < QueryRow > query ( QueryOptions options ) throws CouchbaseLiteException { if ( options == null ) options = new QueryOptions ( ) ; if ( groupOrReduce ( options ) ) return viewStore . reducedQuery ( options ) ; else return viewStore . regularQuery ( options ) ; }
Queries the view . Does NOT first update the index .
71
12
11,641
public static Printer create ( Printer printer , String prefix ) { if ( prefix == null || prefix . equals ( "" ) ) { return printer ; } return new PrefixPrinter ( printer , prefix ) ; }
Creates a new PrefixPrinter .
45
9
11,642
public void deleteCookie ( Cookie cookie ) { cookies . remove ( cookie . name ( ) ) ; deletePersistedCookie ( cookie . name ( ) ) ; }
Non - standard helper method to delete cookie
35
8
11,643
public static int getDefaultPageSize ( ) { synchronized ( sLock ) { if ( sDefaultPageSize == 0 ) { try { Class clazz = Class . forName ( "android.os.StatFs" ) ; Method m = clazz . getMethod ( "getBlockSize" ) ; Object statFsObj = clazz . getConstructor ( String . class ) . newInstance ( "/data" ) ; Integer value = ( Integer ) m . invoke ( statFsObj , ( Object [ ] ) null ) ; if ( value != null ) return value . intValue ( ) ; } catch ( Exception e ) { } } if ( sDefaultPageSize == 0 ) sDefaultPageSize = 1024 ; return sDefaultPageSize ; } }
Gets the default page size to use when creating a database .
157
13
11,644
private static String byteArrayToHexString ( byte [ ] bytes ) { StringBuilder sb = new StringBuilder ( bytes . length * 2 ) ; for ( byte element : bytes ) { int v = element & 0xff ; if ( v < 16 ) { sb . append ( ' ' ) ; } sb . append ( Integer . toHexString ( v ) ) ; } return sb . toString ( ) ; }
Using some super basic byte array &lt ; - &gt ; hex conversions so we don't have to rely on any large Base64 libraries . Can be overridden if you like!
92
37
11,645
@ InterfaceAudience . Public public SavedRevision createRevision ( Map < String , Object > properties ) throws CouchbaseLiteException { boolean allowConflict = false ; return document . putProperties ( properties , revisionInternal . getRevID ( ) , allowConflict ) ; }
Creates and saves a new revision with the given properties . This will fail with a 412 error if the receiver is not the current revision of the document .
61
31
11,646
@ Override @ InterfaceAudience . Public public Map < String , Object > getProperties ( ) { Map < String , Object > properties = revisionInternal . getProperties ( ) ; if ( ! checkedProperties ) { if ( properties == null ) { if ( loadProperties ( ) == true ) { properties = revisionInternal . getProperties ( ) ; } } checkedProperties = true ; } return properties != null ? Collections . unmodifiableMap ( properties ) : null ; }
The contents of this revision of the document . Any keys in the dictionary that begin with _ such as _id and _rev contain CouchbaseLite metadata .
103
32
11,647
public Object jsonObject ( ) { if ( json == null ) { return null ; } if ( cached == null ) { Object tmp = null ; if ( json [ 0 ] == ' ' ) { tmp = new LazyJsonObject < String , Object > ( json ) ; } else if ( json [ 0 ] == ' ' ) { tmp = new LazyJsonArray < Object > ( json ) ; } else { try { // NOTE: This if-else condition is for Jackson 2.5.0 // json variable is byte[] which is from Cursor.getBlob(). // And json byte array is ended with '\0'. // '\0' causes parsing problem with Jackson 2.5.0 that we upgraded Feb 24, 2015. // We did not observe this problem with Jackson 1.9.2 that we used before. if ( json . length > 0 && json [ json . length - 1 ] == 0 ) { tmp = Manager . getObjectMapper ( ) . readValue ( json , 0 , json . length - 1 , Object . class ) ; } else { tmp = Manager . getObjectMapper ( ) . readValue ( json , Object . class ) ; } } catch ( Exception e ) { //cached will remain null Log . w ( Database . TAG , "Exception parsing json" , e ) ; } } cached = tmp ; } return cached ; }
values are requested
292
3
11,648
public void queueObjects ( List < T > objects ) { if ( objects == null || objects . size ( ) == 0 ) return ; boolean readyToProcess = false ; synchronized ( mutex ) { Log . v ( Log . TAG_BATCHER , "%s: queueObjects called with %d objects (current inbox size = %d)" , this , objects . size ( ) , inbox . size ( ) ) ; inbox . addAll ( objects ) ; mutex . notifyAll ( ) ; if ( isFlushing ) { // Skip scheduling as flushing is processing all the queue objects: return ; } scheduleBatchProcess ( false ) ; if ( inbox . size ( ) >= capacity && isPendingFutureReadyOrInProcessing ( ) ) readyToProcess = true ; } if ( readyToProcess ) { // Give work executor chance to work on a scheduled task and to obtain the // mutex lock when another thread keeps adding objects to the queue fast: synchronized ( processMutex ) { try { processMutex . wait ( 5 ) ; } catch ( InterruptedException e ) { } } } }
Adds multiple objects to the queue .
234
7
11,649
public void flushAll ( boolean waitForAllToFinish ) { Log . v ( Log . TAG_BATCHER , "%s: flushing all objects (wait=%b)" , this , waitForAllToFinish ) ; synchronized ( mutex ) { isFlushing = true ; unschedule ( ) ; } while ( true ) { ScheduledFuture future = null ; synchronized ( mutex ) { if ( inbox . size ( ) == 0 ) break ; // Nothing to do final List < T > toProcess = new ArrayList < T > ( inbox ) ; inbox . clear ( ) ; mutex . notifyAll ( ) ; synchronized ( workExecutor ) { if ( ! workExecutor . isShutdown ( ) ) { future = workExecutor . schedule ( new Runnable ( ) { @ Override public void run ( ) { processor . process ( toProcess ) ; synchronized ( mutex ) { lastProcessedTime = System . currentTimeMillis ( ) ; } } } , 0 , TimeUnit . MILLISECONDS ) ; } } } if ( waitForAllToFinish ) { if ( future != null && ! future . isDone ( ) && ! future . isCancelled ( ) ) { try { future . get ( ) ; } catch ( Exception e ) { Log . e ( Log . TAG_BATCHER , "%s: Error while waiting for pending future " + "when flushing all items" , e , this ) ; } } } } synchronized ( mutex ) { isFlushing = false ; } }
Sends _all_ the queued objects at once to the processor block . After this method returns all inbox objects will be processed .
328
27
11,650
/**
 * Schedules a batch run based on capacity, inbox size and last-processed time.
 * A non-immediate, under-capacity batch gets the full delay if we processed
 * recently, otherwise only a small delay so other threads can still top up
 * the batch (see the inline note about the iOS runloop difference).
 */
private void scheduleBatchProcess ( boolean immediate ) { synchronized ( mutex ) { if ( inbox . size ( ) == 0 ) return ; // Schedule the processing. To improve latency, if we haven't processed anything // in at least our delay time, rush these object(s) through a minimum delay: long suggestedDelay = 0 ; if ( ! immediate && inbox . size ( ) < capacity ) { // Check with the last processed time: if ( System . currentTimeMillis ( ) - lastProcessedTime < delay ) suggestedDelay = delay ; else { // Note: iOS schedules with 0 delay but the iOS implementation // works on the runloop which still allows the current thread // to continue queuing objects to the batcher until going out of // the runloop. Java cannot do the same so giving a small delay to // allow objects to be added to the batch if available: suggestedDelay = Math . min ( SMALL_DELAY_AFTER_LONG_PAUSE , delay ) ; } } scheduleWithDelay ( suggestedDelay ) ; } }
Schedule batch process based on capacity inbox size and last processed time .
225
14
11,651
/**
 * Schedules processNow() on the work executor after the given delay.
 * A shorter delay replaces an existing schedule, unless the pending batch is
 * already due or running, in which case the request is ignored (that batch
 * will reschedule the remainder itself when it finishes).
 */
private void scheduleWithDelay ( long delay ) { synchronized ( mutex ) { if ( scheduled && delay < scheduledDelay ) { if ( isPendingFutureReadyOrInProcessing ( ) ) { // Ignore as there is one batch currently in processing or ready to be processed: Log . v ( Log . TAG_BATCHER , "%s: scheduleWithDelay: %d ms, ignored as current batch " + "is ready or in process" , this , delay ) ; return ; } unschedule ( ) ; } if ( ! scheduled ) { scheduled = true ; scheduledDelay = delay ; Log . v ( Log . TAG_BATCHER , "%s: scheduleWithDelay %d ms, scheduled ..." , this , delay ) ; synchronized ( workExecutor ) { if ( ! workExecutor . isShutdown ( ) ) { pendingFuture = workExecutor . schedule ( new Runnable ( ) { @ Override public void run ( ) { Log . v ( Log . TAG_BATCHER , "%s: call processNow ..." , this ) ; processNow ( ) ; Log . v ( Log . TAG_BATCHER , "%s: call processNow done" , this ) ; } } , scheduledDelay , TimeUnit . MILLISECONDS ) ; } } } else Log . v ( Log . TAG_BATCHER , "%s: scheduleWithDelay %d ms, ignored" , this , delay ) ; } }
Schedule the batch processing with the delay . If there is one batch currently in processing the schedule will be ignored as after the processing is done the next batch will be rescheduled .
311
37
11,652
/** Cancels any pending batch-processing task (without interrupting a running one) and clears the scheduled flag. */
private void unschedule() {
    synchronized (mutex) {
        final ScheduledFuture pending = pendingFuture;
        if (pending != null && !pending.isDone() && !pending.isCancelled()) {
            Log.v(Log.TAG_BATCHER, "%s: cancelling the pending future ...", this);
            pending.cancel(false);
        }
        scheduled = false;
    }
}
Unschedule the scheduled batch processing .
82
8
11,653
/**
 * Returns true when there is a live pending batch task whose delay has
 * elapsed — i.e. it is due to run or is currently running.
 */
private boolean isPendingFutureReadyOrInProcessing() {
    synchronized (mutex) {
        final ScheduledFuture pending = pendingFuture;
        if (pending == null || pending.isDone() || pending.isCancelled())
            return false;
        return pending.getDelay(TimeUnit.MILLISECONDS) <= 0;
    }
}
Check if the current pending future is ready to be processed or in processing .
74
15
11,654
/**
 * Runs one batch on the work executor: takes up to 'capacity' items out of
 * the inbox under the mutex, invokes the processor under processMutex (not
 * the inbox lock, so producers can keep queuing), then reschedules —
 * immediately if items were left behind in the inbox.
 */
private void processNow ( ) { List < T > toProcess ; boolean scheduleNextBatchImmediately = false ; synchronized ( mutex ) { int count = inbox . size ( ) ; Log . v ( Log . TAG_BATCHER , "%s: processNow() called, inbox size: %d" , this , count ) ; if ( count == 0 ) return ; else if ( count <= capacity ) { toProcess = new ArrayList < T > ( inbox ) ; inbox . clear ( ) ; } else { toProcess = new ArrayList < T > ( inbox . subList ( 0 , capacity ) ) ; for ( int i = 0 ; i < capacity ; i ++ ) inbox . remove ( 0 ) ; scheduleNextBatchImmediately = true ; } mutex . notifyAll ( ) ; } synchronized ( processMutex ) { if ( toProcess != null && toProcess . size ( ) > 0 ) { Log . v ( Log . TAG_BATCHER , "%s: invoking processor %s with %d items" , this , processor , toProcess . size ( ) ) ; processor . process ( toProcess ) ; } else Log . v ( Log . TAG_BATCHER , "%s: nothing to process" , this ) ; synchronized ( mutex ) { lastProcessedTime = System . currentTimeMillis ( ) ; scheduled = false ; scheduleBatchProcess ( scheduleNextBatchImmediately ) ; Log . v ( Log . TAG_BATCHER , "%s: invoking processor done" , this , processor , toProcess . size ( ) ) ; } processMutex . notifyAll ( ) ; } }
This method is called by the work executor to do the batch process . The inbox items up to the batcher capacity will be taken out to process . The next batch will be rescheduled if there are still some items left in the inbox .
345
50
11,655
private String getRequestHeaderContentType ( ) { String contentType = getRequestHeaderValue ( "Content-Type" ) ; if ( contentType != null ) { // remove parameter (Content-Type := type "/" subtype *[";" parameter] ) int index = contentType . indexOf ( ' ' ) ; if ( index > 0 ) contentType = contentType . substring ( 0 , index ) ; contentType = contentType . trim ( ) ; } return contentType ; }
get Content - Type from URLConnection
103
7
11,656
/**
 * Sets the "Location" response header to the path portion of the given URL.
 * NOTE(review): URL.getPath() does not normally include the query string, so
 * the query-stripping branch below looks defensive/dead — confirm before
 * relying on or removing it.
 */
private void setResponseLocation ( URL url ) { String location = url . getPath ( ) ; String query = url . getQuery ( ) ; if ( query != null ) { int startOfQuery = location . indexOf ( query ) ; if ( startOfQuery > 0 ) { location = location . substring ( 0 , startOfQuery ) ; } } connection . getResHeader ( ) . add ( "Location" , location ) ; }
Router + Handlers
93
5
11,657
/**
 * Workaround for the missing custom JSON serializer for QueryRow: replaces
 * every QueryRow in the result's "rows" entry with its generic Map form.
 */
private static void convertCBLQueryRowsToMaps(Map<String, Object> allDocsResult) {
    List<QueryRow> queryRows = (List<QueryRow>) allDocsResult.get("rows");
    List<Map<String, Object>> converted = new ArrayList<Map<String, Object>>();
    if (queryRows != null) {
        for (QueryRow queryRow : queryRows) {
            converted.add(queryRow.asJSONDictionary());
        }
    }
    allDocsResult.put("rows", converted);
}
This is a hack to deal with the fact that there is currently no custom serializer for QueryRow . Instead just convert everything to generic Maps .
128
29
11,658
/**
 * Database ChangeListener feeding the _changes REST feed. On the first
 * callback it re-reads changes since 'changesSince' to close the race where
 * documents were written before this observer attached. For each change it
 * resolves the winning revision (unless conflicts are included, substituting
 * the winner with the change's sequence — see inline note), applies the
 * filter, then either batches revs for a longpoll response or streams each
 * as a continuous chunk, restarting the timeout timer afterwards.
 */
@ Override public void changed ( Database . ChangeEvent event ) { synchronized ( changesLock ) { if ( isTimeout ) return ; lastChangesTimestamp = System . currentTimeMillis ( ) ; // Stop timeout timer: stopTimeout ( ) ; // In race condition, new doc or update doc is fired before starting to observe the // DatabaseChangeEvent, it allows to skip few document changes with /_changes REST API. // Make sure all document changes are tread by /_changes REST API. if ( ! filled ) { filled = true ; RevisionList changes = db . changesSince ( changesSince , changesOptions , changesFilter , changesFilterParams ) ; if ( changes . size ( ) > 0 ) { sendLongpollChanges ( changes , changesSince ) ; return ; } } List < RevisionInternal > revs = new ArrayList < RevisionInternal > ( ) ; List < DocumentChange > changes = event . getChanges ( ) ; for ( DocumentChange change : changes ) { RevisionInternal rev = change . getAddedRevision ( ) ; if ( rev == null ) continue ; String winningRevID = change . getWinningRevisionID ( ) ; if ( ! this . changesIncludesConflicts ) { if ( winningRevID == null ) continue ; // // this change doesn't affect the winning rev ID, no need to send it else if ( ! winningRevID . equals ( rev . getRevID ( ) ) ) { // This rev made a _different_ rev current, so substitute that one. // We need to emit the current sequence # in the feed, so put it in the rev. // This isn't correct internally (this is an old rev so it has an older sequence) // but consumers of the _changes feed don't care about the internal state. RevisionInternal mRev = db . getDocument ( rev . getDocID ( ) , winningRevID , changesIncludesDocs ) ; mRev . setSequence ( rev . getSequence ( ) ) ; rev = mRev ; } } if ( ! event . getSource ( ) . runFilter ( changesFilter , changesFilterParams , rev ) ) continue ; if ( longpoll ) { revs . add ( rev ) ; } else { Log . d ( TAG , "Router: Sending continuous change chunk" ) ; sendContinuousChange ( rev ) ; } timeoutLastSeqence = rev . 
getSequence ( ) ; } if ( longpoll && revs . size ( ) > 0 ) sendLongpollChanges ( revs , changesSince ) ; else // Restart timeout timer for continuous feed request: startTimeout ( ) ; } }
Implementation of ChangeListener
548
5
11,659
/**
 * Cancels the operation and signals the cancellation listener exactly once.
 * The listener is invoked outside the monitor; mCancelInProgress marks the
 * window so setOnCancelListener() can wait for the callback to finish.
 */
public void cancel() {
    final OnCancelListener listenerToNotify;
    synchronized (this) {
        if (mIsCanceled)
            return;                 // already canceled — signal only once
        mIsCanceled = true;
        mCancelInProgress = true;
        listenerToNotify = mOnCancelListener;
    }
    try {
        if (listenerToNotify != null) {
            listenerToNotify.onCancel();
        }
    } finally {
        synchronized (this) {
            mCancelInProgress = false;
            notifyAll();            // wake waiters in waitForCancelFinishedLocked()
        }
    }
}
Cancels the operation and signals the cancellation listener . If the operation has not yet started then it will be canceled as soon as it does .
97
29
11,660
/**
 * Sets the cancellation listener. Waits for any in-flight cancel callback to
 * finish first; if the operation was already canceled, the new listener is
 * invoked immediately (outside the monitor).
 */
public void setOnCancelListener(OnCancelListener listener) {
    synchronized (this) {
        waitForCancelFinishedLocked();
        if (mOnCancelListener == listener)
            return;
        mOnCancelListener = listener;
        if (!mIsCanceled || listener == null)
            return;
    }
    // Already canceled: deliver the callback to the newly-installed listener.
    listener.onCancel();
}
Sets the cancellation listener to be called when canceled .
80
11
11,661
/**
 * Blocks until the initial async query finishes; afterwards either rows or
 * error will be set. InterruptedException is deliberately swallowed and the
 * wait retried so the call only returns once the query future completes;
 * any other failure is recorded in lastError and rethrown.
 * TODO(review): consider restoring the interrupt flag when interrupted.
 */
@ InterfaceAudience . Public public void waitForRows ( ) throws CouchbaseLiteException { start ( ) ; while ( true ) { try { queryFuture . get ( ) ; break ; } catch ( InterruptedException e ) { continue ; } catch ( Exception e ) { lastError = e ; throw new CouchbaseLiteException ( e , Status . INTERNAL_SERVER_ERROR ) ; } } }
Blocks until the initial async query finishes . After this call either . rows or . error will be non - nil .
89
24
11,662
@ InterfaceAudience . Public public QueryEnumerator getRows ( ) { start ( ) ; if ( rows == null ) { return null ; } else { // Have to return a copy because the enumeration has to start at item #0 every time return new QueryEnumerator ( rows ) ; } }
Gets the results of the Query . The value will be null until the initial Query completes .
65
19
11,663
/** Returns the current revision, resolving it lazily on first access. */
@InterfaceAudience.Public
public SavedRevision getCurrentRevision() {
    if (currentRevision == null) {
        currentRevision = getRevision(null);  // null revID = current revision
    }
    return currentRevision;
}
Get the current revision
43
4
11,664
/**
 * Returns the properties of the document's current revision (shorthand for
 * getCurrentRevision().getProperties()), or null when there is no current
 * revision. Keys beginning with "_" (e.g. _id, _rev) are metadata.
 */
@InterfaceAudience.Public
public Map<String, Object> getProperties() {
    SavedRevision current = getCurrentRevision();
    return current == null ? null : current.getProperties();
}
The contents of the current revision of the document . This is shorthand for self . currentRevision . properties . Any keys in the dictionary that begin with _ such as _id and _rev contain CouchbaseLite metadata .
45
45
11,665
/**
 * Deletes this document by adding a deletion revision (which replicates to
 * other databases). Returns false when there is no current revision.
 */
@InterfaceAudience.Public
public boolean delete() throws CouchbaseLiteException {
    SavedRevision current = getCurrentRevision();
    return current != null && current.deleteDocument() != null;
}
Deletes this document by adding a deletion revision . This will be replicated to other databases .
45
18
11,666
/**
 * Purges this document: all revisions ("*") are forgotten entirely, beyond
 * ordinary deletion, and the purge is NOT replicated to other databases.
 * The document is also evicted from the database's cache.
 */
@InterfaceAudience.Public
public void purge() throws CouchbaseLiteException {
    List<String> allRevs = new ArrayList<String>();
    allRevs.add("*");  // "*" means every revision of the document
    Map<String, List<String>> docsToRevs = new HashMap<String, List<String>>();
    docsToRevs.put(documentId, allRevs);
    database.purgeRevisions(docsToRevs);
    database.removeDocumentFromCache(this);
}
Purges this document from the database ; this is more than deletion : it forgets entirely about it . The purge will NOT be replicated to other databases .
109
30
11,667
/**
 * Returns the revision with the given ID, reusing the cached current
 * revision when the ID matches it; otherwise loads it from the database.
 */
@InterfaceAudience.Public
public SavedRevision getRevision(String revID) {
    if (revID != null && currentRevision != null && revID.equals(currentRevision.getId())) {
        return currentRevision;
    }
    RevisionInternal internal = database.getDocument(getId(), revID, true);
    return getRevisionFromRev(internal);
}
The revision with the specified ID .
86
7
11,668
/** Returns the content length in bytes from the metadata, or 0 if absent. */
@InterfaceAudience.Public
public long getLength() {
    Number length = (Number) metadata.get("length");
    return length == null ? 0 : length.longValue();
}
Get the length in bytes of the contents .
51
9
11,669
/**
 * Replaces Attachment values in an _attachments dictionary with plain JSON
 * metadata maps. New attachment bodies are streamed into the database's blob
 * store, the metadata gains "length", "digest" and "follows" entries, and the
 * writer is remembered so the body can be installed when the revision saves.
 * AttachmentInternal values are rejected as a programming error; any other
 * non-null value is passed through unchanged.
 */
@ InterfaceAudience . Private protected static Map < String , Object > installAttachmentBodies ( Map < String , Object > attachments , Database database ) throws CouchbaseLiteException { Map < String , Object > updatedAttachments = new HashMap < String , Object > ( ) ; for ( String name : attachments . keySet ( ) ) { Object value = attachments . get ( name ) ; if ( value instanceof Attachment ) { Attachment attachment = ( Attachment ) value ; Map < String , Object > metadataMutable = new HashMap < String , Object > ( ) ; metadataMutable . putAll ( attachment . getMetadata ( ) ) ; InputStream body = attachment . getBodyIfNew ( ) ; if ( body != null ) { // Copy attachment body into the database's blob store: BlobStoreWriter writer ; try { writer = blobStoreWriterForBody ( body , database ) ; } catch ( Exception e ) { throw new CouchbaseLiteException ( e . getMessage ( ) , Status . ATTACHMENT_ERROR ) ; } metadataMutable . put ( "length" , writer . getLength ( ) ) ; metadataMutable . put ( "digest" , writer . mD5DigestString ( ) ) ; metadataMutable . put ( "follows" , true ) ; database . rememberAttachmentWriter ( writer ) ; } updatedAttachments . put ( name , metadataMutable ) ; } else if ( value instanceof AttachmentInternal ) { throw new IllegalArgumentException ( "AttachmentInternal objects not expected here. Could indicate a bug" ) ; } else if ( value != null ) { updatedAttachments . put ( name , value ) ; } } return updatedAttachments ; }
Goes through an _attachments dictionary and replaces any values that are Attachment objects with proper JSON metadata dicts . It registers the attachment bodies with the blob store and sets the metadata getDigest and follows properties accordingly .
364
45
11,670
/**
 * Lazily creates the single-thread support executor for push replication,
 * naming its thread after the sanitized remote URL. No-op while a live
 * executor exists.
 */
private void initSupportExecutor() {
    if (supportExecutor != null && !supportExecutor.isShutdown()) {
        return;
    }
    supportExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable runnable) {
            // Mask credentials etc. before using the URL in the thread name.
            String maskedRemote = URLUtils.sanitizeURL(remote);
            return new Thread(runnable, "CBLPusherSupportExecutor-" + maskedRemote);
        }
    });
}
create single thread supportExecutor for push replication
104
9
11,671
/** Returns the names of all attachments; an empty list when there are none. */
@InterfaceAudience.Public
public List<String> getAttachmentNames() {
    Map<String, Object> attachmentMeta = getAttachmentMetadata();
    if (attachmentMeta == null) {
        return new ArrayList<String>();
    }
    return new ArrayList<String>(attachmentMeta.keySet());
}
The names of all attachments
75
5
11,672
/**
 * Returns all attachments as Attachment objects; entries whose metadata
 * cannot be converted are skipped. Empty list when there are none.
 */
@InterfaceAudience.Public
public List<Attachment> getAttachments() {
    Map<String, Object> attachmentMeta = getAttachmentMetadata();
    if (attachmentMeta == null) {
        return new ArrayList<Attachment>();
    }
    List<Attachment> attachments = new ArrayList<Attachment>(attachmentMeta.size());
    for (Map.Entry<String, Object> entry : attachmentMeta.entrySet()) {
        Attachment attachment = toAttachment(entry.getKey(), entry.getValue());
        if (attachment != null) {
            attachments.add(attachment);
        }
    }
    return attachments;
}
All attachments as Attachment objects .
146
7
11,673
@ InterfaceAudience . Public public void setUserProperties ( Map < String , Object > userProperties ) { Map < String , Object > newProps = new HashMap < String , Object > ( ) ; newProps . putAll ( userProperties ) ; for ( String key : properties . keySet ( ) ) { if ( key . startsWith ( "_" ) ) { newProps . put ( key , properties . get ( key ) ) ; // Preserve metadata properties } } properties = newProps ; }
Sets the userProperties of the Revision . Set replaces all properties except for those with keys prefixed with _ .
112
24
11,674
/**
 * Creates or updates an attachment under the given name in _attachments;
 * the body is written to the database when the revision is saved. A null
 * attachment value is stored as-is (removal semantics are handled elsewhere).
 */
@InterfaceAudience.Private
protected void addAttachment(Attachment attachment, String name) {
    Map<String, Object> attachments = (Map<String, Object>) properties.get("_attachments");
    if (attachments == null) {
        attachments = new HashMap<String, Object>();
    }
    attachments.put(name, attachment);
    properties.put("_attachments", attachments);
    if (attachment != null) {
        attachment.setName(name);
    }
}
Creates or updates an attachment . The attachment data will be written to the database when the revision is saved .
105
22
11,675
protected void beginReplicating ( ) { Log . v ( TAG , "submit startReplicating()" ) ; executor . submit ( new Runnable ( ) { @ Override public void run ( ) { if ( isRunning ( ) ) { Log . v ( TAG , "start startReplicating()" ) ; initPendingSequences ( ) ; initDownloadsToInsert ( ) ; startChangeTracker ( ) ; } // start replicator ... } } ) ; }
Actual work of starting the replication process .
100
9
11,676
/**
 * Processes a batch of revisions from the _changes feed: asks the local
 * database which revs are missing (the local equivalent of _revs_diff),
 * decreases changesCount for revs we already have, and when nothing is left
 * just advances the checkpoint. Otherwise each remaining rev is routed either
 * to the _bulk_get queue (when the server supports it, or for simple
 * generation-1 revs) or to the individual-GET queue, its sequence is
 * registered as pending, and the pull tasks are kicked off.
 */
@ Override @ InterfaceAudience . Private protected void processInbox ( RevisionList inbox ) { Log . d ( TAG , "processInbox called" ) ; if ( db == null || ! db . isOpen ( ) ) { Log . w ( Log . TAG_SYNC , "%s: Database is null or closed. Unable to continue. db name is %s." , this , db . getName ( ) ) ; return ; } if ( canBulkGet == null ) { canBulkGet = serverIsSyncGatewayVersion ( "0.81" ) ; } // Ask the local database which of the revs are not known to it: String lastInboxSequence = ( ( PulledRevision ) inbox . get ( inbox . size ( ) - 1 ) ) . getRemoteSequenceID ( ) ; int numRevisionsRemoved = 0 ; try { // findMissingRevisions is the local equivalent of _revs_diff. it looks at the // array of revisions in "inbox" and removes the ones that already exist. // So whatever's left in 'inbox' // afterwards are the revisions that need to be downloaded. numRevisionsRemoved = db . findMissingRevisions ( inbox ) ; } catch ( SQLException e ) { Log . e ( TAG , String . format ( Locale . ENGLISH , "%s failed to look up local revs" , this ) , e ) ; inbox = null ; } //introducing this to java version since inbox may now be null everywhere int inboxCount = 0 ; if ( inbox != null ) { inboxCount = inbox . size ( ) ; } if ( numRevisionsRemoved > 0 ) { Log . v ( TAG , "%s: processInbox() setting changesCount to: %s" , this , getChangesCount ( ) . get ( ) - numRevisionsRemoved ) ; // May decrease the changesCount, to account for the revisions we just found out we don't need to get. addToChangesCount ( - 1 * numRevisionsRemoved ) ; } if ( inboxCount == 0 ) { // Nothing to do. Just bump the lastSequence. Log . d ( TAG , "%s no new remote revisions to fetch. add lastInboxSequence (%s) to pendingSequences (%s)" , this , lastInboxSequence , pendingSequences ) ; long seq = pendingSequences . addValue ( lastInboxSequence ) ; pendingSequences . removeSequence ( seq ) ; setLastSequence ( pendingSequences . getCheckpointedValue ( ) ) ; pauseOrResume ( ) ; return ; } Log . 
v ( TAG , "%s: fetching %s remote revisions..." , this , inboxCount ) ; // Dump the revs into the queue of revs to pull from the remote db: for ( int i = 0 ; i < inbox . size ( ) ; i ++ ) { PulledRevision rev = ( PulledRevision ) inbox . get ( i ) ; if ( canBulkGet || ( rev . getGeneration ( ) == 1 && ! rev . isDeleted ( ) && ! rev . isConflicted ( ) ) ) { bulkRevsToPull . add ( rev ) ; } else { queueRemoteRevision ( rev ) ; } rev . setSequence ( pendingSequences . addValue ( rev . getRemoteSequenceID ( ) ) ) ; } pullRemoteRevisions ( ) ; pauseOrResume ( ) ; }
Process a bunch of remote revisions from the _changes feed at once
735
13
11,677
/**
 * Fetches a set of revisions with a single POST _bulk_get, falling back to
 * the _all_docs path for servers without _bulk_get support. Each downloaded
 * document is matched back to its queued revision to recover its local
 * sequence, then queued for insertion; per-document errors are reported via
 * revisionFailed(). On completion the HTTP connection count is released and
 * another pull task may start.
 */
protected void pullBulkRevisions ( List < RevisionInternal > bulkRevs ) { int nRevs = bulkRevs . size ( ) ; if ( nRevs == 0 ) { return ; } Log . d ( TAG , "%s bulk-fetching %d remote revisions..." , this , nRevs ) ; Log . d ( TAG , "%s bulk-fetching remote revisions: %s" , this , bulkRevs ) ; if ( ! canBulkGet ) { pullBulkWithAllDocs ( bulkRevs ) ; return ; } Log . v ( TAG , "%s: POST _bulk_get" , this ) ; final List < RevisionInternal > remainingRevs = new ArrayList < RevisionInternal > ( bulkRevs ) ; ++ httpConnectionCount ; final RemoteBulkDownloaderRequest downloader ; try { downloader = new RemoteBulkDownloaderRequest ( clientFactory , remote , true , bulkRevs , db , this . requestHeaders , new RemoteBulkDownloaderRequest . BulkDownloaderDocument ( ) { public void onDocument ( Map < String , Object > props , long size ) { // Got a revision! // Find the matching revision in 'remainingRevs' and get its sequence: RevisionInternal rev ; if ( props . get ( "_id" ) != null ) { rev = new RevisionInternal ( props , size ) ; } else { rev = new RevisionInternal ( ( String ) props . get ( "id" ) , ( String ) props . get ( "rev" ) , false ) ; } int pos = remainingRevs . indexOf ( rev ) ; if ( pos > - 1 ) { rev . setSequence ( remainingRevs . get ( pos ) . getSequence ( ) ) ; remainingRevs . remove ( pos ) ; } else { Log . w ( TAG , "%s : Received unexpected rev rev" , this ) ; } if ( props . get ( "_id" ) != null ) { // Add to batcher ... eventually it will be fed to -insertRevisions:. queueDownloadedRevision ( rev ) ; } else { Status status = statusFromBulkDocsResponseItem ( props ) ; Throwable err = new CouchbaseLiteException ( status ) ; revisionFailed ( rev , err ) ; } } } , new RemoteRequestCompletion ( ) { public void onCompletion ( RemoteRequest remoteRequest , Response httpResponse , Object result , Throwable e ) { // The entire _bulk_get is finished: if ( e != null ) { setError ( e ) ; completedChangesCount . addAndGet ( remainingRevs . 
size ( ) ) ; } -- httpConnectionCount ; // Start another task if there are still revisions waiting to be pulled: pullRemoteRevisions ( ) ; if ( cancellables != null && cancellables . values ( ) != null && remoteRequest != null ) cancellables . values ( ) . remove ( remoteRequest ) ; } } ) ; } catch ( Exception e ) { Log . e ( TAG , "%s: pullBulkRevisions Exception: %s" , this , e ) ; return ; } downloader . setAuthenticator ( getAuthenticator ( ) ) ; // set compressed request - gzip downloader . setCompressedRequest ( canSendCompressedRequests ( ) ) ; synchronized ( remoteRequestExecutor ) { if ( ! remoteRequestExecutor . isShutdown ( ) ) { Future future = remoteRequestExecutor . submit ( downloader ) ; pendingFutures . add ( future ) ; cancellables . put ( future , downloader ) ; } } }
Get a bunch of revisions in one bulk request . Will use _bulk_get if possible .
763
20
11,678
/**
 * Runs the installed transformation block (if any) over a downloaded
 * revision — temporarily exposing attachment body file paths under "file"
 * entries, then removing them again — and queues the result for insertion.
 * A revision rejected by the transformer just advances the checkpoint.
 * Queued body sizes are tracked and the insert batcher is force-flushed
 * once the queued memory exceeds MAX_QUEUE_MEMORY_SIZE.
 */
private void queueDownloadedRevision ( RevisionInternal rev ) { if ( revisionBodyTransformationBlock != null ) { // Add 'file' properties to attachments pointing to their bodies: for ( Map . Entry < String , Map < String , Object > > entry : ( ( Map < String , Map < String , Object > > ) rev . getProperties ( ) . get ( "_attachments" ) ) . entrySet ( ) ) { String name = entry . getKey ( ) ; Map < String , Object > attachment = entry . getValue ( ) ; attachment . remove ( "file" ) ; if ( attachment . get ( "follows" ) != null && attachment . get ( "data" ) == null ) { String filePath = db . fileForAttachmentDict ( attachment ) . getPath ( ) ; if ( filePath != null ) attachment . put ( "file" , filePath ) ; } } RevisionInternal xformed = transformRevision ( rev ) ; if ( xformed == null ) { Log . v ( TAG , "%s: Transformer rejected revision %s" , this , rev ) ; pendingSequences . removeSequence ( rev . getSequence ( ) ) ; lastSequence = pendingSequences . getCheckpointedValue ( ) ; pauseOrResume ( ) ; return ; } rev = xformed ; // Clean up afterwards Map < String , Map < String , Object > > attachments = ( Map < String , Map < String , Object > > ) rev . getProperties ( ) . get ( "_attachments" ) ; for ( Map . Entry < String , Map < String , Object > > entry : attachments . entrySet ( ) ) { Map < String , Object > attachment = entry . getValue ( ) ; attachment . remove ( "file" ) ; } } // NOTE: should not/not necessary to call Body.compact() // new RevisionInternal(Map<string, Object>) creates Body instance only // with `object`. Serializing object to json causes two unnecessary // JSON serializations. if ( rev . getBody ( ) != null ) queuedMemorySize . addAndGet ( rev . getBody ( ) . getSize ( ) ) ; downloadsToInsert . queueObject ( rev ) ; // if queue memory size is more than maximum, force flush the queue. if ( queuedMemorySize . get ( ) > MAX_QUEUE_MEMORY_SIZE ) { Log . d ( TAG , "Flushing queued memory size at: " + queuedMemorySize ) ; downloadsToInsert . 
flushAllAndWait ( ) ; } }
This invokes the transformation block if one is installed and queues the resulting CBL_Revision
546
21
11,679
/**
 * CouchDB-compatible bulk fetch via POST _all_docs?include_docs=true.
 * Only effective for generation-1 revisions without attachments: matching
 * docs are queued for insertion, error rows are failed via revisionFailed(),
 * and any revisions the response didn't cover are re-queued for individual
 * GETs before another pull round is started.
 */
protected void pullBulkWithAllDocs ( final List < RevisionInternal > bulkRevs ) { // http://wiki.apache.org/couchdb/HTTP_Bulk_Document_API ++ httpConnectionCount ; final RevisionList remainingRevs = new RevisionList ( bulkRevs ) ; Collection < String > keys = CollectionUtils . transform ( bulkRevs , new CollectionUtils . Functor < RevisionInternal , String > ( ) { public String invoke ( RevisionInternal rev ) { return rev . getDocID ( ) ; } } ) ; Map < String , Object > body = new HashMap < String , Object > ( ) ; body . put ( "keys" , keys ) ; Future future = sendAsyncRequest ( "POST" , "_all_docs?include_docs=true" , body , new RemoteRequestCompletion ( ) { public void onCompletion ( RemoteRequest remoteRequest , Response httpResponse , Object result , Throwable e ) { Map < String , Object > res = ( Map < String , Object > ) result ; if ( e != null ) { setError ( e ) ; // TODO: There is a known bug caused by the line below, which is // TODO: causing testMockSinglePullCouchDb to fail when running on a Nexus5 device. // TODO: (the batching behavior is different in that case) // TODO: See https://github.com/couchbase/couchbase-lite-java-core/issues/271 // completedChangesCount.addAndGet(bulkRevs.size()); } else { // Process the resulting rows' documents. // We only add a document if it doesn't have attachments, and if its // revID matches the one we asked for. List < Map < String , Object > > rows = ( List < Map < String , Object > > ) res . get ( "rows" ) ; Log . v ( TAG , "%s checking %d bulk-fetched remote revisions" , this , rows . size ( ) ) ; for ( Map < String , Object > row : rows ) { Map < String , Object > doc = ( Map < String , Object > ) row . get ( "doc" ) ; if ( doc != null && doc . get ( "_attachments" ) == null ) { RevisionInternal rev = new RevisionInternal ( doc ) ; RevisionInternal removedRev = remainingRevs . removeAndReturnRev ( rev ) ; if ( removedRev != null ) { rev . setSequence ( removedRev . 
getSequence ( ) ) ; queueDownloadedRevision ( rev ) ; } } else { Status status = statusFromBulkDocsResponseItem ( row ) ; if ( status . isError ( ) && row . containsKey ( "key" ) && row . get ( "key" ) != null ) { RevisionInternal rev = remainingRevs . revWithDocId ( ( String ) row . get ( "key" ) ) ; if ( rev != null ) { remainingRevs . remove ( rev ) ; revisionFailed ( rev , new CouchbaseLiteException ( status ) ) ; } } } } } // Any leftover revisions that didn't get matched will be fetched individually: if ( remainingRevs . size ( ) > 0 ) { Log . v ( TAG , "%s bulk-fetch didn't work for %d of %d revs; getting individually" , this , remainingRevs . size ( ) , bulkRevs . size ( ) ) ; for ( RevisionInternal rev : remainingRevs ) { queueRemoteRevision ( rev ) ; } pullRemoteRevisions ( ) ; } -- httpConnectionCount ; // Start another task if there are still revisions waiting to be pulled: pullRemoteRevisions ( ) ; } } ) ; pendingFutures . add ( future ) ; }
This is compatible with CouchDB but it only works for revs of generation 1 without attachments .
804
19
11,680
/**
 * Adds a revision to the appropriate individual-GET queue: deleted revisions
 * go to their own queue, everything else to the regular pull queue.
 */
@InterfaceAudience.Private
protected void queueRemoteRevision(RevisionInternal rev) {
    if (rev.isDeleted()) {
        deletedRevsToPull.add(rev);
    } else {
        revsToPull.add(rev);
    }
}
Add a revision to the appropriate queue of revs to individually GET
55
13
11,681
/**
 * Returns true if the given character may appear unescaped in a URI:
 * ASCII letters, digits, the always-safe punctuation "_-!.~'()*", or any
 * character in the caller-supplied {@code allow} set (may be null).
 *
 * The three range comparisons were garbled to blank character literals in
 * this copy; restored to the letter/digit ranges used by Android's
 * Uri.isAllowed(), which this method mirrors.
 */
private static boolean isAllowed(char c, String allow) {
    return (c >= 'A' && c <= 'Z')
            || (c >= 'a' && c <= 'z')
            || (c >= '0' && c <= '9')
            || "_-!.~'()*".indexOf(c) != NOT_FOUND
            || (allow != null && allow.indexOf(c) != NOT_FOUND);
}
Returns true if the given character is allowed .
91
9
11,682
/**
 * Applies 'functor' to a mutable copy of every attachment dict (with its
 * name injected under "name"). A null result from the functor cancels the
 * whole operation. On first edit, the properties and _attachments maps are
 * copied (copy-on-write) so the originals are untouched; edited attachments
 * are written back and stored via setProperties(). Returns true iff any
 * attachment was changed.
 */
public boolean mutateAttachments ( CollectionUtils . Functor < Map < String , Object > , Map < String , Object > > functor ) { { Map < String , Object > properties = getProperties ( ) ; Map < String , Object > editedProperties = null ; Map < String , Object > attachments = ( Map < String , Object > ) properties . get ( "_attachments" ) ; Map < String , Object > editedAttachments = null ; if ( attachments != null ) { for ( String name : attachments . keySet ( ) ) { Map < String , Object > attachment = new HashMap < String , Object > ( ( Map < String , Object > ) attachments . get ( name ) ) ; attachment . put ( "name" , name ) ; Map < String , Object > editedAttachment = functor . invoke ( attachment ) ; if ( editedAttachment == null ) { return false ; // block canceled } if ( editedAttachment != attachment ) { if ( editedProperties == null ) { // Make the document properties and _attachments dictionary mutable: editedProperties = new HashMap < String , Object > ( properties ) ; editedAttachments = new HashMap < String , Object > ( attachments ) ; editedProperties . put ( "_attachments" , editedAttachments ) ; } editedAttachment . remove ( "name" ) ; editedAttachments . put ( name , editedAttachment ) ; } } } if ( editedProperties != null ) { setProperties ( editedProperties ) ; return true ; } return false ; } }
Returns YES if any changes were made .
329
8
11,683
/** Appends the whole byte array to the blob; null input is a no-op. */
public void appendData(byte[] data) throws IOException, SymmetricKeyException {
    if (data != null) {
        appendData(data, 0, data.length);
    }
}
Appends data to the blob . Call this when new data is available .
41
15
11,684
public void finish ( ) throws IOException , SymmetricKeyException { if ( outStream != null ) { if ( encryptor != null ) outStream . write ( encryptor . encrypt ( null ) ) ; // FileOutputStream is also closed cascadingly outStream . close ( ) ; outStream = null ; // Only create the key if we got all the data successfully blobKey = new BlobKey ( sha1Digest . digest ( ) ) ; md5DigestResult = md5Digest . digest ( ) ; } }
Call this after all the data has been added .
114
10
11,685
public void cancel ( ) { try { // FileOutputStream is also closed cascadingly if ( outStream != null ) { outStream . close ( ) ; outStream = null ; } // Clear encryptor: encryptor = null ; } catch ( IOException e ) { Log . w ( Log . TAG_BLOB_STORE , "Exception closing buffered output stream" , e ) ; } tempFile . delete ( ) ; }
Call this to cancel before finishing the data .
92
9
11,686
public boolean install ( ) { if ( tempFile == null ) return true ; // already installed // Move temp file to correct location in blob store: String destPath = store . getRawPathForKey ( blobKey ) ; File destPathFile = new File ( destPath ) ; if ( tempFile . renameTo ( destPathFile ) ) // If the move fails, assume it means a file with the same name already exists; in that // case it must have the identical contents, so we're still OK. tempFile = null ; else cancel ( ) ; return true ; }
Installs a finished blob into the store .
120
9
11,687
/** Returns the next QueryRow, or null when the results are exhausted. */
@Override
@InterfaceAudience.Public
public QueryRow next() {
    QueryRow row = null;
    if (nextRow < rows.size()) {
        row = rows.get(nextRow++);
    }
    return row;
}
Gets the next QueryRow from the results or null if there are no more results .
43
18
11,688
Map < String , Object > getProperties ( ) { // This is basically the inverse of -[CBLManager parseReplicatorProperties:...] Map < String , Object > props = new HashMap < String , Object > ( ) ; props . put ( "continuous" , isContinuous ( ) ) ; props . put ( "create_target" , shouldCreateTarget ( ) ) ; props . put ( "filter" , getFilter ( ) ) ; props . put ( "query_params" , getFilterParams ( ) ) ; props . put ( "doc_ids" , getDocIds ( ) ) ; URL remoteURL = this . getRemoteUrl ( ) ; // TODO: authenticator is little different from iOS. need to update Map < String , Object > remote = new HashMap < String , Object > ( ) ; remote . put ( "url" , remoteURL . toString ( ) ) ; remote . put ( "headers" , getHeaders ( ) ) ; //remote.put("auth", authMap); if ( isPull ( ) ) { props . put ( "source" , remote ) ; props . put ( "target" , db . getName ( ) ) ; } else { props . put ( "source" , db . getName ( ) ) ; props . put ( "target" , remote ) ; } return props ; }
Currently only used for test
291
5
11,689
/**
 * Starts the replication asynchronously.
 *
 * Creates the internal replicator on first use, recreates it when a previous
 * run has fully STOPPED, and otherwise leaves a running replicator untouched.
 * Always clears any previous error state before firing the start trigger, so
 * a restarted replicator begins with a clean slate.
 */
@InterfaceAudience.Public
public void start() {
    if (replicationInternal == null) {
        // First start: build the internal replicator.
        initReplicationInternal();
    } else {
        if (replicationInternal.stateMachine.isInState(ReplicationState.INITIAL)) {
            // great, it's ready to be started, nothing to do
        } else if (replicationInternal.stateMachine.isInState(ReplicationState.STOPPED)) {
            // if there was a previous internal replication and it's in the STOPPED state, then
            // start a fresh internal replication
            initReplicationInternal();
        } else {
            // Any other state (e.g. RUNNING) — ignore the start request.
            Log.w(Log.TAG_SYNC, String.format(Locale.ENGLISH,
                    "replicationInternal in unexpected state: %s, ignoring start()",
                    replicationInternal.stateMachine.getState()));
        }
    }
    // following is for restarting replicator.
    // make sure both lastError and ReplicationInternal.error are null.
    this.lastError = null;
    replicationInternal.setError(null);
    replicationInternal.triggerStart();
}
Starts the replication asynchronously .
226
8
11,690
/**
 * Sets whether this replication runs continuously: true maps to the
 * CONTINUOUS lifecycle, false to ONESHOT. The lifecycle is applied both to
 * this object and to the internal replicator.
 */
@InterfaceAudience.Public
public void setContinuous(boolean isContinous) {
    // Translate the flag into a lifecycle once, then apply it in both places.
    final Lifecycle newLifecycle = isContinous ? Lifecycle.CONTINUOUS : Lifecycle.ONESHOT;
    this.lifecycle = newLifecycle;
    replicationInternal.setLifecycle(newLifecycle);
}
Set whether this replication is continuous
86
7
11,691
/**
 * Sets the Authenticator used for authenticating with the remote endpoint
 * (e.g. Sync Gateway).
 *
 * Records the value in the local property map, then propagates it to the
 * internal replicator that performs the actual work.
 */
@InterfaceAudience.Public
public void setAuthenticator(Authenticator authenticator) {
    properties.put(ReplicationField.AUTHENTICATOR, authenticator);
    replicationInternal.setAuthenticator(authenticator);
}
Set the Authenticator used for authenticating with the Sync Gateway
47
12
11,692
/**
 * Sets whether the target database should be created if it doesn't already
 * exist.
 *
 * Records the value in the local property map, then propagates it to the
 * internal replicator.
 */
@InterfaceAudience.Public
public void setCreateTarget(boolean createTarget) {
    properties.put(ReplicationField.CREATE_TARGET, createTarget);
    replicationInternal.setCreateTarget(createTarget);
}
Set whether the target database should be created if it doesn 't already exist .
47
14
11,693
/**
 * Callback for change events coming from ReplicationInternal.
 *
 * When the push checkpoint has advanced since the last event, asynchronously
 * invalidates the cached pending-document-ID set (the cache invalidation is
 * scheduled on the database's async task queue — "should be executed in
 * workExecutor"). Then fans the event out to all registered listeners.
 */
@Override
public void changed(ChangeEvent event) {
    // forget cached IDs (Should be executed in workExecutor)
    // Pulls have no push checkpoint; a missing lastSequence also means
    // "nothing pushed yet" — both are encoded as -1.
    final long lastSeqPushed = (isPull() || replicationInternal.lastSequence == null) ?
            -1L : Long.valueOf(replicationInternal.lastSequence);
    if (lastSeqPushed >= 0 && lastSeqPushed != _lastSequencePushed) {
        db.runAsync(new AsyncTask() {
            @Override
            public void run(Database database) {
                synchronized (_lockPendingDocIDs) {
                    _lastSequencePushed = lastSeqPushed;
                    // Drop the cache; it will be recomputed on next request.
                    _pendingDocIDs = null;
                }
            }
        });
    }
    // Propagate the event to every listener; one misbehaving listener must
    // not prevent the others from being notified.
    for (ChangeListener changeListener : changeListeners) {
        try {
            changeListener.changed(event);
        } catch (Exception e) {
            Log.e(Log.TAG_SYNC, "Exception calling changeListener.changed", e);
        }
    }
}
This is called back for changes from the ReplicationInternal . Simply propagate the events back to all listeners .
205
21
11,694
/**
 * Sets the name of the filter function to be used by this replication.
 *
 * Records the value in the local property map, then propagates it to the
 * internal replicator.
 */
@InterfaceAudience.Public
public void setFilter(String filterName) {
    properties.put(ReplicationField.FILTER_NAME, filterName);
    replicationInternal.setFilter(filterName);
}
Set the filter to be used by this replication
44
9
11,695
/**
 * Sets the list of document IDs to include in the replication.
 *
 * Records the value in the local property map, then propagates it to the
 * internal replicator.
 */
@InterfaceAudience.Public
public void setDocIds(List<String> docIds) {
    properties.put(ReplicationField.DOC_IDS, docIds);
    replicationInternal.setDocIds(docIds);
}
Sets the document IDs to include as part of the replication .
53
12
11,696
/**
 * Sets the parameters to pass to the filter function.
 *
 * Records the value in the local property map, then propagates it to the
 * internal replicator.
 */
// Consistency fix: every sibling setter (setAuthenticator, setCreateTarget,
// setFilter, setDocIds, setChannels) is annotated @InterfaceAudience.Public;
// this one was missing the annotation.
@InterfaceAudience.Public
public void setFilterParams(Map<String, Object> filterParams) {
    properties.put(ReplicationField.FILTER_PARAMS, filterParams);
    replicationInternal.setFilterParams(filterParams);
}
Set parameters to pass to the filter function .
51
9
11,697
/**
 * Sets the list of Sync Gateway channel names to replicate.
 *
 * Records the value in the local property map, then propagates it to the
 * internal replicator.
 */
@InterfaceAudience.Public
public void setChannels(List<String> channels) {
    properties.put(ReplicationField.CHANNELS, channels);
    replicationInternal.setChannels(channels);
}
Set the list of Sync Gateway channel names
45
8
11,698
/**
 * Initializes the object with a raw symmetric key.
 *
 * @param key the raw key material; must be exactly {@code KEY_SIZE} (32) bytes
 * @throws SymmetricKeyException if the key is null or not {@code KEY_SIZE}
 *         bytes long
 */
private void initWithKey(byte[] key) throws SymmetricKeyException {
    if (key == null)
        throw new SymmetricKeyException("Key cannot be null");
    if (key.length != KEY_SIZE)
        // Fix: original message was missing a space ("...not 32bytes").
        throw new SymmetricKeyException("Key size is not " + KEY_SIZE + " bytes");
    keyData = key;
}
Initialize the object with a raw key that is 32 bytes in size .
80
13
11,699
/**
 * Encrypts the given byte array with this key and returns the complete
 * ciphertext (body plus finalization trailer).
 *
 * @throws SymmetricKeyException if either encryption step yields null
 */
public byte[] encryptData(byte[] data) throws SymmetricKeyException {
    Encryptor enc = createEncryptor();
    byte[] body = enc.encrypt(data);
    // Passing null finalizes the encryptor and yields any trailing cipher bytes.
    byte[] trailer = enc.encrypt(null);
    if (body == null || trailer == null)
        throw new SymmetricKeyException("Cannot encrypt data");
    return ArrayUtils.concat(body, trailer);
}
Encrypt the byte array data
105
6