idx int64 0 41.2k | question stringlengths 83 4.15k | target stringlengths 5 715 |
|---|---|---|
21,100 | public void execute ( ) throws MojoExecutionException , MojoFailureException { try { Set < File > thriftFiles = findThriftFiles ( ) ; final File outputDirectory = getOutputDirectory ( ) ; ImmutableSet < File > outputFiles = findGeneratedFilesInDirectory ( getOutputDirectory ( ) ) ; Set < String > compileRoots = new HashSet < String > ( ) ; compileRoots . add ( "scrooge" ) ; if ( thriftFiles . isEmpty ( ) ) { getLog ( ) . info ( "No thrift files to compile." ) ; } else if ( checkStaleness && ( ( lastModified ( thriftFiles ) + staleMillis ) < lastModified ( outputFiles ) ) ) { getLog ( ) . info ( "Generated thrift files up to date, skipping compile." ) ; attachFiles ( compileRoots ) ; } else { outputDirectory . mkdirs ( ) ; cleanDirectory ( outputDirectory ) ; getLog ( ) . info ( format ( "compiling thrift files %s with Scrooge" , thriftFiles ) ) ; synchronized ( lock ) { ScroogeRunner runner = new ScroogeRunner ( ) ; Map < String , String > thriftNamespaceMap = new HashMap < String , String > ( ) ; for ( ThriftNamespaceMapping mapping : thriftNamespaceMappings ) { thriftNamespaceMap . put ( mapping . getFrom ( ) , mapping . getTo ( ) ) ; } Set < File > includes = thriftIncludes ; includes . add ( getResourcesOutputDirectory ( ) ) ; final File thriftSourceRoot = getThriftSourceRoot ( ) ; if ( thriftSourceRoot != null && thriftSourceRoot . exists ( ) ) { includes . add ( thriftSourceRoot ) ; } runner . compile ( getLog ( ) , includeOutputDirectoryNamespace ? new File ( outputDirectory , "scrooge" ) : outputDirectory , thriftFiles , includes , thriftNamespaceMap , language , thriftOpts ) ; } attachFiles ( compileRoots ) ; } } catch ( IOException e ) { throw new MojoExecutionException ( "An IO error occurred" , e ) ; } } | Executes the mojo . |
21,101 | private long lastModified ( Set < File > files ) { long result = 0 ; for ( File file : files ) { if ( file . lastModified ( ) > result ) result = file . lastModified ( ) ; } return result ; } | Get the last modified time for a set of files . |
21,102 | private Set < File > findThriftFiles ( ) throws IOException , MojoExecutionException { final File thriftSourceRoot = getThriftSourceRoot ( ) ; Set < File > thriftFiles = new HashSet < File > ( ) ; if ( thriftSourceRoot != null && thriftSourceRoot . exists ( ) ) { thriftFiles . addAll ( findThriftFilesInDirectory ( thriftSourceRoot ) ) ; } getLog ( ) . info ( "finding thrift files in dependencies" ) ; extractFilesFromDependencies ( findThriftDependencies ( ) , getResourcesOutputDirectory ( ) ) ; if ( buildExtractedThrift && getResourcesOutputDirectory ( ) . exists ( ) ) { thriftFiles . addAll ( findThriftFilesInDirectory ( getResourcesOutputDirectory ( ) ) ) ; } getLog ( ) . info ( "finding thrift files in referenced (reactor) projects" ) ; thriftFiles . addAll ( getReferencedThriftFiles ( ) ) ; return thriftFiles ; } | build a complete set of local files , files from referenced projects , and dependencies . |
21,103 | private Set < Artifact > findThriftDependencies ( ) throws IOException , MojoExecutionException { Set < Artifact > thriftDependencies = new HashSet < Artifact > ( ) ; Set < Artifact > deps = new HashSet < Artifact > ( ) ; deps . addAll ( project . getArtifacts ( ) ) ; deps . addAll ( project . getDependencyArtifacts ( ) ) ; Map < String , Artifact > depsMap = new HashMap < String , Artifact > ( ) ; for ( Artifact dep : deps ) { depsMap . put ( dep . getId ( ) , dep ) ; } for ( Artifact artifact : deps ) { if ( isIdlCalssifier ( artifact , classifier ) ) { thriftDependencies . add ( artifact ) ; } else { if ( isDepOfIdlArtifact ( artifact , depsMap ) ) { try { Artifact idlArtifact = MavenScroogeCompilerUtil . getIdlArtifact ( artifact , artifactFactory , artifactResolver , localRepository , remoteArtifactRepositories , classifier ) ; thriftDependencies . add ( idlArtifact ) ; } catch ( MojoExecutionException e ) { getLog ( ) . debug ( "Could not fetch idl jar for " + artifact ) ; } } } } return thriftDependencies ; } | Iterate through dependencies |
21,104 | protected List < File > getRecursiveThriftFiles ( MavenProject project , String outputDirectory ) throws IOException { return getRecursiveThriftFiles ( project , outputDirectory , new ArrayList < File > ( ) ) ; } | Walk project references recursively building up a list of thrift files they provide starting with an empty file list . |
21,105 | List < File > getRecursiveThriftFiles ( MavenProject project , String outputDirectory , List < File > files ) throws IOException { HashFunction hashFun = Hashing . md5 ( ) ; File dir = new File ( new File ( project . getFile ( ) . getParent ( ) , "target" ) , outputDirectory ) ; if ( dir . exists ( ) ) { URI baseDir = getFileURI ( dir ) ; for ( File f : findThriftFilesInDirectory ( dir ) ) { URI fileURI = getFileURI ( f ) ; String relPath = baseDir . relativize ( fileURI ) . getPath ( ) ; File destFolder = getResourcesOutputDirectory ( ) ; destFolder . mkdirs ( ) ; File destFile = new File ( destFolder , relPath ) ; if ( ! destFile . exists ( ) || ( destFile . isFile ( ) && ! Files . hash ( f , hashFun ) . equals ( Files . hash ( destFile , hashFun ) ) ) ) { getLog ( ) . info ( format ( "copying %s to %s" , f . getCanonicalPath ( ) , destFile . getCanonicalPath ( ) ) ) ; copyFile ( f , destFile ) ; } files . add ( destFile ) ; } } Map < String , MavenProject > refs = project . getProjectReferences ( ) ; for ( String name : refs . keySet ( ) ) { getRecursiveThriftFiles ( refs . get ( name ) , outputDirectory , files ) ; } return files ; } | Walk project references recursively adding thrift files to the provided list . |
21,106 | private boolean isDepOfIdlArtifact ( Artifact artifact , Map < String , Artifact > depsMap ) { List < String > depTrail = artifact . getDependencyTrail ( ) ; if ( depTrail != null ) { for ( String name : depTrail ) { Artifact dep = depsMap . get ( name ) ; if ( dep != null && isIdlCalssifier ( dep , classifier ) ) { return true ; } } } return false ; } | Checks if the artifact is a dependency of a dependent idl artifact |
21,107 | public static Artifact getIdlArtifact ( Artifact artifact , ArtifactFactory artifactFactory , ArtifactResolver artifactResolver , ArtifactRepository localRepository , List < ArtifactRepository > remoteRepos , String classifier ) throws MojoExecutionException { Artifact idlArtifact = artifactFactory . createArtifactWithClassifier ( artifact . getGroupId ( ) , artifact . getArtifactId ( ) , artifact . getVersion ( ) , "jar" , classifier ) ; try { artifactResolver . resolve ( idlArtifact , remoteRepos , localRepository ) ; return idlArtifact ; } catch ( final ArtifactResolutionException e ) { throw new MojoExecutionException ( "Failed to resolve one or more idl artifacts:\n\n" + e . getMessage ( ) , e ) ; } catch ( final ArtifactNotFoundException e ) { throw new MojoExecutionException ( "Failed to resolve one or more idl artifacts:\n\n" + e . getMessage ( ) , e ) ; } } | Resolves an idl jar for the artifact . |
21,108 | public static < Z > Function0 < Z > lift ( Func0 < Z > f ) { return bridge . lift ( f ) ; } | Lift a Java Func0 to a Scala Function0 |
21,109 | public static < A , Z > Function1 < A , Z > lift ( Func1 < A , Z > f ) { return bridge . lift ( f ) ; } | Lift a Java Func1 to a Scala Function1 |
21,110 | public static < A , B , Z > Function2 < A , B , Z > lift ( Func2 < A , B , Z > f ) { return bridge . lift ( f ) ; } | Lift a Java Func2 to a Scala Function2 |
21,111 | public static < A , B , C , Z > Function3 < A , B , C , Z > lift ( Func3 < A , B , C , Z > f ) { return bridge . lift ( f ) ; } | Lift a Java Func3 to a Scala Function3 |
21,112 | public static < A , B , C , D , Z > Function4 < A , B , C , D , Z > lift ( Func4 < A , B , C , D , Z > f ) { return bridge . lift ( f ) ; } | Lift a Java Func4 to a Scala Function4 |
21,113 | public static < Z > Function0 < Z > lift ( Callable < Z > f ) { return bridge . lift ( f ) ; } | Lift a Java Callable to a Scala Function0 |
21,114 | public static CssSel sel ( String selector , String value ) { return j . sel ( selector , value ) ; } | Create a Css Selector Transform |
21,115 | public static < T > Vendor < T > vendor ( Func0 < T > f ) { return j . vendor ( f ) ; } | Create a Vendor from a Func0 |
21,116 | public static < T > Vendor < T > vendor ( Callable < T > f ) { return j . vendor ( f ) ; } | Create a Vendor from a Callable |
21,117 | public static < T > SessionVar < T > vendSessionVar ( T defValue ) { return ( new VarsJBridge ( ) ) . vendSessionVar ( defValue , new Exception ( ) ) ; } | Vend a SessionVar with the default value |
21,118 | public static < T > SessionVar < T > vendSessionVar ( Callable < T > defFunc ) { return ( new VarsJBridge ( ) ) . vendSessionVar ( defFunc , new Exception ( ) ) ; } | Vend a SessionVar with the function to create the default value |
21,119 | protected < T1 > void dispatchCommand ( final BiConsumer < P , T1 > action , final T1 routable1 ) { routingFor ( routable1 ) . routees ( ) . forEach ( routee -> routee . receiveCommand ( action , routable1 ) ) ; } | DISPATCHING - COMMANDS |
21,120 | public ResultT first ( ) { final CoreRemoteMongoCursor < ResultT > cursor = iterator ( ) ; if ( ! cursor . hasNext ( ) ) { return null ; } return cursor . next ( ) ; } | Helper to return the first item in the iterator or null . |
21,121 | public < U > CoreRemoteMongoIterable < U > map ( final Function < ResultT , U > mapper ) { return new CoreRemoteMappingIterable < > ( this , mapper ) ; } | Maps this iterable from the source document type to the target document type . |
21,122 | public < A extends Collection < ? super ResultT > > A into ( final A target ) { forEach ( new Block < ResultT > ( ) { public void apply ( final ResultT t ) { target . add ( t ) ; } } ) ; return target ; } | Iterates over all the documents adding each to the given target . |
21,123 | public AwsServiceClient withCodecRegistry ( final CodecRegistry codecRegistry ) { return new AwsServiceClientImpl ( proxy . withCodecRegistry ( codecRegistry ) , dispatcher ) ; } | Create a new AwsServiceClient instance with a different codec registry . |
21,124 | public void callFunction ( final String name , final List < ? > args , final Long requestTimeout ) { this . functionService . callFunction ( name , args , requestTimeout ) ; } | Calls the specified Stitch function . |
21,125 | public < T > T callFunction ( final String name , final List < ? > args , final Long requestTimeout , final Class < T > resultClass , final CodecRegistry codecRegistry ) { return this . functionService . withCodecRegistry ( codecRegistry ) . callFunction ( name , args , requestTimeout , resultClass ) ; } | Calls the specified Stitch function and decodes the response into an instance of the specified type . The response will be decoded using the codec registry given . |
21,126 | public void start ( ) { instanceLock . writeLock ( ) . lock ( ) ; try { for ( final Map . Entry < MongoNamespace , NamespaceChangeStreamListener > streamerEntry : nsStreamers . entrySet ( ) ) { streamerEntry . getValue ( ) . start ( ) ; } } finally { instanceLock . writeLock ( ) . unlock ( ) ; } } | Starts all streams . |
21,127 | public void stop ( ) { instanceLock . writeLock ( ) . lock ( ) ; try { for ( final NamespaceChangeStreamListener streamer : nsStreamers . values ( ) ) { streamer . stop ( ) ; } } finally { instanceLock . writeLock ( ) . unlock ( ) ; } } | Stops all streams . |
21,128 | public void addNamespace ( final MongoNamespace namespace ) { this . instanceLock . writeLock ( ) . lock ( ) ; try { if ( this . nsStreamers . containsKey ( namespace ) ) { return ; } final NamespaceChangeStreamListener streamer = new NamespaceChangeStreamListener ( namespace , instanceConfig . getNamespaceConfig ( namespace ) , service , networkMonitor , authMonitor , getLockForNamespace ( namespace ) ) ; this . nsStreamers . put ( namespace , streamer ) ; } finally { this . instanceLock . writeLock ( ) . unlock ( ) ; } } | Requests that the given namespace start being listened to for change events . |
21,129 | public void removeNamespace ( final MongoNamespace namespace ) { this . instanceLock . writeLock ( ) . lock ( ) ; try { if ( ! this . nsStreamers . containsKey ( namespace ) ) { return ; } final NamespaceChangeStreamListener streamer = this . nsStreamers . get ( namespace ) ; streamer . stop ( ) ; this . nsStreamers . remove ( namespace ) ; } finally { this . instanceLock . writeLock ( ) . unlock ( ) ; } } | Requests that the given namespace stop being listened to for change events . |
21,130 | public Map < BsonValue , ChangeEvent < BsonDocument > > getEventsForNamespace ( final MongoNamespace namespace ) { this . instanceLock . readLock ( ) . lock ( ) ; final NamespaceChangeStreamListener streamer ; try { streamer = nsStreamers . get ( namespace ) ; } finally { this . instanceLock . readLock ( ) . unlock ( ) ; } if ( streamer == null ) { return new HashMap < > ( ) ; } return streamer . getEvents ( ) ; } | Returns the latest change events for a given namespace . |
21,131 | public ChangeEvent < BsonDocument > getUnprocessedEventForDocumentId ( final MongoNamespace namespace , final BsonValue documentId ) { this . instanceLock . readLock ( ) . lock ( ) ; final NamespaceChangeStreamListener streamer ; try { streamer = nsStreamers . get ( namespace ) ; } finally { this . instanceLock . readLock ( ) . unlock ( ) ; } if ( streamer == null ) { return null ; } return streamer . getUnprocessedEventForDocumentId ( documentId ) ; } | If there is an unprocessed change event for a particular document ID fetch it from the appropriate namespace change stream listener and remove it . By reading the event here we are assuming it will be processed by the consumer . |
21,132 | void setPaused ( final boolean isPaused ) { docLock . writeLock ( ) . lock ( ) ; try { docsColl . updateOne ( getDocFilter ( namespace , documentId ) , new BsonDocument ( "$set" , new BsonDocument ( ConfigCodec . Fields . IS_PAUSED , new BsonBoolean ( isPaused ) ) ) ) ; this . isPaused = isPaused ; } catch ( IllegalStateException e ) { } finally { docLock . writeLock ( ) . unlock ( ) ; } } | A document that is paused no longer has remote updates applied to it . Any local updates to this document cause it to be thawed . An example of pausing a document is when a conflict is being resolved for that document and the handler throws an exception . |
21,133 | public void setSomePendingWritesAndSave ( final long atTime , final ChangeEvent < BsonDocument > changeEvent ) { docLock . writeLock ( ) . lock ( ) ; try { if ( isPaused ) { setPaused ( false ) ; setStale ( true ) ; } this . lastUncommittedChangeEvent = coalesceChangeEvents ( this . lastUncommittedChangeEvent , changeEvent ) ; this . lastResolution = atTime ; docsColl . replaceOne ( getDocFilter ( namespace , documentId ) , this ) ; } finally { docLock . writeLock ( ) . unlock ( ) ; } } | Sets that there are some pending writes that occurred at a time for an associated locally emitted change event . This variant maintains the last version set . |
21,134 | private static ChangeEvent < BsonDocument > coalesceChangeEvents ( final ChangeEvent < BsonDocument > lastUncommittedChangeEvent , final ChangeEvent < BsonDocument > newestChangeEvent ) { if ( lastUncommittedChangeEvent == null ) { return newestChangeEvent ; } switch ( lastUncommittedChangeEvent . getOperationType ( ) ) { case INSERT : switch ( newestChangeEvent . getOperationType ( ) ) { case REPLACE : case UPDATE : return new ChangeEvent < > ( newestChangeEvent . getId ( ) , OperationType . INSERT , newestChangeEvent . getFullDocument ( ) , newestChangeEvent . getNamespace ( ) , newestChangeEvent . getDocumentKey ( ) , null , newestChangeEvent . hasUncommittedWrites ( ) ) ; default : break ; } break ; case DELETE : switch ( newestChangeEvent . getOperationType ( ) ) { case INSERT : return new ChangeEvent < > ( newestChangeEvent . getId ( ) , OperationType . REPLACE , newestChangeEvent . getFullDocument ( ) , newestChangeEvent . getNamespace ( ) , newestChangeEvent . getDocumentKey ( ) , null , newestChangeEvent . hasUncommittedWrites ( ) ) ; default : break ; } break ; case UPDATE : switch ( newestChangeEvent . getOperationType ( ) ) { case UPDATE : return new ChangeEvent < > ( newestChangeEvent . getId ( ) , OperationType . UPDATE , newestChangeEvent . getFullDocument ( ) , newestChangeEvent . getNamespace ( ) , newestChangeEvent . getDocumentKey ( ) , lastUncommittedChangeEvent . getUpdateDescription ( ) != null ? lastUncommittedChangeEvent . getUpdateDescription ( ) . merge ( newestChangeEvent . getUpdateDescription ( ) ) : newestChangeEvent . getUpdateDescription ( ) , newestChangeEvent . hasUncommittedWrites ( ) ) ; case REPLACE : return new ChangeEvent < > ( newestChangeEvent . getId ( ) , OperationType . REPLACE , newestChangeEvent . getFullDocument ( ) , newestChangeEvent . getNamespace ( ) , newestChangeEvent . getDocumentKey ( ) , null , newestChangeEvent . 
hasUncommittedWrites ( ) ) ; default : break ; } break ; case REPLACE : switch ( newestChangeEvent . getOperationType ( ) ) { case UPDATE : return new ChangeEvent < > ( newestChangeEvent . getId ( ) , OperationType . REPLACE , newestChangeEvent . getFullDocument ( ) , newestChangeEvent . getNamespace ( ) , newestChangeEvent . getDocumentKey ( ) , null , newestChangeEvent . hasUncommittedWrites ( ) ) ; default : break ; } break ; default : break ; } return newestChangeEvent ; } | Possibly coalesces the newest change event to match the user s original intent . For example an unsynchronized insert and update is still an insert . |
21,135 | static DocumentVersionInfo getRemoteVersionInfo ( final BsonDocument remoteDocument ) { final BsonDocument version = getDocumentVersionDoc ( remoteDocument ) ; return new DocumentVersionInfo ( version , remoteDocument != null ? BsonUtils . getDocumentId ( remoteDocument ) : null ) ; } | Returns the current version info for a provided remote document . |
21,136 | static BsonDocument getFreshVersionDocument ( ) { final BsonDocument versionDoc = new BsonDocument ( ) ; versionDoc . append ( Fields . SYNC_PROTOCOL_VERSION_FIELD , new BsonInt32 ( 1 ) ) ; versionDoc . append ( Fields . INSTANCE_ID_FIELD , new BsonString ( UUID . randomUUID ( ) . toString ( ) ) ) ; versionDoc . append ( Fields . VERSION_COUNTER_FIELD , new BsonInt64 ( 0L ) ) ; return versionDoc ; } | Returns a BSON version document representing a new version with a new instance ID and version counter of zero . |
21,137 | static BsonDocument getDocumentVersionDoc ( final BsonDocument document ) { if ( document == null || ! document . containsKey ( DOCUMENT_VERSION_FIELD ) ) { return null ; } return document . getDocument ( DOCUMENT_VERSION_FIELD , null ) ; } | Returns the version document of the given document if any ; returns null otherwise . |
21,138 | static BsonDocument getVersionedFilter ( final BsonValue documentId , final BsonValue version ) { final BsonDocument filter = new BsonDocument ( "_id" , documentId ) ; if ( version == null ) { filter . put ( DOCUMENT_VERSION_FIELD , new BsonDocument ( "$exists" , BsonBoolean . FALSE ) ) ; } else { filter . put ( DOCUMENT_VERSION_FIELD , version ) ; } return filter ; } | Returns a query filter for the given document _id and version . The version is allowed to be null . The query will match only if there is either no version on the document in the database in question if we have no reference of the version or if the version matches the database s version . |
21,139 | BsonDocument getNextVersion ( ) { if ( ! this . hasVersion ( ) || this . getVersionDoc ( ) == null ) { return getFreshVersionDocument ( ) ; } final BsonDocument nextVersion = BsonUtils . copyOfDocument ( this . getVersionDoc ( ) ) ; nextVersion . put ( Fields . VERSION_COUNTER_FIELD , new BsonInt64 ( this . getVersion ( ) . getVersionCounter ( ) + 1 ) ) ; return nextVersion ; } | Given a DocumentVersionInfo returns a BSON document representing the next version . This means an incremented version count for a non - empty version or a fresh version document for an empty version . |
21,140 | public RemoteMongoCollection < Document > getCollection ( final String collectionName ) { return new RemoteMongoCollectionImpl < > ( proxy . getCollection ( collectionName ) , dispatcher ) ; } | Gets a collection . |
21,141 | static < T > StitchEvent < T > fromEvent ( final Event event , final Decoder < T > decoder ) { return new StitchEvent < > ( event . getEventName ( ) , event . getData ( ) , decoder ) ; } | Convert a SSE to a Stitch SSE |
21,142 | protected final Event processEvent ( ) throws IOException { while ( true ) { String line ; try { line = readLine ( ) ; } catch ( final EOFException ex ) { if ( doneOnce ) { throw ex ; } doneOnce = true ; line = "" ; } if ( line . isEmpty ( ) ) { if ( dataBuffer . length ( ) == 0 ) { eventName = "" ; continue ; } final Event . Builder eventBuilder = new Event . Builder ( ) ; eventBuilder . withEventName ( eventName . isEmpty ( ) ? Event . MESSAGE_EVENT : eventName ) ; eventBuilder . withData ( dataBuffer . toString ( ) ) ; dataBuffer = new StringBuilder ( ) ; eventName = "" ; return eventBuilder . build ( ) ; } else if ( line . startsWith ( ":" ) ) { } else if ( line . contains ( ":" ) ) { final int colonIdx = line . indexOf ( ":" ) ; final String field = line . substring ( 0 , colonIdx ) ; String value = line . substring ( colonIdx + 1 ) ; value = value . startsWith ( " " ) ? value . substring ( 1 ) : value ; processField ( field , value ) ; } else { processField ( line , "" ) ; } } } | Process the next event in a given stream . |
21,143 | private static String handleRichError ( final Response response , final String body ) { if ( ! response . getHeaders ( ) . containsKey ( Headers . CONTENT_TYPE ) || ! response . getHeaders ( ) . get ( Headers . CONTENT_TYPE ) . equals ( ContentTypes . APPLICATION_JSON ) ) { return body ; } final Document doc ; try { doc = BsonUtils . parseValue ( body , Document . class ) ; } catch ( Exception e ) { return body ; } if ( ! doc . containsKey ( Fields . ERROR ) ) { return body ; } final String errorMsg = doc . getString ( Fields . ERROR ) ; if ( ! doc . containsKey ( Fields . ERROR_CODE ) ) { return errorMsg ; } final String errorCode = doc . getString ( Fields . ERROR_CODE ) ; throw new StitchServiceException ( errorMsg , StitchServiceErrorCode . fromCodeName ( errorCode ) ) ; } | Private helper method which decodes the Stitch error from the body of an HTTP Response object . If the error is successfully decoded this function will throw the error for the end user to eventually consume . If the error cannot be decoded this is likely not an error from the Stitch server and this function will return an error message that the calling function should use as the message of a StitchServiceException with an unknown code . |
21,144 | public static void initialize ( final Context context ) { if ( ! initialized . compareAndSet ( false , true ) ) { return ; } applicationContext = context . getApplicationContext ( ) ; final String packageName = applicationContext . getPackageName ( ) ; localAppName = packageName ; final PackageManager manager = applicationContext . getPackageManager ( ) ; try { final PackageInfo pkgInfo = manager . getPackageInfo ( packageName , 0 ) ; localAppVersion = pkgInfo . versionName ; } catch ( final NameNotFoundException e ) { Log . d ( TAG , "Failed to get version of application, will not send in device info." ) ; } Log . d ( TAG , "Initialized android SDK" ) ; } | Initializes the Stitch SDK so that app clients can be created . |
21,145 | public static StitchAppClient getAppClient ( final String clientAppId ) { ensureInitialized ( ) ; synchronized ( Stitch . class ) { if ( ! appClients . containsKey ( clientAppId ) ) { throw new IllegalStateException ( String . format ( "client for app '%s' has not yet been initialized" , clientAppId ) ) ; } return appClients . get ( clientAppId ) ; } } | Gets an app client by its client app id if it has been initialized ; throws if none can be found . |
21,146 | public void start ( ) { nsLock . writeLock ( ) . lock ( ) ; try { if ( runnerThread != null ) { return ; } runnerThread = new Thread ( new NamespaceChangeStreamRunner ( new WeakReference < > ( this ) , networkMonitor , logger ) ) ; runnerThread . start ( ) ; } finally { nsLock . writeLock ( ) . unlock ( ) ; } } | Opens the stream in a background thread . |
21,147 | public void stop ( ) { if ( runnerThread == null ) { return ; } runnerThread . interrupt ( ) ; nsLock . writeLock ( ) . lock ( ) ; try { if ( runnerThread == null ) { return ; } this . cancel ( ) ; this . close ( ) ; while ( runnerThread . isAlive ( ) ) { runnerThread . interrupt ( ) ; try { runnerThread . join ( 1000 ) ; } catch ( final Exception e ) { e . printStackTrace ( ) ; return ; } } runnerThread = null ; } catch ( Exception e ) { e . printStackTrace ( ) ; } finally { nsLock . writeLock ( ) . unlock ( ) ; } } | Stops the background stream thread . |
21,148 | boolean openStream ( ) throws InterruptedException , IOException { logger . info ( "stream START" ) ; final boolean isOpen ; final Set < BsonValue > idsToWatch = nsConfig . getSynchronizedDocumentIds ( ) ; if ( ! networkMonitor . isConnected ( ) ) { logger . info ( "stream END - Network disconnected" ) ; return false ; } if ( idsToWatch . isEmpty ( ) ) { logger . info ( "stream END - No synchronized documents" ) ; return false ; } nsLock . writeLock ( ) . lockInterruptibly ( ) ; try { if ( ! authMonitor . isLoggedIn ( ) ) { logger . info ( "stream END - Logged out" ) ; return false ; } final Document args = new Document ( ) ; args . put ( "database" , namespace . getDatabaseName ( ) ) ; args . put ( "collection" , namespace . getCollectionName ( ) ) ; args . put ( "ids" , idsToWatch ) ; currentStream = service . streamFunction ( "watch" , Collections . singletonList ( args ) , ResultDecoders . changeEventDecoder ( BSON_DOCUMENT_CODEC ) ) ; if ( currentStream != null && currentStream . isOpen ( ) ) { this . nsConfig . setStale ( true ) ; isOpen = true ; } else { isOpen = false ; } } finally { nsLock . writeLock ( ) . unlock ( ) ; } return isOpen ; } | Open the event stream |
21,149 | @ SuppressWarnings ( "unchecked" ) public Map < BsonValue , ChangeEvent < BsonDocument > > getEvents ( ) { nsLock . readLock ( ) . lock ( ) ; final Map < BsonValue , ChangeEvent < BsonDocument > > events ; try { events = new HashMap < > ( this . events ) ; } finally { nsLock . readLock ( ) . unlock ( ) ; } nsLock . writeLock ( ) . lock ( ) ; try { this . events . clear ( ) ; return events ; } finally { nsLock . writeLock ( ) . unlock ( ) ; } } | Returns the latest change events and clears them from the change stream listener . |
21,150 | public ChangeEvent < BsonDocument > getUnprocessedEventForDocumentId ( final BsonValue documentId ) { final ChangeEvent < BsonDocument > event ; nsLock . readLock ( ) . lock ( ) ; try { event = this . events . get ( documentId ) ; } finally { nsLock . readLock ( ) . unlock ( ) ; } nsLock . writeLock ( ) . lock ( ) ; try { this . events . remove ( documentId ) ; return event ; } finally { nsLock . writeLock ( ) . unlock ( ) ; } } | If there is an unprocessed change event for a particular document ID fetch it from the change stream listener and remove it . By reading the event here we are assuming it will be processed by the consumer . |
21,151 | public static long hash ( final BsonDocument doc ) { if ( doc == null ) { return 0L ; } final byte [ ] docBytes = toBytes ( doc ) ; long hashValue = FNV_64BIT_OFFSET_BASIS ; for ( int offset = 0 ; offset < docBytes . length ; offset ++ ) { hashValue ^= ( 0xFF & docBytes [ offset ] ) ; hashValue *= FNV_64BIT_PRIME ; } return hashValue ; } | Implementation of FNV - 1a hash algorithm . |
21,152 | public static boolean deleteDatabase ( final StitchAppClientInfo appInfo , final String serviceName , final EmbeddedMongoClientFactory clientFactory , final String userId ) { final String dataDir = appInfo . getDataDirectory ( ) ; if ( dataDir == null ) { throw new IllegalArgumentException ( "StitchAppClient not configured with a data directory" ) ; } final String instanceKey = String . format ( "%s-%s_sync_%s_%s" , appInfo . getClientAppId ( ) , dataDir , serviceName , userId ) ; final String dbPath = String . format ( "%s/%s/sync_mongodb_%s/%s/0/" , dataDir , appInfo . getClientAppId ( ) , serviceName , userId ) ; final MongoClient client = clientFactory . getClient ( instanceKey , dbPath , appInfo . getCodecRegistry ( ) ) ; for ( final String listDatabaseName : client . listDatabaseNames ( ) ) { try { client . getDatabase ( listDatabaseName ) . drop ( ) ; } catch ( Exception e ) { } } client . close ( ) ; clientFactory . removeClient ( instanceKey ) ; return new File ( dbPath ) . delete ( ) ; } | Delete a database for a given path and userId . |
21,153 | public Task < Long > count ( ) { return dispatcher . dispatchTask ( new Callable < Long > ( ) { public Long call ( ) { return proxy . count ( ) ; } } ) ; } | Counts the number of documents in the collection . |
21,154 | public Task < RemoteInsertOneResult > insertOne ( final DocumentT document ) { return dispatcher . dispatchTask ( new Callable < RemoteInsertOneResult > ( ) { public RemoteInsertOneResult call ( ) { return proxy . insertOne ( document ) ; } } ) ; } | Inserts the provided document . If the document is missing an identifier the client should generate one . |
21,155 | public Task < Void > registerWithEmail ( final String email , final String password ) { return dispatcher . dispatchTask ( new Callable < Void > ( ) { public Void call ( ) { registerWithEmailInternal ( email , password ) ; return null ; } } ) ; } | Registers a new user with the given email and password . |
21,156 | public Task < Void > confirmUser ( final String token , final String tokenId ) { return dispatcher . dispatchTask ( new Callable < Void > ( ) { public Void call ( ) { confirmUserInternal ( token , tokenId ) ; return null ; } } ) ; } | Confirms a user with the given token and token id . |
21,157 | public Task < Void > resendConfirmationEmail ( final String email ) { return dispatcher . dispatchTask ( new Callable < Void > ( ) { public Void call ( ) { resendConfirmationEmailInternal ( email ) ; return null ; } } ) ; } | Resend the confirmation for a user to the given email . |
21,158 | public Task < Void > sendResetPasswordEmail ( final String email ) { return dispatcher . dispatchTask ( new Callable < Void > ( ) { public Void call ( ) { sendResetPasswordEmailInternal ( email ) ; return null ; } } ) ; } | Sends a user a password reset email for the given email . |
21,159 | public boolean commit ( ) { final CoreRemoteMongoCollection < DocumentT > collection = getCollection ( ) ; final List < WriteModel < DocumentT > > writeModels = getBulkWriteModels ( ) ; boolean success = true ; for ( final WriteModel < DocumentT > write : writeModels ) { if ( write instanceof ReplaceOneModel ) { final ReplaceOneModel < DocumentT > replaceModel = ( ( ReplaceOneModel ) write ) ; final RemoteUpdateResult result = collection . updateOne ( replaceModel . getFilter ( ) , ( Bson ) replaceModel . getReplacement ( ) ) ; success = success && ( result != null && result . getModifiedCount ( ) == result . getMatchedCount ( ) ) ; } else if ( write instanceof UpdateOneModel ) { final UpdateOneModel < DocumentT > updateModel = ( ( UpdateOneModel ) write ) ; final RemoteUpdateResult result = collection . updateOne ( updateModel . getFilter ( ) , updateModel . getUpdate ( ) ) ; success = success && ( result != null && result . getModifiedCount ( ) == result . getMatchedCount ( ) ) ; } else if ( write instanceof UpdateManyModel ) { final UpdateManyModel < DocumentT > updateModel = ( ( UpdateManyModel ) write ) ; final RemoteUpdateResult result = collection . updateMany ( updateModel . getFilter ( ) , updateModel . getUpdate ( ) ) ; success = success && ( result != null && result . getModifiedCount ( ) == result . getMatchedCount ( ) ) ; } } return success ; } | Commits the writes to the remote collection . |
21,160 | public Response doRequest ( final StitchRequest stitchReq ) { initAppMetadata ( clientAppId ) ; return super . doRequestUrl ( stitchReq , getHostname ( ) ) ; } | Performs a request against a Stitch app server determined by the deployment model of the underlying app . Throws a Stitch specific exception if the request fails . |
21,161 | public EventStream doStreamRequest ( final StitchRequest stitchReq ) { initAppMetadata ( clientAppId ) ; return super . doStreamRequestUrl ( stitchReq , getHostname ( ) ) ; } | Performs a streaming request against a Stitch app server determined by the deployment model of the underlying app . Throws a Stitch specific exception if the request fails . |
21,162 | public BsonDocument toUpdateDocument ( ) { final List < BsonElement > unsets = new ArrayList < > ( ) ; for ( final String removedField : this . removedFields ) { unsets . add ( new BsonElement ( removedField , new BsonBoolean ( true ) ) ) ; } final BsonDocument updateDocument = new BsonDocument ( ) ; if ( this . updatedFields . size ( ) > 0 ) { updateDocument . append ( "$set" , this . updatedFields ) ; } if ( unsets . size ( ) > 0 ) { updateDocument . append ( "$unset" , new BsonDocument ( unsets ) ) ; } return updateDocument ; } | Convert this update description to an update document . |
21,163 | public BsonDocument toBsonDocument ( ) { final BsonDocument updateDescDoc = new BsonDocument ( ) ; updateDescDoc . put ( Fields . UPDATED_FIELDS_FIELD , this . getUpdatedFields ( ) ) ; final BsonArray removedFields = new BsonArray ( ) ; for ( final String field : this . getRemovedFields ( ) ) { removedFields . add ( new BsonString ( field ) ) ; } updateDescDoc . put ( Fields . REMOVED_FIELDS_FIELD , removedFields ) ; return updateDescDoc ; } | Converts this update description to its document representation as it would appear in a MongoDB Change Event . |
21,164 | public static UpdateDescription fromBsonDocument ( final BsonDocument document ) { keyPresent ( Fields . UPDATED_FIELDS_FIELD , document ) ; keyPresent ( Fields . REMOVED_FIELDS_FIELD , document ) ; final BsonArray removedFieldsArr = document . getArray ( Fields . REMOVED_FIELDS_FIELD ) ; final Set < String > removedFields = new HashSet < > ( removedFieldsArr . size ( ) ) ; for ( final BsonValue field : removedFieldsArr ) { removedFields . add ( field . asString ( ) . getValue ( ) ) ; } return new UpdateDescription ( document . getDocument ( Fields . UPDATED_FIELDS_FIELD ) , removedFields ) ; } | Converts an update description BSON document from a MongoDB Change Event into an UpdateDescription object . |
21,165 | public UpdateDescription merge ( final UpdateDescription otherDescription ) { if ( otherDescription != null ) { for ( final Map . Entry < String , BsonValue > entry : this . updatedFields . entrySet ( ) ) { if ( otherDescription . removedFields . contains ( entry . getKey ( ) ) ) { this . updatedFields . remove ( entry . getKey ( ) ) ; } } for ( final String removedField : this . removedFields ) { if ( otherDescription . updatedFields . containsKey ( removedField ) ) { this . removedFields . remove ( removedField ) ; } } this . removedFields . addAll ( otherDescription . removedFields ) ; this . updatedFields . putAll ( otherDescription . updatedFields ) ; } return this ; } | Unilaterally merge an update description into this update description . |
21,166 | @ SuppressWarnings ( "unchecked" ) public Stream < ChangeEvent < DocumentT > > watch ( final BsonValue ... ids ) throws InterruptedException , IOException { return operations . watch ( new HashSet < > ( Arrays . asList ( ids ) ) , false , documentClass ) . execute ( service ) ; } | Watches specified IDs in a collection . |
21,167 | public Task < Void > updateSyncFrequency ( final SyncFrequency syncFrequency ) { return this . dispatcher . dispatchTask ( new Callable < Void > ( ) { public Void call ( ) throws Exception { SyncImpl . this . proxy . updateSyncFrequency ( syncFrequency ) ; return null ; } } ) ; } | Sets the SyncFrequency on this collection . |
21,168 | public void emitEvent ( final NamespaceSynchronizationConfig nsConfig , final ChangeEvent < BsonDocument > event ) { listenersLock . lock ( ) ; try { if ( nsConfig . getNamespaceListenerConfig ( ) == null ) { return ; } final NamespaceListenerConfig namespaceListener = nsConfig . getNamespaceListenerConfig ( ) ; eventDispatcher . dispatch ( ( ) -> { try { if ( namespaceListener . getEventListener ( ) != null ) { namespaceListener . getEventListener ( ) . onEvent ( BsonUtils . getDocumentId ( event . getDocumentKey ( ) ) , ChangeEvents . transformChangeEventForUser ( event , namespaceListener . getDocumentCodec ( ) ) ) ; } } catch ( final Exception ex ) { logger . error ( String . format ( Locale . US , "emitEvent ns=%s documentId=%s emit exception: %s" , event . getNamespace ( ) , BsonUtils . getDocumentId ( event . getDocumentKey ( ) ) , ex ) , ex ) ; } return null ; } ) ; } finally { listenersLock . unlock ( ) ; } } | Emits a change event for the given document id . |
21,169 | static ChangeEvent < BsonDocument > changeEventForLocalInsert ( final MongoNamespace namespace , final BsonDocument document , final boolean writePending ) { final BsonValue docId = BsonUtils . getDocumentId ( document ) ; return new ChangeEvent < > ( new BsonDocument ( ) , OperationType . INSERT , document , namespace , new BsonDocument ( "_id" , docId ) , null , writePending ) ; } | Generates a change event for a local insert of the given document in the given namespace . |
21,170 | static ChangeEvent < BsonDocument > changeEventForLocalUpdate ( final MongoNamespace namespace , final BsonValue documentId , final UpdateDescription update , final BsonDocument fullDocumentAfterUpdate , final boolean writePending ) { return new ChangeEvent < > ( new BsonDocument ( ) , OperationType . UPDATE , fullDocumentAfterUpdate , namespace , new BsonDocument ( "_id" , documentId ) , update , writePending ) ; } | Generates a change event for a local update of a document in the given namespace referring to the given document _id . |
21,171 | static ChangeEvent < BsonDocument > changeEventForLocalReplace ( final MongoNamespace namespace , final BsonValue documentId , final BsonDocument document , final boolean writePending ) { return new ChangeEvent < > ( new BsonDocument ( ) , OperationType . REPLACE , document , namespace , new BsonDocument ( "_id" , documentId ) , null , writePending ) ; } | Generates a change event for a local replacement of a document in the given namespace referring to the given document _id . |
21,172 | static ChangeEvent < BsonDocument > changeEventForLocalDelete ( final MongoNamespace namespace , final BsonValue documentId , final boolean writePending ) { return new ChangeEvent < > ( new BsonDocument ( ) , OperationType . DELETE , null , namespace , new BsonDocument ( "_id" , documentId ) , null , writePending ) ; } | Generates a change event for a local deletion of a document in the given namespace referring to the given document _id . |
21,173 | public static < T > void notNull ( final String name , final T value ) { if ( value == null ) { throw new IllegalArgumentException ( name + " can not be null" ) ; } } | Throw IllegalArgumentException if the value is null . |
21,174 | public static void keyPresent ( final String key , final Map < String , ? > map ) { if ( ! map . containsKey ( key ) ) { throw new IllegalStateException ( String . format ( "expected %s to be present" , key ) ) ; } } | Throw IllegalStateException if key is not present in map . |
21,175 | public static BsonDocument copyOfDocument ( final BsonDocument document ) { final BsonDocument newDocument = new BsonDocument ( ) ; for ( final Map . Entry < String , BsonValue > kv : document . entrySet ( ) ) { newDocument . put ( kv . getKey ( ) , kv . getValue ( ) ) ; } return newDocument ; } | Returns a copy of the given document . |
21,176 | public static < T > ConflictHandler < T > localWins ( ) { return new ConflictHandler < T > ( ) { public T resolveConflict ( final BsonValue documentId , final ChangeEvent < T > localEvent , final ChangeEvent < T > remoteEvent ) { return localEvent . getFullDocument ( ) ; } } ; } | The local event will decide the next state of the document in question . |
21,177 | public static boolean isTodoItem ( final Document todoItemDoc ) { return todoItemDoc . containsKey ( ID_KEY ) && todoItemDoc . containsKey ( TASK_KEY ) && todoItemDoc . containsKey ( CHECKED_KEY ) ; } | Returns whether a MongoDB document is a todo item .
21,178 | void recover ( ) { final List < NamespaceSynchronizationConfig > nsConfigs = new ArrayList < > ( ) ; for ( final MongoNamespace ns : this . syncConfig . getSynchronizedNamespaces ( ) ) { nsConfigs . add ( this . syncConfig . getNamespaceConfig ( ns ) ) ; } for ( final NamespaceSynchronizationConfig nsConfig : nsConfigs ) { nsConfig . getLock ( ) . writeLock ( ) . lock ( ) ; } try { for ( final NamespaceSynchronizationConfig nsConfig : nsConfigs ) { nsConfig . getLock ( ) . writeLock ( ) . lock ( ) ; try { recoverNamespace ( nsConfig ) ; } finally { nsConfig . getLock ( ) . writeLock ( ) . unlock ( ) ; } } } finally { for ( final NamespaceSynchronizationConfig nsConfig : nsConfigs ) { nsConfig . getLock ( ) . writeLock ( ) . unlock ( ) ; } } } | Recovers the state of synchronization in case a system failure happened . The goal is to revert to a known good state . |
21,179 | private void recoverNamespace ( final NamespaceSynchronizationConfig nsConfig ) { final MongoCollection < BsonDocument > undoCollection = getUndoCollection ( nsConfig . getNamespace ( ) ) ; final MongoCollection < BsonDocument > localCollection = getLocalCollection ( nsConfig . getNamespace ( ) ) ; final List < BsonDocument > undoDocs = undoCollection . find ( ) . into ( new ArrayList < > ( ) ) ; final Set < BsonValue > recoveredIds = new HashSet < > ( ) ; for ( final BsonDocument undoDoc : undoDocs ) { final BsonValue documentId = BsonUtils . getDocumentId ( undoDoc ) ; final BsonDocument filter = getDocumentIdFilter ( documentId ) ; localCollection . findOneAndReplace ( filter , undoDoc , new FindOneAndReplaceOptions ( ) . upsert ( true ) ) ; recoveredIds . add ( documentId ) ; } for ( final CoreDocumentSynchronizationConfig docConfig : nsConfig . getSynchronizedDocuments ( ) ) { final BsonValue documentId = docConfig . getDocumentId ( ) ; final BsonDocument filter = getDocumentIdFilter ( documentId ) ; if ( recoveredIds . contains ( docConfig . getDocumentId ( ) ) ) { final ChangeEvent < BsonDocument > pendingWrite = docConfig . getLastUncommittedChangeEvent ( ) ; if ( pendingWrite != null ) { switch ( pendingWrite . getOperationType ( ) ) { case INSERT : case UPDATE : case REPLACE : localCollection . findOneAndReplace ( filter , pendingWrite . getFullDocument ( ) , new FindOneAndReplaceOptions ( ) . upsert ( true ) ) ; break ; case DELETE : localCollection . deleteOne ( filter ) ; break ; default : throw new IllegalStateException ( "there should not be a pending write with an unknown event type" ) ; } } } } for ( final BsonValue recoveredId : recoveredIds ) { undoCollection . deleteOne ( getDocumentIdFilter ( recoveredId ) ) ; } localCollection . deleteMany ( new BsonDocument ( "_id" , new BsonDocument ( "$nin" , new BsonArray ( new ArrayList < > ( this . syncConfig . getSynchronizedDocumentIds ( nsConfig . 
getNamespace ( ) ) ) ) ) ) ) ; } | Recovers the state of synchronization for a namespace in case a system failure happened . The goal is to revert the namespace to a known good state . This method itself is resilient to failures since it doesn t delete any documents from the undo collection until the collection is in the desired state with respect to those documents . |
21,180 | public void wipeInMemorySettings ( ) { this . waitUntilInitialized ( ) ; syncLock . lock ( ) ; try { this . instanceChangeStreamListener . stop ( ) ; if ( instancesColl . find ( ) . first ( ) == null ) { throw new IllegalStateException ( "expected to find instance configuration" ) ; } this . syncConfig = new InstanceSynchronizationConfig ( configDb ) ; this . instanceChangeStreamListener = new InstanceChangeStreamListenerImpl ( syncConfig , service , networkMonitor , authMonitor ) ; this . isConfigured = false ; this . stop ( ) ; } finally { syncLock . unlock ( ) ; } } | Reloads the synchronization config . This wipes all in - memory synchronization settings . |
21,181 | public void start ( ) { syncLock . lock ( ) ; try { if ( ! this . isConfigured ) { return ; } instanceChangeStreamListener . stop ( ) ; if ( listenersEnabled ) { instanceChangeStreamListener . start ( ) ; } if ( syncThread == null ) { syncThread = new Thread ( new DataSynchronizerRunner ( new WeakReference < > ( this ) , networkMonitor , logger ) , "dataSynchronizerRunnerThread" ) ; } if ( syncThreadEnabled && ! isRunning ) { syncThread . start ( ) ; isRunning = true ; } } finally { syncLock . unlock ( ) ; } } | Starts data synchronization in a background thread . |
21,182 | public void stop ( ) { syncLock . lock ( ) ; try { if ( syncThread == null ) { return ; } instanceChangeStreamListener . stop ( ) ; syncThread . interrupt ( ) ; try { syncThread . join ( ) ; } catch ( final InterruptedException e ) { return ; } syncThread = null ; isRunning = false ; } finally { syncLock . unlock ( ) ; } } | Stops the background data synchronization thread . |
21,183 | public void close ( ) { this . waitUntilInitialized ( ) ; this . ongoingOperationsGroup . blockAndWait ( ) ; syncLock . lock ( ) ; try { if ( this . networkMonitor != null ) { this . networkMonitor . removeNetworkStateListener ( this ) ; } this . dispatcher . close ( ) ; stop ( ) ; this . localClient . close ( ) ; } finally { syncLock . unlock ( ) ; } } | Stops the background data synchronization thread and releases the local client . |
21,184 | public boolean doSyncPass ( ) { if ( ! this . isConfigured || ! syncLock . tryLock ( ) ) { return false ; } try { if ( logicalT == Long . MAX_VALUE ) { if ( logger . isInfoEnabled ( ) ) { logger . info ( "reached max logical time; resetting back to 0" ) ; } logicalT = 0 ; } logicalT ++ ; if ( logger . isInfoEnabled ( ) ) { logger . info ( String . format ( Locale . US , "t='%d': doSyncPass START" , logicalT ) ) ; } if ( networkMonitor == null || ! networkMonitor . isConnected ( ) ) { if ( logger . isInfoEnabled ( ) ) { logger . info ( String . format ( Locale . US , "t='%d': doSyncPass END - Network disconnected" , logicalT ) ) ; } return false ; } if ( authMonitor == null || ! authMonitor . tryIsLoggedIn ( ) ) { if ( logger . isInfoEnabled ( ) ) { logger . info ( String . format ( Locale . US , "t='%d': doSyncPass END - Logged out" , logicalT ) ) ; } return false ; } syncRemoteToLocal ( ) ; syncLocalToRemote ( ) ; if ( logger . isInfoEnabled ( ) ) { logger . info ( String . format ( Locale . US , "t='%d': doSyncPass END" , logicalT ) ) ; } } catch ( InterruptedException e ) { if ( logger . isInfoEnabled ( ) ) { logger . info ( String . format ( Locale . US , "t='%d': doSyncPass INTERRUPTED" , logicalT ) ) ; } return false ; } finally { syncLock . unlock ( ) ; } return true ; } | Performs a single synchronization pass in both the local and remote directions ; the order of which does not matter . If switching the order produces different results after one pass then there is a bug . |
21,185 | private LocalSyncWriteModelContainer resolveConflict ( final NamespaceSynchronizationConfig nsConfig , final CoreDocumentSynchronizationConfig docConfig , final ChangeEvent < BsonDocument > remoteEvent ) { return resolveConflict ( nsConfig , docConfig , docConfig . getLastUncommittedChangeEvent ( ) , remoteEvent ) ; } | Resolves a conflict between a synchronized document s local and remote state . The resolution will result in either the document being desynchronized or being replaced with some resolved state based on the conflict resolver specified for the document . Uses the last uncommitted local event as the local state . |
21,186 | @ SuppressWarnings ( "unchecked" ) private static Object resolveConflictWithResolver ( final ConflictHandler conflictResolver , final BsonValue documentId , final ChangeEvent localEvent , final ChangeEvent remoteEvent ) { return conflictResolver . resolveConflict ( documentId , localEvent , remoteEvent ) ; } | Returns the resolution of resolving the conflict between a local and remote event using the given conflict resolver . |
21,187 | public void addWatcher ( final MongoNamespace namespace , final Callback < ChangeEvent < BsonDocument > , Object > watcher ) { instanceChangeStreamListener . addWatcher ( namespace , watcher ) ; } | Queues up a callback to be removed and invoked on the next change event . |
21,188 | public Set < CoreDocumentSynchronizationConfig > getSynchronizedDocuments ( final MongoNamespace namespace ) { this . waitUntilInitialized ( ) ; try { ongoingOperationsGroup . enter ( ) ; return this . syncConfig . getSynchronizedDocuments ( namespace ) ; } finally { ongoingOperationsGroup . exit ( ) ; } } | Returns the set of synchronized documents in a namespace . |
21,189 | public Set < BsonValue > getPausedDocumentIds ( final MongoNamespace namespace ) { this . waitUntilInitialized ( ) ; try { ongoingOperationsGroup . enter ( ) ; final Set < BsonValue > pausedDocumentIds = new HashSet < > ( ) ; for ( final CoreDocumentSynchronizationConfig config : this . syncConfig . getSynchronizedDocuments ( namespace ) ) { if ( config . isPaused ( ) ) { pausedDocumentIds . add ( config . getDocumentId ( ) ) ; } } return pausedDocumentIds ; } finally { ongoingOperationsGroup . exit ( ) ; } } | Return the set of synchronized document _ids in a namespace that have been paused due to an irrecoverable error . |
21,190 | boolean resumeSyncForDocument ( final MongoNamespace namespace , final BsonValue documentId ) { if ( namespace == null || documentId == null ) { return false ; } final NamespaceSynchronizationConfig namespaceSynchronizationConfig ; final CoreDocumentSynchronizationConfig config ; if ( ( namespaceSynchronizationConfig = syncConfig . getNamespaceConfig ( namespace ) ) == null || ( config = namespaceSynchronizationConfig . getSynchronizedDocument ( documentId ) ) == null ) { return false ; } config . setPaused ( false ) ; return ! config . isPaused ( ) ; } | A document that is paused no longer has remote updates applied to it . Any local updates to this document cause it to be resumed . An example of pausing a document is when a conflict is being resolved for that document and the handler throws an exception . |
21,191 | void insertOne ( final MongoNamespace namespace , final BsonDocument document ) { this . waitUntilInitialized ( ) ; try { ongoingOperationsGroup . enter ( ) ; final BsonDocument docForStorage = sanitizeDocument ( document ) ; final NamespaceSynchronizationConfig nsConfig = this . syncConfig . getNamespaceConfig ( namespace ) ; final Lock lock = nsConfig . getLock ( ) . writeLock ( ) ; lock . lock ( ) ; final ChangeEvent < BsonDocument > event ; final BsonValue documentId ; try { getLocalCollection ( namespace ) . insertOne ( docForStorage ) ; documentId = BsonUtils . getDocumentId ( docForStorage ) ; event = ChangeEvents . changeEventForLocalInsert ( namespace , docForStorage , true ) ; final CoreDocumentSynchronizationConfig config = syncConfig . addAndGetSynchronizedDocument ( namespace , documentId ) ; config . setSomePendingWritesAndSave ( logicalT , event ) ; } finally { lock . unlock ( ) ; } checkAndInsertNamespaceListener ( namespace ) ; eventDispatcher . emitEvent ( nsConfig , event ) ; } finally { ongoingOperationsGroup . exit ( ) ; } } | Inserts a single document locally and begins to synchronize it based on its _id . Inserting a document with the same _id twice will result in a duplicate key exception .
21,192 | DeleteResult deleteMany ( final MongoNamespace namespace , final Bson filter ) { this . waitUntilInitialized ( ) ; try { ongoingOperationsGroup . enter ( ) ; final List < ChangeEvent < BsonDocument > > eventsToEmit = new ArrayList < > ( ) ; final DeleteResult result ; final NamespaceSynchronizationConfig nsConfig = this . syncConfig . getNamespaceConfig ( namespace ) ; final Lock lock = nsConfig . getLock ( ) . writeLock ( ) ; lock . lock ( ) ; try { final MongoCollection < BsonDocument > localCollection = getLocalCollection ( namespace ) ; final MongoCollection < BsonDocument > undoCollection = getUndoCollection ( namespace ) ; final Set < BsonValue > idsToDelete = localCollection . find ( filter ) . map ( new Function < BsonDocument , BsonValue > ( ) { public BsonValue apply ( final BsonDocument bsonDocument ) { undoCollection . insertOne ( bsonDocument ) ; return BsonUtils . getDocumentId ( bsonDocument ) ; } } ) . into ( new HashSet < > ( ) ) ; result = localCollection . deleteMany ( filter ) ; for ( final BsonValue documentId : idsToDelete ) { final CoreDocumentSynchronizationConfig config = syncConfig . getSynchronizedDocument ( namespace , documentId ) ; if ( config == null ) { continue ; } final ChangeEvent < BsonDocument > event = ChangeEvents . changeEventForLocalDelete ( namespace , documentId , true ) ; if ( config . getLastUncommittedChangeEvent ( ) != null && config . getLastUncommittedChangeEvent ( ) . getOperationType ( ) == OperationType . INSERT ) { desyncDocumentsFromRemote ( nsConfig , config . getDocumentId ( ) ) . commitAndClear ( ) ; undoCollection . deleteOne ( getDocumentIdFilter ( documentId ) ) ; continue ; } config . setSomePendingWritesAndSave ( logicalT , event ) ; undoCollection . deleteOne ( getDocumentIdFilter ( documentId ) ) ; eventsToEmit . add ( event ) ; } checkAndDeleteNamespaceListener ( namespace ) ; } finally { lock . unlock ( ) ; } for ( final ChangeEvent < BsonDocument > event : eventsToEmit ) { eventDispatcher . 
emitEvent ( nsConfig , event ) ; } return result ; } finally { ongoingOperationsGroup . exit ( ) ; } } | Removes all documents from the collection that match the given query filter . If no documents match the collection is not modified . |
21,193 | MongoCollection < BsonDocument > getUndoCollection ( final MongoNamespace namespace ) { return localClient . getDatabase ( String . format ( "sync_undo_%s" , namespace . getDatabaseName ( ) ) ) . getCollection ( namespace . getCollectionName ( ) , BsonDocument . class ) . withCodecRegistry ( MongoClientSettings . getDefaultCodecRegistry ( ) ) ; } | Returns the undo collection representing the given namespace for recording documents that may need to be reverted after a system failure . |
21,194 | private < T > MongoCollection < T > getLocalCollection ( final MongoNamespace namespace , final Class < T > resultClass , final CodecRegistry codecRegistry ) { return localClient . getDatabase ( String . format ( "sync_user_%s" , namespace . getDatabaseName ( ) ) ) . getCollection ( namespace . getCollectionName ( ) , resultClass ) . withCodecRegistry ( codecRegistry ) ; } | Returns the local collection representing the given namespace . |
21,195 | MongoCollection < BsonDocument > getLocalCollection ( final MongoNamespace namespace ) { return getLocalCollection ( namespace , BsonDocument . class , MongoClientSettings . getDefaultCodecRegistry ( ) ) ; } | Returns the local collection representing the given namespace for raw document operations . |
21,196 | private < T > CoreRemoteMongoCollection < T > getRemoteCollection ( final MongoNamespace namespace , final Class < T > resultClass ) { return remoteClient . getDatabase ( namespace . getDatabaseName ( ) ) . getCollection ( namespace . getCollectionName ( ) , resultClass ) ; } | Returns the remote collection representing the given namespace . |
21,197 | static BsonDocument sanitizeDocument ( final BsonDocument document ) { if ( document == null ) { return null ; } if ( document . containsKey ( DOCUMENT_VERSION_FIELD ) ) { final BsonDocument clonedDoc = document . clone ( ) ; clonedDoc . remove ( DOCUMENT_VERSION_FIELD ) ; return clonedDoc ; } return document ; } | Given a BSON document remove any forbidden fields and return the document . If no changes are made the original document reference is returned . If changes are made a cloned copy of the document with the changes will be returned . |
21,198 | private static BsonDocument withNewVersion ( final BsonDocument document , final BsonDocument newVersion ) { final BsonDocument newDocument = BsonUtils . copyOfDocument ( document ) ; newDocument . put ( DOCUMENT_VERSION_FIELD , newVersion ) ; return newDocument ; } | Adds and returns a document with a new version to the given document . |
21,199 | public static void clearallLocalDBs ( ) { for ( final Map . Entry < MongoClient , Boolean > entry : localInstances . entrySet ( ) ) { for ( final String dbName : entry . getKey ( ) . listDatabaseNames ( ) ) { entry . getKey ( ) . getDatabase ( dbName ) . drop ( ) ; } } } | Helper function that drops all local databases for every client . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.