idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
17,700
/** Set system-defined properties (created-on / modified-on timestamps) for a new tenant. */
private void addTenantProperties(TenantDefinition tenantDef) {
    // System.currentTimeMillis() avoids allocating a throwaway java.util.Date, and one
    // local avoids re-reading the property we just set.
    String now = Utils.formatDate(System.currentTimeMillis());
    tenantDef.setProperty(CREATED_ON_PROP, now);
    // A brand-new tenant's modified-on timestamp equals its created-on timestamp.
    tenantDef.setProperty(MODIFIED_ON_PROP, now);
}
Set system - defined properties for a new tenant .
78
10
17,701
/**
 * Ensure every user in the given tenant definition has a password, then replace each
 * plaintext password with its hash and clear the plaintext copy.
 */
private void validateTenantUsers(TenantDefinition tenantDef) {
    for (UserDefinition userDef : tenantDef.getUsers().values()) {
        String plaintext = userDef.getPassword();
        Utils.require(!Utils.isEmpty(plaintext), "Password is required; user ID=" + userDef.getID());
        userDef.setHash(PasswordManager.hash(plaintext));
        userDef.setPassword(null);  // never retain the plaintext password
    }
}
hashed format .
105
4
17,702
/** Store the given tenant definition as JSON in the Tenants store of the default database. */
private void storeTenantDefinition(TenantDefinition tenantDef) {
    String json = tenantDef.toDoc().toJSON();
    DBTransaction dbTran = DBService.instance().startTransaction();
    dbTran.addColumn(TENANTS_STORE_NAME, tenantDef.getName(), TENANT_DEF_COL_NAME, json);
    DBService.instance().commit(dbTran);
}
Store the given tenant definition in the Tenants table of the default database .
109
14
17,703
private boolean isValidTenantUserAccess ( Tenant tenant , String userid , String password , Permission permNeeded ) { TenantDefinition tenantDef = tenant . getDefinition ( ) ; assert tenantDef != null ; if ( tenantDef . getUsers ( ) . size ( ) == 0 ) { // We allow access to the default tenant when it has no users defined. return tenant . getName ( ) . equals ( m_defaultTenantName ) ; } UserDefinition userDef = tenantDef . getUser ( userid ) ; if ( userDef == null || Utils . isEmpty ( password ) ) { return false ; // no such user or no password given } if ( userDef . getHash ( ) != null ) { if ( ! PasswordManager . checkPassword ( password , userDef . getHash ( ) ) ) { return false ; // password is hashed but didn't match } } else { if ( ! password . equals ( userDef . getPassword ( ) ) ) { return false ; // password is plaintext but didn't match } } return isValidUserAccess ( userDef , permNeeded ) ; }
Validate the given user ID and password .
237
9
17,704
/**
 * Check the user's granted permissions against the required permission. An empty
 * permission set or an ALL grant allows everything; UPDATE implies APPEND.
 */
private boolean isValidUserAccess(UserDefinition userDef, Permission permNeeded) {
    Set<Permission> perms = userDef.getPermissions();
    if (perms.size() == 0 || perms.contains(Permission.ALL)) {
        return true;
    }
    switch (permNeeded) {
    case APPEND:
        return perms.contains(Permission.APPEND) || perms.contains(Permission.UPDATE);
    case READ:
        return perms.contains(Permission.READ);
    case UPDATE:
        return perms.contains(Permission.UPDATE);
    default:
        return false;
    }
}
Validate the user 's permissions vs . the given required permission .
135
12
17,705
/** Get the TenantDefinition for the given tenant name, or null if unknown. */
private TenantDefinition getTenantDef(String tenantName) {
    DRow row = DBService.instance().getRow(TENANTS_STORE_NAME, tenantName);
    return row == null ? null : loadTenantDefinition(row);
}
Get the TenantDefinition for the given tenant . Return null if unknown .
71
15
17,706
/** Get all tenant definitions, including the default tenant, keyed by tenant name. */
private Map<String, TenantDefinition> getAllTenantDefs() {
    Map<String, TenantDefinition> tenantMap = new HashMap<>();
    for (DRow row : DBService.instance().getAllRows(TENANTS_STORE_NAME)) {
        TenantDefinition tenantDef = loadTenantDefinition(row);
        if (tenantDef != null) {    // skip rows that are not valid tenant definitions
            tenantMap.put(tenantDef.getName(), tenantDef);
        }
    }
    return tenantMap;
}
Get all tenants including the default tenant .
127
8
17,707
private TenantDefinition loadTenantDefinition ( DRow tenantDefRow ) { String tenantName = tenantDefRow . getKey ( ) ; m_logger . debug ( "Loading definition for tenant: {}" , tenantName ) ; DColumn tenantDefCol = tenantDefRow . getColumn ( TENANT_DEF_COL_NAME ) ; if ( tenantDefCol == null ) { return null ; // Not a valid Doradus tenant. } String tenantDefJSON = tenantDefCol . getValue ( ) ; TenantDefinition tenantDef = new TenantDefinition ( ) ; try { tenantDef . parse ( UNode . parseJSON ( tenantDefJSON ) ) ; Utils . require ( tenantDef . getName ( ) . equals ( tenantName ) , "Tenant definition (%s) did not match row name (%s)" , tenantDef . getName ( ) , tenantName ) ; } catch ( Exception e ) { m_logger . warn ( "Skipping malformed tenant definition; tenant=" + tenantName , e ) ; return null ; } return tenantDef ; }
Load a TenantDefinition from the given Tenants - table row .
229
10
17,708
/**
 * Validate that the requested tenant modifications are allowed; throws when the tenant
 * name or the 'DBService.dbservice' option would change.
 */
private void validateTenantUpdate(TenantDefinition oldTenantDef, TenantDefinition newTenantDef) {
    Utils.require(oldTenantDef.getName().equals(newTenantDef.getName()),
                  "Tenant name cannot be changed: %s", newTenantDef.getName());
    String oldDBService = getDBServiceOption(oldTenantDef);
    String newDBService = getDBServiceOption(newTenantDef);
    // Null-safe comparison. The original expression
    //   (old == null && new == null) || old.equals(new)
    // threw a NullPointerException when the old value was null but a new value was
    // supplied; that case is now correctly reported as an illegal change.
    boolean unchanged = oldDBService == null ? newDBService == null
                                             : oldDBService.equals(newDBService);
    Utils.require(unchanged,
                  "'DBService.dbservice' parameter cannot be changed: tenant=%s, previous=%s, new=%s",
                  newTenantDef.getName(), oldDBService, newDBService);
}

/** Extract the 'DBService.dbservice' option value from a tenant definition; null when absent. */
private static String getDBServiceOption(TenantDefinition tenantDef) {
    Map<String, Object> opts = tenantDef.getOptionMap("DBService");
    return opts == null ? null : (String) opts.get("dbservice");
}
Validate that the given modifications are allowed ; throw any transgressions found .
311
15
17,709
/** Clear the password hash from every user definition (e.g. before returning it to a caller). */
private void removeUserHashes(TenantDefinition tenantDef) {
    for (UserDefinition userDef : tenantDef.getUsers().values()) {
        userDef.setHash(null);
    }
}
Remove hash value from user definitions .
45
7
17,710
/** Delete all column families used by the given application and drop its shard-cache entries. */
@Override
public void deleteApplication(ApplicationDefinition appDef) {
    checkServiceState();
    deleteApplicationCFs(appDef);
    m_shardCache.clear(appDef);
}
Delete all CFs used by the given application .
42
10
17,711
/** Create (and prune) the column families needed for the given application definition. */
@Override
public void initializeApplication(ApplicationDefinition oldAppDef, ApplicationDefinition appDef) {
    checkServiceState();
    verifyApplicationCFs(oldAppDef, appDef);
}
Create all CFs needed for the given application .
40
10
17,712
/**
 * Get the data-aging check frequency for the given table: null when no aging field is
 * configured; otherwise the table-level frequency, falling back to the application-level one.
 */
private String getDataAgingFreq(TableDefinition tableDef) {
    if (Utils.isEmpty(tableDef.getOption(CommonDefs.OPT_AGING_FIELD))) {
        return null;
    }
    String freq = tableDef.getOption(CommonDefs.OPT_AGING_CHECK_FREQ);
    if (!Utils.isEmpty(freq)) {
        return freq;
    }
    return tableDef.getAppDef().getOption(CommonDefs.OPT_AGING_CHECK_FREQ);
}
frequency is defined at either the table or application level .
131
11
17,713
/**
 * Get all scalar and link fields for the object with the given ID in the given table.
 * Returns null when the object's row has no columns (i.e. no such object).
 */
public DBObject getObject(TableDefinition tableDef, String objID) {
    checkServiceState();
    String storeName = objectsStoreName(tableDef);
    Tenant tenant = Tenant.getTenant(tableDef);
    Iterator<DColumn> colIter =
        DBService.instance(tenant).getAllColumns(storeName, objID).iterator();
    if (!colIter.hasNext()) {
        return null;
    }
    DBObject dbObj = createObject(tableDef, objID, colIter);
    addShardedLinkValues(tableDef, dbObj);
    return dbObj;
}
Get all scalar and link fields for the object in the given table with the given ID .
136
19
17,714
/** Perform an aggregate query on the given table using the given, pre-parsed parameters. */
public AggregateResult aggregateQuery(TableDefinition tableDef, Aggregate aggParams) {
    checkServiceState();
    aggParams.execute();
    return aggParams.getResult();
}
Perform an aggregate query on the given table using the given query parameters .
44
15
17,715
/**
 * Delete a batch of objects from the given table. Every object must carry an _ID;
 * deleting an already-deleted object is a no-op.
 */
public BatchResult deleteBatch(TableDefinition tableDef, DBObjectBatch batch) {
    checkServiceState();
    List<String> objIDs = new ArrayList<>();
    for (DBObject dbObj : batch.getObjects()) {
        String objID = dbObj.getObjectID();
        Utils.require(!Utils.isEmpty(objID), "All objects must have _ID defined");
        objIDs.add(objID);
    }
    return new BatchObjectUpdater(tableDef).deleteBatch(objIDs);
}
Delete a batch of objects from the given table . All objects must have an ID assigned . Deleting an already - deleted object is a no - op .
144
31
17,716
/**
 * Build the Terms row key for the given table, object, field name, and term. The key is
 * the field/term key, prefixed with "&lt;shard&gt;/" when the object's shard number is &gt; 0.
 */
public static String termIndexRowKey(TableDefinition tableDef, DBObject dbObj, String fieldName, String term) {
    StringBuilder key = new StringBuilder();
    int shardNumber = tableDef.getShardNumber(dbObj);
    if (shardNumber > 0) {
        key.append(shardNumber).append("/");
    }
    return key.append(FieldAnalyzer.makeTermKey(fieldName, term)).toString();
}
Create the Terms row key for the given table object field name and term .
118
15
17,717
/** Ensure the given shard number of the given sharded table is registered in the shard cache. */
public void verifyShard(TableDefinition tableDef, int shardNumber) {
    assert tableDef.isSharded();
    assert shardNumber > 0;
    checkServiceState();
    m_shardCache.verifyShard(tableDef, shardNumber);
}
Get the starting date of the shard with the given number in the given sharded table . If the given table has not yet started the given shard null is returned .
59
35
17,718
/**
 * Get all known shards for the given table as a map of shard number to start date.
 * Returns an empty map when the table is not sharded.
 */
public Map<Integer, Date> getShards(TableDefinition tableDef) {
    checkServiceState();
    return tableDef.isSharded() ? m_shardCache.getShardMap(tableDef) : new HashMap<>();
}
Get all known shards for the given table . Each shard is defined in a column in the _shards row of the table 's Terms store . If the given table is not sharded an empty map is returned .
65
44
17,719
/**
 * Add an implicit table to the given application and return its new TableDefinition,
 * re-fetched from the schema service after the definition is committed.
 */
private TableDefinition addAutoTable(ApplicationDefinition appDef, String tableName) {
    m_logger.debug("Adding implicit table '{}' to application '{}'", tableName, appDef.getAppName());
    Tenant tenant = Tenant.getTenant(appDef);
    TableDefinition tableDef = new TableDefinition(appDef);
    tableDef.setTableName(tableName);
    appDef.addTable(tableDef);
    SchemaService.instance().defineApplication(tenant, appDef);
    // Re-read the application so the returned definition is the one the schema service stored.
    appDef = SchemaService.instance().getApplication(tenant, appDef.getAppName());
    return appDef.getTableDef(tableName);
}
Add an implicit table to the given application and return its new TableDefinition .
157
15
17,720
/** Add values of sharded link fields, if any, to the given DBObject. */
private void addShardedLinkValues(TableDefinition tableDef, DBObject dbObj) {
    for (FieldDefinition fieldDef : tableDef.getFieldDefinitions()) {
        if (fieldDef.isLinkField() && fieldDef.isSharded()) {
            // The link's values live in the extent table's shards.
            TableDefinition extentTableDef = tableDef.getLinkExtentTableDef(fieldDef);
            Set<Integer> shardNums = getShards(extentTableDef).keySet();
            Set<String> values = getShardedLinkValues(dbObj.getObjectID(), fieldDef, shardNums);
            dbObj.addFieldValues(fieldDef.getName(), values);
        }
    }
}
Add sharded link values if any to the given DBObject .
152
13
17,721
/**
 * Delete the objects and terms stores of every table in the given application.
 * "IfPresent" tolerates stores already missing, e.g. after a previously failed delete.
 */
private void deleteApplicationCFs(ApplicationDefinition appDef) {
    Tenant tenant = Tenant.getTenant(appDef);
    for (TableDefinition tableDef : appDef.getTableDefinitions().values()) {
        DBService.instance(tenant).deleteStoreIfPresent(objectsStoreName(tableDef));
        DBService.instance(tenant).deleteStoreIfPresent(termsStoreName(tableDef));
    }
}
actually exist in case a previous delete - app failed .
100
11
17,722
private Set < String > getShardedLinkValues ( String objID , FieldDefinition linkDef , Set < Integer > shardNums ) { Set < String > values = new HashSet < String > ( ) ; if ( shardNums . size ( ) == 0 ) { return values ; } // Construct row keys for the link's possible Terms records. Set < String > termRowKeys = new HashSet < String > ( ) ; for ( Integer shardNumber : shardNums ) { termRowKeys . add ( shardedLinkTermRowKey ( linkDef , objID , shardNumber ) ) ; } TableDefinition tableDef = linkDef . getTableDef ( ) ; String termStore = termsStoreName ( tableDef ) ; Tenant tenant = Tenant . getTenant ( tableDef ) ; for ( DRow row : DBService . instance ( tenant ) . getRows ( termStore , termRowKeys ) ) { for ( DColumn column : row . getAllColumns ( 1024 ) ) { values . add ( column . getName ( ) ) ; } } return values ; }
Get all target object IDs for the given sharded link .
236
12
17,723
private boolean isValidShardDate ( String shardDate ) { try { // If the format is invalid, a ParseException is thrown. Utils . dateFromString ( shardDate ) ; return true ; } catch ( IllegalArgumentException ex ) { return false ; } }
is bad just return false .
60
6
17,724
/** Validate that the given option value is a Boolean ("true" or "false", case-insensitive). */
private void validateBooleanOption(String optName, String optValue) {
    boolean isBoolean = optValue.equalsIgnoreCase("true") || optValue.equalsIgnoreCase("false");
    if (!isBoolean) {
        throw new IllegalArgumentException(
            "Boolean value expected for '" + optName + "' option: " + optValue);
    }
}
Validate that the given string is a valid Boolean value .
77
14
17,725
private void validateField ( FieldDefinition fieldDef ) { Utils . require ( ! fieldDef . isXLinkField ( ) , "Xlink fields are not allowed in Spider applications" ) ; // Validate scalar field analyzer. if ( fieldDef . isScalarField ( ) ) { String analyzerName = fieldDef . getAnalyzerName ( ) ; if ( Utils . isEmpty ( analyzerName ) ) { analyzerName = FieldType . getDefaultAnalyzer ( fieldDef . getType ( ) ) ; fieldDef . setAnalyzer ( analyzerName ) ; } FieldAnalyzer . verifyAnalyzer ( fieldDef ) ; } }
Validate the given field against SpiderService - specific constraints .
140
12
17,726
/**
 * Validate the given table against SpiderService-specific constraints: dispatch each
 * table option to its validator, validate every field, then default
 * 'aging-check-frequency' when an aging field is configured without one.
 */
private void validateTable(TableDefinition tableDef) {
    for (String optName : tableDef.getOptionNames()) {
        String optValue = tableDef.getOption(optName);
        switch (optName) {
        case CommonDefs.OPT_AGING_FIELD:
            validateTableOptionAgingField(tableDef, optValue);
            break;
        case CommonDefs.OPT_RETENTION_AGE:
            validateTableOptionRetentionAge(tableDef, optValue);
            break;
        case CommonDefs.OPT_SHARDING_FIELD:
            validateTableOptionShardingField(tableDef, optValue);
            break;
        case CommonDefs.OPT_SHARDING_GRANULARITY:
            validateTableOptionShardingGranularity(tableDef, optValue);
            break;
        case CommonDefs.OPT_SHARDING_START:
            validateTableOptionShardingStart(tableDef, optValue);
            break;
        case CommonDefs.OPT_AGING_CHECK_FREQ:
            validateTableOptionAgingCheckFrequency(tableDef, optValue);
            break;
        default:
            Utils.require(false, "Unknown option for SpiderService table: " + optName);
        }
    }
    for (FieldDefinition fieldDef : tableDef.getFieldDefinitions()) {
        validateField(fieldDef);
    }
    // An aging field without an explicit check frequency inherits the application-level
    // frequency, falling back to "1 DAY".
    if (tableDef.getOption(CommonDefs.OPT_AGING_FIELD) != null &&
        tableDef.getOption(CommonDefs.OPT_AGING_CHECK_FREQ) == null) {
        String agingCheckFreq = tableDef.getAppDef().getOption(CommonDefs.OPT_AGING_CHECK_FREQ);
        if (Utils.isEmpty(agingCheckFreq)) {
            agingCheckFreq = "1 DAY";
        }
        tableDef.setOption(CommonDefs.OPT_AGING_CHECK_FREQ, agingCheckFreq);
    }
}
Validate the given table against SpiderService - specific constraints .
417
12
17,727
/** Validate the 'aging-check-frequency' option: parseable frequency; requires 'aging-field'. */
private void validateTableOptionAgingCheckFrequency(TableDefinition tableDef, String optValue) {
    new TaskFrequency(optValue);    // throws when the frequency format is invalid
    Utils.require(tableDef.getOption(CommonDefs.OPT_AGING_FIELD) != null,
                  "Option 'aging-check-frequency' requires option 'aging-field'");
}
Validate the table option aging - check - frequency
74
10
17,728
private void validateTableOptionAgingField ( TableDefinition tableDef , String optValue ) { FieldDefinition agingFieldDef = tableDef . getFieldDef ( optValue ) ; Utils . require ( agingFieldDef != null , "Aging field has not been defined: " + optValue ) ; assert agingFieldDef != null ; // Make FindBugs happy Utils . require ( agingFieldDef . getType ( ) == FieldType . TIMESTAMP , "Aging field must be a timestamp field: " + optValue ) ; Utils . require ( tableDef . getOption ( CommonDefs . OPT_RETENTION_AGE ) != null , "Option 'aging-field' requires option 'retention-age'" ) ; }
Validate the table option aging - field .
155
9
17,729
private void validateTableOptionRetentionAge ( TableDefinition tableDef , String optValue ) { RetentionAge retAge = new RetentionAge ( optValue ) ; // throws if invalid format optValue = retAge . toString ( ) ; tableDef . setOption ( CommonDefs . OPT_RETENTION_AGE , optValue ) ; // rewrite value Utils . require ( tableDef . getOption ( CommonDefs . OPT_AGING_FIELD ) != null , "Option 'retention-age' requires option 'aging-field'" ) ; }
Validate the table option retention - age .
117
9
17,730
private void validateTableOptionShardingField ( TableDefinition tableDef , String optValue ) { // Verify that the sharding-field exists and is a timestamp field. FieldDefinition shardingFieldDef = tableDef . getFieldDef ( optValue ) ; Utils . require ( shardingFieldDef != null , "Sharding field has not been defined: " + optValue ) ; assert shardingFieldDef != null ; // Make FindBugs happy Utils . require ( shardingFieldDef . getType ( ) == FieldType . TIMESTAMP , "Sharding field must be a timestamp field: " + optValue ) ; Utils . require ( ! shardingFieldDef . isCollection ( ) , "Sharding field cannot be a collection: " + optValue ) ; // Default sharding-granularity to MONTH. if ( tableDef . getOption ( CommonDefs . OPT_SHARDING_GRANULARITY ) == null ) { tableDef . setOption ( CommonDefs . OPT_SHARDING_GRANULARITY , "MONTH" ) ; } // Default sharding-start to "tomorrow". if ( tableDef . getOption ( CommonDefs . OPT_SHARDING_START ) == null ) { GregorianCalendar startDate = new GregorianCalendar ( Utils . UTC_TIMEZONE ) ; startDate . add ( Calendar . DAY_OF_MONTH , 1 ) ; // adds 1 day String startOpt = String . format ( "%04d-%02d-%02d" , startDate . get ( Calendar . YEAR ) , startDate . get ( Calendar . MONTH ) + 1 , // 0-relative! startDate . get ( Calendar . DAY_OF_MONTH ) ) ; tableDef . setOption ( CommonDefs . OPT_SHARDING_START , startOpt ) ; } }
Validate the table option sharding - field .
398
10
17,731
private void validateTableOptionShardingGranularity ( TableDefinition tableDef , String optValue ) { ShardingGranularity shardingGranularity = ShardingGranularity . fromString ( optValue ) ; Utils . require ( shardingGranularity != null , "Unrecognized 'sharding-granularity' value: " + optValue ) ; // 'sharding-granularity' requires 'sharding-field' Utils . require ( tableDef . getOption ( CommonDefs . OPT_SHARDING_FIELD ) != null , "Option 'sharding-granularity' requires option 'sharding-field'" ) ; }
Validate the table option sharding - granularity .
138
11
17,732
private void validateTableOptionShardingStart ( TableDefinition tableDef , String optValue ) { Utils . require ( isValidShardDate ( optValue ) , "'sharding-start' must be YYYY-MM-DD: " + optValue ) ; GregorianCalendar shardingStartDate = new GregorianCalendar ( Utils . UTC_TIMEZONE ) ; shardingStartDate . setTime ( Utils . dateFromString ( optValue ) ) ; // 'sharding-start' requires 'sharding-field' Utils . require ( tableDef . getOption ( CommonDefs . OPT_SHARDING_FIELD ) != null , "Option 'sharding-start' requires option 'sharding-field'" ) ; }
Validate the table option sharding - start .
160
10
17,733
private void verifyApplicationCFs ( ApplicationDefinition oldAppDef , ApplicationDefinition appDef ) { // Add new table-level CFs: Tenant tenant = Tenant . getTenant ( appDef ) ; DBService dbService = DBService . instance ( tenant ) ; for ( TableDefinition tableDef : appDef . getTableDefinitions ( ) . values ( ) ) { dbService . createStoreIfAbsent ( objectsStoreName ( tableDef ) , true ) ; dbService . createStoreIfAbsent ( termsStoreName ( tableDef ) , true ) ; } // Delete obsolete table-level CFs: if ( oldAppDef != null ) { for ( TableDefinition oldTableDef : oldAppDef . getTableDefinitions ( ) . values ( ) ) { if ( appDef . getTableDef ( oldTableDef . getTableName ( ) ) == null ) { dbService . deleteStoreIfPresent ( objectsStoreName ( oldTableDef ) ) ; dbService . deleteStoreIfPresent ( termsStoreName ( oldTableDef ) ) ; } } } }
Verify that all ColumnFamilies needed for the given application exist .
228
15
17,734
/**
 * The job's execution time in milliseconds: 0 before it starts, elapsed-so-far while
 * running, and the final duration once finished.
 */
public long getDuration() {
    Status s = status;  // snapshot the field once — it may change concurrently
    if (s == Status.READY) {
        return 0;
    }
    if (s == Status.RUNNING) {
        return System.currentTimeMillis() - startTime;
    }
    return finishTime - startTime;
}
The current or final time in milliseconds during which the job either is executed or was executed .
54
18
17,735
/**
 * Clone this tracker's current counts and, when reset is true, zero out the live
 * counters afterwards.
 */
public synchronized RequestsTracker snapshot(boolean reset) {
    long nExecuting = executing.getCount();
    long nRejected = rejected.getCount();
    long nFailed = failed.getCount();
    TimeCounter timeSnap = counter.snapshot(reset);
    IntervalHistogram histSnap = histogram.snapshot(reset);
    if (reset) {
        executing.reset();
        rejected.reset();
        failed.reset();
        succeeded.reset();
    }
    // NOTE(review): the succeeded counter is reset here but its count is never captured
    // in the snapshot — confirm RequestsTracker derives it elsewhere.
    return new RequestsTracker(nExecuting, nRejected, nFailed, timeSnap, histSnap,
                               lastFailureReason, lastRejectReason);
}
Clones this tracker and zeroes it out afterwards if reset is true .
123
16
17,736
/** Zero out all counters, the timer, the histogram, and the failure/reject reasons. */
public synchronized void reset() {
    executing.reset();
    rejected.reset();
    failed.reset();
    succeeded.reset();
    counter.reset();
    histogram.reset();
    lastFailureReason = null;
    lastRejectReason = null;
}
Zeroes out this tracker .
58
6
17,737
// Compute the length of the common leading path shared by all aggregation groups.
// Returns 0 when there are fewer than two groups, when any group has a filter, or when
// the shortest group has at most one item (the last item of each group is intentionally
// excluded from matching). Items with an xlinkContext also terminate the common prefix.
// NOTE(review): the result is the count of leading items equal across all groups —
// logged at info level when non-zero.
private int getCommonPart ( List < AggregationGroup > groups ) { if ( groups . size ( ) < 2 ) return 0 ; for ( int i = 0 ; i < groups . size ( ) ; i ++ ) { if ( groups . get ( i ) . filter != null ) return 0 ; } int itemsCount = groups . get ( 0 ) . items . size ( ) - 1 ; for ( int i = 1 ; i < groups . size ( ) ; i ++ ) { itemsCount = Math . min ( itemsCount , groups . get ( i ) . items . size ( ) - 1 ) ; } if ( itemsCount <= 0 ) return 0 ; int itemIndex = 0 ; for ( ; itemIndex < itemsCount ; itemIndex ++ ) { boolean eq = true ; AggregationGroupItem item = groups . get ( 0 ) . items . get ( itemIndex ) ; if ( item . xlinkContext != null ) break ; for ( int i = 1 ; i < groups . size ( ) ; i ++ ) { AggregationGroupItem item2 = groups . get ( i ) . items . get ( itemIndex ) ; if ( ! item . equals ( item2 ) || item . xlinkContext != null ) { eq = false ; break ; } } if ( ! eq ) break ; } if ( itemIndex > 0 ) { LOG . info ( "Found common path for groups: " + itemIndex ) ; } return itemIndex ; }
Support for common paths in groups
305
6
17,738
/** Add a link-value column to the owning object's row in the link table's objects store. */
public void addLinkValue(String ownerObjID, FieldDefinition linkDef, String targetObjID) {
    addColumn(SpiderService.objectsStoreName(linkDef.getTableDef()),
              ownerObjID,
              SpiderService.linkColumnName(linkDef, targetObjID));
}
Add a link value column to the objects store of the given object ID .
63
15
17,739
/** Add the column that sets or replaces the given scalar field of the given object. */
public void addScalarValueColumn(TableDefinition tableDef, String objID, String fieldName, String fieldValue) {
    addColumn(SpiderService.objectsStoreName(tableDef),
              objID,
              fieldName,
              SpiderService.scalarValueToBinary(tableDef, fieldName, fieldValue));
}
Add the column needed to add or replace the given scalar field belonging to the object with the given ID in the given table .
69
26
17,740
/**
 * Add a sharded-link value column: the target object ID is written to the link's special
 * Terms row for the given shard. Only valid for sharded links with a positive shard number.
 */
public void addShardedLinkValue(String ownerObjID, FieldDefinition linkDef, String targetObjID, int targetShardNo) {
    assert linkDef.isSharded();
    assert targetShardNo > 0;
    addColumn(SpiderService.termsStoreName(linkDef.getTableDef()),
              SpiderService.shardedLinkTermRowKey(linkDef, ownerObjID, targetShardNo),
              targetObjID);
}
Add a link value column on behalf of the given owner object referencing the given target object ID . This is used when a link is sharded and the owner 's shard number is > 0 . The link value column is added to a special term record .
97
51
17,741
/** Delete the "all objects" column for the given object ID, honoring the shard prefix. */
public void deleteAllObjectsColumn(TableDefinition tableDef, String objID, int shardNo) {
    String rowKey = shardNo > 0 ? shardNo + "/" + ALL_OBJECTS_ROW_KEY : ALL_OBJECTS_ROW_KEY;
    deleteColumn(SpiderService.termsStoreName(tableDef), rowKey, objID);
}
Delete the all objects column with the given object ID from the given table .
93
15
17,742
/** Delete the primary storage row of the given object (used when the object is deleted). */
public void deleteObjectRow(TableDefinition tableDef, String objID) {
    deleteRow(SpiderService.objectsStoreName(tableDef), objID);
}
Delete the primary field storage row for the given object . This usually called when the object is being deleted .
35
21
17,743
/** Delete the scalar-value column with the given field name for the given object. */
public void deleteScalarValueColumn(TableDefinition tableDef, String objID, String fieldName) {
    deleteColumn(SpiderService.objectsStoreName(tableDef), objID, fieldName);
}
Delete a scalar value column with the given field name for the given object ID from the given table .
45
21
17,744
/** Un-index the given term: delete its Terms column for the given object and field. */
public void deleteTermIndexColumn(TableDefinition tableDef, DBObject dbObj, String fieldName, String term) {
    deleteColumn(SpiderService.termsStoreName(tableDef),
                 SpiderService.termIndexRowKey(tableDef, dbObj, fieldName, term),
                 dbObj.getObjectID());
}
Un - index the given term by deleting the Terms column for the given DBObject field name and term .
70
21
17,745
/** Delete a link-value column from the owning object's row in the objects store. */
public void deleteLinkValue(String ownerObjID, FieldDefinition linkDef, String targetObjID) {
    deleteColumn(SpiderService.objectsStoreName(linkDef.getTableDef()),
                 ownerObjID,
                 SpiderService.linkColumnName(linkDef, targetObjID));
}
Delete a link value column in the object table of the given owning object .
63
15
17,746
/** Delete the Terms row of the given sharded link for the given owner and shard number. */
public void deleteShardedLinkRow(FieldDefinition linkDef, String owningObjID, int shardNumber) {
    assert linkDef.isSharded();
    assert shardNumber > 0;
    deleteRow(SpiderService.termsStoreName(linkDef.getTableDef()),
              SpiderService.shardedLinkTermRowKey(linkDef, owningObjID, shardNumber));
}
Delete the shard row for the given sharded link and shard number .
85
16
17,747
/** Delete a single target-value column of a sharded link from its Terms row. */
public void deleteShardedLinkValue(String objID, FieldDefinition linkDef, String targetObjID, int shardNo) {
    assert linkDef.isSharded();
    assert shardNo > 0;
    deleteColumn(SpiderService.termsStoreName(linkDef.getTableDef()),
                 SpiderService.shardedLinkTermRowKey(linkDef, objID, shardNo),
                 targetObjID);
}
Delete a link value column in the Terms store for a sharded link .
92
15
17,748
/** Queue a column add with a null column value. */
private void addColumn(String storeName, String rowKey, String colName) {
    addColumn(storeName, rowKey, colName, null);
}
Add the given column update with a null column value .
34
11
17,749
/**
 * Queue a column add; the value may be null. Duplicate mutations of the same column are
 * collapsed into one; a duplicate carrying a different value is logged at debug level.
 */
private void addColumn(String storeName, String rowKey, String colName, byte[] colValue) {
    Map<String, Map<String, byte[]>> rowMap = m_columnAdds.get(storeName);
    if (rowMap == null) {
        rowMap = new HashMap<>();
        m_columnAdds.put(storeName, rowMap);
    }
    Map<String, byte[]> colMap = rowMap.get(rowKey);
    if (colMap == null) {
        colMap = new HashMap<>();
        rowMap.put(rowKey, colMap);
    }
    byte[] oldValue = colMap.put(colName, colValue);
    if (oldValue == null) {
        // First mutation of this column (or prior value was null) counts as an update.
        m_totalUpdates++;
    } else if (!Arrays.equals(oldValue, colValue)) {
        m_logger.debug("Warning: duplicate column mutation with different value: " +
                       "store={}, row={}, col={}, old={}, new={}",
                       new Object[]{storeName, rowKey, colName, oldValue, colValue});
    }
}
Add the given column update ; the value may be null .
248
12
17,750
/** Queue a single column deletion for the given store and row. */
private void deleteColumn(String storeName, String rowKey, String colName) {
    Map<String, List<String>> rowMap = m_columnDeletes.get(storeName);
    if (rowMap == null) {
        rowMap = new HashMap<>();
        m_columnDeletes.put(storeName, rowMap);
    }
    List<String> colNames = rowMap.get(rowKey);
    if (colNames == null) {
        colNames = new ArrayList<>();
        rowMap.put(rowKey, colNames);
    }
    colNames.add(colName);
    m_totalUpdates++;
}
Add the given column deletion .
142
6
17,751
/** Queue column deletions for all the given column names in one row. */
private void deleteColumns(String storeName, String rowKey, Collection<String> colNames) {
    Map<String, List<String>> rowMap = m_columnDeletes.get(storeName);
    if (rowMap == null) {
        rowMap = new HashMap<>();
        m_columnDeletes.put(storeName, rowMap);
    }
    List<String> colList = rowMap.get(rowKey);
    if (colList == null) {
        colList = new ArrayList<>();
        rowMap.put(rowKey, colList);
    }
    colList.addAll(colNames);
    m_totalUpdates += colNames.size();
}
Add column deletions for all given column names .
153
10
17,752
/** Queue a row deletion for the given store. */
private void deleteRow(String storeName, String rowKey) {
    List<String> rowKeys = m_rowDeletes.get(storeName);
    if (rowKeys == null) {
        rowKeys = new ArrayList<>();
        m_rowDeletes.put(storeName, rowKeys);
    }
    rowKeys.add(rowKey);
    m_totalUpdates++;
}
Add the following row deletion .
85
6
17,753
/** Create a MAP UNode with the given node name. */
public static UNode createMapNode(String name) {
    return new UNode(name, NodeType.MAP, null, false, "");
}
Create a MAP UNode with the given node name .
32
11
17,754
/** Create an ARRAY UNode with the given node name. */
public static UNode createArrayNode(String name) {
    return new UNode(name, NodeType.ARRAY, null, false, "");
}
Create an ARRAY UNode with the given node name .
33
12
17,755
/** Create a VALUE UNode with the given name and value; a null value is stored as "". */
public static UNode createValueNode(String name, String value) {
    return new UNode(name, NodeType.VALUE, value == null ? "" : value, false, "");
}
Create a VALUE UNode with the given node name and value .
49
14
17,756
/**
 * Parse the given text, formatted per the given content type, into a UNode tree and
 * return the root node.
 *
 * @throws IllegalArgumentException when the content type is unsupported or parsing fails.
 */
public static UNode parse(String text, ContentType contentType) throws IllegalArgumentException {
    if (contentType.isJSON()) {
        return parseJSON(text);
    }
    if (contentType.isXML()) {
        return parseXML(text);
    }
    Utils.require(false, "Unsupported content-type: " + contentType);
    return null;    // unreachable: require(false, ...) throws
}
Parse the given text formatted with the given content - type into a UNode tree and return the root node .
96
23
17,757
/**
 * Parse text from the given character reader, formatted per the given content type, into
 * a UNode tree and return the root node.
 *
 * @throws IllegalArgumentException when the content type is unsupported or parsing fails.
 */
public static UNode parse(Reader reader, ContentType contentType) throws IllegalArgumentException {
    if (contentType.isJSON()) {
        return parseJSON(reader);
    }
    if (contentType.isXML()) {
        return parseXML(reader);
    }
    Utils.require(false, "Unsupported content-type: " + contentType);
    return null;    // unreachable: require(false, ...) throws
}
Parse the text from the given character reader formatted with the given content - type into a UNode tree and return the root node . The reader is closed when finished .
96
34
17,758
public static UNode parseXML ( String text ) throws IllegalArgumentException { assert text != null && text . length ( ) > 0 ; // This throws if the XML is malformed. Element rootElem = Utils . parseXMLDocument ( text ) ; return parseXMLElement ( rootElem ) ; }
Parse the given XML text and return the appropriate UNode object . The UNode returned is a MAP whose child nodes are built from the attributes and child elements of the document s root element .
68
39
17,759
public static UNode parseXML ( Reader reader ) throws IllegalArgumentException { assert reader != null ; // This throws if the XML is malformed. Element rootElem = Utils . parseXMLDocument ( reader ) ; // Parse the root element and ensure it elligible as a map. UNode rootNode = parseXMLElement ( rootElem ) ; return rootNode ; }
Parse XML from the given Reader and return the appropriate UNode object . The UNode returned is a MAP whose child nodes are built from the attributes and child elements of the document s root element .
83
40
17,760
/**
 * Get the child member at the given index. The node must be a MAP or ARRAY; children are
 * kept in insertion order. Any out-of-bounds index — including a negative one — yields null.
 */
public UNode getMember(int index) {
    assert isCollection();
    // Fix: a negative index previously threw IndexOutOfBoundsException from
    // m_children.get(), contradicting the documented "out of bounds returns null" contract.
    if (m_children == null || index < 0 || index >= m_children.size()) {
        return null;
    }
    return m_children.get(index);
}
Get the child member with the given index . The node must be a MAP or ARRAY . Child members are retained in the order they are added . If the given index is out of bounds null is returned .
50
42
17,761
/**
 * Get this collection node's children as an Iterable. The node must be a MAP or ARRAY.
 * The (empty) child list is lazily materialized so callers never receive null.
 */
public Iterable<UNode> getMemberList() {
    assert m_type == NodeType.MAP || m_type == NodeType.ARRAY;
    if (m_children == null) {
        m_children = new ArrayList<UNode>();
    }
    return m_children;
}
Get the list of child nodes of this collection UNode as an Iterable UNode object . The UNode must be a MAP or an ARRAY .
63
31
17,762
public String toJSON ( ) { JSONEmitter json = new JSONEmitter ( ) ; json . startDocument ( ) ; toJSON ( json ) ; json . endDocument ( ) ; return json . toString ( ) ; }
Convert the DOM tree rooted at this UNode into a JSON document .
48
15
17,763
public String toJSON ( boolean bPretty ) { int indent = bPretty ? 3 : 0 ; JSONEmitter json = new JSONEmitter ( indent ) ; json . startDocument ( ) ; toJSON ( json ) ; json . endDocument ( ) ; return json . toString ( ) ; }
Convert the DOM tree rooted at this UNode into a JSON document . Optionally format the text with indenting to make it look pretty .
62
29
17,764
public byte [ ] toCompressedJSON ( ) throws IOException { // Wrap a GZIPOuputStream around a ByteArrayOuputStream. ByteArrayOutputStream bytesOut = new ByteArrayOutputStream ( ) ; GZIPOutputStream gzipOut = new GZIPOutputStream ( bytesOut ) ; // Wrap the GZIPOutputStream with an OutputStreamWriter that convers JSON Unicode // text to bytes using UTF-8. OutputStreamWriter writer = new OutputStreamWriter ( gzipOut , Utils . UTF8_CHARSET ) ; // Create a JSONEmitter that will write its output to the writer above and generate // the JSON output. JSONEmitter json = new JSONEmitter ( writer ) ; json . startDocument ( ) ; toJSON ( json ) ; json . endDocument ( ) ; // Ensure the output stream is flushed and the GZIP is finished, then the output // buffer is complete. writer . flush ( ) ; gzipOut . finish ( ) ; return bytesOut . toByteArray ( ) ; }
Convert the DOM tree rooted at this UNode into a JSON document compressed with GZIP .
222
20
17,765
public String toXML ( boolean bPretty ) throws IllegalArgumentException { int indent = bPretty ? 3 : 0 ; XMLBuilder xml = new XMLBuilder ( indent ) ; xml . startDocument ( ) ; toXML ( xml ) ; xml . endDocument ( ) ; return xml . toString ( ) ; }
Convert the DOM tree rooted at this UNode into an XML document optionally indenting each XML level to product a pretty structured output .
67
27
17,766
public void toXML ( XMLBuilder xml ) throws IllegalArgumentException { assert xml != null ; // Determine what tag name to use for the generated element. Map < String , String > attrMap = new LinkedHashMap <> ( ) ; String elemName = m_name ; if ( m_tagName . length ( ) > 0 ) { // Place m_name into a "name" attribute and use m_tagName attrMap . put ( "name" , m_name ) ; elemName = m_tagName ; } // Add child VALUE nodes marked as "attribute" in its own map. addXMLAttributes ( attrMap ) ; switch ( m_type ) { case ARRAY : // Start an element with or without attributes. if ( attrMap . size ( ) > 0 ) { xml . startElement ( elemName , attrMap ) ; } else { xml . startElement ( elemName ) ; } // Add XML for non-attribute child nodes. if ( m_children != null ) { for ( UNode childNode : m_children ) { if ( childNode . m_type != NodeType . VALUE || ! childNode . m_bAttribute ) { childNode . toXML ( xml ) ; } } } xml . endElement ( ) ; break ; case MAP : // Start an element with or without attributes. if ( attrMap . size ( ) > 0 ) { xml . startElement ( elemName , attrMap ) ; } else { xml . startElement ( elemName ) ; } // Add XML for non-attribute child nodes in name order. if ( m_childNodeMap != null ) { assert m_childNodeMap . size ( ) == m_children . size ( ) ; for ( UNode childNode : m_childNodeMap . values ( ) ) { if ( childNode . m_type != NodeType . VALUE || ! childNode . m_bAttribute ) { childNode . toXML ( xml ) ; } } } xml . endElement ( ) ; break ; case VALUE : // Map to a simple element. String value = m_value ; if ( Utils . containsIllegalXML ( value ) ) { value = Utils . base64FromString ( m_value ) ; attrMap . put ( "encoding" , "base64" ) ; } if ( attrMap . size ( ) > 0 ) { xml . addDataElement ( elemName , value , attrMap ) ; } else { xml . addDataElement ( elemName , value ) ; } break ; default : assert false : "Unexpected NodeType: " + m_type ; } }
Add the XML required for this node to the given XMLBuilder .
579
13
17,767
public void removeMember ( String childName ) { assert isMap ( ) : "'removeMember' allowed only for MAP nodes" ; if ( m_childNodeMap != null ) { // Remove from child name map and then list if found. UNode removeNode = m_childNodeMap . remove ( childName ) ; if ( removeNode != null ) { m_children . remove ( removeNode ) ; } } }
Delete the child node of this MAP node with the given name if it exists . This node must be a MAP . The child node name may or may not exist .
88
33
17,768
private void toJSON ( JSONEmitter json ) { switch ( m_type ) { case ARRAY : json . startArray ( m_name ) ; if ( m_children != null ) { for ( UNode childNode : m_children ) { if ( childNode . isMap ( ) ) { json . startObject ( ) ; childNode . toJSON ( json ) ; json . endObject ( ) ; } else { childNode . toJSON ( json ) ; } } } json . endArray ( ) ; break ; case MAP : // Return map child modes in name order. json . startGroup ( m_name ) ; if ( m_childNodeMap != null ) { assert m_childNodeMap . size ( ) == m_children . size ( ) ; for ( UNode childNode : m_childNodeMap . values ( ) ) { childNode . toJSON ( json ) ; } } json . endGroup ( ) ; break ; case VALUE : if ( m_bAltFormat && m_tagName != null ) { // Generate as "<tag>: {"<name>": "<value>"} json . startGroup ( m_tagName ) ; json . addValue ( m_name , m_value ) ; json . endGroup ( ) ; } else if ( m_parent != null && m_parent . isArray ( ) ) { if ( m_name . equals ( "value" ) ) { // nameless node: "<value>" json . addValue ( m_value ) ; } else { // value as an object: {"name": "value"} json . addObject ( m_name , m_value ) ; } } else { // Simple case: "<name>": "<value>" json . addValue ( m_name , m_value ) ; } break ; default : assert false : "Unknown NodeType: " + m_type ; } }
Add the appropriate JSON syntax for this UNode to the given JSONEmitter .
400
16
17,769
private static void parseXMLAttributes ( NamedNodeMap attrMap , List < UNode > childUNodeList ) { for ( int index = 0 ; index < attrMap . getLength ( ) ; index ++ ) { Attr attr = ( Attr ) attrMap . item ( index ) ; UNode childNode = createValueNode ( attr . getName ( ) , attr . getValue ( ) , true ) ; childUNodeList . add ( childNode ) ; } }
if to the given child node list .
107
8
17,770
private static boolean parseXMLChildElems ( Element elem , List < UNode > childUNodeList ) { assert elem != null ; assert childUNodeList != null ; // Scan for Element nodes (there could be Comment and other nodes). boolean bDupNodeNames = false ; Set < String > nodeNameSet = new HashSet < String > ( ) ; NodeList nodeList = elem . getChildNodes ( ) ; for ( int index = 0 ; index < nodeList . getLength ( ) ; index ++ ) { Node childNode = nodeList . item ( index ) ; if ( childNode instanceof Element ) { // Create the appropriate child UNode for this element. UNode childUNode = parseXMLElement ( ( Element ) childNode ) ; childUNodeList . add ( childUNode ) ; if ( nodeNameSet . contains ( childUNode . getName ( ) ) ) { bDupNodeNames = true ; } else { nodeNameSet . add ( childUNode . getName ( ) ) ; } } } return bDupNodeNames ; }
are found while scanning .
234
5
17,771
private void addXMLAttributes ( Map < String , String > attrMap ) { if ( m_children != null ) { for ( UNode childNode : m_children ) { // A child node must not contain a tag name to be considered an attribute. if ( childNode . m_type == NodeType . VALUE && childNode . m_bAttribute && Utils . isEmpty ( childNode . m_tagName ) ) { assert m_name != null && m_name . length ( ) > 0 ; attrMap . put ( childNode . m_name , childNode . m_value ) ; } } } }
Get the child nodes of this UNode that are VALUE nodes marked as attributes .
136
17
17,772
private void toStringTree ( StringBuilder builder , int indent ) { for ( int count = 0 ; count < indent ; count ++ ) { builder . append ( " " ) ; } builder . append ( this . toString ( ) ) ; builder . append ( "\n" ) ; if ( m_children != null ) { for ( UNode childNode : m_children ) { childNode . toStringTree ( builder , indent + 3 ) ; } } }
appended with a newline .
97
7
17,773
void deleteRow ( String storeName , Map < String , AttributeValue > key ) { String tableName = storeToTableName ( storeName ) ; m_logger . debug ( "Deleting row from table {}, key={}" , tableName , DynamoDBService . getDDBKey ( key ) ) ; Timer timer = new Timer ( ) ; boolean bSuccess = false ; for ( int attempts = 1 ; ! bSuccess ; attempts ++ ) { try { m_ddbClient . deleteItem ( tableName , key ) ; if ( attempts > 1 ) { m_logger . info ( "deleteRow() succeeded on attempt #{}" , attempts ) ; } bSuccess = true ; m_logger . debug ( "Time to delete table {}, key={}: {}" , new Object [ ] { tableName , DynamoDBService . getDDBKey ( key ) , timer . toString ( ) } ) ; } catch ( ProvisionedThroughputExceededException e ) { if ( attempts >= m_max_commit_attempts ) { String errMsg = "All retries exceeded; abandoning deleteRow() for table: " + tableName ; m_logger . error ( errMsg , e ) ; throw new RuntimeException ( errMsg , e ) ; } m_logger . warn ( "deleteRow() attempt #{} failed: {}" , attempts , e ) ; try { Thread . sleep ( attempts * m_retry_wait_millis ) ; } catch ( InterruptedException ex2 ) { // ignore } } } }
Delete row and back off if ProvisionedThroughputExceededException occurs .
339
16
17,774
private AWSCredentials getCredentials ( ) { String awsProfile = getParamString ( "aws_profile" ) ; if ( ! Utils . isEmpty ( awsProfile ) ) { m_logger . info ( "Using AWS profile: {}" , awsProfile ) ; ProfileCredentialsProvider credsProvider = null ; String awsCredentialsFile = getParamString ( "aws_credentials_file" ) ; if ( ! Utils . isEmpty ( awsCredentialsFile ) ) { credsProvider = new ProfileCredentialsProvider ( awsCredentialsFile , awsProfile ) ; } else { credsProvider = new ProfileCredentialsProvider ( awsProfile ) ; } return credsProvider . getCredentials ( ) ; } String awsAccessKey = getParamString ( "aws_access_key" ) ; Utils . require ( ! Utils . isEmpty ( awsAccessKey ) , "Either 'aws_profile' or 'aws_access_key' must be defined for tenant: " + m_tenant . getName ( ) ) ; String awsSecretKey = getParamString ( "aws_secret_key" ) ; Utils . require ( ! Utils . isEmpty ( awsSecretKey ) , "'aws_secret_key' must be defined when 'aws_access_key' is defined. " + "'aws_profile' is preferred over aws_access_key/aws_secret_key. Tenant: " + m_tenant . getName ( ) ) ; return new BasicAWSCredentials ( awsAccessKey , awsSecretKey ) ; }
Set the AWS credentials in m_ddbClient
359
10
17,775
private void setRegionOrEndPoint ( ) { String regionName = getParamString ( "ddb_region" ) ; if ( regionName != null ) { Regions regionEnum = Regions . fromName ( regionName ) ; Utils . require ( regionEnum != null , "Unknown 'ddb_region': " + regionName ) ; m_logger . info ( "Using region: {}" , regionName ) ; m_ddbClient . setRegion ( Region . getRegion ( regionEnum ) ) ; } else { String ddbEndpoint = getParamString ( "ddb_endpoint" ) ; Utils . require ( ddbEndpoint != null , "Either 'ddb_region' or 'ddb_endpoint' must be defined for tenant: " + m_tenant . getName ( ) ) ; m_logger . info ( "Using endpoint: {}" , ddbEndpoint ) ; m_ddbClient . setEndpoint ( ddbEndpoint ) ; } }
Set the region or endpoint in m_ddbClient
218
11
17,776
private void setDefaultCapacity ( ) { Object capacity = getParam ( "ddb_default_read_capacity" ) ; if ( capacity != null ) { READ_CAPACITY_UNITS = Integer . parseInt ( capacity . toString ( ) ) ; } capacity = getParam ( "ddb_default_write_capacity" ) ; if ( capacity != null ) { WRITE_CAPACITY_UNITS = Integer . parseInt ( capacity . toString ( ) ) ; } m_logger . info ( "Default table capacity: read={}, write={}" , READ_CAPACITY_UNITS , WRITE_CAPACITY_UNITS ) ; }
Set READ_CAPACITY_UNITS and WRITE_CAPACITY_UNITS if overridden .
143
22
17,777
private ScanResult scan ( ScanRequest scanRequest ) { m_logger . debug ( "Performing scan() request on table {}" , scanRequest . getTableName ( ) ) ; Timer timer = new Timer ( ) ; boolean bSuccess = false ; ScanResult scanResult = null ; for ( int attempts = 1 ; ! bSuccess ; attempts ++ ) { try { scanResult = m_ddbClient . scan ( scanRequest ) ; if ( attempts > 1 ) { m_logger . info ( "scan() succeeded on attempt #{}" , attempts ) ; } bSuccess = true ; m_logger . debug ( "Time to scan table {}: {}" , scanRequest . getTableName ( ) , timer . toString ( ) ) ; } catch ( ProvisionedThroughputExceededException e ) { if ( attempts >= m_max_read_attempts ) { String errMsg = "All retries exceeded; abandoning scan() for table: " + scanRequest . getTableName ( ) ; m_logger . error ( errMsg , e ) ; throw new RuntimeException ( errMsg , e ) ; } m_logger . warn ( "scan() attempt #{} failed: {}" , attempts , e ) ; try { Thread . sleep ( attempts * m_retry_wait_millis ) ; } catch ( InterruptedException ex2 ) { // ignore } } } return scanResult ; }
Perform a scan request and retry if ProvisionedThroughputExceededException occurs .
305
19
17,778
private List < DColumn > loadAttributes ( Map < String , AttributeValue > attributeMap , Predicate < String > colNamePredicate ) { List < DColumn > columns = new ArrayList <> ( ) ; if ( attributeMap != null ) { for ( Map . Entry < String , AttributeValue > mapEntry : attributeMap . entrySet ( ) ) { String colName = mapEntry . getKey ( ) ; if ( ! colName . equals ( DynamoDBService . ROW_KEY_ATTR_NAME ) && // Don't add row key attribute as a column colNamePredicate . test ( colName ) ) { AttributeValue attrValue = mapEntry . getValue ( ) ; if ( attrValue . getB ( ) != null ) { columns . add ( new DColumn ( colName , Utils . getBytes ( attrValue . getB ( ) ) ) ) ; } else if ( attrValue . getS ( ) != null ) { String value = attrValue . getS ( ) ; if ( value . equals ( DynamoDBService . NULL_COLUMN_MARKER ) ) { value = "" ; } columns . add ( new DColumn ( colName , value ) ) ; } else { throw new RuntimeException ( "Unknown AttributeValue type: " + attrValue ) ; } } } } // Sort or reverse sort column names. Collections . sort ( columns , new Comparator < DColumn > ( ) { @ Override public int compare ( DColumn col1 , DColumn col2 ) { return col1 . getName ( ) . compareTo ( col2 . getName ( ) ) ; } } ) ; return columns ; }
Filter store and sort attributes from the given map .
364
10
17,779
private void deleteTable ( String tableName ) { m_logger . info ( "Deleting table: {}" , tableName ) ; try { m_ddbClient . deleteTable ( new DeleteTableRequest ( tableName ) ) ; for ( int seconds = 0 ; seconds < 10 ; seconds ++ ) { try { m_ddbClient . describeTable ( tableName ) ; Thread . sleep ( 1000 ) ; } catch ( ResourceNotFoundException e ) { break ; // Success } // All other exceptions passed to outer try/catch } } catch ( ResourceNotFoundException e ) { // Already deleted. } catch ( Exception e ) { throw new RuntimeException ( "Error deleting table: " + tableName , e ) ; } }
Delete the given table and wait for it to be deleted .
155
12
17,780
public List < String > tokenize ( String text ) { List < String > tokens = new ArrayList < String > ( ) ; text = text . toLowerCase ( ) ; char [ ] array = text . toCharArray ( ) ; //convert all apostrophes to 0x27 for ( int i = 0 ; i < array . length ; i ++ ) { if ( isApostrofe ( array [ i ] ) ) array [ i ] = 0x27 ; } int pos = 0 ; //term cycle while ( pos < array . length ) { //scan to the start of the term while ( pos < array . length && ! Character . isLetterOrDigit ( array [ pos ] ) ) pos ++ ; int start = pos ; if ( start == array . length ) break ; //scan to the end of the term while ( pos < array . length && isLetterOrDigitOrApostrofe ( array [ pos ] ) ) pos ++ ; int newpos = pos ; while ( newpos > start && isApostrofe ( array [ newpos - 1 ] ) ) newpos -- ; if ( newpos > start ) tokens . add ( new String ( array , start , newpos - start ) ) ; } return tokens ; }
get list of tokens from the text for indexing
266
10
17,781
public void updateTenant ( Tenant updatedTenant ) { assert m_tenant . getName ( ) . equals ( updatedTenant . getName ( ) ) ; m_tenant = updatedTenant ; }
Update this DBService s Tenant with the given one . This is called when the tenant s definition has been updated in an upward - compatible way such as adding or removing users .
46
38
17,782
public static boolean isSystemTable ( String storeName ) { return storeName . equals ( SchemaService . APPS_STORE_NAME ) || storeName . equals ( TaskManagerService . TASKS_STORE_NAME ) || storeName . equals ( TenantService . TENANTS_STORE_NAME ) ; }
Return true if the given store name is a system table aka metadata table . System tables store column values as strings . All other tables use binary column values .
71
31
17,783
private boolean storeExists ( String tableName ) { KeyspaceMetadata ksMetadata = m_cluster . getMetadata ( ) . getKeyspace ( m_keyspace ) ; return ( ksMetadata != null ) && ( ksMetadata . getTable ( tableName ) != null ) ; }
Return true if the given table exists in the given keyspace .
68
13
17,784
private ResultSet executeQuery ( Query query , String tableName , Object ... values ) { m_logger . debug ( "Executing statement {} on table {}.{}; total params={}" , new Object [ ] { query , m_keyspace , tableName , values . length } ) ; try { PreparedStatement prepState = getPreparedQuery ( query , tableName ) ; BoundStatement boundState = prepState . bind ( values ) ; return m_session . execute ( boundState ) ; } catch ( Exception e ) { String params = "[" + Utils . concatenate ( Arrays . asList ( values ) , "," ) + "]" ; m_logger . error ( "Query failed: query={}, keyspace={}, table={}, params={}; error: {}" , query , m_keyspace , tableName , params , e ) ; throw e ; } }
Execute the given query for the given table using the given values .
190
14
17,785
private Cluster buildClusterSpecs ( ) { Cluster . Builder builder = Cluster . builder ( ) ; // dbhost String dbhost = getParamString ( "dbhost" ) ; String [ ] nodeAddresses = dbhost . split ( "," ) ; for ( String address : nodeAddresses ) { builder . addContactPoint ( address ) ; } // dbport builder . withPort ( getParamInt ( "dbport" , 9042 ) ) ; // db_timeout_millis and db_connect_retry_wait_millis SocketOptions socketOpts = new SocketOptions ( ) ; socketOpts . setReadTimeoutMillis ( getParamInt ( "db_timeout_millis" , 10000 ) ) ; socketOpts . setConnectTimeoutMillis ( getParamInt ( "db_connect_retry_wait_millis" , 5000 ) ) ; builder . withSocketOptions ( socketOpts ) ; // dbuser/dbpassword String dbuser = getParamString ( "dbuser" ) ; if ( ! Utils . isEmpty ( dbuser ) ) { builder . withCredentials ( dbuser , getParamString ( "dbpassword" ) ) ; } // compression builder . withCompression ( Compression . SNAPPY ) ; // TLS/SSL if ( getParamBoolean ( "dbtls" ) ) { builder . withSSL ( getSSLOptions ( ) ) ; } return builder . build ( ) ; }
Build Cluster object from ServerConfig settings .
311
8
17,786
private SSLContext getSSLContext ( String truststorePath , String truststorePassword , String keystorePath , String keystorePassword ) throws Exception { FileInputStream tsf = new FileInputStream ( truststorePath ) ; KeyStore ts = KeyStore . getInstance ( "JKS" ) ; ts . load ( tsf , truststorePassword . toCharArray ( ) ) ; TrustManagerFactory tmf = TrustManagerFactory . getInstance ( TrustManagerFactory . getDefaultAlgorithm ( ) ) ; tmf . init ( ts ) ; FileInputStream ksf = new FileInputStream ( keystorePath ) ; KeyStore ks = KeyStore . getInstance ( "JKS" ) ; ks . load ( ksf , keystorePassword . toCharArray ( ) ) ; KeyManagerFactory kmf = KeyManagerFactory . getInstance ( KeyManagerFactory . getDefaultAlgorithm ( ) ) ; kmf . init ( ks , keystorePassword . toCharArray ( ) ) ; SSLContext ctx = SSLContext . getInstance ( "SSL" ) ; ctx . init ( kmf . getKeyManagers ( ) , tmf . getTrustManagers ( ) , new SecureRandom ( ) ) ; return ctx ; }
Build an SSLContext from the given truststore and keystore parameters .
265
14
17,787
private void connectToCluster ( ) { assert m_cluster != null ; try { m_cluster . init ( ) ; // force connection and throw if unavailable m_session = m_cluster . connect ( ) ; displayClusterInfo ( ) ; } catch ( Exception e ) { m_logger . error ( "Could not connect to Cassandra cluster" , e ) ; throw new DBNotAvailableException ( e ) ; } }
Attempt to connect to the given cluster and throw if it is unavailable .
93
14
17,788
private void displayClusterInfo ( ) { Metadata metadata = m_cluster . getMetadata ( ) ; m_logger . info ( "Connected to cluster with topography:" ) ; RoundRobinPolicy policy = new RoundRobinPolicy ( ) ; for ( Host host : metadata . getAllHosts ( ) ) { m_logger . info ( " Host {}: datacenter: {}, rack: {}, distance: {}" , new Object [ ] { host . getAddress ( ) , host . getDatacenter ( ) , host . getRack ( ) , policy . distance ( host ) } ) ; } m_logger . info ( "Database contains {} keyspaces" , metadata . getKeyspaces ( ) . size ( ) ) ; }
Display configuration information for the given cluster .
163
8
17,789
private ContentType getContentType ( ) { String contentTypeValue = m_request . getContentType ( ) ; if ( contentTypeValue == null ) { return ContentType . TEXT_XML ; } return new ContentType ( contentTypeValue ) ; }
Get the request s content - type using XML as the default .
54
13
17,790
private ContentType getAcceptType ( ) { // If the format header is present, it overrides the ACCEPT header. String format = m_variableMap . get ( "format" ) ; if ( format != null ) { return new ContentType ( format ) ; } String acceptParts = m_request . getHeader ( HttpDefs . ACCEPT ) ; if ( ! Utils . isEmpty ( acceptParts ) ) { for ( String acceptPart : acceptParts . split ( "," ) ) { ContentType acceptType = new ContentType ( acceptPart ) ; if ( acceptType . isJSON ( ) || acceptType . isXML ( ) || acceptType . isPlainText ( ) ) { return acceptType ; } } } return getContentType ( ) ; }
Get the request s accept type defaulting to content - type if none is specified .
166
17
17,791
private boolean isMessageCompressed ( ) { String contentEncoding = m_request . getHeader ( HttpDefs . CONTENT_ENCODING ) ; if ( contentEncoding != null ) { if ( ! contentEncoding . equalsIgnoreCase ( "gzip" ) ) { throw new IllegalArgumentException ( "Unsupported Content-Encoding: " + contentEncoding ) ; } return true ; } return false ; }
If Content - Encoding is included verify that we support it and return true .
93
16
17,792
public long stop ( long time ) { m_nesting -- ; // not started and stopped if ( m_nesting < 0 ) { m_nesting = 0 ; } if ( m_nesting == 0 ) { long elapsedTime = time - m_startTime ; m_elapsedTime += elapsedTime ; return elapsedTime ; } return 0 ; }
Stop the timer . If timer was started update the elapsed time .
76
13
17,793
@ Override public void addValuesForField ( ) { FieldDefinition fieldDef = m_tableDef . getFieldDef ( m_fieldName ) ; if ( fieldDef == null || ! fieldDef . isCollection ( ) ) { addSVScalar ( ) ; } else { addMVScalar ( ) ; } }
Add scalar value to object record ; add term columns for indexed tokens .
72
15
17,794
public static Set < String > mergeMVFieldValues ( Collection < String > currValueSet , Collection < String > removeValueSet , Collection < String > newValueSet ) { Set < String > resultSet = new HashSet <> ( ) ; if ( currValueSet != null ) { resultSet . addAll ( currValueSet ) ; } if ( removeValueSet != null ) { resultSet . removeAll ( removeValueSet ) ; } if ( newValueSet != null ) { resultSet . addAll ( newValueSet ) ; } return resultSet ; }
Merge the given current remove and new MV field values into a new set .
123
16
17,795
private void addFieldTermReferences ( Set < String > termSet ) { Map < String , Set < String > > fieldTermRefsMap = new HashMap < String , Set < String > > ( ) ; fieldTermRefsMap . put ( m_fieldName , termSet ) ; m_dbTran . addTermReferences ( m_tableDef , m_tableDef . getShardNumber ( m_dbObj ) , fieldTermRefsMap ) ; }
Add references to the given terms for used for this field .
100
12
17,796
private void addMVScalar ( ) { Set < String > values = new HashSet <> ( m_dbObj . getFieldValues ( m_fieldName ) ) ; String fieldValue = Utils . concatenate ( values , CommonDefs . MV_SCALAR_SEP_CHAR ) ; m_dbTran . addScalarValueColumn ( m_tableDef , m_dbObj . getObjectID ( ) , m_fieldName , fieldValue ) ; addTermColumns ( fieldValue ) ; }
Add new MV scalar field .
116
7
17,797
private void addSVScalar ( ) { String fieldValue = m_dbObj . getFieldValue ( m_fieldName ) ; m_dbTran . addScalarValueColumn ( m_tableDef , m_dbObj . getObjectID ( ) , m_fieldName , fieldValue ) ; addTermColumns ( fieldValue ) ; }
Add new SV scalar field .
78
7
17,798
private void addTermColumns ( String fieldValue ) { Set < String > termSet = tokenize ( fieldValue ) ; indexTerms ( termSet ) ; addFieldTermReferences ( termSet ) ; addFieldReference ( ) ; }
Add all Terms columns needed for our scalar field .
50
11
17,799
private void indexTerms ( Set < String > termSet ) { for ( String term : termSet ) { m_dbTran . addTermIndexColumn ( m_tableDef , m_dbObj , m_fieldName , term ) ; } }
Tokenize the given field with the appropriate analyzer and add Terms columns for each term .
54
18