index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/DirectCqlQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.UUID;
import org.apache.commons.lang.NotImplementedException;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.reads.model.DirectCqlResult;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.query.CqlQuery;
import com.netflix.astyanax.query.PreparedCqlQuery;
/**
 * Impl for {@link CqlQuery} that allows users to directly send CQL3 over the java driver.
 * <p>
 * The raw CQL string is either executed as-is (wrapped in a {@link SimpleStatement}) or
 * prepared once against the session and then bound with caller-supplied values — see
 * {@link InternalPreparedStatement} and {@link InternalBoundStatement}.
 *
 * @author poberai
 *
 * @param <K>
 * @param <C>
 */
public class DirectCqlQueryImpl<K, C> implements CqlQuery<K, C> {

    /** Keyspace-level context; provides the driver session used to prepare/execute statements. */
    private final KeyspaceContext ksContext;
    /** Column family context (cf definition, consistency level, retry policy). */
    private final CFQueryContext<K, C> cfContext;
    /** The raw CQL3 query string supplied by the caller. */
    private final String basicCqlQuery;

    public DirectCqlQueryImpl(KeyspaceContext ksCtx, CFQueryContext<K, C> cfCtx, String basicCqlQuery) {
        this.ksContext = ksCtx;
        this.cfContext = cfCtx;
        this.basicCqlQuery = basicCqlQuery;
    }

    @Override
    public OperationResult<CqlResult<K, C>> execute() throws ConnectionException {
        return new InternalExecutionImpl(new SimpleStatement(basicCqlQuery)).execute();
    }

    @Override
    public ListenableFuture<OperationResult<CqlResult<K, C>>> executeAsync() throws ConnectionException {
        return new InternalExecutionImpl(new SimpleStatement(basicCqlQuery)).executeAsync();
    }

    @Override
    public CqlQuery<K, C> useCompression() {
        throw new UnsupportedOperationException("Operation not supported");
    }

    /**
     * Prepares {@code basicCqlQuery} once against the session. Every {@code with*Value} call
     * hands off to a fresh {@link InternalBoundStatement} which accumulates the bind values
     * and performs the actual execution. Executing this object directly — i.e. without
     * supplying any bind values first — is not supported.
     */
    protected class InternalPreparedStatement implements PreparedCqlQuery<K, C> {

        private final PreparedStatement pStatement;

        protected InternalPreparedStatement() {
            pStatement = ksContext.getSession().prepare(basicCqlQuery);
        }

        @Override
        public <V> PreparedCqlQuery<K, C> withByteBufferValue(V value, Serializer<V> serializer) {
            return new InternalBoundStatement(pStatement).withByteBufferValue(value, serializer);
        }

        @Override
        public PreparedCqlQuery<K, C> withValue(ByteBuffer value) {
            return new InternalBoundStatement(pStatement).withValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withValues(List<ByteBuffer> values) {
            return new InternalBoundStatement(pStatement).withValues(values);
        }

        @Override
        public PreparedCqlQuery<K, C> withStringValue(String value) {
            return new InternalBoundStatement(pStatement).withStringValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withIntegerValue(Integer value) {
            return new InternalBoundStatement(pStatement).withIntegerValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withBooleanValue(Boolean value) {
            return new InternalBoundStatement(pStatement).withBooleanValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withDoubleValue(Double value) {
            return new InternalBoundStatement(pStatement).withDoubleValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withLongValue(Long value) {
            return new InternalBoundStatement(pStatement).withLongValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withFloatValue(Float value) {
            return new InternalBoundStatement(pStatement).withFloatValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withShortValue(Short value) {
            return new InternalBoundStatement(pStatement).withShortValue(value);
        }

        @Override
        public PreparedCqlQuery<K, C> withUUIDValue(UUID value) {
            return new InternalBoundStatement(pStatement).withUUIDValue(value);
        }

        /** Not supported: supply at least one bind value (via with*Value) and execute that. */
        @Override
        public OperationResult<CqlResult<K, C>> execute() throws ConnectionException {
            throw new NotImplementedException();
        }

        /** Not supported: supply at least one bind value (via with*Value) and execute that. */
        @Override
        public ListenableFuture<OperationResult<CqlResult<K, C>>> executeAsync() throws ConnectionException {
            throw new NotImplementedException();
        }
    }

    /**
     * Accumulates bind values for a prepared statement. Values are bound positionally in the
     * order they were supplied, which must match the order of the bind markers in the CQL.
     */
    protected class InternalBoundStatement implements PreparedCqlQuery<K, C> {

        final List<Object> bindList = new ArrayList<Object>();
        final BoundStatement boundStatement;

        protected InternalBoundStatement(PreparedStatement pStmt) {
            boundStatement = new BoundStatement(pStmt);
        }

        @Override
        public OperationResult<CqlResult<K, C>> execute() throws ConnectionException {
            boundStatement.bind(bindList.toArray());
            return new InternalExecutionImpl(boundStatement).execute();
        }

        @Override
        public ListenableFuture<OperationResult<CqlResult<K, C>>> executeAsync() throws ConnectionException {
            boundStatement.bind(bindList.toArray());
            return new InternalExecutionImpl(boundStatement).executeAsync();
        }

        @Override
        public <V> PreparedCqlQuery<K, C> withByteBufferValue(V value, Serializer<V> serializer) {
            // NOTE(review): the serializer argument is intentionally unused here — the raw value
            // is handed to the java driver, which serializes it with its own codecs. Confirm this
            // matches the Astyanax contract for withByteBufferValue.
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withValue(ByteBuffer value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withValues(List<ByteBuffer> value) {
            bindList.addAll(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withStringValue(String value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withIntegerValue(Integer value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withBooleanValue(Boolean value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withDoubleValue(Double value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withLongValue(Long value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withFloatValue(Float value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withShortValue(Short value) {
            bindList.add(value);
            return this;
        }

        @Override
        public PreparedCqlQuery<K, C> withUUIDValue(UUID value) {
            bindList.add(value);
            return this;
        }
    }

    @Override
    public PreparedCqlQuery<K, C> asPreparedStatement() {
        return new InternalPreparedStatement();
    }

    /**
     * Adapter that runs the given driver {@link Statement} through the standard Astyanax
     * execution pipeline and parses the driver {@link ResultSet} into a {@link CqlResult}.
     */
    private class InternalExecutionImpl extends CqlAbstractExecutionImpl<CqlResult<K, C>> {

        private final Statement query;

        public InternalExecutionImpl(Statement query) {
            super(ksContext, cfContext);
            this.query = query;
        }

        @Override
        public CassandraOperationType getOperationType() {
            return CassandraOperationType.CQL;
        }

        @Override
        public Statement getQuery() {
            return query;
        }

        @SuppressWarnings("unchecked")
        @Override
        public CqlResult<K, C> parseResultSet(ResultSet resultSet) {
            // Case-insensitive detection so "SELECT COUNT(...)" is recognized as well as
            // "select count(..." (the previous check matched lower-case only). Locale.ROOT
            // keeps the case mapping locale-independent.
            boolean isCountQuery = basicCqlQuery.toLowerCase(Locale.ROOT).contains(" count(");
            if (isCountQuery) {
                // Count queries return a single row with a single bigint column.
                // Long.valueOf avoids the deprecated new Long(long) boxing constructor.
                return new DirectCqlResult<K, C>(Long.valueOf(resultSet.one().getLong(0)));
            } else {
                return new DirectCqlResult<K, C>(resultSet.all(), (ColumnFamily<K, C>) cf);
            }
        }
    }
}
| 8,100 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CFRowSliceQueryGen.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import static com.datastax.driver.core.querybuilder.QueryBuilder.desc;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.gt;
import static com.datastax.driver.core.querybuilder.QueryBuilder.gte;
import static com.datastax.driver.core.querybuilder.QueryBuilder.lt;
import static com.datastax.driver.core.querybuilder.QueryBuilder.lte;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Select.Where;
import com.netflix.astyanax.cql.reads.model.CqlColumnSlice;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.CompositeByteBufferRange;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.RangeQueryOp;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.RangeQueryRecord;
/**
* Base class that contains the utilities for generating queries for read operations via the
* {@link RowSliceQuery} class.
*
* Note that this class is just a place holder for some useful generic utilities.
* See {@link CFRowKeysQueryGen} and {@link CFRowRangeQueryGen} which are the 2 extending classes
* for functionality that actually supports the queries.
*
* @author poberai
*/
public class CFRowSliceQueryGen {
// Thread safe reference to the underlying session object. We need the session object to be able to "prepare" query statements
protected final AtomicReference<Session> sessionRef = new AtomicReference<Session>(null);
// the keyspace being queried. Used for all the underlying queries being generated
protected final String keyspace;
// the cf definition which helps extending classes construct the right query as per the schema
protected final CqlColumnFamilyDefinitionImpl cfDef;
// Other useful derivatives of the cf definition that are frequently used by query generators
protected final String partitionKeyCol;
protected final String[] allPrimayKeyCols;
protected final List<ColumnDefinition> clusteringKeyCols;
protected final List<ColumnDefinition> regularCols;
// Condition tracking whether the underlying schema uses composite columns. This is imp since it influences how
// a single Column (composite column) can be decomposed into it's individual components that form different parts of the query.
protected boolean isCompositeColumn;
// bind marker for generating the prepared statements
protected static final String BIND_MARKER = "?";
/**
 * Captures the session and derives the schema state (partition key, clustering keys,
 * regular columns) that all extending row-slice query generators rely on.
 *
 * @param session       live driver session, used to prepare statements
 * @param keyspaceName  keyspace every generated query is scoped to
 * @param cfDefinition  column family schema definition
 */
public CFRowSliceQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {
this.keyspace = keyspaceName;
this.cfDef = cfDefinition;
this.sessionRef.set(session);
partitionKeyCol = cfDef.getPartitionKeyColumnDefinition().getName();
allPrimayKeyCols = cfDef.getAllPkColNames();
clusteringKeyCols = cfDef.getClusteringKeyColumnDefinitionList();
regularCols = cfDef.getRegularColumnDefinitionList();
isCompositeColumn = (clusteringKeyCols.size() > 1);
}
/**
 *
 * SOME BASIC UTILITY METHODS USED BY ALL THE ROW SLICE QUERY GENERATORS
 */
/**
 * Builds a SELECT over all primary-key columns plus every regular column, where each
 * regular column is also selected with its ttl() and writetime() so the Astyanax layer
 * can reconstruct full column metadata.
 *
 * @return the Select positioned at this keyspace/column family, ready for a WHERE clause
 */
protected Select selectAllColumnsFromKeyspaceAndCF() {
Select.Selection select = QueryBuilder.select();
for (int i=0; i<allPrimayKeyCols.length; i++) {
select.column(allPrimayKeyCols[i]);
}
for (ColumnDefinition colDef : regularCols) {
String colName = colDef.getName();
select.column(colName).ttl(colName).writeTime(colName);
}
return select.from(keyspace, cfDef.getName());
}
/**
 * Adds start/end/order/limit clauses for a range over the single clustering-key column.
 * No-op if the slice is not a range query.
 *
 * NOTE(review): this method inlines the slice's actual values into the statement, while
 * {@link #bindWhereClauseForColumnRange(List, CqlColumnSlice)} collects the same values
 * for positional binding — presumably the two serve different (non-prepared vs prepared)
 * query paths in the extending classes; confirm against the callers before changing.
 *
 * @param where        the WHERE clause under construction (mutated in place)
 * @param columnSlice  the requested column range
 * @return the same Where instance, for chaining
 */
protected Where addWhereClauseForColumnRange(Where where, CqlColumnSlice<?> columnSlice) {
String clusteringKeyCol = clusteringKeyCols.get(0).getName();
if (!columnSlice.isRangeQuery()) {
return where;
}
if (columnSlice.getStartColumn() != null) {
where.and(gte(clusteringKeyCol, columnSlice.getStartColumn()));
}
if (columnSlice.getEndColumn() != null) {
where.and(lte(clusteringKeyCol, columnSlice.getEndColumn()));
}
if (columnSlice.getReversed()) {
where.orderBy(desc(clusteringKeyCol));
}
if (columnSlice.getLimit() != -1) {
where.limit(columnSlice.getLimit());
}
return where;
}
/**
 * Collects the bind values (start, end, limit — in that order, skipping absent ones) for a
 * column-range query. The order here must mirror the clause order produced by
 * {@link #addWhereClauseForColumnRange(Where, CqlColumnSlice)}.
 *
 * @param values       accumulator the bind values are appended to
 * @param columnSlice  the requested column range
 */
protected void bindWhereClauseForColumnRange(List<Object> values, CqlColumnSlice<?> columnSlice) {
if (!columnSlice.isRangeQuery()) {
return;
}
if (columnSlice.getStartColumn() != null) {
values.add(columnSlice.getStartColumn());
}
if (columnSlice.getEndColumn() != null) {
values.add(columnSlice.getEndColumn());
}
if (columnSlice.getLimit() != -1) {
values.add(columnSlice.getLimit());
}
return;
}
/**
 * Adds one relational clause (with a bind marker) per range-query operation of a composite
 * column range. Only EQUAL consumes a clustering-key component and advances to the next one;
 * the inequality operators all apply to the current component.
 *
 * @param stmt            the WHERE clause under construction (mutated in place)
 * @param compositeRange  the decomposed composite-column range
 * @return the same Where instance, for chaining
 * @throws RuntimeException on an unrecognized operator
 */
protected Where addWhereClauseForCompositeColumnRange(Where stmt, CompositeByteBufferRange compositeRange) {
List<RangeQueryRecord> records = compositeRange.getRecords();
int componentIndex = 0;
for (RangeQueryRecord record : records) {
for (RangeQueryOp op : record.getOps()) {
String columnName = clusteringKeyCols.get(componentIndex).getName();
switch (op.getOperator()) {
case EQUAL:
stmt.and(eq(columnName, BIND_MARKER));
componentIndex++;
break;
case LESS_THAN :
stmt.and(lt(columnName, BIND_MARKER));
break;
case LESS_THAN_EQUALS:
stmt.and(lte(columnName, BIND_MARKER));
break;
case GREATER_THAN:
stmt.and(gt(columnName, BIND_MARKER));
break;
case GREATER_THAN_EQUALS:
stmt.and(gte(columnName, BIND_MARKER));
break;
default:
throw new RuntimeException("Cannot recognize operator: " + op.getOperator().name());
}; // end of switch stmt
} // end of inner for for ops for each range query record
}
return stmt;
}
/**
 * Collects the bind values for a composite-column range, one value per operation, in the
 * same traversal order as {@link #addWhereClauseForCompositeColumnRange(Where, CompositeByteBufferRange)}
 * so values line up with the generated bind markers.
 *
 * @param values          accumulator the bind values are appended to
 * @param compositeRange  the decomposed composite-column range
 */
protected void bindWhereClauseForCompositeColumnRange(List<Object> values, CompositeByteBufferRange compositeRange) {
List<RangeQueryRecord> records = compositeRange.getRecords();
for (RangeQueryRecord record : records) {
for (RangeQueryOp op : record.getOps()) {
values.add(op.getValue());
}
}
return;
}
/**
 * Returns an array of {@code n} bind markers ("?"), used when generating IN (...) clauses
 * of a known size.
 *
 * @param n number of markers
 * @return array of n bind-marker strings
 */
protected Object[] bindMarkerArray(int n) {
Object[] arr = new Object[n];
for (int i=0; i<n; i++) {
arr[i] = BIND_MARKER;
}
return arr;
}
}
| 8,101 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CFRowQueryGen.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import static com.datastax.driver.core.querybuilder.QueryBuilder.desc;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.gt;
import static com.datastax.driver.core.querybuilder.QueryBuilder.gte;
import static com.datastax.driver.core.querybuilder.QueryBuilder.in;
import static com.datastax.driver.core.querybuilder.QueryBuilder.lt;
import static com.datastax.driver.core.querybuilder.QueryBuilder.lte;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select.Selection;
import com.datastax.driver.core.querybuilder.Select.Where;
import com.netflix.astyanax.cql.reads.model.CqlColumnSlice;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.RangeQueryOp;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.RangeQueryRecord;
/**
 * Top-level generator (and cache) of driver {@link Statement}s for Astyanax read queries
 * against one column family. It inspects the schema shape — flat table (no clustering keys),
 * single clustering key, or composite (multiple clustering keys) — and routes each incoming
 * query object to the matching statement generator.
 */
public class CFRowQueryGen {
// Thread safe handle to the driver session; required to "prepare" statements.
private final AtomicReference<Session> sessionRef = new AtomicReference<Session>(null);
// Keyspace and column family definition that all generated queries are scoped to.
private final String keyspace;
private final CqlColumnFamilyDefinitionImpl cfDef;
// Frequently used derivatives of the cf definition.
private final String partitionKeyCol;
private final String[] allPrimayKeyCols;
private final List<ColumnDefinition> clusteringKeyCols;
private final List<ColumnDefinition> regularCols;
// More than one clustering-key column => columns are composites; range queries must be
// decomposed per component.
private boolean isCompositeColumn;
// No clustering-key columns at all => a "flat" CQL3 table, handled by the flat-table generators.
private boolean isFlatTable;
// Bind marker used when constructing prepared statements.
private static final String BIND_MARKER = "?";
// Specialized generators this class delegates to, chosen per query shape.
private final CFRowKeysQueryGen rowKeysQueryGen;
private final CFRowRangeQueryGen rowRangeQueryGen;
private final FlatTableRowQueryGen flatTableRowQueryGen;
private final FlatTableRowSliceQueryGen flatTableRowSliceQueryGen;
private final CFColumnQueryGen columnQueryGen;
/**
 * Derives the schema state from the cf definition and constructs all delegate generators.
 *
 * @param session       live driver session, used to prepare statements
 * @param keyspaceName  keyspace every generated query is scoped to
 * @param cfDefinition  column family schema definition
 */
public CFRowQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {
this.keyspace = keyspaceName;
this.cfDef = cfDefinition;
this.sessionRef.set(session);
partitionKeyCol = cfDef.getPartitionKeyColumnDefinition().getName();
allPrimayKeyCols = cfDef.getAllPkColNames();
clusteringKeyCols = cfDef.getClusteringKeyColumnDefinitionList();
regularCols = cfDef.getRegularColumnDefinitionList();
isCompositeColumn = (clusteringKeyCols.size() > 1);
isFlatTable = (clusteringKeyCols.size() == 0);
rowKeysQueryGen = new CFRowKeysQueryGen(session, keyspaceName, cfDefinition);
rowRangeQueryGen = new CFRowRangeQueryGen(session, keyspaceName, cfDefinition);
flatTableRowQueryGen = new FlatTableRowQueryGen(session, keyspaceName, cfDefinition);
flatTableRowSliceQueryGen = new FlatTableRowSliceQueryGen(session, keyspaceName, cfDefinition);
columnQueryGen = new CFColumnQueryGen(session, keyspaceName, cfDefinition);
}
// Query: select every pk column plus all regular columns (with ttl/writetime) for a single
// partition key. The only bind value is the row key.
private QueryGenCache<CqlRowQueryImpl<?,?>> SelectEntireRow = new QueryGenCache<CqlRowQueryImpl<?,?>>(sessionRef) {
@Override
public Callable<RegularStatement> getQueryGen(CqlRowQueryImpl<?, ?> rowQuery) {
return new Callable<RegularStatement>() {
@Override
public RegularStatement call() throws Exception {
Selection select = QueryBuilder.select();
for (int i=0; i<allPrimayKeyCols.length; i++) {
select.column(allPrimayKeyCols[i]);
}
for (ColumnDefinition colDef : regularCols) {
String colName = colDef.getName();
select.column(colName).ttl(colName).writeTime(colName);
}
RegularStatement stmt = select.from(keyspace, cfDef.getName()).where(eq(partitionKeyCol, BIND_MARKER));
return stmt;
}
};
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlRowQueryImpl<?, ?> rowQuery) {
return pStatement.bind(rowQuery.getRowKey());
}
};
// Query: select specific "columns" of a dynamic-column (e.g. time-series) row, expressed as
// an IN (...) over the single clustering-key column. Bind values: row key, then each column.
// NOTE(review): the cached prepared statement encodes the number of IN() markers from the
// first query that populated the cache — confirm useCaching is safe when later queries
// select a different number of columns.
private QueryGenCache<CqlRowQueryImpl<?,?>> SelectColumnSliceWithClusteringKey = new QueryGenCache<CqlRowQueryImpl<?,?>>(sessionRef) {
@Override
public Callable<RegularStatement> getQueryGen(final CqlRowQueryImpl<?, ?> rowQuery) {
return new Callable<RegularStatement>() {
@Override
public RegularStatement call() throws Exception {
if (clusteringKeyCols.size() != 1) {
throw new RuntimeException("Cannot perform column slice query with clusteringKeyCols.size: " + clusteringKeyCols.size());
}
// THIS IS A QUERY WHERE THE COLUMN NAME IS DYNAMIC E.G TIME SERIES
Selection select = QueryBuilder.select();
for (int i=0; i<allPrimayKeyCols.length; i++) {
select.column(allPrimayKeyCols[i]);
}
for (ColumnDefinition colDef : regularCols) {
String colName = colDef.getName();
select.column(colName).ttl(colName).writeTime(colName);
}
int numCols = rowQuery.getColumnSlice().getColumns().size();
List<Object> colSelection = new ArrayList<Object>();
for (int i=0; i<numCols; i++) {
colSelection.add(BIND_MARKER);
}
return select
.from(keyspace, cfDef.getName())
.where(eq(partitionKeyCol, BIND_MARKER))
.and(in(clusteringKeyCols.get(0).getName(), colSelection.toArray(new Object[colSelection.size()])));
}
};
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlRowQueryImpl<?, ?> rowQuery) {
// Bind order must mirror the generated statement: row key first, then the IN() columns.
List<Object> objects = new ArrayList<Object>();
objects.add(rowQuery.getRowKey());
for (Object col : rowQuery.getColumnSlice().getColumns()) {
objects.add(col);
}
return pStatement.bind(objects.toArray(new Object[objects.size()]));
}
};
// Query: range over the single clustering-key column (start/end/order/limit). The limit is
// inlined as a literal (not bound), so only row key, start and end are bind values.
private QueryGenCache<CqlRowQueryImpl<?,?>> SelectColumnRangeWithClusteringKey = new QueryGenCache<CqlRowQueryImpl<?,?>>(sessionRef) {
@Override
public Callable<RegularStatement> getQueryGen(final CqlRowQueryImpl<?, ?> rowQuery) {
return new Callable<RegularStatement>() {
@Override
public RegularStatement call() throws Exception {
if (clusteringKeyCols.size() != 1) {
throw new RuntimeException("Cannot perform col range query with current schema, missing pk cols");
}
Selection select = QueryBuilder.select();
for (int i=0; i<allPrimayKeyCols.length; i++) {
select.column(allPrimayKeyCols[i]);
}
for (ColumnDefinition colDef : regularCols) {
String colName = colDef.getName();
select.column(colName).ttl(colName).writeTime(colName);
}
Where where = select.from(keyspace, cfDef.getName())
.where(eq(partitionKeyCol, BIND_MARKER));
String clusterKeyCol = clusteringKeyCols.get(0).getName();
CqlColumnSlice<?> columnSlice = rowQuery.getColumnSlice();
if (columnSlice.getStartColumn() != null) {
where.and(gte(clusterKeyCol, BIND_MARKER));
}
if (columnSlice.getEndColumn() != null) {
where.and(lte(clusterKeyCol, BIND_MARKER));
}
if (columnSlice.getReversed()) {
where.orderBy(desc(clusterKeyCol));
}
if (!rowQuery.isPaginating()) {
// Column limits are applicable only when we are not paginating
if (columnSlice.getLimit() != -1) {
where.limit(columnSlice.getLimit());
}
}
return where;
}
};
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlRowQueryImpl<?, ?> rowQuery) {
if (clusteringKeyCols.size() != 1) {
throw new RuntimeException("Cannot perform col range query with current schema, missing pk cols");
}
// Bind order must mirror the generated statement: row key, then start, then end (if present).
List<Object> values = new ArrayList<Object>();
values.add(rowQuery.getRowKey());
CqlColumnSlice<?> columnSlice = rowQuery.getColumnSlice();
if (columnSlice.getStartColumn() != null) {
values.add(columnSlice.getStartColumn());
}
if (columnSlice.getEndColumn() != null) {
values.add(columnSlice.getEndColumn());
}
return pStatement.bind(values.toArray(new Object[values.size()]));
}
};
// Query: range over composite clustering keys. One relational clause (with a bind marker)
// per range-query op; only EQUAL consumes a clustering-key component and advances to the next.
private QueryGenCache<CqlRowQueryImpl<?,?>> SelectWithCompositeColumn = new QueryGenCache<CqlRowQueryImpl<?,?>>(sessionRef) {
@Override
public Callable<RegularStatement> getQueryGen(final CqlRowQueryImpl<?, ?> rowQuery) {
return new Callable<RegularStatement>() {
@Override
public RegularStatement call() throws Exception {
Selection select = QueryBuilder.select();
for (int i=0; i<allPrimayKeyCols.length; i++) {
select.column(allPrimayKeyCols[i]);
}
for (ColumnDefinition colDef : regularCols) {
String colName = colDef.getName();
select.column(colName).ttl(colName).writeTime(colName);
}
Where stmt = select.from(keyspace, cfDef.getName())
.where(eq(partitionKeyCol, BIND_MARKER));
List<RangeQueryRecord> records = rowQuery.getCompositeRange().getRecords();
int componentIndex = 0;
for (RangeQueryRecord record : records) {
for (RangeQueryOp op : record.getOps()) {
String columnName = clusteringKeyCols.get(componentIndex).getName();
switch (op.getOperator()) {
case EQUAL:
stmt.and(eq(columnName, BIND_MARKER));
componentIndex++;
break;
case LESS_THAN :
stmt.and(lt(columnName, BIND_MARKER));
break;
case LESS_THAN_EQUALS:
stmt.and(lte(columnName, BIND_MARKER));
break;
case GREATER_THAN:
stmt.and(gt(columnName, BIND_MARKER));
break;
case GREATER_THAN_EQUALS:
stmt.and(gte(columnName, BIND_MARKER));
break;
default:
throw new RuntimeException("Cannot recognize operator: " + op.getOperator().name());
}; // end of switch stmt
} // end of inner for for ops for each range query record
}
return stmt;
}
};
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlRowQueryImpl<?, ?> rowQuery) {
// Every recognized operator binds exactly one value; this switch mirrors the statement
// generation above so values line up positionally with the bind markers.
List<RangeQueryRecord> records = rowQuery.getCompositeRange().getRecords();
List<Object> values = new ArrayList<Object>();
values.add(rowQuery.getRowKey());
for (RangeQueryRecord record : records) {
for (RangeQueryOp op : record.getOps()) {
switch (op.getOperator()) {
case EQUAL:
values.add(op.getValue());
break;
case LESS_THAN :
values.add(op.getValue());
break;
case LESS_THAN_EQUALS:
values.add(op.getValue());
break;
case GREATER_THAN:
values.add(op.getValue());
break;
case GREATER_THAN_EQUALS:
values.add(op.getValue());
break;
default:
throw new RuntimeException("Cannot recognize operator: " + op.getOperator().name());
}; // end of switch stmt
} // end of inner for for ops for each range query record
}
return pStatement.bind(values.toArray(new Object[values.size()]));
}
};
/**
 * Routes a single-row query to the generator matching the schema and query type.
 *
 * @param rowQuery   the row query to generate a statement for
 * @param useCaching whether the prepared statement may be cached and reused
 * @return the (possibly bound) driver statement
 * @throws RuntimeException for unsupported query types
 */
public Statement getQueryStatement(final CqlRowQueryImpl<?,?> rowQuery, boolean useCaching) {
if (isFlatTable) {
return flatTableRowQueryGen.getQueryStatement(rowQuery, useCaching);
}
switch (rowQuery.getQueryType()) {
case AllColumns:
return SelectEntireRow.getBoundStatement(rowQuery, useCaching);
case ColumnSlice:
return SelectColumnSliceWithClusteringKey.getBoundStatement(rowQuery, useCaching);
case ColumnRange:
if (isCompositeColumn) {
return SelectWithCompositeColumn.getBoundStatement(rowQuery, useCaching);
} else {
return SelectColumnRangeWithClusteringKey.getBoundStatement(rowQuery, useCaching);
}
default :
throw new RuntimeException("RowQuery use case not supported. Fix this!!");
}
}
/**
 * Routes a row-slice query (explicit keys or a token/key range) to the matching delegate.
 *
 * @param rowSliceQuery the slice query to generate a statement for
 * @param useCaching    whether the prepared statement may be cached and reused
 * @return the (possibly bound) driver statement
 * @throws RuntimeException for unsupported slice query types
 */
public Statement getQueryStatement(final CqlRowSliceQueryImpl<?,?> rowSliceQuery, boolean useCaching) {
if (isFlatTable) {
return flatTableRowSliceQueryGen.getQueryStatement(rowSliceQuery, useCaching);
}
switch (rowSliceQuery.getRowQueryType()) {
case RowKeys:
return rowKeysQueryGen.getQueryStatement(rowSliceQuery, useCaching);
case RowRange:
return rowRangeQueryGen.getQueryStatement(rowSliceQuery, useCaching);
default :
throw new RuntimeException("RowSliceQuery use case not supported. Fix this!!");
}
}
/**
 * Routes a single-column query to the column query generator.
 *
 * @param columnQuery the column query to generate a statement for
 * @param useCaching  whether the prepared statement may be cached and reused
 * @return the (possibly bound) driver statement
 */
public Statement getQueryStatement(final CqlColumnQueryImpl<?> columnQuery, boolean useCaching) {
return columnQueryGen.getQueryStatement(columnQuery, useCaching);
}
}
| 8,102 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CqlColumnFamilyQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.reads.model.CqlRowSlice;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.query.AllRowsQuery;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.query.CqlQuery;
import com.netflix.astyanax.query.IndexQuery;
import com.netflix.astyanax.query.RowQuery;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.retry.RetryPolicy;
/**
 * Base impl for the {@link ColumnFamilyQuery} interface — the root of all Astyanax read
 * operations. From here a query branches into a {@link RowQuery} (single row), a
 * {@link RowSliceQuery} (multiple rows or a range), an {@link AllRowsQuery} (full scan),
 * or raw CQL via {@link CqlQuery}.
 *
 * This class carries the column family context, the retry policy and the consistency
 * level shared by every read issued beneath it.
 *
 * Important classes to see are
 * {@link CqlRowQueryImpl}
 * {@link CqlRowSliceQueryImpl}
 * {@link CqlAllRowsQueryImpl}
 *
 * @author poberai
 *
 * @param <K>
 * @param <C>
 */
public class CqlColumnFamilyQueryImpl<K, C> implements ColumnFamilyQuery<K, C> {

    private final KeyspaceContext keyspaceContext;
    private final CFQueryContext<K, C> queryContext;
    private boolean cachingEnabled = false;

    public CqlColumnFamilyQueryImpl(KeyspaceContext ksCtx, ColumnFamily<K, C> cf) {
        keyspaceContext = ksCtx;
        queryContext = new CFQueryContext<K, C>(cf);
        // Reads default to CL_ONE unless the caller overrides via setConsistencyLevel().
        queryContext.setConsistencyLevel(ConsistencyLevel.CL_ONE);
    }

    @Override
    public ColumnFamilyQuery<K, C> setConsistencyLevel(ConsistencyLevel clLevel) {
        queryContext.setConsistencyLevel(clLevel);
        return this;
    }

    @Override
    public ColumnFamilyQuery<K, C> withRetryPolicy(RetryPolicy retry) {
        // Duplicate so each query chain owns an independent retry-state instance.
        queryContext.setRetryPolicy(retry.duplicate());
        return this;
    }

    @Override
    public ColumnFamilyQuery<K, C> pinToHost(Host host) {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public RowQuery<K, C> getKey(K rowKey) {
        return getRow(rowKey);
    }

    @Override
    public RowQuery<K, C> getRow(K rowKey) {
        return new CqlRowQueryImpl<K, C>(keyspaceContext, queryContext, rowKey, cachingEnabled);
    }

    @Override
    public RowSliceQuery<K, C> getKeyRange(K startKey, K endKey, String startToken, String endToken, int count) {
        return getRowRange(startKey, endKey, startToken, endToken, count);
    }

    @Override
    public RowSliceQuery<K, C> getRowRange(K startKey, K endKey, String startToken, String endToken, int count) {
        return sliceQueryFor(new CqlRowSlice<K>(startKey, endKey, startToken, endToken, count));
    }

    @Override
    public RowSliceQuery<K, C> getKeySlice(K... keys) {
        return getRowSlice(keys);
    }

    @Override
    public RowSliceQuery<K, C> getRowSlice(K... keys) {
        return getRowSlice(Arrays.asList(keys));
    }

    @Override
    public RowSliceQuery<K, C> getKeySlice(Collection<K> keys) {
        return getRowSlice(keys);
    }

    @Override
    public RowSliceQuery<K, C> getRowSlice(Collection<K> keys) {
        return sliceQueryFor(new CqlRowSlice<K>(keys));
    }

    @Override
    public RowSliceQuery<K, C> getKeySlice(Iterable<K> keys) {
        return getRowSlice(keys);
    }

    @Override
    public RowSliceQuery<K, C> getRowSlice(Iterable<K> keys) {
        // Materialize the iterable: CqlRowSlice needs a concrete collection.
        List<K> materialized = new ArrayList<K>();
        for (K key : keys) {
            materialized.add(key);
        }
        return getRowSlice(materialized);
    }

    @Override
    public AllRowsQuery<K, C> getAllRows() {
        return new CqlAllRowsQueryImpl<K, C>(keyspaceContext.getKeyspaceContext(), queryContext.getColumnFamily());
    }

    @Override
    public CqlQuery<K, C> withCql(String cql) {
        return new DirectCqlQueryImpl<K, C>(keyspaceContext, queryContext, cql);
    }

    @Override
    public IndexQuery<K, C> searchWithIndex() {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public ColumnFamilyQuery<K, C> withCaching(boolean condition) {
        cachingEnabled = condition;
        return this;
    }

    /** Builds a row-slice query over the given slice using this query's contexts and caching flag. */
    private RowSliceQuery<K, C> sliceQueryFor(CqlRowSlice<K> slice) {
        return new CqlRowSliceQueryImpl<K, C>(keyspaceContext, queryContext, slice, cachingEnabled);
    }
}
| 8,103 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/QueryGenCache.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;
/**
* Template for {@link PreparedStatement} caching for a query Q.
* The class provides the basic functionality to store a cached reference to the PreparedStatement
* that is generated by the extending class. Hence actual logic for constructing the PreparedStatement
* and binding values to that statement is not defined here. That must be provided by the extending classes.
*
* @author poberai
*
* @param <Q>
*/
public abstract class QueryGenCache<Q> {

    private static final Logger LOG = LoggerFactory.getLogger(QueryGenCache.class);

    // Reference to the session object. This is required for "preparing" a statement.
    private AtomicReference<Session> sessionRef = new AtomicReference<Session>(null);

    // The cached reference to the PreparedStatement constructed by extending classes.
    // Seeded atomically on first use when caching is enabled.
    private final AtomicReference<PreparedStatement> cachedStatement = new AtomicReference<PreparedStatement>(null);

    /**
     * Constructor
     * @param sessionR shared session reference used to prepare statements
     */
    public QueryGenCache(AtomicReference<Session> sessionR) {
        this.sessionRef = sessionR;
    }

    /**
     * Get the bound statement from the prepared statement.
     * @param query       the Astyanax query to translate
     * @param useCaching  whether a previously prepared statement may be reused
     * @return BoundStatement with the query's dynamic values bound
     */
    public BoundStatement getBoundStatement(Q query, boolean useCaching) {
        PreparedStatement pStatement = getPreparedStatement(query, useCaching);
        return bindValues(pStatement, query);
    }

    /**
     * Get the prepared statement by either constructing the query or using the cached statement.
     * Note that the caller can provide useCaching as a knob to turn caching ON/OFF.
     * If false, the query is constructed fresh via the extending class.
     * If true, the cached reference is consulted first; on a miss the constructed statement
     * seeds the cache.
     *
     * @param query
     * @param useCaching
     * @return PreparedStatement
     */
    public PreparedStatement getPreparedStatement(Q query, boolean useCaching) {

        PreparedStatement pStatement = null;

        if (useCaching) {
            pStatement = cachedStatement.get();
        }

        if (pStatement == null) {
            try {
                RegularStatement stmt = getQueryGen(query).call();
                LOG.debug("Query: {}", stmt.getQueryString());
                pStatement = sessionRef.get().prepare(stmt.getQueryString());
            } catch (Exception e) {
                // Preserve the original cause for callers; query generation and prepare
                // failures are not recoverable at this layer.
                throw new RuntimeException(e);
            }
        }

        if (useCaching) {
            // Atomic seed: the previous check-then-act (get() == null, then set()) allowed
            // concurrent callers to overwrite each other. compareAndSet keeps the first
            // prepared statement; losers simply use their own statement for this call.
            cachedStatement.compareAndSet(null, pStatement);
        }
        return pStatement;
    }

    /**
     * Extending classes must implement this with logic for constructing the java driver query
     * from the given Astyanax query.
     * @param query
     * @return Callable that produces the RegularStatement when invoked
     */
    public abstract Callable<RegularStatement> getQueryGen(Q query);

    /**
     * Extending classes must implement this with logic for binding the Astyanax query data
     * to the pre-constructed prepared statement, in marker order.
     * @param pStatement
     * @param query
     * @return BoundStatement
     */
    public abstract BoundStatement bindValues(PreparedStatement pStatement, Q query);
}
| 8,104 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/FlatTableRowQueryGen.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Select.Selection;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
/**
* Read query generator for queries on flat tables i.e tables with no clustering keys.
*
* The class lives along other implementations like {@link CFRowQueryGen}, {@link CFRowRangeQueryGen} and {@link CFRowKeysQueryGen}
* The structure of queries for flat tables was different enough that they warranted their own class. If your schema contains clustering keys
* then see {@link CFRowQueryGen}, {@link CFRowRangeQueryGen} and {@link CFRowKeysQueryGen} for implementation details.
*
* Note that the class manages several individual query generators for different use cases like
* 1. Selecting the entire row
* 2. Performing a column slice operation i.e column collection
*
* Each of these query generators uses the {@link QueryGenCache} to maintain a cached reference to the {@link PreparedStatement}
* that it creates, which can then be leveraged by subsequent flat table queries that have the same signature.
*
* Note the one must use caching for flat table queries with EXTREME CAUTION. The cacheability of a query depends on the actual
* signature of a query. If you use different queries with different signatures for the same column slice operations, then caching will
* not work. Here is an example where caching will break queries.
*
* Consider a query where you want to perform a column slice operation i.e cherry pick some column for a given row.
* The Astyanax query for that will look somewhat like this
*
* ks.prepareQuery( myCF )
* .getRow( 1 )
* .getColumn( first_name )
* .execute();
*
* Now if the table is a flat table, then the query for this will look something like
*
* SELECT first_name FROM ks.myCF WHERE key = ? ;
*
* Note the bind marker for the row key. That is the only parameter here which is dynamic and the column name here i.e "first_name" is not
* and hence is part of the signature of this query.
*
* Now if we were to attempt to re-use the same prepared statement for a query like this
*
* ks.prepareQuery( myCF )
* .getRow( 1 )
* .getColumn( last_name ) <------ NOTE THAT WE ARE CHANGING OUR COLUMN SLICE AND HENCE VIOLATING THE QUERY SIGNATURE
* .execute();
*
* Then this will break since the CQL query required for this is
*
* SELECT last_name FROM ks.myCF WHERE key = ? ;
*
* In cases like this, DO NOT use statement caching.
*
* @author poberai
*
*/
public class FlatTableRowQueryGen {

    // Reference to the session that is needed for "preparing" the statements
    private AtomicReference<Session> sessionRef = new AtomicReference<Session>(null);

    // Target keyspace and table metadata, captured once at construction time.
    private final String keyspace;
    private final CqlColumnFamilyDefinitionImpl cfDef;

    // Name of the single partition key column (flat tables have no clustering keys).
    private final String partitionKeyCol;
    // All primary-key column names. (The "Primay" spelling is a historical typo.)
    private final String[] allPrimayKeyCols;
    // Non-primary-key ("regular") columns of the table.
    private final List<ColumnDefinition> regularCols;

    private static final String BIND_MARKER = "?";

    /**
     * Constructor
     * @param session       driver session used to prepare generated statements
     * @param keyspaceName  keyspace that owns the column family
     * @param cfDefinition  schema definition the column names are derived from
     */
    public FlatTableRowQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {
        this.keyspace = keyspaceName;
        this.cfDef = cfDefinition;
        this.sessionRef.set(session);
        partitionKeyCol = cfDef.getPartitionKeyColumnDefinition().getName();
        allPrimayKeyCols = cfDef.getAllPkColNames();
        regularCols = cfDef.getRegularColumnDefinitionList();
    }

    /**
     * Query generator that generates a query to read the entire row, i.e all the columns.
     * Note that since it implements the {@link QueryGenCache} it also maintains an inner cached reference
     * to the {@link PreparedStatement} that it creates which can then be re-used by subsequent queries that
     * have the same signature (i.e read all columns)
     */
    private QueryGenCache<CqlRowQueryImpl<?,?>> SelectEntireRow = new QueryGenCache<CqlRowQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(CqlRowQueryImpl<?, ?> rowQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    // SELECT <pk cols>, (<col>, ttl(<col>), writetime(<col>))* FROM ks.cf WHERE pk = ?
                    Selection select = QueryBuilder.select();
                    for (int i=0; i<allPrimayKeyCols.length; i++) {
                        select.column(allPrimayKeyCols[i]);
                    }
                    for (ColumnDefinition colDef : regularCols) {
                        String colName = colDef.getName();
                        // ttl/writetime are fetched so the Astyanax Column can expose them.
                        select.column(colName).ttl(colName).writeTime(colName);
                    }
                    RegularStatement stmt = select.from(keyspace, cfDef.getName()).where(eq(partitionKeyCol, BIND_MARKER));
                    return stmt;
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowQueryImpl<?, ?> rowQuery) {
            // The row key is the only dynamic value; everything else is part of the
            // query signature and baked into the prepared statement.
            return pStatement.bind(rowQuery.getRowKey());
        }
    };

    /**
     * Query generator that generates a query to peform a column slice operation on the specified row.
     * Note that performing column slice operations on flat tables is dangerous since the query signature is not the same,
     * hence use this with caution. See above for an explanation on query signatures and query cacheability.
     *
     * Note that since it implements the {@link QueryGenCache} it also maintains an inner cached reference
     * to the {@link PreparedStatement} that it creates which can then be re-used by subsequent queries that
     * have the same signature (i.e read the same column slice for a given row)
     */
    private QueryGenCache<CqlRowQueryImpl<?,?>> SelectColumnSlice = new QueryGenCache<CqlRowQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowQueryImpl<?, ?> rowQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    // Cherry-picked columns become part of the generated query text,
                    // and therefore part of the cache signature — see class javadoc.
                    Select.Selection select = QueryBuilder.select();
                    select.column(partitionKeyCol);
                    for (Object col : rowQuery.getColumnSlice().getColumns()) {
                        // Column names are assumed to be Strings for flat tables — TODO confirm.
                        String columnName = (String)col;
                        select.column(columnName).ttl(columnName).writeTime(columnName);
                    }
                    return select.from(keyspace, cfDef.getName()).where(eq(partitionKeyCol, BIND_MARKER));
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowQueryImpl<?, ?> rowQuery) {
            // Only the row key is bound; the column names are fixed in the statement.
            return pStatement.bind(rowQuery.getRowKey());
        }
    };

    /**
     * Dispatch to the generator matching the query's column-selection shape.
     * Column-range queries are impossible on a flat table (no clustering key columns).
     */
    public Statement getQueryStatement(final CqlRowQueryImpl<?,?> rowQuery, boolean useCaching) {
        switch (rowQuery.getQueryType()) {
        case AllColumns:
            return SelectEntireRow.getBoundStatement(rowQuery, useCaching);
        case ColumnSlice:
            return SelectColumnSlice.getBoundStatement(rowQuery, useCaching);
        case ColumnRange:
            throw new RuntimeException("Cannot perform col range query with current schema, missing pk cols");
        default :
            throw new RuntimeException("Flat table RowQuery use case not supported. Fix this!!");
        }
    }
}
| 8,105 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CqlRowSliceQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.reads.model.CqlColumnSlice;
import com.netflix.astyanax.cql.reads.model.CqlRangeBuilder;
import com.netflix.astyanax.cql.reads.model.CqlRangeImpl;
import com.netflix.astyanax.cql.reads.model.CqlRowListImpl;
import com.netflix.astyanax.cql.reads.model.CqlRowListIterator;
import com.netflix.astyanax.cql.reads.model.CqlRowSlice;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.query.RowSliceColumnCountQuery;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.serializers.CompositeRangeBuilder;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.CompositeByteBufferRange;
/**
* Impl for {@link RowSliceQuery} interface.
*
* Just like {@link CqlRowQueryImpl} this class only manages the context for the row slice query, but does not construct the actual
* CQL query itself. For more details on how the actual query is constructed see classes
* {@link CFRowKeysQueryGen} and {@link CFRowRangeQueryGen}
*
* @author poberai
*
* @param <K>
* @param <C>
*/
public class CqlRowSliceQueryImpl<K, C> implements RowSliceQuery<K, C> {

    private final KeyspaceContext ksContext;
    private final CFQueryContext<K,C> cfContext;
    // The set or range of row keys this query targets; fixed at construction.
    private final CqlRowSlice<K> rowSlice;

    // Column selection; defaults to "all columns" until narrowed by a withColumn* call.
    private CqlColumnSlice<C> columnSlice = new CqlColumnSlice<C>();
    // Set only when the caller supplies a composite-column range.
    private CompositeByteBufferRange compositeRange = null;

    // When true, results are surfaced as a lazy iterator instead of being fully materialized.
    private final boolean isPaginating;
    private boolean useCaching = false;

    // Whether the row selection is an explicit key set or a key/token range.
    public enum RowSliceQueryType {
        RowKeys, RowRange
    }

    // Shape of the column selection; drives which query generator is used downstream.
    public enum ColumnSliceQueryType {
        AllColumns, ColumnSet, ColumnRange;
    }

    private final RowSliceQueryType rowQueryType;
    private ColumnSliceQueryType colQueryType = ColumnSliceQueryType.AllColumns;

    public CqlRowSliceQueryImpl(KeyspaceContext ksCtx, CFQueryContext<K,C> cfCtx, CqlRowSlice<K> rSlice, boolean useCaching) {
        // Pagination is enabled by default.
        this(ksCtx, cfCtx, rSlice, true, useCaching);
    }

    public CqlRowSliceQueryImpl(KeyspaceContext ksCtx, CFQueryContext<K,C> cfCtx, CqlRowSlice<K> rSlice, boolean condition, boolean useCaching) {
        this.ksContext = ksCtx;
        this.cfContext = cfCtx;
        this.rowSlice = rSlice;
        this.isPaginating = condition;
        // A range query (start/end keys or tokens) vs an explicit key collection.
        this.rowQueryType = (rowSlice.isRangeQuery()) ? RowSliceQueryType.RowRange : RowSliceQueryType.RowKeys;
        this.useCaching = useCaching;
    }

    @Override
    public OperationResult<Rows<K, C>> execute() throws ConnectionException {
        return new InternalRowQueryExecutionImpl(this).execute();
    }

    @Override
    public ListenableFuture<OperationResult<Rows<K, C>>> executeAsync() throws ConnectionException {
        return new InternalRowQueryExecutionImpl(this).executeAsync();
    }

    @Override
    public RowSliceQuery<K, C> withColumnSlice(C... columns) {
        colQueryType = ColumnSliceQueryType.ColumnSet;
        return withColumnSlice(Arrays.asList(columns));
    }

    @Override
    public RowSliceQuery<K, C> withColumnSlice(Collection<C> columns) {
        colQueryType = ColumnSliceQueryType.ColumnSet;
        this.columnSlice = new CqlColumnSlice<C>(columns);
        return this;
    }

    @Override
    public RowSliceQuery<K, C> withColumnSlice(ColumnSlice<C> columns) {
        colQueryType = ColumnSliceQueryType.ColumnSet;
        this.columnSlice = new CqlColumnSlice<C>(columns);
        return this;
    }

    @Override
    public RowSliceQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
        colQueryType = ColumnSliceQueryType.ColumnRange;
        // "column1" is the default clustering-key column name for thrift-style
        // tables — NOTE(review): confirm against schemas with renamed comparators.
        this.columnSlice = new CqlColumnSlice<C>(new CqlRangeBuilder<C>()
                .setColumn("column1")
                .setStart(startColumn)
                .setEnd(endColumn)
                .setReversed(reversed)
                .setLimit(count)
                .build());
        return this;
    }

    @Override
    public RowSliceQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int limit) {
        colQueryType = ColumnSliceQueryType.ColumnRange;
        Serializer<C> colSerializer = cfContext.getColumnFamily().getColumnSerializer();
        // Empty/absent buffers mean an open-ended range on that side.
        C start = (startColumn != null && startColumn.capacity() > 0) ? colSerializer.fromByteBuffer(startColumn) : null;
        C end = (endColumn != null && endColumn.capacity() > 0) ? colSerializer.fromByteBuffer(endColumn) : null;
        return this.withColumnRange(start, end, reversed, limit);
    }

    @SuppressWarnings("unchecked")
    @Override
    public RowSliceQuery<K, C> withColumnRange(ByteBufferRange range) {
        colQueryType = ColumnSliceQueryType.ColumnRange;
        // Dispatch on the concrete range type; composite ranges are stashed separately
        // from simple column ranges, and unknown types are decomposed into buffers.
        if (range instanceof CompositeByteBufferRange) {
            this.compositeRange = (CompositeByteBufferRange) range;
        } else if (range instanceof CompositeRangeBuilder) {
            this.compositeRange = ((CompositeRangeBuilder)range).build();
        } else if (range instanceof CqlRangeImpl) {
            this.columnSlice.setCqlRange((CqlRangeImpl<C>) range);
        } else {
            return this.withColumnRange(range.getStart(), range.getEnd(), range.isReversed(), range.getLimit());
        }
        return this;
    }

    @Override
    public RowSliceColumnCountQuery<K> getColumnCounts() {
        // Reuse the generated row-slice statement; counting happens in the count query impl.
        Statement query = new InternalRowQueryExecutionImpl(this).getQuery();
        return new CqlRowSliceColumnCountQueryImpl<K>(ksContext, cfContext, query);
    }

    /**
     * Internal execution wrapper: delegates query generation to the CF's row query
     * generator and converts the driver ResultSet into Astyanax Rows.
     */
    @SuppressWarnings("unchecked")
    private class InternalRowQueryExecutionImpl extends CqlAbstractExecutionImpl<Rows<K, C>> {

        private final CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();
        private final CqlRowSliceQueryImpl<K, C> rowSliceQuery;

        public InternalRowQueryExecutionImpl(CqlRowSliceQueryImpl<K, C> rSliceQuery) {
            super(ksContext, cfContext);
            this.rowSliceQuery = rSliceQuery;
        }

        // NOTE(review): this overload is not used within this class — confirm external
        // callers before removing.
        public InternalRowQueryExecutionImpl(KeyspaceContext ksContext, CFQueryContext<?, ?> cfContext) {
            super(ksContext, cfContext);
            this.rowSliceQuery = null;
        }

        @Override
        public Statement getQuery() {
            return cfDef.getRowQueryGenerator().getQueryStatement(rowSliceQuery, useCaching);
        }

        @Override
        public Rows<K, C> parseResultSet(ResultSet rs) throws NotFoundException {
            if (!isPaginating) {
                // Materialize everything up front.
                List<com.datastax.driver.core.Row> rows = rs.all();
                if (rows == null || rows.isEmpty()) {
                    return new CqlRowListImpl<K, C>();
                }
                return new CqlRowListImpl<K, C>(rows, (ColumnFamily<K, C>) cf);
            } else {
                // Lazy iteration: rows are pulled from the ResultSet as consumed.
                if (rs == null) {
                    return new CqlRowListImpl<K, C>();
                }
                return new CqlRowListIterator<K, C>(rs, (ColumnFamily<K, C>) cf);
            }
        }

        @Override
        public CassandraOperationType getOperationType() {
            return CassandraOperationType.GET_ROW;
        }
    }

    public CqlRowSlice<K> getRowSlice() {
        return rowSlice;
    }

    public CqlColumnSlice<C> getColumnSlice() {
        return columnSlice;
    }

    public CompositeByteBufferRange getCompositeRange() {
        return compositeRange;
    }

    public ColumnSliceQueryType getColQueryType() {
        return colQueryType;
    }

    public RowSliceQueryType getRowQueryType() {
        return rowQueryType;
    }
}
| 8,106 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/FlatTableRowSliceQueryGen.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import static com.datastax.driver.core.querybuilder.QueryBuilder.gte;
import static com.datastax.driver.core.querybuilder.QueryBuilder.in;
import static com.datastax.driver.core.querybuilder.QueryBuilder.lte;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Select.Where;
import com.netflix.astyanax.cql.reads.model.CqlRowSlice.RowRange;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.query.RowSliceQuery;
/**
* Just like {@link FlatTableRowQueryGen} this class encapsulates the functionality for row query generation for
* Astyanax {@link RowSliceQuery}(s).
*
* The class uses a collection of query generators to handle all sort of RowSliceQuery permutations like
* 1. Selecting all columns for a row collection
* 2. Selecting a column set for a row collection
* 3. Selecting all columns for a row range
* 4. Selecting a column set for a row range
*
* Note that this class supports query generation for flat tables only.
* For tables with clustering keys see {@link CFRowKeysQueryGen} and {@link CFRowRangeQueryGen}.
*
* Also, just like the other query generators, use this with caution when using caching of {@link PreparedStatement}
* See {@link FlatTableRowQueryGen} for a detailed explanation of why PreparedStatement caching will not work for queries
* that do not have the same signatures.
*
* @author poberai
*
*/
public class FlatTableRowSliceQueryGen {

    protected AtomicReference<Session> sessionRef = new AtomicReference<Session>(null);
    protected final String keyspace;
    protected final CqlColumnFamilyDefinitionImpl cfDef;

    // Flat tables have a single partition key column and no clustering keys.
    protected final String partitionKeyCol;
    protected final String[] allPrimayKeyCols;
    protected final List<ColumnDefinition> regularCols;

    protected static final String BIND_MARKER = "?";

    /**
     * Constructor
     * @param session       driver session used to prepare generated statements
     * @param keyspaceName  keyspace that owns the column family
     * @param cfDefinition  schema definition the column names are derived from
     */
    public FlatTableRowSliceQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {
        this.keyspace = keyspaceName;
        this.cfDef = cfDefinition;
        this.sessionRef.set(session);
        partitionKeyCol = cfDef.getPartitionKeyColumnDefinition().getName();
        allPrimayKeyCols = cfDef.getAllPkColNames();
        regularCols = cfDef.getRegularColumnDefinitionList();
    }

    /**
     * Build the SELECT clause covering every column of the table:
     * all primary-key columns plus (value, ttl, writetime) for each regular column.
     */
    protected Select selectAllColumnsFromKeyspaceAndCF() {
        Select.Selection select = QueryBuilder.select();
        for (int i=0; i<allPrimayKeyCols.length; i++) {
            select.column(allPrimayKeyCols[i]);
        }
        for (ColumnDefinition colDef : regularCols) {
            String colName = colDef.getName();
            select.column(colName).ttl(colName).writeTime(colName);
        }
        return select.from(keyspace, cfDef.getName());
    }

    /** All columns for an explicit collection of row keys (WHERE pk IN (...)). */
    private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectAllColumnsForRowKeys = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    Select select = selectAllColumnsFromKeyspaceAndCF();
                    // NOTE(review): the in() values are supplied at build time and again
                    // at bind time; verify the driver version extracts them as bind
                    // parameters before enabling caching on this path.
                    return select.where(in(partitionKeyCol, rowSliceQuery.getRowSlice().getKeys().toArray()));
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return pStatement.bind(rowSliceQuery.getRowSlice().getKeys().toArray());
        }
    };

    /** A fixed set of columns for an explicit collection of row keys. */
    private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectColumnSetForRowKeys = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    // The cherry-picked column names are baked into the query text and
                    // therefore into the PreparedStatement cache signature.
                    Select.Selection select = QueryBuilder.select();
                    select.column(partitionKeyCol);
                    for (Object col : rowSliceQuery.getColumnSlice().getColumns()) {
                        String columnName = (String)col;
                        select.column(columnName).ttl(columnName).writeTime(columnName);
                    }
                    return select.from(keyspace, cfDef.getName()).where(in(partitionKeyCol, rowSliceQuery.getRowSlice().getKeys().toArray()));
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            List<Object> values = new ArrayList<Object>();
            values.addAll(rowSliceQuery.getRowSlice().getKeys());
            return pStatement.bind(values.toArray());
        }
    };

    /**
     * Build the WHERE clause for a row-range query. Either key bounds or token bounds
     * may be supplied, never both. One bind marker is emitted per present bound,
     * start before end; {@link #bindWhereClauseForRowRange} must add values in the
     * same order.
     */
    private Where addWhereClauseForRowRange(String keyAlias, Select select, RowRange<?> rowRange) {

        Where where = null;

        boolean keyIsPresent = false;
        boolean tokenIsPresent = false;

        if (rowRange.getStartKey() != null || rowRange.getEndKey() != null) {
            keyIsPresent = true;
        }
        if (rowRange.getStartToken() != null || rowRange.getEndToken() != null) {
            tokenIsPresent = true;
        }

        if (keyIsPresent && tokenIsPresent) {
            throw new RuntimeException("Cannot provide both token and keys for range query");
        }

        if (keyIsPresent) {
            if (rowRange.getStartKey() != null && rowRange.getEndKey() != null) {
                where = select.where(gte(keyAlias, BIND_MARKER))
                        .and(lte(keyAlias, BIND_MARKER));
            } else if (rowRange.getStartKey() != null) {
                where = select.where(gte(keyAlias, BIND_MARKER));
            } else if (rowRange.getEndKey() != null) {
                where = select.where(lte(keyAlias, BIND_MARKER));
            }
        } else if (tokenIsPresent) {
            // Range over the partitioner token of the key rather than the key itself.
            String tokenOfKey = "token(" + keyAlias + ")";
            if (rowRange.getStartToken() != null && rowRange.getEndToken() != null) {
                where = select.where(gte(tokenOfKey, BIND_MARKER))
                        .and(lte(tokenOfKey, BIND_MARKER));
            } else if (rowRange.getStartToken() != null) {
                where = select.where(gte(tokenOfKey, BIND_MARKER));
            } else if (rowRange.getEndToken() != null) {
                where = select.where(lte(tokenOfKey, BIND_MARKER));
            }
        } else {
            where = select.where();
        }

        if (rowRange.getCount() > 0) {
            // TODO: fix this
            //where.limit(rowRange.getCount());
        }
        return where;
    }

    /**
     * Add bind values matching the markers emitted by {@link #addWhereClauseForRowRange},
     * in the same order (start before end).
     */
    private void bindWhereClauseForRowRange(List<Object> values, RowRange<?> rowRange) {

        boolean keyIsPresent = false;
        boolean tokenIsPresent = false;

        if (rowRange.getStartKey() != null || rowRange.getEndKey() != null) {
            keyIsPresent = true;
        }
        if (rowRange.getStartToken() != null || rowRange.getEndToken() != null) {
            tokenIsPresent = true;
        }

        if (keyIsPresent && tokenIsPresent) {
            throw new RuntimeException("Cannot provide both token and keys for range query");
        }

        if (keyIsPresent) {
            if (rowRange.getStartKey() != null) {
                values.add(rowRange.getStartKey());
            }
            if (rowRange.getEndKey() != null) {
                values.add(rowRange.getEndKey());
            }
        } else if (tokenIsPresent) {
            // BUGFIX: the previous implementation called longValue() on both tokens
            // unconditionally (NPE when only one end of the token range was given)
            // and only added values when BOTH tokens were present, leaving
            // single-ended token ranges with an unbound marker. Bind each present
            // token independently, mirroring addWhereClauseForRowRange.
            if (rowRange.getStartToken() != null) {
                values.add(Long.valueOf(new BigInteger(rowRange.getStartToken()).longValue()));
            }
            if (rowRange.getEndToken() != null) {
                values.add(Long.valueOf(new BigInteger(rowRange.getEndToken()).longValue()));
            }
            if (rowRange.getCount() > 0) {
                // TODO: fix this
                //where.limit(rowRange.getCount());
            }
        }
    }

    /** All columns for a key/token range of rows. */
    private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectAllColumnsForRowRange = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    Select select = selectAllColumnsFromKeyspaceAndCF();
                    return addWhereClauseForRowRange(partitionKeyCol, select, rowSliceQuery.getRowSlice().getRange());
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            List<Object> values = new ArrayList<Object>();
            bindWhereClauseForRowRange(values, rowSliceQuery.getRowSlice().getRange());
            return pStatement.bind(values.toArray(new Object[values.size()]));
        }
    };

    /** A fixed set of columns for a key/token range of rows. */
    private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectColumnSetForRowRange = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    Select.Selection select = QueryBuilder.select();
                    select.column(partitionKeyCol);
                    for (Object col : rowSliceQuery.getColumnSlice().getColumns()) {
                        String columnName = (String)col;
                        select.column(columnName).ttl(columnName).writeTime(columnName);
                    }
                    Select selection = select.from(keyspace, cfDef.getName());
                    Where where = addWhereClauseForRowRange(partitionKeyCol, selection, rowSliceQuery.getRowSlice().getRange());
                    return where;
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            List<Object> values = new ArrayList<Object>();
            bindWhereClauseForRowRange(values, rowSliceQuery.getRowSlice().getRange());
            return pStatement.bind(values.toArray());
        }
    };

    /** Dispatch on row-selection shape (explicit keys vs range). */
    public BoundStatement getQueryStatement(CqlRowSliceQueryImpl<?,?> rowSliceQuery, boolean useCaching) {
        switch (rowSliceQuery.getRowQueryType()) {
        case RowKeys:
            return getRowKeysQueryStatement(rowSliceQuery, useCaching);
        case RowRange:
            return getRowRangeQueryStatement(rowSliceQuery, useCaching);
        default :
            throw new RuntimeException("RowSliceQuery use case not supported.");
        }
    }

    /** Dispatch on column-selection shape for an explicit key set. */
    public BoundStatement getRowKeysQueryStatement(CqlRowSliceQueryImpl<?,?> rowSliceQuery, boolean useCaching) {
        switch (rowSliceQuery.getColQueryType()) {
        case AllColumns:
            return SelectAllColumnsForRowKeys.getBoundStatement(rowSliceQuery, useCaching);
        case ColumnSet:
            return SelectColumnSetForRowKeys.getBoundStatement(rowSliceQuery, useCaching);
        case ColumnRange:
            // Column ranges require clustering keys, which flat tables do not have.
            throw new RuntimeException("RowSliceQuery use case not supported.");
        default :
            throw new RuntimeException("RowSliceQuery use case not supported.");
        }
    }

    /** Dispatch on column-selection shape for a row range. */
    public BoundStatement getRowRangeQueryStatement(CqlRowSliceQueryImpl<?,?> rowSliceQuery, boolean useCaching) {
        switch (rowSliceQuery.getColQueryType()) {
        case AllColumns:
            return SelectAllColumnsForRowRange.getBoundStatement(rowSliceQuery, useCaching);
        case ColumnSet:
            return SelectColumnSetForRowRange.getBoundStatement(rowSliceQuery, useCaching);
        case ColumnRange:
            // Column ranges require clustering keys, which flat tables do not have.
            throw new RuntimeException("RowSliceQuery use case not supported.");
        default :
            throw new RuntimeException("RowSliceQuery use case not supported.");
        }
    }
}
| 8,107 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CqlColumnQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.reads.model.CqlColumnImpl;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.query.ColumnQuery;
/**
* Impl for the {@link ColumnQuery} interface using the java driver.
* This class is responsible for selecting a single column for the specified row key.
*
* Note that this class acts like a placeholder for all the query context, but does not construct the query itself.
* For details on how the query is actually constructed see {@link CFColumnQueryGen}
*
* @author poberai
*
* @param <C>
*/
public class CqlColumnQueryImpl<C> implements ColumnQuery<C> {

    private final KeyspaceContext ksContext;
    private final CFQueryContext<?,C> cfContext;
    private final Object rowKey;
    private final C columnName;
    private final CqlColumnFamilyDefinitionImpl cfDef;
    private boolean useCaching = false;

    CqlColumnQueryImpl(KeyspaceContext ksCtx, CFQueryContext<?,C> cfCtx, Object rowKey, C colName, boolean caching) {
        this.ksContext = ksCtx;
        this.cfContext = cfCtx;
        this.rowKey = rowKey;
        this.columnName = colName;
        this.useCaching = caching;
        // The CF definition owns the query generator that later builds the actual statement.
        this.cfDef = (CqlColumnFamilyDefinitionImpl) cfCtx.getColumnFamily().getColumnFamilyDefinition();
    }

    @Override
    public OperationResult<Column<C>> execute() throws ConnectionException {
        return new InternalColumnQueryExecutionImpl(this).execute();
    }

    @Override
    public ListenableFuture<OperationResult<Column<C>>> executeAsync() throws ConnectionException {
        return new InternalColumnQueryExecutionImpl(this).executeAsync();
    }

    /**
     * Execution wrapper: asks the CF's row query generator for the statement and
     * parses the (at most one) returned row into a {@link Column}.
     */
    private class InternalColumnQueryExecutionImpl extends CqlAbstractExecutionImpl<Column<C>> {

        private final CqlColumnQueryImpl<?> columnQuery;

        public InternalColumnQueryExecutionImpl(CqlColumnQueryImpl<?> query) {
            super(ksContext, cfContext);
            this.columnQuery = query;
        }

        @Override
        public CassandraOperationType getOperationType() {
            return CassandraOperationType.GET_COLUMN;
        }

        @Override
        public Statement getQuery() {
            return cfDef.getRowQueryGenerator().getQueryStatement(columnQuery, useCaching);
        }

        @Override
        public Column<C> parseResultSet(ResultSet rs) throws NotFoundException {
            // A single row is expected; an absent row maps to a null column.
            Row singleRow = rs.one();
            return (singleRow == null) ? null : new CqlColumnImpl<C>(columnName, singleRow, 0);
        }
    }

    public Object getRowKey() {
        return rowKey;
    }

    public C getColumnName() {
        return columnName;
    }

    public ColumnFamily<?,C> getCF() {
        return this.cfContext.getColumnFamily();
    }
}
| 8,108 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CqlColumnCountQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.util.List;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.query.ColumnCountQuery;
import com.netflix.astyanax.query.RowQuery;
/**
* Impl for {@link ColumnCountQuery}
*
* Note that since this query essentially derives itself from the {@link RowQuery} interface, it also uses the statement
* constructed by the {@link CqlRowQueryImpl} class. The difference in functionality is in how the records form the result set
* are parsed. Here we look at the number of rows returned for the same row key.
*
* Note that since CQL3 can treat columns as rows
* (depending on the schema), we look for multiple rows with the same row keys. If there are multiple rows, then we count the number
* of rows for each unique row key. If there is just one row and the schema definition is like a flat table, then we just count the actual no of data columns returned
* in the result set.
*
* See {@link CqlRowQueryImpl} for more details on how the query is actually constructed
*
* @author poberai
*
*/
public class CqlColumnCountQueryImpl implements ColumnCountQuery {

    private final KeyspaceContext ksContext;
    private final CFQueryContext<?, ?> cfContext;
    // Statement borrowed from the originating RowQuery; this class only reinterprets its result.
    private final Statement query;

    public CqlColumnCountQueryImpl(KeyspaceContext ksCtx, CFQueryContext<?,?> cfCtx, Statement query) {
        this.ksContext = ksCtx;
        this.cfContext = cfCtx;
        this.query = query;
    }

    @Override
    public OperationResult<Integer> execute() throws ConnectionException {
        return new InternalColumnCountExecutionImpl().execute();
    }

    @Override
    public ListenableFuture<OperationResult<Integer>> executeAsync() throws ConnectionException {
        return new InternalColumnCountExecutionImpl().executeAsync();
    }

    /**
     * Runs the underlying row query and counts the CQL3 rows returned. Since CQL3
     * can transpose thrift-style columns into rows (schema dependent), the row count
     * of the result set corresponds to the column count for the queried key.
     */
    private class InternalColumnCountExecutionImpl extends CqlAbstractExecutionImpl<Integer> {

        public InternalColumnCountExecutionImpl() {
            super(ksContext, cfContext);
        }

        @Override
        public CassandraOperationType getOperationType() {
            return CassandraOperationType.GET_COLUMN_COUNT;
        }

        @Override
        public Statement getQuery() {
            return query;
        }

        @Override
        public Integer parseResultSet(ResultSet resultSet) {
            // Count by iterating the result set instead of materializing every row
            // via all(), which needlessly buffered the whole result in memory just to
            // take its size. (ResultSet.all() never returns null, so the previous
            // null check was dead code.)
            int count = 0;
            for (Row row : resultSet) {
                count++;
            }
            return count;
        }
    }
}
| 8,109 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CqlRowCopier.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.util.Iterator;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.RowCopier;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlKeyspaceImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.reads.model.CqlColumnImpl;
import com.netflix.astyanax.cql.writes.CqlColumnListMutationImpl;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.query.RowQuery;
/**
* Impl for {@link RowCopier}
*
* @author poberai
*
* @param <K>
* @param <C>
*/
/**
 * Impl for {@link RowCopier}: reads every column of the source row via the supplied
 * {@link RowQuery} and stages them into a mutation batch targeting the same row key.
 *
 * @author poberai
 *
 * @param <K>
 * @param <C>
 */
public class CqlRowCopier<K,C> implements RowCopier<K,C> {

    private final ColumnFamily<K,C> cf;
    private final K rowKey;
    private final RowQuery<K,C> rowQuery;
    private final KeyspaceContext ksContext;
    private boolean useOriginalTimestamp = false;

    public CqlRowCopier(ColumnFamily<K,C> cf, K rowKey, RowQuery<K,C> query, KeyspaceContext ksContext) {
        this.cf = cf;
        this.rowKey = rowKey;
        this.rowQuery = query;
        this.ksContext = ksContext;
    }

    @Override
    public OperationResult<Void> execute() throws ConnectionException {
        return getMutationBatch().execute();
    }

    @Override
    public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
        return getMutationBatch().executeAsync();
    }

    @Override
    public RowCopier<K, C> withOriginalTimestamp(boolean useTimestamp) {
        this.useOriginalTimestamp = useTimestamp;
        return this;
    }

    /**
     * Reads the source row and builds the batch that writes all of its columns back
     * under the same key. When the caller asked for the original timestamp, the
     * timestamp of the first column read is applied to the whole mutation.
     */
    private MutationBatch getMutationBatch() throws ConnectionException {

        ColumnList<C> sourceColumns = rowQuery.execute().getResult();

        MutationBatch batch = new CqlKeyspaceImpl(ksContext).prepareMutationBatch();
        CqlColumnListMutationImpl<K,C> rowMutation = (CqlColumnListMutationImpl<K, C>) batch.withRow(cf, rowKey);

        boolean timestampPending = useOriginalTimestamp;
        for (Column<C> column : sourceColumns) {
            CqlColumnImpl<C> cqlColumn = (CqlColumnImpl<C>) column;
            if (timestampPending) {
                // Preserve the source row's write time, taken from its first column.
                rowMutation.setTimestamp(cqlColumn.getTimestamp());
                timestampPending = false;
            }
            rowMutation.putColumnWithGenericValue(cqlColumn.getName(), cqlColumn.getGenericValue(), null);
        }
        return batch;
    }
}
| 8,110 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CqlAllRowsQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.astyanax.ExceptionCallback;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.RowCallback;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.TokenRange;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlOperationResultImpl;
import com.netflix.astyanax.cql.reads.model.CqlColumnSlice;
import com.netflix.astyanax.cql.reads.model.CqlRangeBuilder;
import com.netflix.astyanax.cql.reads.model.CqlRangeImpl;
import com.netflix.astyanax.cql.reads.model.CqlRowListImpl;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.partitioner.Murmur3Partitioner;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.query.AllRowsQuery;
import com.netflix.astyanax.query.CheckpointManager;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.shallows.EmptyCheckpointManager;
/**
* Impl for {@link AllRowsQuery} that uses the java driver underneath.
* Note that it is easier and more intuitive to just use the AllRowsReader recipe instead.
* See https://github.com/Netflix/astyanax/wiki/AllRowsReader-All-rows-query for details on how to use the recipe.
*
* @author poberai
*
* @param <K>
* @param <C>
*/
public class CqlAllRowsQueryImpl<K,C> implements AllRowsQuery<K,C> {

    private static final Logger LOG = LoggerFactory.getLogger(CqlAllRowsQueryImpl.class);

    // Partitioner used to split/compute token ranges; Murmur3 unless the field below is changed.
    private static final Partitioner DEFAULT_PARTITIONER = Murmur3Partitioner.get();
    // Rows fetched per page when the caller does not set a row limit / block size.
    private final static int DEFAULT_PAGE_SIZE = 100;

    private final Keyspace keyspace;
    private final ColumnFamily<K, C> columnFamily;

    private Integer rowLimit = DEFAULT_PAGE_SIZE;
    private Integer concurrencyLevel; // Default to null will force ring describe
    // NOTE(review): never assigned anywhere in this class as shown, so executeTasks()
    // always takes the local-executor branch — confirm nothing external sets it.
    private ExecutorService executor;
    private CheckpointManager checkpointManager = new EmptyCheckpointManager();
    private RowCallback<K, C> rowCallback;
    private boolean repeatLastToken;
    private ColumnSlice<C> columnSlice;
    private String startToken;
    private String endToken;
    // NOTE(review): set by setIncludeEmptyRows() but never read in this class.
    private Boolean includeEmptyRows; // Default to null will discard tombstones
    // Futures of the per-token-range worker tasks; populated by executeTasks().
    private List<Future<Boolean>> futures = Lists.newArrayList();
    // Cooperative cancellation flag checked by every worker loop.
    private AtomicBoolean cancelling = new AtomicBoolean(false);
    private Partitioner partitioner = DEFAULT_PARTITIONER;
    private ConsistencyLevel consistencyLevel;
    // NOTE(review): set by setExceptionCallback() but never invoked in this class.
    private ExceptionCallback exceptionCallback;
    // First exception seen by any worker; used to fail the overall query.
    private AtomicReference<Exception> error = new AtomicReference<Exception>();

    public CqlAllRowsQueryImpl(Keyspace ks, ColumnFamily<K,C> cf) {
        this.keyspace = ks;
        this.columnFamily = cf;
    }

    /** Alias for {@link #setRowLimit(int)}. */
    @Override
    public AllRowsQuery<K, C> setBlockSize(int blockSize) {
        setRowLimit(blockSize);
        return this;
    }

    @Override
    public AllRowsQuery<K, C> setRowLimit(int rowLimit) {
        this.rowLimit = rowLimit;
        return this;
    }

    @Override
    public AllRowsQuery<K, C> setExceptionCallback(ExceptionCallback cb) {
        this.exceptionCallback = cb;
        return this;
    }

    @Override
    public AllRowsQuery<K, C> setCheckpointManager(CheckpointManager manager) {
        this.checkpointManager = manager;
        return this;
    }

    @Override
    public AllRowsQuery<K, C> setRepeatLastToken(boolean condition) {
        this.repeatLastToken = condition;
        return this;
    }

    @Override
    public AllRowsQuery<K, C> setIncludeEmptyRows(boolean flag) {
        this.includeEmptyRows = flag;
        return this;
    }

    @Override
    public AllRowsQuery<K, C> withColumnSlice(C... columns) {
        return withColumnSlice(Arrays.asList(columns));
    }

    @Override
    public AllRowsQuery<K, C> withColumnSlice(Collection<C> columns) {
        this.columnSlice = new CqlColumnSlice<C>(columns);
        return this;
    }

    @Override
    public AllRowsQuery<K, C> withColumnSlice(ColumnSlice<C> columns) {
        this.columnSlice = new CqlColumnSlice<C>(columns);
        return this;
    }

    @Override
    public AllRowsQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
        CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) columnFamily.getColumnFamilyDefinition();
        // NOTE(review): index 1 of the partition-key column definition list is used as
        // the range column name — presumably the first clustering key; verify against
        // CqlColumnFamilyDefinitionImpl's list layout.
        String pkColName = cfDef.getPartitionKeyColumnDefinitionList().get(1).getName();
        this.columnSlice = new CqlColumnSlice<C>(new CqlRangeBuilder<C>()
                .setColumn(pkColName)
                .setStart(startColumn)
                .setEnd(endColumn)
                .setReversed(reversed)
                .setLimit(count)
                .build());
        return this;
    }

    @Override
    public AllRowsQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int limit) {
        // Deserialize the raw buffers with the CF's column serializer; empty/absent buffers mean "unbounded".
        Serializer<C> colSerializer = columnFamily.getColumnSerializer();
        C start = (startColumn != null && startColumn.capacity() > 0) ? colSerializer.fromByteBuffer(startColumn) : null;
        C end = (endColumn != null && endColumn.capacity() > 0) ? colSerializer.fromByteBuffer(endColumn) : null;
        return this.withColumnRange(start, end, reversed, limit);
    }

    @Override
    public AllRowsQuery<K, C> withColumnRange(ByteBufferRange range) {
        if (range instanceof CqlRangeImpl) {
            // Native CQL range: install it directly without round-tripping through serializers.
            this.columnSlice = new CqlColumnSlice<C>();
            ((CqlColumnSlice<C>) this.columnSlice).setCqlRange((CqlRangeImpl<C>) range);
            return this;
        } else {
            return this.withColumnRange(range.getStart(), range.getEnd(), range.isReversed(), range.getLimit());
        }
    }

    @Override
    public AllRowsQuery<K, C> setConcurrencyLevel(int numberOfThreads) {
        this.concurrencyLevel = numberOfThreads;
        return this;
    }

    @Override
    @Deprecated
    public AllRowsQuery<K, C> setThreadCount(int numberOfThreads) {
        this.concurrencyLevel = numberOfThreads;
        return this;
    }

    /**
     * Runs the full scan, invoking {@code callback} for each page of rows.
     * Blocks until all token-range tasks complete (or one fails).
     */
    @Override
    public void executeWithCallback(RowCallback<K, C> callback) throws ConnectionException {
        this.rowCallback = callback;
        executeTasks();
    }

    @Override
    public AllRowsQuery<K, C> forTokenRange(BigInteger start, BigInteger end) {
        return forTokenRange(start.toString(), end.toString());
    }

    @Override
    public AllRowsQuery<K, C> forTokenRange(String start, String end) {
        this.startToken = start;
        this.endToken = end;
        return this;
    }

    /**
     * Collects all rows into memory by running the scan with an accumulating callback.
     * Prefer {@link #executeWithCallback(RowCallback)} for large column families.
     */
    @Override
    public OperationResult<Rows<K, C>> execute() throws ConnectionException {
        final AtomicReference<ConnectionException> reference = new AtomicReference<ConnectionException>(null);
        final List<Row<K,C>> list = Collections.synchronizedList(new LinkedList<Row<K,C>>());
        // Local callback (shadows the field; executeWithCallback stores it into the field).
        RowCallback<K,C> rowCallback = new RowCallback<K,C>() {
            @Override
            public void success(Rows<K,C> rows) {
                if (rows != null && !rows.isEmpty()) {
                    for (Row<K,C> row : rows) {
                        list.add(row);
                    }
                }
            }
            @Override
            public boolean failure(ConnectionException e) {
                reference.set(e);
                return false;
            }
        };
        executeWithCallback(rowCallback);
        if (reference.get() != null) {
            throw reference.get();
        }
        CqlRowListImpl<K,C> allRows = new CqlRowListImpl<K,C>(list);
        return new CqlOperationResultImpl<Rows<K,C>>(null, allRows);
    }

    /** Not supported; use {@link #execute()} or {@link #executeWithCallback(RowCallback)}. */
    @Override
    public ListenableFuture<OperationResult<Rows<K, C>>> executeAsync() throws ConnectionException {
        throw new UnsupportedOperationException();
    }

    /**
     * Splits the ring (or the caller-specified token range) into per-range subtasks,
     * runs them on a local daemon pool (or the external executor if one was set) and
     * waits for all of them to finish.
     *
     * @return true when every subtask completed successfully
     */
    private Boolean executeTasks() throws ConnectionException {
        error.set(null);

        List<Callable<Boolean>> subtasks = Lists.newArrayList();

        // We are iterating the entire ring using an arbitrary number of threads
        if (this.concurrencyLevel != null || startToken != null || endToken != null) {
            List<TokenRange> tokens = partitioner.splitTokenRange(
                    startToken == null ? partitioner.getMinToken() : startToken,
                    endToken == null ? partitioner.getMinToken() : endToken,
                    this.concurrencyLevel == null ? 1 : this.concurrencyLevel);

            for (TokenRange range : tokens) {
                subtasks.add(makeTokenRangeTask(range.getStartToken(), range.getEndToken()));
            }
        }
        // We are iterating through each token range
        else {
            List<TokenRange> ranges = keyspace.describeRing(null, null);
            for (TokenRange range : ranges) {
                if (range.getStartToken().equals(range.getEndToken())) {
                    subtasks.add(makeTokenRangeTask(range.getStartToken(), range.getEndToken()));
                } else {
                    subtasks.add(makeTokenRangeTask(partitioner.getTokenMinusOne(range.getStartToken()), range.getEndToken()));
                }
            }
        }

        try {
            // Use a local executor
            if (executor == null) {
                ExecutorService localExecutor = Executors
                        .newFixedThreadPool(subtasks.size(),
                                new ThreadFactoryBuilder().setDaemon(true)
                                        .setNameFormat("AstyanaxAllRowsQuery-%d")
                                        .build());

                try {
                    futures.addAll(startTasks(localExecutor, subtasks));
                    return waitForTasksToFinish();
                }
                finally {
                    localExecutor.shutdownNow();
                }
            }
            // Use an externally provided executor
            else {
                futures.addAll(startTasks(executor, subtasks));
                return waitForTasksToFinish();
            }
        }
        catch (Exception e) {
            error.compareAndSet(null, e);
            LOG.warn("AllRowsReader terminated. " + e.getMessage(), e);
            cancel();
            throw new RuntimeException(error.get());
        }
    }

    /**
     * Builds the worker for one token range: resumes from the checkpoint (if any),
     * then repeatedly pages forward by key-range query until the range is exhausted
     * or the query is cancelled. Returns true on normal completion.
     */
    private Callable<Boolean> makeTokenRangeTask(final String startToken, final String endToken) {
        return new Callable<Boolean>() {
            @Override
            public Boolean call() {
                try {
                    String currentToken;
                    try {
                        // Resume from a previously tracked checkpoint, if present.
                        currentToken = checkpointManager.getCheckpoint(startToken);
                        if (currentToken == null) {
                            currentToken = startToken;
                        }
                        else if (currentToken.equals(endToken)) {
                            // Range already fully processed on a previous run.
                            return true;
                        }
                    } catch (Exception e) {
                        error.compareAndSet(null, e);
                        LOG.error("Failed to get checkpoint for startToken " + startToken, e);
                        cancel();
                        throw new RuntimeException("Failed to get checkpoint for startToken " + startToken, e);
                    }

                    // NOTE(review): localPageSize and rowsToSkip are computed below but never
                    // used to size the query or skip duplicate rows — the dedup logic for
                    // repeatLastToken appears unfinished; confirm against the AllRowsReader recipe.
                    int localPageSize = rowLimit;
                    int rowsToSkip = 0;
                    while (!cancelling.get()) {
                        RowSliceQuery<K, C> query = prepareQuery().getKeyRange(null, null, currentToken, endToken, -1);
                        if (columnSlice != null)
                            query.withColumnSlice(columnSlice);
                        Rows<K, C> rows = query.execute().getResult();
                        if (!rows.isEmpty()) {
                            try {
                                if (rowCallback != null) {
                                    try {
                                        rowCallback.success(rows);
                                    } catch (Exception e) {
                                        LOG.error("Failed to process rows", e);
                                        cancel();
                                        return false;
                                    }
                                } else {
                                    LOG.error("Row function is empty");
                                }
                            } catch (Exception e) {
                                error.compareAndSet(null, e);
                                LOG.warn(e.getMessage(), e);
                                cancel();
                                throw new RuntimeException("Error processing row", e);
                            }

                            // Get the next block
                            if (rows.size() == rowLimit) {
                                Row<K, C> lastRow = rows.getRowByIndex(rows.size() - 1);
                                String lastToken = partitioner.getTokenForKey(lastRow.getRawKey());
                                checkpointManager.trackCheckpoint(startToken, currentToken);
                                if (repeatLastToken) {
                                    // Start token is non-inclusive
                                    currentToken = partitioner.getTokenMinusOne(lastToken);

                                    // Determine the number of rows to skip in the response. Since we are repeating the
                                    // last token it's possible (although unlikely) that there is more than one key mapping to the
                                    // token. We therefore count backwards the number of keys that have the same token and skip
                                    // that number in the next iteration of the loop. If, for example, 3 keys matched but only 2 were
                                    // returned in this iteration then the first 2 keys will be skipped from the next response.
                                    rowsToSkip = 1;
                                    for (int i = rows.size() - 2; i >= 0; i--, rowsToSkip++) {
                                        if (!lastToken.equals(partitioner.getTokenForKey(rows.getRowByIndex(i).getRawKey()))) {
                                            break;
                                        }
                                    }

                                    if (rowsToSkip == localPageSize) {
                                        localPageSize++;
                                    }
                                } else {
                                    currentToken = lastToken;
                                }
                                continue;
                            }
                        }

                        // We're done!
                        checkpointManager.trackCheckpoint(startToken, endToken);
                        return true;
                    }
                    // Loop exited because the query was cancelled.
                    cancel();
                    return false;
                } catch (Exception e) {
                    error.compareAndSet(null, e);
                    LOG.error("Error process token/key range", e);
                    cancel();
                    throw new RuntimeException("Error process token/key range", e);
                }
            }
        };
    }

    /**
     * Submit all the callables to the executor and collect their futures.
     *
     * @param executor
     * @param callables
     * @return futures for the submitted tasks, in submission order
     */
    private List<Future<Boolean>> startTasks(ExecutorService executor, List<Callable<Boolean>> callables) {
        List<Future<Boolean>> tasks = Lists.newArrayList();
        for (Callable<Boolean> callable : callables) {
            tasks.add(executor.submit(callable));
        }
        return tasks;
    }

    /**
     * Wait for all tasks in {@link #futures} to finish.
     *
     * @return true if all tasks returned true or false otherwise.
     */
    private boolean waitForTasksToFinish() throws Exception {
        for (Future<Boolean> future : futures) {
            try {
                if (!future.get()) {
                    cancel();
                    return false;
                }
            } catch (Exception e) {
                error.compareAndSet(null, e);
                cancel();
                throw e;
            }
        }
        return true;
    }

    /** Builds the per-page query, applying the consistency level if one was set. */
    private ColumnFamilyQuery<K, C> prepareQuery() {
        ColumnFamilyQuery<K, C> query = keyspace.prepareQuery(columnFamily);
        if (consistencyLevel != null)
            query.setConsistencyLevel(consistencyLevel);
        return query;
    }

    /**
     * Cancel all pending range iteration tasks. This will cause all internal threads to exit and
     * call() to return false.
     */
    public synchronized void cancel() {
        cancelling.compareAndSet(false, true);
    }
}
| 8,111 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CFRowRangeQueryGen.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import static com.datastax.driver.core.querybuilder.QueryBuilder.gte;
import static com.datastax.driver.core.querybuilder.QueryBuilder.in;
import static com.datastax.driver.core.querybuilder.QueryBuilder.lte;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Select.Where;
import com.netflix.astyanax.cql.reads.model.CqlColumnSlice;
import com.netflix.astyanax.cql.reads.model.CqlRowSlice.RowRange;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.CompositeByteBufferRange;
/**
* This class encapsulates all the query generators for row range queries. There are different row query
* generators depending on the specific query signature.
*
* e.g
* 1. Select all columns for all the rows in the row range
* 2. Select row ranges with column slice
* 3. Select row ranges with column range
* 4. Select row ranges using a composite range builder for composite column based schema
*
* Note that for simplicity and brevity, there is another class that handles similar operations for queries that
* specify a collection of row keys as opposed to a row range.
* See {@link CFRowKeysQueryGen} for that implementation. The current class is meant for row range queries only.
*
* Each of the query generators uses the {@link QueryGenCache} so that it can cache the {@link PreparedStatement} as well
* for future use by queries with the same signatures.
*
* But one must use this with care, since the subsequent query must have the exact signature, else binding values with
* the previously constructed prepared statement will break.
*
* Here is a simple example of a bad query that is not cacheable.
*
* Say that we want a simple query with a column range in it.
*
* ks.prepareQuery(myCF)
* .getRow("1")
* .withColumnSlice("colStart")
* .execute();
*
* In most cases this query lends itself to a CQL3 representation as follows
*
* SELECT * FROM ks.mfCF WHERE KEY = ? AND COLUMN1 > ?;
*
* Now say that we want to perform a successive query (with caching turned ON), but add to the column range query
*
* ks.prepareQuery(myCF)
* .getRow("1")
* .withColumnSlice("colStart", "colEnd")
* .execute();
*
* NOTE THE USE OF BOTH colStart AND colEnd <----- THIS IS A DIFFERENT QUERY SIGNATURE
* AND THE CQL QUERY WILL PROBABLY LOOK LIKE
*
* SELECT * FROM ks.mfCF WHERE KEY = ? AND COLUMN1 > ? AND COLUMN1 < ?; <----- NOTE THE EXTRA BIND MARKER AT THE END FOR THE colEnd
*
* If we re-use the previously cached prepared statement, then it will not work for the new query signature. The way out of this is to NOT
* use caching with different query signatures.
*
* @author poberai
*
*/
public class CFRowRangeQueryGen extends CFRowSliceQueryGen {
/**
* Constructor
*
* @param session
* @param keyspaceName
* @param cfDefinition
*/
public CFRowRangeQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {
    // All shared state (session ref, key aliases, clustering columns) lives in the base class;
    // this subclass only contributes the row-range query generators.
    super(session, keyspaceName, cfDefinition);
}
/**
* Private helper for constructing the where clause for row ranges
* @param keyAlias
* @param select
* @param rowRange
* @return
*/
/**
 * Builds the WHERE clause for a row range. The range may be expressed either as
 * key bounds or as token bounds — never both. A bind marker is added for each
 * bound that is present; {@code bindWhereClauseForRowRange} must later supply
 * values in the same order.
 *
 * @param keyAlias name of the partition key column
 * @param select   the SELECT being decorated
 * @param rowRange the requested range
 * @return the WHERE clause (possibly unrestricted when the range is unbounded)
 * @throws RuntimeException when both key and token bounds are supplied
 */
private Where addWhereClauseForRowRange(String keyAlias, Select select, RowRange<?> rowRange) {

    final boolean hasKeyBounds = (rowRange.getStartKey() != null) || (rowRange.getEndKey() != null);
    final boolean hasTokenBounds = (rowRange.getStartToken() != null) || (rowRange.getEndToken() != null);

    if (hasKeyBounds && hasTokenBounds) {
        throw new RuntimeException("Cannot provide both token and keys for range query");
    }

    Where where = null;
    if (hasKeyBounds) {
        if (rowRange.getStartKey() != null && rowRange.getEndKey() != null) {
            where = select.where(gte(keyAlias, BIND_MARKER)).and(lte(keyAlias, BIND_MARKER));
        } else if (rowRange.getStartKey() != null) {
            where = select.where(gte(keyAlias, BIND_MARKER));
        } else {
            where = select.where(lte(keyAlias, BIND_MARKER));
        }
    } else if (hasTokenBounds) {
        // Token-based ranges restrict token(<key>) rather than the key itself.
        final String tokenOfKey = "token(" + keyAlias + ")";
        if (rowRange.getStartToken() != null && rowRange.getEndToken() != null) {
            where = select.where(gte(tokenOfKey, BIND_MARKER)).and(lte(tokenOfKey, BIND_MARKER));
        } else if (rowRange.getStartToken() != null) {
            where = select.where(gte(tokenOfKey, BIND_MARKER));
        } else {
            where = select.where(lte(tokenOfKey, BIND_MARKER));
        }
    } else {
        // Unbounded range: no restriction at all.
        where = select.where();
    }

    if (rowRange.getCount() > 0) {
        // TODO: fix this
        //where.limit(rowRange.getCount());
    }
    return where;
}
/**
* Private helper for constructing the bind values for the given row range. Note that the assumption here is that
* we have a previously constructed prepared statement that we can bind these values with.
*
* @param keyAlias
* @param select
* @param rowRange
* @return
*/
/**
 * Appends the bind values for a row range in the same order that
 * {@code addWhereClauseForRowRange} added its bind markers: start bound first
 * (if present), then end bound (if present). Token bounds arrive as strings and
 * are bound as longs (Murmur3 token column).
 *
 * @param values   list the bind values are appended to
 * @param rowRange the row range being queried
 * @throws RuntimeException when both key and token bounds are supplied
 */
private void bindWhereClauseForRowRange(List<Object> values, RowRange<?> rowRange) {

    boolean keyIsPresent = rowRange.getStartKey() != null || rowRange.getEndKey() != null;
    boolean tokenIsPresent = rowRange.getStartToken() != null || rowRange.getEndToken() != null;

    if (keyIsPresent && tokenIsPresent) {
        throw new RuntimeException("Cannot provide both token and keys for range query");
    }

    if (keyIsPresent) {
        if (rowRange.getStartKey() != null) {
            values.add(rowRange.getStartKey());
        }
        if (rowRange.getEndKey() != null) {
            values.add(rowRange.getEndKey());
        }
    } else if (tokenIsPresent) {
        // Bind each endpoint independently so a one-sided token range still matches
        // the single bind marker generated for it. The previous implementation only
        // bound values when BOTH endpoints were present (leaving the prepared
        // statement's marker unbound for one-sided ranges) and threw an NPE when
        // only the end token was set, because it unconditionally dereferenced the
        // (null) start token.
        if (rowRange.getStartToken() != null) {
            values.add(new BigInteger(rowRange.getStartToken()).longValue());
        }
        if (rowRange.getEndToken() != null) {
            values.add(new BigInteger(rowRange.getEndToken()).longValue());
        }
    }
    // Note: rowRange.getCount() (the row limit) is intentionally not bound here;
    // see the commented-out LIMIT handling in addWhereClauseForRowRange.
}
/**
* Query generator for selecting all columns for the specified row range.
*
* Note that this object is an implementation of {@link QueryGenCache}
* and hence it maintains a cached reference to the previously constructed {@link PreparedStatement} for row range queries with the same
* signature (i.e all columns)
*/
private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectAllColumnsForRowRange = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

    @Override
    public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        return new Callable<RegularStatement>() {
            @Override
            public RegularStatement call() throws Exception {
                // SELECT * restricted only by the row range (key or token bounds).
                Select select = selectAllColumnsFromKeyspaceAndCF();
                return addWhereClauseForRowRange(partitionKeyCol, select, rowSliceQuery.getRowSlice().getRange());
            }
        };
    }

    @Override
    public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        // Values are appended in the same order the WHERE clause added its bind markers.
        List<Object> values = new ArrayList<Object>();
        bindWhereClauseForRowRange(values, rowSliceQuery.getRowSlice().getRange());
        return pStatement.bind(values.toArray(new Object[values.size()]));
    }
};
/**
 * Query generator for selecting a discrete set of columns for the specified row range.
 * The requested column names are matched with an in() clause on the first clustering key
 * (dynamic column name, e.g. time series data).
 *
 * Note that this object is an implementation of {@link QueryGenCache}
 * and hence it maintains a cached reference to the previously constructed {@link PreparedStatement}
 * for row range queries with the same signature (i.e same column set shape).
 */
private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectColumnSetForRowRange = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

    @Override
    public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        return new Callable<RegularStatement>() {
            @Override
            public RegularStatement call() throws Exception {
                // THIS IS A QUERY WHERE THE COLUMN NAME IS DYNAMIC E.G TIME SERIES
                RowRange<?> range = rowSliceQuery.getRowSlice().getRange();
                Collection<?> cols = rowSliceQuery.getColumnSlice().getColumns();
                Object[] columns = cols.toArray(new Object[cols.size()]);

                Select select = selectAllColumnsFromKeyspaceAndCF();

                // ALLOW FILTERING is required when restricting on clustering columns together with a range.
                if (columns != null && columns.length > 0) {
                    select.allowFiltering();
                }
                Where where = addWhereClauseForRowRange(partitionKeyCol, select, range);
                // restrict to the requested column names via the first clustering key
                where.and(in(clusteringKeyCols.get(0).getName(), columns));

                return where;
            }
        };
    }

    @Override
    public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        // Bind order: row range boundaries first, then the in() column names.
        List<Object> values = new ArrayList<Object>();
        bindWhereClauseForRowRange(values, rowSliceQuery.getRowSlice().getRange());
        values.addAll(rowSliceQuery.getColumnSlice().getColumns());
        return pStatement.bind(values.toArray());
    }
};
/**
 * Query generator for selecting a specified column range with a specified row range.
 *
 * Note that this object is an implementation of {@link QueryGenCache}
 * and hence it maintains a cached reference to the previously constructed {@link PreparedStatement} for row range queries with the same
 * signature (i.e similar column range for the row range)
 */
private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectColumnRangeForRowRange = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

    @Override
    public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        return new Callable<RegularStatement>() {
            @Override
            public RegularStatement call() throws Exception {
                Select select = selectAllColumnsFromKeyspaceAndCF();
                CqlColumnSlice<?> columnSlice = rowSliceQuery.getColumnSlice();

                // A clustering-column range on top of a row range needs ALLOW FILTERING.
                if (columnSlice != null && columnSlice.isRangeQuery()) {
                    select.allowFiltering();
                }
                // Row range restriction first, then the column range restriction on top of it.
                Where where = addWhereClauseForRowRange(partitionKeyCol, select, rowSliceQuery.getRowSlice().getRange());
                where = addWhereClauseForColumnRange(where, columnSlice);
                return where;
            }
        };
    }

    @Override
    public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        // Bind order must match getQueryGen(): row range values first, then column range values.
        List<Object> values = new ArrayList<Object>();
        bindWhereClauseForRowRange(values, rowSliceQuery.getRowSlice().getRange());
        bindWhereClauseForColumnRange(values, rowSliceQuery.getColumnSlice());
        return pStatement.bind(values.toArray());
    }
};
/**
 * Query generator for selecting a specified composite column range with a specified row range.
 *
 * Note that this object is an implementation of {@link QueryGenCache}
 * and hence it maintains a cached reference to the previously constructed {@link PreparedStatement} for row range queries with the same
 * signature (i.e similar composite column range for the row range)
 */
private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectCompositeColumnRangeForRowRange = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

    @Override
    public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        return new Callable<RegularStatement>() {
            @Override
            public RegularStatement call() throws Exception {
                Select select = selectAllColumnsFromKeyspaceAndCF();
                CompositeByteBufferRange compositeRange = rowSliceQuery.getCompositeRange();

                // Restricting on the composite clustering columns plus a row range needs ALLOW FILTERING.
                if (compositeRange != null) {
                    select.allowFiltering();
                }
                // Row range restriction first, then the composite column range restriction.
                Where where = addWhereClauseForRowRange(partitionKeyCol, select, rowSliceQuery.getRowSlice().getRange());
                where = addWhereClauseForCompositeColumnRange(where, compositeRange);
                return where;
            }
        };
    }

    @Override
    public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
        // Bind order must match getQueryGen(): row range values first, then composite range values.
        List<Object> values = new ArrayList<Object>();
        bindWhereClauseForRowRange(values, rowSliceQuery.getRowSlice().getRange());
        bindWhereClauseForCompositeColumnRange(values, rowSliceQuery.getCompositeRange());
        return pStatement.bind(values.toArray());
    }
};
/**
 * Main method used to generate the query for the specified row slice query.
 * Note that depending on the query signature, the caller may choose to enable/disable caching
 *
 * @param rowSliceQuery: The Astyanax query for which we need to generate a java driver query
 * @param useCaching: boolean condition indicating whether we should use a previously cached prepared stmt or not.
 *                    If false, then the cache is ignored and we generate the prepared stmt for this query
 *                    If true, then the cached prepared stmt is used. If the cache has not been inited,
 *                    then the prepared stmt is constructed for this query and subsequently cached
 *
 * @return BoundStatement: the statement for this Astyanax query
 */
public BoundStatement getQueryStatement(CqlRowSliceQueryImpl<?,?> rowSliceQuery, boolean useCaching) {
    // Dispatch to the query generator that matches the column specification of the query.
    switch (rowSliceQuery.getColQueryType()) {
    case AllColumns:
        return SelectAllColumnsForRowRange.getBoundStatement(rowSliceQuery, useCaching);
    case ColumnSet:
        return SelectColumnSetForRowRange.getBoundStatement(rowSliceQuery, useCaching);
    case ColumnRange:
        // Composite columns need a dedicated generator that restricts each composite component.
        if (isCompositeColumn) {
            return SelectCompositeColumnRangeForRowRange.getBoundStatement(rowSliceQuery, useCaching);
        } else {
            return SelectColumnRangeForRowRange.getBoundStatement(rowSliceQuery, useCaching);
        }
    default :
        throw new RuntimeException("RowSliceQuery with row range use case not supported.");
    }
}
}
| 8,112 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/DirectCqlResult.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.util.ArrayList;
import java.util.List;
import com.datastax.driver.core.ColumnDefinitions.Definition;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.model.Rows;
/**
* Impl for {@link CqlResult} that parses the {@link ResultSet} from java driver.
* Note that the class does not interpret the column family definition but instead simply
* parses each CQL column returned in the result set as an Astyanax column.
* Hence this class is applicable in instances where the table is considered to be a flat CQL table.
*
* @author poberai
*
* @param <K>
* @param <C>
*/
public class DirectCqlResult<K, C> implements CqlResult<K, C> {

    /** Count returned by a count-style query; null when the result carries rows instead. */
    private Long number = null;
    /** Parsed rows; null when the result carries a count instead. */
    private CqlRowListImpl<K, C> rows;

    /**
     * Translates every java-driver row into an Astyanax row, treating each CQL
     * column of the result as a flat Astyanax column.
     */
    public DirectCqlResult(List<Row> rows, ColumnFamily<K, C> cf) {
        List<com.netflix.astyanax.model.Row<K, C>> translated = new ArrayList<com.netflix.astyanax.model.Row<K, C>>();
        for (Row driverRow : rows) {
            translated.add(toAstyanaxRow(driverRow, cf));
        }
        this.rows = new CqlRowListImpl<K, C>(translated);
    }

    /** Wraps the numeric result of a count-style query. */
    public DirectCqlResult(Long number) {
        this.number = number;
    }

    @Override
    public Rows<K, C> getRows() {
        return rows;
    }

    @Override
    public int getNumber() {
        // callers are expected to check hasNumber() first
        return number.intValue();
    }

    @Override
    public boolean hasRows() {
        return rows != null && !rows.isEmpty();
    }

    @Override
    public boolean hasNumber() {
        return number != null;
    }

    /** Builds a single Astyanax row (key plus column list) from a driver row. */
    private com.netflix.astyanax.model.Row<K, C> toAstyanaxRow(Row row, ColumnFamily<K, C> cf) {
        return new CqlRowImpl<K, C>(toRowKey(row, cf), toColumnList(row), cf);
    }

    /** Deserializes the row key, which is always the first column of the result row. */
    private K toRowKey(Row row, ColumnFamily<K, C> cf) {
        Serializer<K> keySerializer = cf.getKeySerializer();
        return (K) CqlTypeMapping.getDynamicColumn(row, keySerializer, 0, cf);
    }

    /** Converts every CQL column of the driver row into an Astyanax column. */
    private CqlColumnListImpl<C> toColumnList(Row row) {
        List<CqlColumnImpl<C>> columns = new ArrayList<CqlColumnImpl<C>>();
        List<Definition> colDefs = row.getColumnDefinitions().asList();
        for (int i = 0; i < colDefs.size(); i++) {
            Definition colDef = colDefs.get(i);
            columns.add(new CqlColumnImpl<C>((C) colDef.getName(), row, i, colDef));
        }
        return new CqlColumnListImpl<C>(columns);
    }
}
| 8,113 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlRangeBuilder.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
/**
* Helpful class that tracks the state for building a {@link ColumnSlice} query using column range specification.
*
* @author poberai
*
* @param <T>
*/
public class CqlRangeBuilder<T> {

    // Range boundaries; null means unbounded on that side.
    private T rangeStart = null;
    private T rangeEnd = null;
    // -1 means "not specified" for both limit and fetch size.
    private int limit = -1;
    private int fetchSize = -1;
    private boolean reversed = false;
    // Default CQL3 name of the first clustering column.
    private String columnName = "column1";

    /**
     * Seeds this builder from a previously built range.
     * Note: the column name is intentionally not copied from the old range.
     */
    public CqlRangeBuilder<T> withRange(CqlRangeImpl<T> oldRange) {
        if (oldRange == null) {
            return this;
        }
        rangeStart = oldRange.getCqlStart();
        rangeEnd = oldRange.getCqlEnd();
        limit = oldRange.getLimit();
        reversed = oldRange.isReversed();
        fetchSize = oldRange.getFetchSize();
        return this;
    }

    public CqlRangeBuilder<T> setLimit(int count) {
        limit = count;
        return this;
    }

    public int getLimit() {
        return limit;
    }

    public CqlRangeBuilder<T> setReversed(boolean reversed) {
        this.reversed = reversed;
        return this;
    }

    public boolean getReversed() {
        return reversed;
    }

    public CqlRangeBuilder<T> setStart(T value) {
        rangeStart = value;
        return this;
    }

    public T getStart() {
        return rangeStart;
    }

    public CqlRangeBuilder<T> setEnd(T value) {
        rangeEnd = value;
        return this;
    }

    public T getEnd() {
        return rangeEnd;
    }

    public CqlRangeBuilder<T> setColumn(String name) {
        columnName = name;
        return this;
    }

    public String getColumn() {
        return columnName;
    }

    public CqlRangeBuilder<T> setFetchSize(int count) {
        fetchSize = count;
        return this;
    }

    public int getFetchSize() {
        return fetchSize;
    }

    /** Materializes the accumulated state into an immutable range object. */
    public CqlRangeImpl<T> build() {
        return new CqlRangeImpl<T>(columnName, rangeStart, rangeEnd, limit, reversed, fetchSize);
    }
}
| 8,114 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlRowListImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import com.datastax.driver.core.ResultSet;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
/**
* Impl for {@link Rows} that parses the {@link ResultSet} from java driver and translates back to Astyanax Rows.
* Note that if your schema has a clustering key, then each individual row from the result set is a unique column,
* and all result set rows with the same partition key map to a unique Astyanax row.
*
* @author poberai
*
* @param <K>
* @param <C>
*/
public class CqlRowListImpl<K, C> implements Rows<K, C> {

    private final List<Row<K,C>> rows;
    // Index from row key to row for O(1) getRow() lookups.
    private final Map<K, Row<K,C>> lookup;
    private final ColumnFamily<K,C> cf;
    private final CqlColumnFamilyDefinitionImpl cfDef;

    /** Creates an empty row list. */
    public CqlRowListImpl() {
        this.rows = new ArrayList<Row<K, C>>();
        this.lookup = new HashMap<K, Row<K,C>>();
        this.cf = null;
        this.cfDef = null;
    }

    /** Creates a row list from already translated Astyanax rows. */
    public CqlRowListImpl(List<Row<K,C>> newRows) {
        this.rows = new ArrayList<Row<K, C>>();
        this.rows.addAll(newRows);
        this.lookup = new HashMap<K, Row<K,C>>();
        for (Row<K,C> row : this.rows) {
            this.lookup.put(row.getKey(), row);
        }
        this.cf = null;
        this.cfDef = null;
    }

    /**
     * Parses the raw java-driver result rows, folding consecutive result rows that
     * share the same partition key into a single Astyanax row (clustering-key case).
     * NOTE(review): assumes result rows with the same partition key are contiguous
     * in the result set — confirm this holds for all query shapes that feed this ctor.
     */
    public CqlRowListImpl(List<com.datastax.driver.core.Row> resultRows, ColumnFamily<K,C> cf) {
        this.rows = new ArrayList<Row<K, C>>();
        this.lookup = new HashMap<K, Row<K,C>>();
        this.cf = cf;
        this.cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();
        Serializer<?> keySerializer = cf.getKeySerializer();

        K prevKey = null;
        // Buffer of contiguous result rows that share the current partition key.
        List<com.datastax.driver.core.Row> tempList = new ArrayList<com.datastax.driver.core.Row>();

        for (com.datastax.driver.core.Row row : resultRows) {
            // Row key is always the first column of the result row.
            K rowKey = (K) CqlTypeMapping.getDynamicColumn(row, keySerializer, 0, cf);
            if (prevKey == null || prevKey.equals(rowKey)) {
                tempList.add(row);
            } else {
                // we found a set of contiguous rows that match with the same row key
                addToResultRows(tempList);
                tempList = new ArrayList<com.datastax.driver.core.Row>();
                tempList.add(row);
            }
            prevKey = rowKey;
        }

        // flush the final list
        if (tempList.size() > 0) {
            addToResultRows(tempList);
        }

        for (Row<K,C> row : rows) {
            this.lookup.put(row.getKey(), row);
        }
    }

    /**
     * Translates one buffered group of driver rows into Astyanax rows. Without a
     * clustering key (or with multiple regular columns) each driver row is one
     * Astyanax row; otherwise the whole group collapses into a single row.
     */
    private void addToResultRows(List<com.datastax.driver.core.Row> rowList) {
        if (cfDef.getClusteringKeyColumnDefinitionList().size() == 0 || cfDef.getRegularColumnDefinitionList().size() > 1) {
            for (com.datastax.driver.core.Row row : rowList) {
                this.rows.add(new CqlRowImpl<K, C>(row, cf));
            }
        } else {
            this.rows.add(new CqlRowImpl<K, C>(rowList, cf));
        }
    }

    @Override
    public Iterator<Row<K, C>> iterator() {
        return rows.iterator();
    }

    @Override
    public Row<K, C> getRow(K key) {
        return lookup.get(key);
    }

    @Override
    public int size() {
        return rows.size();
    }

    @Override
    public boolean isEmpty() {
        return rows.isEmpty();
    }

    @Override
    public Row<K, C> getRowByIndex(int index) {
        return rows.get(index);
    }

    @Override
    public Collection<K> getKeys() {
        // Lazily transformed view over the row list.
        return Lists.transform(rows, new Function<Row<K,C>, K>() {
            @Override
            public K apply(Row<K, C> row) {
                return row.getKey();
            }
        });
    }
}
| 8,115 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlRowSlice.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.util.Collection;
import com.netflix.astyanax.query.RowSliceQuery;
/**
 * Encapsulates the row-selection portion of a {@link RowSliceQuery}.
 *
 * A row slice is expressed in exactly one of two ways:
 * 1. an explicit collection of row keys (in() style query), or
 * 2. a row range specification (keys or tokens plus an optional count).
 *
 * Helper predicates identify which flavor a given instance represents.
 *
 * @author poberai
 *
 * @param <K>
 */
public class CqlRowSlice<K> {

    /** Explicit row keys for an in() style query; null when a range is used instead. */
    private Collection<K> keys;
    /** Row range specification; always non-null but only populated by the range constructor. */
    private RowRange<K> range = new RowRange<K>();

    /** Row range description: key or token boundaries plus a row count. */
    public static class RowRange<K> {
        private K startKey;
        private K endKey;
        private String startToken;
        private String endToken;
        int count;

        public K getStartKey() {
            return startKey;
        }

        public K getEndKey() {
            return endKey;
        }

        public String getStartToken() {
            return startToken;
        }

        public String getEndToken() {
            return endToken;
        }

        public int getCount() {
            return count;
        }
    }

    /** Creates a slice over an explicit collection of row keys. */
    public CqlRowSlice(Collection<K> keys) {
        this.keys = keys;
    }

    /** Creates a slice over a row range described by keys and/or tokens. */
    public CqlRowSlice(K startKey, K endKey, String startToken, String endToken, int count) {
        range.startKey = startKey;
        range.endKey = endKey;
        range.startToken = startToken;
        range.endToken = endToken;
        range.count = count;
    }

    public Collection<K> getKeys() {
        return keys;
    }

    public RowRange<K> getRange() {
        return range;
    }

    /** @return true when this slice selects an explicit non-empty set of row keys */
    public boolean isCollectionQuery() {
        return keys != null && !keys.isEmpty();
    }

    /** @return true when this slice selects a row range rather than explicit keys */
    public boolean isRangeQuery() {
        return !isCollectionQuery() && range != null;
    }
}
| 8,116 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlRowListIterator.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.ResultSet;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
/**
* Impl for {@link Rows} that parses the {@link ResultSet} from java driver and translates back to Astyanax Rows.
* Note that if your schema has a clustering key, then each individual row from the result set is a unique column,
* and all result set rows with the same partition key map to a unique Astyanax row.
*
* Note that this class leverages the cursor support from java driver and expects the user to use the iterator based
* approach when reading through results which contain multiple rows.
*
* Some users may want to read all the data instead of using an iterator approach. To handle this situation,
* the class maintains some state that indicates how the object is first accessed in order to avoid iterating twice
* over the same result set.
*
* @author poberai
*
* @param <K>
* @param <C>
*/
public class CqlRowListIterator<K,C> implements Rows<K,C> {

    /**
     * Tracks how this object was first accessed, so the one-shot ResultSet is
     * never iterated twice:
     *   UnSet        - not accessed yet
     *   PreFetch     - a random-access method is currently draining the result set into 'rows'
     *   PrefetchDone - fully materialized; random access (getRow/getRowByIndex/getKeys) is safe
     *   Iterator     - the caller is streaming via iterator(); prefetching is forbidden
     */
    private enum State {
        UnSet, PreFetch, PrefetchDone, Iterator;
    }

    private final ResultSet resultSet;
    private final ColumnFamily<K,C> cf;
    private final Serializer<K> keySerializer;
    // True when the table has a clustering key, i.e. several driver rows fold into one Astyanax row.
    private final boolean isClusteringKey;

    private final AtomicReference<State> stateRef = new AtomicReference<State>(State.UnSet);
    // Number of Astyanax rows handed out so far when in Iterator mode.
    private final AtomicInteger iterRowCount = new AtomicInteger(0);
    // The single iterator instance handed to callers; re-requesting iterator() returns the same one.
    private final AtomicReference<Iterator<Row<K,C>>> iterRef = new AtomicReference<Iterator<Row<K,C>>>(null);

    // Populated only by consumeAllRows() (random-access mode).
    private final List<Row<K,C>> rows = new ArrayList<Row<K,C>>();
    private final Map<K, Row<K,C>> lookup = new HashMap<K, Row<K,C>>();

    public CqlRowListIterator(ResultSet rs, ColumnFamily<K,C> cf) {
        this.resultSet = rs;
        this.cf = cf;
        this.keySerializer = cf.getKeySerializer();
        CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();
        this.isClusteringKey = cfDef.getClusteringKeyColumnDefinitionList().size() > 0;
    }

    @Override
    public Iterator<Row<K, C>> iterator() {
        // Hand back the already-created iterator; the underlying ResultSet is one-shot.
        if (iterRef.get() != null) {
            return iterRef.get();
            //throw new RuntimeException("Cannot re-iterate over rows while already iterating");
        }

        if (stateRef.get() == State.UnSet) {
            stateRef.set(State.Iterator);
        }

        Iterator<Row<K,C>> rowIter = new Iterator<Row<K,C>>() {

            private final Iterator<com.datastax.driver.core.Row> rsIter = resultSet.iterator();
            // Driver rows buffered for the Astyanax row currently being assembled (clustering-key case).
            private List<com.datastax.driver.core.Row> currentList = new ArrayList<com.datastax.driver.core.Row>();
            private K currentRowKey = null;

            @Override
            public boolean hasNext() {
                if (!isClusteringKey) {
                    return rsIter.hasNext();
                } else {
                    // A buffered, not-yet-emitted partial row also counts as a next element.
                    return rsIter.hasNext() || !currentList.isEmpty();
                }
            }

            @Override
            public Row<K, C> next() {
//                if (!hasNext()) {
//                    throw new IllegalStateException();
//                }

                if (isClusteringKey) {
                    // Keep reading rows till we find a new rowKey, and then return the prefecthed list as a single row
                    while (rsIter.hasNext()) {
                        com.datastax.driver.core.Row rsRow = rsIter.next();
                        // Row key is always the first column of the driver row.
                        K rowKey = (K) CqlTypeMapping.getDynamicColumn(rsRow, keySerializer, 0, cf);
                        if (currentRowKey == null || rowKey.equals(currentRowKey)) {
                            currentList.add(rsRow);
                            currentRowKey = rowKey;
                        } else {
                            // Ok, we have read all columns of a single row. Return the current fully formed row
                            List<com.datastax.driver.core.Row> newList = new ArrayList<com.datastax.driver.core.Row>();
                            newList.addAll(currentList);

                            // reset the currentList and start with the new rowkey
                            currentList = new ArrayList<com.datastax.driver.core.Row>();
                            currentList.add(rsRow);
                            currentRowKey = rowKey;

                            iterRowCount.incrementAndGet();
                            return new CqlRowImpl<K,C>(newList, cf);
                        }
                    }

                    // In case we got here, then we have exhausted the rsIter and can just return the last row
                    List<com.datastax.driver.core.Row> newList = new ArrayList<com.datastax.driver.core.Row>();
                    newList.addAll(currentList);

                    // reset the currentList and start with the new rowkey
                    currentList = new ArrayList<com.datastax.driver.core.Row>();
                    iterRowCount.incrementAndGet();
                    return new CqlRowImpl<K,C>(newList, cf);
                } else {
                    // Here each cql row corresponds to a single Astyanax row
                    if (rsIter.hasNext()) {
                        com.datastax.driver.core.Row rsRow = rsIter.next();
                        return new CqlRowImpl<K,C>(rsRow, cf);
                    } else {
                        return null; // this should not happen if this is all accessed via the iterator
                    }
                }
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        };

        iterRef.set(rowIter);
        return iterRef.get();
    }

    @Override
    public Collection<K> getKeys() {
        consumeAllRows();
        return lookup.keySet();
    }

    @Override
    public Row<K, C> getRow(K key) {
        consumeAllRows();
        return lookup.get(key);
    }

    @Override
    public Row<K, C> getRowByIndex(int i) {
        consumeAllRows();
        return rows.get(i);
    }

    @Override
    public int size() {
        // In Iterator mode we can only report the count of rows emitted so far.
        if (stateRef.get() == State.Iterator) {
            return this.iterRowCount.get();
        } else {
            consumeAllRows();
            return rows.size();
        }
    }

    @Override
    public boolean isEmpty() {
        if (stateRef.get() == State.UnSet) {
            this.iterator(); // init the iterator
        }
        if (stateRef.get() == State.Iterator) {
            return !this.iterRef.get().hasNext();
        } else {
            consumeAllRows();
            return rows.size() == 0;
        }
    }

    /**
     * Drains the entire result set into 'rows'/'lookup' so that random-access
     * methods can be served. Throws when the caller is already streaming via
     * iterator(), since the result set cannot be read twice.
     */
    private void consumeAllRows() {

        if (this.stateRef.get() == State.PrefetchDone) {
            return;
        }

        if (this.stateRef.get() == State.Iterator) {
            throw new RuntimeException("Cannot pre-fetch rows while iterating over rows");
        }

        this.stateRef.set(State.PreFetch);

        // Ok, we made it this far, we can now prefetch
        Iterator<Row<K,C>> rowIter = this.iterator();
        while (rowIter.hasNext()) {
            Row<K,C> row = rowIter.next();
            this.rows.add(row);
            this.lookup.put(row.getKey(), row);
        }
        this.iterRef.set(rows.iterator());
        stateRef.set(State.PrefetchDone);
    }
}
| 8,117 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlColumnListImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.UUID;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.Row;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
/**
 * Class that implements the {@link ColumnList} interface. Note that this class handles the case where the table schema
 * could contain a clustering key or just regular columns for a flat table.
 *
 * In the case of a flat table, each row has a unique set of columns. In the case of a clustering key, each row is a unique column.
 * There are 2 separate constructors to this class in order to handle each of these cases.
 *
 * @author poberai
 *
 * @param <C>
 */
@SuppressWarnings("unchecked")
public class CqlColumnListImpl<C> implements ColumnList<C> {

    // Insertion-ordered view of the columns; 'map' provides name-based lookup over the same columns.
    private List<Column<C>> columnList = new ArrayList<Column<C>>();
    private LinkedHashMap<C, Column<C>> map = new LinkedHashMap<C, Column<C>>();

    /** Creates an empty column list. */
    public CqlColumnListImpl() {
    }

    /**
     * This constructor is meant to be called when we have a table with standard columns i.e no composites, just plain columns.
     * Each logical column is expected to occupy 3 consecutive result columns: value, ttl and timestamp.
     *
     * @param row the driver row whose columns are parsed
     * @param cf  the column family (currently unused here; kept for interface symmetry)
     */
    public CqlColumnListImpl(Row row, ColumnFamily<?,?> cf) {
        ColumnDefinitions cfDefinitions = row.getColumnDefinitions();
        int index = 1; // skip the key column
        while (index < cfDefinitions.size()) {
            String columnName = cfDefinitions.getName(index);
            CqlColumnImpl<C> cqlCol = new CqlColumnImpl<C>((C) columnName, row, index);
            columnList.add(cqlCol);
            map.put((C) columnName, cqlCol);
            index += 3; // skip past the ttl and the timestamp
        }
    }

    /**
     * This constructor is meant to be used when we are using the CQL3 table but still in the legacy thrift mode:
     * each driver row is one Astyanax column whose name comes from the clustering key.
     *
     * @param rows the driver rows, each contributing a single column
     * @param cf   the column family used to locate key/value column positions
     */
    public CqlColumnListImpl(List<Row> rows, ColumnFamily<?, ?> cf) {

        CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();
        // Column name follows the partition key columns; the value follows the clustering key columns.
        // Both offsets are schema properties, so compute them once outside the loop.
        int columnNameIndex = cfDef.getPartitionKeyColumnDefinitionList().size();
        int valueIndex = cfDef.getPartitionKeyColumnDefinitionList().size() + cfDef.getClusteringKeyColumnDefinitionList().size();

        for (Row row : rows) {
            Object columnName = CqlTypeMapping.getDynamicColumn(row, cf.getColumnSerializer(), columnNameIndex, cf);
            CqlColumnImpl<C> cqlCol = new CqlColumnImpl<C>((C) columnName, row, valueIndex);
            columnList.add(cqlCol);
            map.put((C) columnName, cqlCol);
        }
    }

    /** Creates a column list from already parsed columns. */
    public CqlColumnListImpl(List<CqlColumnImpl<C>> newColumnList) {
        this.columnList.clear();
        for (Column<C> column : newColumnList) {
            columnList.add(column);
            map.put(column.getName(), column);
        }
    }

    /** Removes the first column (if any) from both the list and the lookup map. */
    public void trimFirstColumn() {
        if (columnList.isEmpty()) {
            return;
        }
        Column<C> firstCol = this.columnList.remove(0);
        map.remove(firstCol.getName());
    }

    @Override
    public Iterator<Column<C>> iterator() {
        return columnList.iterator();
    }

    @Override
    public Collection<C> getColumnNames() {
        return map.keySet();
    }

    @Override
    public Column<C> getColumnByName(C columnName) {
        return map.get(columnName);
    }

    @Override
    public String getStringValue(C columnName, String defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getStringValue();
        }
    }

    @Override
    public String getCompressedStringValue(C columnName, String defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getCompressedStringValue();
        }
    }

    @Override
    public Integer getIntegerValue(C columnName, Integer defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getIntegerValue();
        }
    }

    @Override
    public Double getDoubleValue(C columnName, Double defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getDoubleValue();
        }
    }

    @Override
    public Long getLongValue(C columnName, Long defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getLongValue();
        }
    }

    @Override
    public byte[] getByteArrayValue(C columnName, byte[] defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getByteArrayValue();
        }
    }

    @Override
    public Boolean getBooleanValue(C columnName, Boolean defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getBooleanValue();
        }
    }

    @Override
    public ByteBuffer getByteBufferValue(C columnName, ByteBuffer defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getByteBufferValue();
        }
    }

    @Override
    public <T> T getValue(C columnName, Serializer<T> serializer, T defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getValue(serializer);
        }
    }

    @Override
    public Date getDateValue(C columnName, Date defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getDateValue();
        }
    }

    @Override
    public UUID getUUIDValue(C columnName, UUID defaultValue) {
        Column<C> column = map.get(columnName);
        if (column == null) {
            return defaultValue;
        } else {
            return column.getUUIDValue();
        }
    }

    @Override
    public Column<C> getColumnByIndex(int idx) {
        return columnList.get(idx);
    }

    @Override
    public <C2> Column<C2> getSuperColumn(C columnName, Serializer<C2> colSer) {
        // Super columns are a thrift-only concept with no CQL3 equivalent.
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public <C2> Column<C2> getSuperColumn(int idx, Serializer<C2> colSer) {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public boolean isEmpty() {
        return columnList.isEmpty();
    }

    @Override
    public int size() {
        return columnList.size();
    }

    @Override
    public boolean isSuperColumn() {
        throw new UnsupportedOperationException("Operation not supported");
    }
}
| 8,118 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlColumnImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.UUID;
import com.datastax.driver.core.ColumnDefinitions.Definition;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.ComparatorType;
import com.netflix.astyanax.serializers.DateSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.FloatSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.ShortSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
/**
* Class that implements the {@link Column} interface.
*
* Note that since columns can be rows in CQL3, this class needs access to the java driver {@link Row}
* within the java driver {@link ResultSet}
*
* The index provided within the row indicates where to start parsing the Column data.
* Also this class handles reading the TTL and Timestamp on the Column as well.
*
* @author poberai
*
* @param <C>
*/
public class CqlColumnImpl<C> implements Column<C> {
private Row row;
private C columnName;
private int index;
private ComparatorType cType;
private boolean isBlob = false;
public CqlColumnImpl() {
}
public CqlColumnImpl(C colName, Row row, int index) {
this.columnName = colName;
this.row = row;
this.index = index;
Definition colDefinition = row.getColumnDefinitions().asList().get(index);
isBlob = colDefinition.getType() == DataType.blob();
}
public CqlColumnImpl(C colName, Row row, int index, Definition colDefinition) {
this.columnName = colName;
this.row = row;
this.index = index;
isBlob = colDefinition.getType() == DataType.blob();
}
@Override
public C getName() {
return columnName;
}
@Override
public ByteBuffer getRawName() {
return StringSerializer.get().toByteBuffer(String.valueOf(columnName));
}
@Override
public long getTimestamp() {
return row.getLong(index+2);
}
@Override
public <V> V getValue(Serializer<V> valSer) {
return valSer.fromByteBuffer(row.getBytes(index));
}
@Override
public String getStringValue() {
return (isBlob) ? StringSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getString(index);
}
@Override
public String getCompressedStringValue() {
throw new UnsupportedOperationException("Operation not supported");
}
@Override
public byte getByteValue() {
return row.getBytes(index).get();
}
@Override
public short getShortValue() {
Integer i = (isBlob) ? ShortSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getInt(index);
return i.shortValue();
}
@Override
public int getIntegerValue() {
return (isBlob) ? IntegerSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getInt(index);
}
@Override
public float getFloatValue() {
return (isBlob) ? FloatSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getFloat(index);
}
@Override
public double getDoubleValue() {
return (isBlob) ? DoubleSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getDouble(index);
}
@Override
public long getLongValue() {
return (isBlob) ? LongSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getLong(index);
}
@Override
public byte[] getByteArrayValue() {
return row.getBytes(index).array();
}
@Override
public boolean getBooleanValue() {
return (isBlob) ? BooleanSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getBool(index);
}
@Override
public ByteBuffer getByteBufferValue() {
return row.getBytes(index);
}
/**
* @return {@link Date} from {@link com.datastax.driver.core.GettableByIndexData#getTimestamp(int)} for backwards-
* compatibility because this {@link #getTimestamp()} returns column timestamp, not value of a Date-based column.
*/
@Override
public Date getDateValue() {
return (isBlob) ? DateSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getTimestamp(index);
}
@Override
public UUID getUUIDValue() {
return (isBlob) ? UUIDSerializer.get().fromByteBuffer(row.getBytes(index)) : row.getUUID(index);
}
@Override
@Deprecated
public <C2> ColumnList<C2> getSubColumns(Serializer<C2> ser) {
throw new UnsupportedOperationException("Operation not supported");
}
@Override
@Deprecated
public boolean isParentColumn() {
throw new UnsupportedOperationException("Operation not supported");
}
@Override
public int getTtl() {
return row.getInt(index+1);
}
@Override
public boolean hasValue() {
return (row != null) && !(row.isNull(index));
}
public Object getGenericValue() {
ComparatorType cType = getComparatorType();
return CqlTypeMapping.getDynamicColumn(row, cType.getSerializer(), index, null);
}
public ComparatorType getComparatorType() {
if (cType != null) {
return cType;
}
// Lazy init
DataType type = row.getColumnDefinitions().getType(index);
if (type.isCollection()) {
throw new RuntimeException("This operation does not work for collection objects");
}
String typeString = (type.getName().name()).toUpperCase();
cType = CqlTypeMapping.getComparatorFromCqlType(typeString);
return cType;
}
}
| 8,119 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlColumnSlice.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.util.Collection;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.query.RowQuery;
/**
* Impl for {@link ColumnSlice}.
*
* See {@link RowQuery} for where ColumnSlice can be used. There are essentially 2 components to a ColumnSLice
* 1. Collection of Columns.
* 2. Column range specification
*
* This class encapsulates data structures for both types of ColumnSlice(s). It also maintains state that helps identify
* the type of query being performed.
*
* @author poberai
*
* @param <C>
*/
public class CqlColumnSlice<C> extends ColumnSlice<C> {

    /** Range specification (start/end/limit/reversed); null for column-collection queries. */
    private CqlRangeImpl<C> cqlRange;
    /** Explicit set of columns to select; null for range / select-all queries. */
    private Collection<C> cqlColumns;

    public CqlColumnSlice() {
        super(null, null);
    }

    /**
     * Range slice over [startColumn, endColumn].
     *
     * BUGFIX: the arguments were previously discarded entirely, silently turning
     * this into a select-all slice. Build a range over the CQL3 default clustering
     * column name "column1", mirroring the ColumnSlice conversion constructor below.
     */
    public CqlColumnSlice(C startColumn, C endColumn) {
        super(null, null);
        this.cqlRange = new CqlRangeBuilder<C>()
                .setColumn("column1")
                .setStart(startColumn)
                .setEnd(endColumn)
                .build();
    }

    public CqlColumnSlice(CqlRangeImpl<C> cqlRange) {
        super(null, null);
        this.cqlRange = cqlRange;
    }

    public CqlColumnSlice(Collection<C> columns) {
        super(null, null);
        this.cqlColumns = columns;
    }

    public void setColumns(Collection<C> columns) {
        this.cqlColumns = columns;
    }

    public void setCqlRange(CqlRangeImpl<C> cqlRange) {
        this.cqlRange = cqlRange;
    }

    /** Convert a plain ColumnSlice into its CQL3 representation. */
    public CqlColumnSlice(ColumnSlice<C> columnSlice) {
        super(null, null);
        if (columnSlice instanceof CqlColumnSlice<?>) {
            initFrom(((CqlColumnSlice<C>)columnSlice));
        } else {
            if (columnSlice.getColumns() != null) {
                this.cqlColumns = columnSlice.getColumns();
                this.cqlRange = null;
            } else {
                // this is where the consumer is using the old style range query using the same code i.e no column name specified.
                // in this case we must assume the columnName = 'column1' which is the default chosen by CQL3
                this.cqlColumns = null;
                this.cqlRange = new CqlRangeBuilder<C>()
                        .setColumn("column1")
                        .setStart(columnSlice.getStartColumn())
                        .setEnd(columnSlice.getEndColumn())
                        .setReversed(columnSlice.getReversed())
                        .setLimit(columnSlice.getLimit())
                        .build();
            }
        }
    }

    public CqlColumnSlice(CqlColumnSlice<C> cqlColumnSlice) {
        super(null, null);
        initFrom(cqlColumnSlice);
    }

    private void initFrom(CqlColumnSlice<C> cqlColumnSlice) {
        this.cqlColumns = cqlColumnSlice.cqlColumns;
        this.cqlRange = cqlColumnSlice.cqlRange;
    }

    @Override
    public ColumnSlice<C> setLimit(int limit) {
        // CqlRangeImpl is immutable apart from fetchSize; rebuild with the new limit.
        this.cqlRange = new CqlRangeBuilder<C>().withRange(cqlRange).setLimit(limit).build();
        return this;
    }

    @Override
    public ColumnSlice<C> setReversed(boolean value) {
        this.cqlRange = new CqlRangeBuilder<C>().withRange(cqlRange).setReversed(value).build();
        return this;
    }

    public String getColumnName() {
        return cqlRange.getColumnName();
    }

    @Override
    public Collection<C> getColumns() {
        return cqlColumns;
    }

    @Override
    public C getStartColumn() {
        return (cqlRange != null) ? (C) cqlRange.getCqlStart() : null;
    }

    @Override
    public C getEndColumn() {
        return (cqlRange != null) ? (C) cqlRange.getCqlEnd() : null;
    }

    @Override
    public boolean getReversed() {
        return (cqlRange != null ) ? cqlRange.isReversed() : false;
    }

    @Override
    public int getLimit() {
        return (cqlRange != null ) ? cqlRange.getLimit() : -1;
    }

    public int getFetchSize() {
        return (cqlRange != null ) ? cqlRange.getFetchSize() : -1;
    }

    /** True when an explicit column collection was provided. */
    public boolean isColumnSelectQuery() {
        return (this.cqlColumns != null);
    }

    /** True when a range (and no explicit collection) was provided. */
    public boolean isRangeQuery() {
        if (isColumnSelectQuery()) {
            return false;
        }
        if (cqlRange != null) {
            return true;
        }
        return false;
    }

    /** True when neither a collection nor a range was provided. */
    public boolean isSelectAllQuery() {
        return (!isColumnSelectQuery() && !isRangeQuery());
    }

    public static enum QueryType {
        SELECT_ALL, COLUMN_COLLECTION, COLUMN_RANGE;
    }

    /** Classify this slice for downstream query construction. */
    public QueryType getQueryType() {
        if (isSelectAllQuery()) {
            return QueryType.SELECT_ALL;
        } else if (isRangeQuery()) {
            return QueryType.COLUMN_RANGE;
        } else {
            return QueryType.COLUMN_COLLECTION;
        }
    }
}
| 8,120 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlRangeImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.nio.ByteBuffer;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.query.RowQuery;
/**
* Impl for {@link ByteBufferRange} that tracks the individual components of a {@link ColumnSlice} when using a column range
* specification.
*
* Users of such queries (columns slices with column ranges) can use this class when performing using the {@link RowQuery}
*
* @author poberai
*
* @param <T>
*/
public class CqlRangeImpl<T> implements ByteBufferRange {
private final String columnName;
private final T start;
private final T end;
private final int limit;
private final boolean reversed;
private int fetchSize = -1;
public CqlRangeImpl(String columnName, T start, T end, int limit, boolean reversed, int fetchSize) {
this.columnName = columnName;
this.start = start;
this.end = end;
this.limit = limit;
this.reversed = reversed;
this.fetchSize = fetchSize;
}
@Override
public ByteBuffer getStart() {
throw new UnsupportedOperationException("Operation not supported");
}
@Override
public ByteBuffer getEnd() {
throw new UnsupportedOperationException("Operation not supported");
}
public String getColumnName() {
return columnName;
}
public T getCqlStart() {
return start;
}
public T getCqlEnd() {
return end;
}
@Override
public boolean isReversed() {
return reversed;
}
@Override
public int getLimit() {
return limit;
}
public int getFetchSize() {
return fetchSize;
}
public void setFetchSize(int size) {
fetchSize = size;
}
} | 8,121 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/model/CqlRowImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads.model;
import java.nio.ByteBuffer;
import java.util.List;
import com.datastax.driver.core.ResultSet;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Row;
/**
* Impl for {@link Row} that parses the {@link ResultSet} from java driver and translates back to Astyanax Row.
* Note that if your schema has a clustering key, then each individual row from the result set is a unique column,
* and all result set rows with the same partition key map to a unique Astyanax row.
*
* @author poberai
*
* @param <K>
* @param <C>
*/
@SuppressWarnings("unchecked")
public class CqlRowImpl<K, C> implements Row<K, C> {

    /** Partition key decoded from the first position of the driver row. */
    private final K rowKey;
    /** Columns belonging to this logical (Astyanax) row. */
    private final CqlColumnListImpl<C> cqlColumnList;
    private final ColumnFamily<K, C> cf;

    /** Build from a single driver row (no clustering key). */
    public CqlRowImpl(com.datastax.driver.core.Row resultRow, ColumnFamily<K, C> cf) {
        this.rowKey = (K) extractRowKey(resultRow, cf);
        this.cqlColumnList = new CqlColumnListImpl<C>(resultRow, cf);
        this.cf = cf;
    }

    /**
     * Build from several driver rows that share a partition key: with a
     * clustering key, each driver row is one Astyanax column.
     */
    public CqlRowImpl(List<com.datastax.driver.core.Row> rows, ColumnFamily<K, C> cf) {
        this.rowKey = (K) extractRowKey(rows.get(0), cf);
        this.cqlColumnList = new CqlColumnListImpl<C>(rows, cf);
        this.cf = cf;
    }

    /** Build from already-decoded parts. */
    public CqlRowImpl(K rKey, CqlColumnListImpl<C> colList, ColumnFamily<K, C> columnFamily) {
        this.rowKey = rKey;
        this.cqlColumnList = colList;
        this.cf = columnFamily;
    }

    @Override
    public K getKey() {
        return rowKey;
    }

    @Override
    public ByteBuffer getRawKey() {
        return cf.getKeySerializer().toByteBuffer(rowKey);
    }

    @Override
    public ColumnList<C> getColumns() {
        return cqlColumnList;
    }

    /** Decode the partition key (always at position 0) with the CF key serializer. */
    private static <K, C> Object extractRowKey(com.datastax.driver.core.Row row, ColumnFamily<K, C> cf) {
        return CqlTypeMapping.getDynamicColumn(row, cf.getKeySerializer(), 0, cf);
    }
}
| 8,122 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/retrypolicies/JavaDriverBasedRetryPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.retrypolicies;
import com.netflix.astyanax.retry.RetryPolicy;
/**
* Abstract base for all {@link RetryPolicy} implementation that want to use the retry policy from java driver.
* @author poberai
*
*/
public abstract class JavaDriverBasedRetryPolicy implements RetryPolicy {

    /** No-op: retry bookkeeping is delegated to the java-driver policy. */
    @Override
    public void begin() {
    }

    /** No-op: retry bookkeeping is delegated to the java-driver policy. */
    @Override
    public void success() {
    }

    /** No-op: retry bookkeeping is delegated to the java-driver policy. */
    @Override
    public void failure(Exception e) {
    }

    /** Always false: Astyanax-level retries are disabled; retrying happens inside the java driver. */
    @Override
    public boolean allowRetry() {
        return false;
    }

    /** Always 0: attempts are not tracked at the Astyanax level. */
    @Override
    public int getAttemptCount() {
        return 0;
    }

    /**
     * NOTE(review): returns null rather than a copy; callers that expect a
     * non-null duplicate policy would NPE -- confirm no caller relies on this.
     */
    @Override
    public RetryPolicy duplicate() {
        return null;
    }

    /** @return the java-driver retry policy that actually implements the retry behavior. */
    public abstract com.datastax.driver.core.policies.RetryPolicy getJDRetryPolicy();
}
| 8,123 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/retrypolicies/ChangeConsistencyLevelRetryPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.retrypolicies;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.WriteType;
import com.datastax.driver.core.exceptions.DriverException;
import com.datastax.driver.core.policies.RetryPolicy.RetryDecision;
import com.netflix.astyanax.cql.ConsistencyLevelMapping;
/**
* Class that encapsulates a RetryPolicy that can be used in a configurable way when doing reties.
* Users can choose whether to retry on just reads / writes / unavailable exceptions or just all operations.
* Users can also decide to change the consistency level on different reties.
*
* @author poberai
*
*/
public class ChangeConsistencyLevelRetryPolicy extends JavaDriverBasedRetryPolicy {

    // Policies for specific conditions. retryOnAllConditions starts true and is
    // switched off as soon as any condition-specific setter below is called.
    private boolean retryOnAllConditions = true;
    private boolean retryOnReads = false;
    private boolean retryOnWrites = false;
    private boolean retryOnUnavailable = false;

    // The retry count remaining for this policy instance.
    // NOTE(review): this is mutable shared state decremented across all queries
    // using this instance, with no synchronization -- confirm each operation is
    // given its own policy instance.
    private int retryCount = 0;

    // the next consistency level to use on retries; null keeps the current level.
    private ConsistencyLevel nextConsistencyLevel;

    // throw when giving up (false) or ignore failures (true)
    private boolean suppressFinalFailure = false;

    public ChangeConsistencyLevelRetryPolicy() {
    }

    /** Retry regardless of which error condition occurred. */
    public ChangeConsistencyLevelRetryPolicy retryOnAllConditions(boolean condition) {
        retryOnAllConditions = condition;
        return this;
    }

    /** Enable retries for read timeouts only; disables the retry-on-everything default. */
    public ChangeConsistencyLevelRetryPolicy retryOnReadTimeouts(boolean condition) {
        retryOnReads = condition;
        retryOnAllConditions = false;
        return this;
    }

    /** Enable retries for write timeouts only; disables the retry-on-everything default. */
    public ChangeConsistencyLevelRetryPolicy retryOnWriteTimeouts(boolean condition) {
        retryOnWrites = condition;
        retryOnAllConditions = false;
        return this;
    }

    /** Enable retries for unavailable errors only; disables the retry-on-everything default. */
    public ChangeConsistencyLevelRetryPolicy retryOnUnavailable(boolean condition) {
        retryOnUnavailable = condition;
        retryOnAllConditions = false;
        return this;
    }

    /** Total number of retries allowed before giving up. */
    public ChangeConsistencyLevelRetryPolicy withNumRetries(int retries) {
        retryCount = retries;
        return this;
    }

    /** Consistency level to switch to on every subsequent retry. */
    public ChangeConsistencyLevelRetryPolicy withNextConsistencyLevel(com.netflix.astyanax.model.ConsistencyLevel cl) {
        nextConsistencyLevel = ConsistencyLevelMapping.getCL(cl);
        return this;
    }

    /** When true, exhausted retries are ignored instead of rethrown to the caller. */
    public ChangeConsistencyLevelRetryPolicy suppressFinalFailure(boolean condition) {
        suppressFinalFailure = condition;
        return this;
    }

    // Adapter exposing this policy to the java driver. Each callback checks
    // whether the matching condition is enabled and delegates to checkRetry
    // for the shared count/consistency handling.
    private com.datastax.driver.core.policies.RetryPolicy jdRetry = new com.datastax.driver.core.policies.RetryPolicy() {

        @Override
        public RetryDecision onReadTimeout(Statement query, ConsistencyLevel cl,
                int requiredResponses, int receivedResponses,
                boolean dataRetrieved, int nbRetry) {
            boolean shouldRetry = retryOnAllConditions || retryOnReads;
            return checkRetry(query, cl, shouldRetry);
        }

        @Override
        public RetryDecision onWriteTimeout(Statement query, ConsistencyLevel cl,
                WriteType writeType, int requiredAcks, int receivedAcks,
                int nbRetry) {
            boolean shouldRetry = retryOnAllConditions || retryOnWrites;
            return checkRetry(query, cl, shouldRetry);
        }

        @Override
        public RetryDecision onUnavailable(Statement query, ConsistencyLevel cl,
                int requiredReplica, int aliveReplica, int nbRetry) {
            boolean shouldRetry = retryOnAllConditions || retryOnUnavailable;
            return checkRetry(query, cl, shouldRetry);
        }

        @Override
        public RetryDecision onRequestError(Statement query, ConsistencyLevel cl, DriverException e, int nbRetry) {
            // Request errors are gated by the same flag as unavailable errors.
            boolean shouldRetry = retryOnAllConditions || retryOnUnavailable;
            return checkRetry(query, cl, shouldRetry);
        }

        @Override
        public void init(Cluster cluster) {
            // Do nothing
        }

        @Override
        public void close() {
            // Do nothing
        }
    };

    @Override
    public com.datastax.driver.core.policies.RetryPolicy getJDRetryPolicy() {
        return jdRetry;
    }

    /**
     * Shared retry decision: rethrow (or ignore, when suppressFinalFailure is set)
     * once retries are exhausted; otherwise consume one retry and retry at either
     * the configured next consistency level or the current one.
     *
     * NOTE(review): the driver-supplied nbRetry counter is ignored in favor of
     * the instance-level retryCount -- confirm this is intentional.
     */
    private RetryDecision checkRetry(Statement query, ConsistencyLevel cl, boolean shouldRetry) {

        if (!shouldRetry || retryCount <= 0) {
            // We are out of retries.
            if (suppressFinalFailure) {
                return RetryDecision.ignore();
            } else {
                return RetryDecision.rethrow();
            }
        }

        // Ok we should retry and have some tries left.
        retryCount--; // Note this retry

        // Check if the consistency level needs to be changed
        if (nextConsistencyLevel != null) {
            return RetryDecision.retry(nextConsistencyLevel);
        } else {
            return RetryDecision.retry(cl);
        }
    }
}
| 8,124 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/FieldMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import javax.persistence.Column;
import javax.persistence.OrderBy;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.serializers.PrefixedSerializer;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
/**
* Mapper from a field to a ByteBuffer
* @author elandau
*
* @param <T>
*/
public class FieldMapper<T> {

    /** Serializer for the field value (optionally wrapped with a prefix). */
    final Serializer<T> serializer;
    /** Reflected entity field this mapper reads and writes. */
    final Field field;
    /** Column name: @Column(name=...) when present, otherwise the field name. */
    final String name;
    /** True when the field is annotated @OrderBy("DESC"). */
    final boolean reversed;

    enum Order {
        ASC,
        DESC,
    }

    public FieldMapper(final Field field) {
        this(field, null);
    }

    /**
     * @param field  entity field to map
     * @param prefix optional prefix prepended to every serialized value; when
     *               non-null the field serializer is wrapped in a PrefixedSerializer
     */
    @SuppressWarnings("unchecked")
    public FieldMapper(final Field field, ByteBuffer prefix) {
        if (prefix != null) {
            this.serializer = new PrefixedSerializer<ByteBuffer, T>(prefix, ByteBufferSerializer.get(), (Serializer<T>) MappingUtils.getSerializerForField(field));
        }
        else {
            this.serializer = (Serializer<T>) MappingUtils.getSerializerForField(field);
        }
        this.field = field;

        // Column name defaults to the field name unless overridden via @Column(name=...)
        Column columnAnnotation = field.getAnnotation(Column.class);
        if (columnAnnotation == null || columnAnnotation.name().isEmpty()) {
            name = field.getName();
        }
        else {
            name = columnAnnotation.name();
        }

        // @OrderBy("DESC") marks this component as reversed (descending clustering order)
        OrderBy orderByAnnotation = field.getAnnotation(OrderBy.class);
        if (orderByAnnotation == null) {
            reversed = false;
        }
        else {
            Order order = Order.valueOf(orderByAnnotation.value());
            reversed = (order == Order.DESC);
        }
    }

    public Serializer<?> getSerializer() {
        return serializer;
    }

    /** Serialize the field value read from the given entity. */
    public ByteBuffer toByteBuffer(Object entity) throws IllegalArgumentException, IllegalAccessException {
        return serializer.toByteBuffer(getValue(entity));
    }

    public T fromByteBuffer(ByteBuffer buffer) {
        return serializer.fromByteBuffer(buffer);
    }

    /** Read the raw field value from the given entity via reflection. */
    @SuppressWarnings("unchecked")
    public T getValue(Object entity) throws IllegalArgumentException, IllegalAccessException {
        return (T)field.get(entity);
    }

    /** Serialize an already-extracted value. */
    @SuppressWarnings("unchecked")
    public ByteBuffer valueToByteBuffer(Object value) {
        return serializer.toByteBuffer((T)value);
    }

    public void setValue(Object entity, Object value) throws IllegalArgumentException, IllegalAccessException {
        field.set(entity, value);
    }

    /** Deserialize the buffer and assign the result to the entity's field. */
    public void setField(Object entity, ByteBuffer buffer) throws IllegalArgumentException, IllegalAccessException {
        field.set(entity, fromByteBuffer(buffer));
    }

    public boolean isAscending() {
        // Idiomatic form of the former 'reversed == false'.
        return !reversed;
    }

    public boolean isDescending() {
        // Idiomatic form of the former 'reversed == true'.
        return reversed;
    }

    public String getName() {
        return name;
    }
}
| 8,125 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/CompositeColumnEntityMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.PersistenceException;
import org.apache.commons.lang.StringUtils;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Equality;
import com.netflix.astyanax.query.ColumnPredicate;
/**
* Mapper from a CompositeType to an embedded entity. The composite entity is expected
* to have an @Id annotation for each composite component and a @Column annotation for
* the value.
*
* @author elandau
*
*/
public class CompositeColumnEntityMapper {
/**
* Class of embedded entity
*/
private final Class<?> clazz;
/**
* List of serializers for the composite parts
*/
private List<FieldMapper<?>> components = Lists.newArrayList();
/**
* List of valid (i.e. existing) column names
*/
private Set<String> validNames = Sets.newHashSet();
/**
* Mapper for the value part of the entity
*/
private FieldMapper<?> valueMapper;
/**
* Largest buffer size
*/
private int bufferSize = 64;
/**
* Parent field
*/
private final Field containerField;
public CompositeColumnEntityMapper(Field field) {
    // The container field is a parameterized collection (e.g. List<Embedded>);
    // the embedded entity class is its single generic type argument.
    ParameterizedType containerEntityType = (ParameterizedType) field.getGenericType();

    this.clazz = (Class<?>) containerEntityType.getActualTypeArguments()[0];
    this.containerField = field;
    this.containerField.setAccessible(true);

    // Collect a FieldMapper for every @Column-annotated field of the embedded class.
    // NOTE(review): relies on Class#getDeclaredFields() returning fields in
    // declaration order, which the JVM spec does not guarantee -- confirm.
    Field[] declaredFields = clazz.getDeclaredFields();
    for (Field f : declaredFields) {
        // The value
        Column columnAnnotation = f.getAnnotation(Column.class);
        if ((columnAnnotation != null)) {
            f.setAccessible(true);
            FieldMapper fieldMapper = new FieldMapper(f);
            components.add(fieldMapper);
            validNames.add(fieldMapper.getName());
        }
    }

    // Last one is always treated as the 'value'; the rest form the composite name.
    valueMapper = components.remove(components.size() - 1);
}
/**
* Iterate through the list and create a column for each element
* @param clm
* @param entity
* @throws IllegalArgumentException
* @throws IllegalAccessException
*/
public void fillMutationBatch(ColumnListMutation<ByteBuffer> clm, Object entity) throws IllegalArgumentException, IllegalAccessException {
    // Emit one composite column per element of the container collection;
    // a null container contributes nothing.
    List<?> elements = (List<?>) containerField.get(entity);
    if (elements == null) {
        return;
    }
    for (Object element : elements) {
        fillColumnMutation(clm, element);
    }
}
public void fillMutationBatchForDelete(ColumnListMutation<ByteBuffer> clm, Object entity) throws IllegalArgumentException, IllegalAccessException {
    // A null container means "delete the whole row"; otherwise each element's
    // composite column is deleted individually.
    List<?> elements = (List<?>) containerField.get(entity);
    if (elements == null) {
        clm.delete();
        return;
    }
    for (Object element : elements) {
        clm.deleteColumn(toColumnName(element));
    }
}
/**
* Add a column based on the provided entity
*
* @param clm
* @param entity
*/
public void fillColumnMutation(ColumnListMutation<ByteBuffer> clm, Object entity) {
    // Column name is the composite of all component fields; the payload is the
    // value field. Any reflection/serialization failure becomes a PersistenceException.
    try {
        clm.putColumn(toColumnName(entity), valueMapper.toByteBuffer(entity));
    } catch (Exception e) {
        throw new PersistenceException("failed to fill mutation batch", e);
    }
}
/**
* Return the column name byte buffer for this entity
*
* @param obj
* @return
*/
public ByteBuffer toColumnName(Object obj) {
    // Serialize each composite component in declaration order into a
    // CompositeType-encoded buffer.
    SimpleCompositeBuilder builder = new SimpleCompositeBuilder(bufferSize, Equality.EQUAL);
    for (FieldMapper<?> mapper : components) {
        try {
            builder.addWithoutControl(mapper.toByteBuffer(obj));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    return builder.get();
}
/**
* Set the collection field using the provided column list of embedded entities
* @param entity
* @param name
* @param column
* @return
* @throws Exception
*/
public boolean setField(Object entity, ColumnList<ByteBuffer> columns) throws Exception {
    // Decode every Thrift-style column into an embedded entity and append it
    // to the (lazily created) container list.
    List<Object> target = getOrCreateField(entity);
    for (com.netflix.astyanax.model.Column<ByteBuffer> column : columns) {
        target.add(fromColumn(column));
    }
    return true;
}
public boolean setFieldFromCql(Object entity, ColumnList<ByteBuffer> columns) throws Exception {
    // In CQL3 a single result-set row carries one whole embedded entity, so
    // exactly one entity is decoded from the full column list and appended.
    getOrCreateField(entity).add(fromCqlColumns(columns));
    return true;
}
private List<Object> getOrCreateField(Object entity) throws IllegalArgumentException, IllegalAccessException {
    // Lazily initialize the container list on first use.
    List<Object> existing = (List<Object>) containerField.get(entity);
    if (existing != null) {
        return existing;
    }
    List<Object> created = Lists.newArrayList();
    containerField.set(entity, created);
    return created;
}
/**
* Return an object from the column
*
* @param cl
* @return
*/
public Object fromColumn(com.netflix.astyanax.model.Column<ByteBuffer> c) {
    // Composite components come from the column name; the value field from the
    // payload. Buffers are duplicated so the source column is left untouched.
    try {
        Object entity = clazz.newInstance();
        setEntityFieldsFromColumnName(entity, c.getRawName().duplicate());
        valueMapper.setField(entity, c.getByteBufferValue().duplicate());
        return entity;
    } catch (Exception e) {
        throw new PersistenceException("failed to construct entity", e);
    }
}
public Object fromCqlColumns(com.netflix.astyanax.model.ColumnList<ByteBuffer> c) {
    try {
        // Allocate a new entity
        Object entity = clazz.newInstance();

        Iterator<com.netflix.astyanax.model.Column<ByteBuffer>> columnIter = c.iterator();
        // Skip the first column -- presumably the row key column of the CQL
        // result row rather than an entity component. TODO confirm.
        columnIter.next();

        // Remaining columns map positionally: one per composite component,
        // then the final one is the value field.
        for (FieldMapper<?> component : components) {
            component.setField(entity, columnIter.next().getByteBufferValue());
        }
        valueMapper.setField(entity, columnIter.next().getByteBufferValue());
        return entity;
    } catch(Exception e) {
        throw new PersistenceException("failed to construct entity", e);
    }
}
/**
 * Decode a serialized composite column name and populate one entity field
 * per composite component, in declaration order.
 *
 * The wire format per component is: a 2-byte big-endian length, the
 * component data, then one control byte which must equal END_OF_COMPONENT
 * ({@code Equality.EQUAL}).
 *
 * @param entity     entity instance whose component fields are populated
 * @param columnName buffer positioned at the start of the serialized composite
 * @throws IllegalArgumentException if a field cannot accept the decoded value
 * @throws IllegalAccessException   if a field is not accessible
 * @throws RuntimeException         if the composite encoding is malformed
 */
public void setEntityFieldsFromColumnName(Object entity, ByteBuffer columnName) throws IllegalArgumentException, IllegalAccessException {
    // Iterate through components in order and set fields
    for (FieldMapper<?> component : components) {
        ByteBuffer data = getWithShortLength(columnName);
        if (data == null) {
            throw new RuntimeException("Missing component data in composite type");
        }
        // Zero-length data means "component absent"; leave the field at its default
        if (data.remaining() > 0) {
            component.setField(entity, data);
        }
        byte endOfComponent = columnName.get();
        if (endOfComponent != Equality.EQUAL.toByte()) {
            throw new RuntimeException("Invalid composite column. Expected END_OF_COMPONENT.");
        }
    }
}
/**
 * Return the Cassandra comparator type describing this composite layout,
 * e.g. {@code CompositeType(UTF8Type,LongType)}.
 *
 * @return comma-joined component comparator class names wrapped in CompositeType(...)
 */
public String getComparatorType() {
    StringBuilder type = new StringBuilder("CompositeType(");
    String separator = "";
    for (FieldMapper<?> component : components) {
        type.append(separator)
            .append(component.serializer.getComparatorType().getClassName());
        separator = ",";
    }
    return type.append(")").toString();
}
/**
 * Reads a big-endian unsigned 16-bit length from the buffer, advancing the
 * buffer position by two bytes.
 *
 * @param bb buffer positioned at a 2-byte length prefix
 * @return the decoded length in the range [0, 65535]
 */
public static int getShortLength(ByteBuffer bb) {
    int high = bb.get() & 0xFF;
    int low  = bb.get() & 0xFF;
    return (high << 8) | low;
}
/**
 * Reads one length-prefixed byte span (2-byte big-endian length followed by
 * that many data bytes) and returns it as a view sharing the underlying
 * storage; advances {@code bb} past both the prefix and the data.
 *
 * @param bb buffer positioned at a length-prefixed span
 * @return a view over exactly the span's data bytes
 */
public static ByteBuffer getWithShortLength(ByteBuffer bb) {
    return getBytes(bb, getShortLength(bb));
}
/**
 * Returns a view of the next {@code length} bytes of {@code bb} and advances
 * {@code bb}'s position past them. The view shares the underlying storage;
 * no bytes are copied.
 *
 * @param bb     source buffer; its position is advanced by {@code length}
 * @param length number of bytes to expose in the returned view
 * @return a buffer whose remaining bytes are exactly the requested span
 */
public static ByteBuffer getBytes(ByteBuffer bb, int length) {
    final int start = bb.position();
    ByteBuffer view = bb.duplicate();
    view.limit(start + length);
    bb.position(start + length);
    return view;
}
/**
 * Return the Cassandra comparator class name used for the value portion of
 * this composite entity (the non-component field).
 */
public String getValueType() {
    return valueMapper.getSerializer().getComparatorType().getClassName();
}
/**
 * Build the inclusive start/end composite range for a slice query from the
 * supplied per-field predicates.
 *
 * @param predicates constraints keyed by component field name; every name
 *                   must exist on the entity or an IllegalArgumentException
 *                   is thrown
 * @return two-element array: [0] = composite range start, [1] = range end
 */
public ByteBuffer[] getQueryEndpoints(Collection<ColumnPredicate> predicates) {
    // Convert to multimap for easy lookup
    ArrayListMultimap<Object, ColumnPredicate> lookup = ArrayListMultimap.create();
    for (ColumnPredicate predicate : predicates) {
        Preconditions.checkArgument(validNames.contains(predicate.getName()), "Field '" + predicate.getName() + "' does not exist in the entity " + clazz.getCanonicalName());
        lookup.put(predicate.getName(), predicate);
    }
    // Unconstrained endpoints default to >= / <= over the whole component range
    SimpleCompositeBuilder start = new SimpleCompositeBuilder(bufferSize, Equality.GREATER_THAN_EQUALS);
    SimpleCompositeBuilder end = new SimpleCompositeBuilder(bufferSize, Equality.LESS_THAN_EQUALS);
    // Iterate through components in order while applying predicate to 'start' and 'end';
    // component order matters because composites compare left-to-right
    for (FieldMapper<?> mapper : components) {
        for (ColumnPredicate p : lookup.get(mapper.getName())) {
            applyPredicate(mapper, start, end, p);
        }
    }
    return new ByteBuffer[]{start.get(), end.get()};
}
/**
 * Fold one predicate into the start/end composite builders. EQUAL narrows
 * both endpoints; an inequality extends one side only, with sides swapped
 * when the component is sorted descending.
 */
private void applyPredicate(FieldMapper<?> mapper, SimpleCompositeBuilder start, SimpleCompositeBuilder end, ColumnPredicate predicate) {
    ByteBuffer bb = mapper.valueToByteBuffer(predicate.getValue());
    switch (predicate.getOp()) {
    case EQUAL:
        // Pin this component on both endpoints without closing the range
        start.addWithoutControl(bb);
        end.addWithoutControl(bb);
        break;
    case GREATER_THAN:
    case GREATER_THAN_EQUALS:
        // For a descending component a lower bound acts as an upper bound
        if (mapper.isAscending())
            start.add(bb, predicate.getOp());
        else
            end.add(bb, predicate.getOp());
        break;
    case LESS_THAN:
    case LESS_THAN_EQUALS:
        if (mapper.isAscending())
            end.add(bb, predicate.getOp());
        else
            start.add(bb, predicate.getOp());
        break;
    }
}
}
| 8,126 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/AbstractColumnMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import javax.persistence.Column;
/**
 * Base class for column mappers: resolves the effective column name from the
 * field's {@link Column} annotation (falling back to the field name when the
 * annotation name is empty) and rejects names containing the reserved '.'
 * separator character.
 */
public abstract class AbstractColumnMapper implements ColumnMapper {
    protected final Field field;
    protected final Column columnAnnotation;
    protected final String columnName;

    /**
     * @param field entity field; must carry a {@link Column} annotation
     *              (a missing annotation fails with a NullPointerException)
     * @throws IllegalArgumentException if the resolved name contains a dot
     */
    public AbstractColumnMapper(Field field) {
        this.field = field;
        this.columnAnnotation = field.getAnnotation(Column.class);
        // use field name if annotation name is not set
        String name = columnAnnotation.name().isEmpty() ? field.getName() : columnAnnotation.name();
        // dot is a reserved char, used as the separator for nested column names
        if (name.contains("."))
            throw new IllegalArgumentException("illegal column name containing reserved dot (.) char: " + name);
        this.columnName = name;
    }

    public Field getField() {
        return this.field;
    }
}
| 8,127 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/SimpleCompositeBuilder.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.nio.ByteBuffer;
import com.google.common.base.Preconditions;
import com.netflix.astyanax.model.Equality;
/**
 * Yet another attempt at simplifying how composite columns are built.
 *
 * Serializes components as {@code <2-byte length><data><1 control byte>}.
 * The control byte of the *last* component encodes the range semantics
 * (e.g. GREATER_THAN_EQUALS for a slice start); every earlier component's
 * control byte is rewritten to EQUAL. Not thread-safe.
 *
 * @author elandau
 *
 */
public class SimpleCompositeBuilder {
    // Per-component overhead: 2 length bytes + 1 trailing control byte
    private final static int COMPONENT_OVERHEAD = 3;
    private int bufferSize;
    private ByteBuffer bb;
    // True while the last written component still has its control byte pending/placed
    private boolean hasControl = true;
    // Once a non-EQUAL control is written the composite is closed to further components
    private Equality lastEquality = Equality.EQUAL;
    private final Equality finalEquality;

    public SimpleCompositeBuilder(int bufferSize, Equality finalEquality) {
        bb = ByteBuffer.allocate(bufferSize);
        this.finalEquality = finalEquality;
    }

    /** Append a component followed by an explicit control byte. */
    public void add(ByteBuffer cb, Equality control) {
        addWithoutControl(cb);
        addControl(control);
    }

    /**
     * Append a component's length-prefixed data, leaving its control byte
     * unwritten. A null component is treated as zero-length data. The
     * previous component's control byte (if any) is rewritten to EQUAL.
     */
    public void addWithoutControl(ByteBuffer cb) {
        Preconditions.checkState(lastEquality == Equality.EQUAL, "Cannot extend composite since non equality control already set");
        if (cb == null) {
            cb = ByteBuffer.allocate(0);
        }
        // Grow to the next power of two large enough for existing data plus this component
        if (cb.limit() + COMPONENT_OVERHEAD > bb.remaining()) {
            int exponent = (int) Math.ceil(Math.log((double) (cb.limit() + COMPONENT_OVERHEAD + bb.limit())) / Math.log(2));
            bufferSize = (int) Math.pow(2, exponent);
            ByteBuffer temp = ByteBuffer.allocate(bufferSize);
            bb.flip();
            temp.put(bb);
            bb = temp;
        }
        if (!hasControl) {
            // Previous component never got a control byte; close it with EQUAL
            addControl(Equality.EQUAL);
        }
        else if (bb.position() > 0) {
            // Rewind over the previous control byte and force it to EQUAL
            bb.position(bb.position() - 1);
            bb.put(Equality.EQUAL.toByte());
        }
        // Write the data: <length><data>
        bb.putShort((short) cb.remaining());
        bb.put(cb.slice());
        hasControl = false;
    }

    /** Write the pending control byte for the most recent component. */
    public void addControl(Equality control) {
        Preconditions.checkState(!hasControl, "Control byte already set");
        Preconditions.checkState(lastEquality == Equality.EQUAL, "Cannot extend composite since non equality control already set");
        hasControl = true;
        bb.put(control.toByte());
    }

    public boolean hasControl() {
        return hasControl;
    }

    /**
     * Return a read-ready view of the serialized composite, first closing the
     * last component with the builder's final control byte when needed.
     */
    public ByteBuffer get() {
        if (!hasControl)
            addControl(this.finalEquality);
        ByteBuffer ret = bb.duplicate();
        ret.flip();
        return ret;
    }
}
| 8,128 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/DefaultEntityManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import javax.persistence.PersistenceException;
import org.apache.commons.lang.StringUtils;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.partitioner.BigInteger127Partitioner;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.recipes.reader.AllRowsReader;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.StringSerializer;
/**
* Manager entities in a column famliy with any key type but columns that are
* encoded as strings.
*/
public class DefaultEntityManager<T, K> implements EntityManager<T, K> {
//////////////////////////////////////////////////////////////////
// Builder pattern
/**
 * Fluent builder for {@link DefaultEntityManager}. withEntityType(...) and
 * withKeyspace(...) are mandatory; all other settings have defaults.
 */
public static class Builder<T, K> {
    private Class<T> clazz = null;
    private EntityMapper<T,K> entityMapper = null;
    private Keyspace keyspace = null;
    private ColumnFamily<K, String> columnFamily = null;
    private ConsistencyLevel readConsitency = null;
    private ConsistencyLevel writeConsistency = null;
    private Integer ttl = null;
    private RetryPolicy retryPolicy = null;
    private LifecycleEvents<T> lifecycleHandler = null;
    private String columnFamilyName = null;
    private boolean autoCommit = true;
    private Partitioner partitioner = DEFAULT_ENTITY_MANAGER_PARTITIONER;

    public Builder() {
    }

    /**
     * mandatory
     * @param clazz entity class type
     */
    public Builder<T, K> withEntityType(Class<T> clazz) {
        Preconditions.checkNotNull(clazz);
        this.clazz = clazz;
        return this;
    }

    /**
     * mandatory
     * @param keyspace keyspace used for all reads and writes
     */
    public Builder<T, K> withKeyspace(Keyspace keyspace) {
        Preconditions.checkNotNull(keyspace);
        this.keyspace = keyspace;
        return this;
    }

    /**
     * optional; mutually exclusive with withColumnFamily(String)
     * @param columnFamily column name type is fixed to String/UTF8
     */
    public Builder<T, K> withColumnFamily(ColumnFamily<K, String> columnFamily) {
        Preconditions.checkState(this.columnFamilyName == null && this.columnFamily == null , "withColumnFamily called multiple times");
        Preconditions.checkNotNull(columnFamily);
        this.columnFamily = columnFamily;
        return this;
    }

    /**
     * optional; mutually exclusive with withColumnFamily(ColumnFamily)
     * @param columnFamilyName Name of column family to use.
     */
    public Builder<T, K> withColumnFamily(String columnFamilyName) {
        Preconditions.checkState(this.columnFamilyName == null && columnFamily == null , "withColumnFamily called multiple times");
        Preconditions.checkNotNull(columnFamilyName);
        this.columnFamilyName = columnFamilyName;
        return this;
    }

    /**
     * optional
     * @param level consistency level used for reads
     */
    public Builder<T, K> withReadConsistency(ConsistencyLevel level) {
        Preconditions.checkNotNull(level);
        this.readConsitency = level;
        return this;
    }

    /**
     * optional
     * @param level consistency level used for writes
     */
    public Builder<T, K> withWriteConsistency(ConsistencyLevel level) {
        Preconditions.checkNotNull(level);
        this.writeConsistency = level;
        return this;
    }

    /**
     * set both read and write consistency
     * optional
     * @param level consistency level used for both reads and writes
     */
    public Builder<T, K> withConsistency(ConsistencyLevel level) {
        Preconditions.checkNotNull(level);
        this.readConsitency = level;
        this.writeConsistency = level;
        return this;
    }

    /**
     * default TTL for all columns written to cassandra
     * optional
     * @param ttl TTL in seconds, or null for no expiration
     * @return this builder
     */
    public Builder<T, K> withTTL(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    /**
     * optional
     * @param policy retry policy applied to every query and mutation
     */
    public Builder<T, K> withRetryPolicy(RetryPolicy policy) {
        Preconditions.checkNotNull(policy);
        this.retryPolicy = policy;
        return this;
    }

    /**
     * optional; when false, mutations accumulate in a thread-local batch
     * until commit() is called on the entity manager
     */
    public Builder<T, K> withAutoCommit(boolean autoCommit) {
        this.autoCommit = autoCommit;
        return this;
    }

    /**
     * Partitioner used to determine token ranges and how to break token
     * ranges into sub parts. The default is BigInteger127Partitioner in
     * pre-cassandra 1.2.
     *
     * @param partitioner partitioner matching the cluster configuration
     * @return this builder
     */
    public Builder<T, K> withPartitioner(Partitioner partitioner) {
        this.partitioner = partitioner;
        return this;
    }

    @SuppressWarnings("unchecked")
    public DefaultEntityManager<T, K> build() {
        // check mandatory fields
        Preconditions.checkNotNull(clazz, "withEntityType(...) is not set");
        Preconditions
                .checkNotNull(keyspace, "withKeyspace(...) is not set");
        // TODO: check @Id type compatibility
        // TODO: do we need to require @Entity annotation
        this.entityMapper = new EntityMapper<T, K>(clazz, ttl);
        this.lifecycleHandler = new LifecycleEvents<T>(clazz);
        // Derive the column family from the entity when none was supplied
        if (columnFamily == null) {
            if (columnFamilyName == null)
                columnFamilyName = entityMapper.getEntityName();
            columnFamily = new ColumnFamily<K, String>(columnFamilyName,
                    (com.netflix.astyanax.Serializer<K>) MappingUtils
                            .getSerializerForField(this.entityMapper
                                    .getId()), StringSerializer.get());
        }
        // build object
        return new DefaultEntityManager<T, K>(this);
    }
}
/** Entry point for the fluent builder. */
public static <T,K> Builder<T,K> builder() {
    return new Builder<T,K>();
}
//////////////////////////////////////////////////////////////////
// private members
private final EntityMapper<T, K> entityMapper;
private final Keyspace keyspace;
private final ColumnFamily<K, String> columnFamily;
private final ConsistencyLevel readConsitency;
private final ConsistencyLevel writeConsistency;
private final RetryPolicy retryPolicy;
private final LifecycleEvents<T> lifecycleHandler;
private final boolean autoCommit;
private final ThreadLocal<MutationBatch> tlMutation = new ThreadLocal<MutationBatch>();
private static final Partitioner DEFAULT_ENTITY_MANAGER_PARTITIONER = BigInteger127Partitioner
.get();
private final Partitioner partitioner;
/**
 * Copies all configuration out of the builder; instances are immutable
 * apart from the thread-local mutation batch.
 */
private DefaultEntityManager(Builder<T, K> builder) {
    entityMapper = builder.entityMapper;
    keyspace = builder.keyspace;
    columnFamily = builder.columnFamily;
    readConsitency = builder.readConsitency;
    writeConsistency = builder.writeConsistency;
    retryPolicy = builder.retryPolicy;
    lifecycleHandler = builder.lifecycleHandler;
    autoCommit = builder.autoCommit;
    partitioner = builder.partitioner;
}
//////////////////////////////////////////////////////////////////
// public APIs
/**
 * {@inheritDoc}
 *
 * Persists a single entity, firing PrePersist before the write and
 * PostPersist after it. Executes immediately when autoCommit is on;
 * otherwise the mutation joins the thread-local batch flushed by commit().
 */
public void put(T entity) throws PersistenceException {
    try {
        lifecycleHandler.onPrePersist(entity);
        // Bug fix: was newMutationBatch(), which with autoCommit disabled
        // created a batch that was never executed nor added to the
        // thread-local batch — the write was silently dropped. Use
        // getMutationBatch() for consistency with delete(K)/put(Collection).
        MutationBatch mb = getMutationBatch();
        entityMapper.fillMutationBatch(mb, columnFamily, entity);
        if (autoCommit)
            mb.execute();
        lifecycleHandler.onPostPersist(entity);
    } catch(Exception e) {
        throw new PersistenceException("failed to put entity ", e);
    }
}
/**
 * {@inheritDoc}
 *
 * Reads the row for the given id and reconstructs the entity, firing
 * PostLoad. Returns null for a missing (or fully deleted) row.
 */
public T get(K id) throws PersistenceException {
    try {
        ColumnFamilyQuery<K, String> cfq = newQuery();
        ColumnList<String> cl = cfq.getKey(id).execute().getResult();
        // when a row is deleted in cassandra,
        // the row key remains (without any columns) until the next compaction.
        // simply return null (as non exist)
        if(cl.isEmpty())
            return null;
        T entity = entityMapper.constructEntity(id, cl);
        lifecycleHandler.onPostLoad(entity);
        return entity;
    } catch(Exception e) {
        throw new PersistenceException("failed to get entity " + id, e);
    }
}
/**
 * {@inheritDoc}
 *
 * Deletes the whole row for the given id. No lifecycle events fire here;
 * use remove(entity) when PreRemove/PostRemove callbacks are needed.
 */
@Override
public void delete(K id) throws PersistenceException {
    try {
        MutationBatch mb = getMutationBatch();
        mb.withRow(columnFamily, id).delete();
        if (autoCommit)
            mb.execute();
    } catch(Exception e) {
        throw new PersistenceException("failed to delete entity " + id, e);
    }
}
/**
 * Deletes the row backing the given entity, firing PreRemove before and
 * PostRemove after the deletion.
 */
@Override
public void remove(T entity) throws PersistenceException {
    K id = null;
    try {
        lifecycleHandler.onPreRemove(entity);
        id = entityMapper.getEntityId(entity);
        // Bug fix: was newMutationBatch(), which with autoCommit disabled
        // dropped the deletion silently; getMutationBatch() routes it into
        // the thread-local batch flushed by commit(), matching delete(K).
        MutationBatch mb = getMutationBatch();
        mb.withRow(columnFamily, id).delete();
        if (autoCommit)
            mb.execute();
        lifecycleHandler.onPostRemove(entity);
    } catch(Exception e) {
        throw new PersistenceException("failed to delete entity " + id, e);
    }
}
/**
 * {@inheritDoc}
 *
 * Streams every row via visitAll and collects the mapped entities. The
 * callback is synchronized — presumably because AllRowsReader may invoke it
 * from multiple threads (confirm against AllRowsReader's threading model).
 */
@Override
public List<T> getAll() throws PersistenceException {
    final List<T> entities = Lists.newArrayList();
    visitAll(new Function<T, Boolean>() {
        @Override
        public synchronized Boolean apply(T entity) {
            entities.add(entity);
            return true;  // keep iterating
        }
    });
    return entities;
}
/**
 * {@inheritDoc}
 *
 * Multi-get: fetches all ids in one row-slice query. Ids whose rows are
 * empty (deleted) are silently omitted from the result.
 */
@Override
public List<T> get(Collection<K> ids) throws PersistenceException {
    try {
        ColumnFamilyQuery<K, String> cfq = newQuery();
        Rows<K, String> rows = cfq.getRowSlice(ids).execute().getResult();
        List<T> entities = Lists.newArrayListWithExpectedSize(rows.size());
        for (Row<K, String> row : rows) {
            if (!row.getColumns().isEmpty()) {
                T entity = entityMapper.constructEntity(row.getKey(), row.getColumns());
                lifecycleHandler.onPostLoad(entity);
                entities.add(entity);
            }
        }
        return entities;
    } catch(Exception e) {
        throw new PersistenceException("failed to get entities " + ids, e);
    }
}
/**
 * {@inheritDoc}
 *
 * Deletes the rows for all given ids in a single mutation batch. No
 * lifecycle events fire; see remove(Collection) for callbacks.
 */
@Override
public void delete(Collection<K> ids) throws PersistenceException {
    MutationBatch mb = getMutationBatch();
    try {
        for (K id : ids) {
            mb.withRow(columnFamily, id).delete();
        }
        if (autoCommit)
            mb.execute();
    } catch(Exception e) {
        throw new PersistenceException("failed to delete entities " + ids, e);
    }
}
/**
 * Deletes the rows backing all given entities, firing PreRemove before and
 * PostRemove after the batched deletion.
 */
@Override
public void remove(Collection<T> entities) throws PersistenceException {
    MutationBatch mb = getMutationBatch();
    try {
        for (T entity : entities) {
            lifecycleHandler.onPreRemove(entity);
            K id = entityMapper.getEntityId(entity);
            mb.withRow(columnFamily, id).delete();
        }
        // Bug fix: execute() was unconditional, which defeated manual-commit
        // mode; honor autoCommit like delete(Collection) and put(Collection).
        if (autoCommit)
            mb.execute();
        for (T entity : entities) {
            lifecycleHandler.onPostRemove(entity);
        }
    } catch(Exception e) {
        throw new PersistenceException("failed to delete entities ", e);
    }
}
/**
 * {@inheritDoc}
 *
 * Persists all entities in one mutation batch, firing PrePersist before the
 * batch is filled and PostPersist once it has been filled (and executed,
 * when autoCommit is on).
 */
@Override
public void put(Collection<T> entities) throws PersistenceException {
    MutationBatch mb = getMutationBatch();
    try {
        for (T entity : entities) {
            lifecycleHandler.onPrePersist(entity);
            entityMapper.fillMutationBatch(mb, columnFamily, entity);
        }
        if (autoCommit)
            mb.execute();
        for (T entity : entities) {
            lifecycleHandler.onPostPersist(entity);
        }
    } catch(Exception e) {
        throw new PersistenceException("failed to put entities ", e);
    }
}
/**
 * {@inheritDoc}
 *
 * Iterates every non-empty row in the column family via AllRowsReader and
 * applies the callback to each mapped entity. Returning false from the
 * callback stops the iteration.
 */
@Override
public void visitAll(final Function<T, Boolean> callback) throws PersistenceException {
    try {
        new AllRowsReader.Builder<K, String>(keyspace, columnFamily)
                .withIncludeEmptyRows(false)
                .withPartitioner(partitioner)
                .forEachRow(new Function<Row<K,String>, Boolean>() {
                    @Override
                    public Boolean apply(Row<K, String> row) {
                        if (row.getColumns().isEmpty())
                            return true;
                        T entity = entityMapper.constructEntity(row.getKey(), row.getColumns());
                        try {
                            lifecycleHandler.onPostLoad(entity);
                        } catch (Exception e) {
                            // TODO: NOTE(review): PostLoad failures are silently
                            // swallowed here — confirm whether they should abort the scan
                        }
                        return callback.apply(entity);
                    }
                })
                .build()
                .call();
    } catch (Exception e) {
        throw new PersistenceException("Failed to fetch all entites", e);
    }
}
/**
 * Executes a CQL query and maps each returned row to an entity (firing
 * PostLoad). Only SELECT statements are accepted; empty rows are skipped.
 *
 * @param cql a CQL SELECT statement
 */
@Override
public List<T> find(String cql) throws PersistenceException {
    Preconditions.checkArgument(StringUtils.left(cql, 6).equalsIgnoreCase("SELECT"), "CQL must be SELECT statement");
    try {
        CqlResult<K, String> results = newQuery().withCql(cql).execute().getResult();
        List<T> entities = Lists.newArrayListWithExpectedSize(results.getRows().size());
        for (Row<K, String> row : results.getRows()) {
            if (!row.getColumns().isEmpty()) {
                T entity = entityMapper.constructEntity(row.getKey(), row.getColumns());
                lifecycleHandler.onPostLoad(entity);
                entities.add(entity);
            }
        }
        return entities;
    } catch (Exception e) {
        throw new PersistenceException("Failed to execute cql query", e);
    }
}
/**
 * Creates a fresh mutation batch configured with this manager's write
 * consistency level and retry policy (when set).
 */
private MutationBatch newMutationBatch() {
    MutationBatch mb = keyspace.prepareMutationBatch();
    if(writeConsistency != null)
        mb.withConsistencyLevel(writeConsistency);
    if(retryPolicy != null)
        mb.withRetryPolicy(retryPolicy);
    return mb;
}
/**
 * Returns the mutation batch for the current operation: a brand-new batch
 * when autoCommit is on, otherwise the thread-local batch (created on first
 * use) that commit() later executes.
 */
private MutationBatch getMutationBatch() {
    if (autoCommit) {
        return newMutationBatch();
    }
    else {
        MutationBatch mb = tlMutation.get();
        if (mb == null) {
            mb = newMutationBatch();
            tlMutation.set(mb);
        }
        return mb;
    }
}
/**
 * Creates a column family query configured with this manager's read
 * consistency level and retry policy (when set).
 */
private ColumnFamilyQuery<K, String> newQuery() {
    ColumnFamilyQuery<K, String> cfq = keyspace.prepareQuery(columnFamily);
    if(readConsitency != null)
        cfq.setConsistencyLevel(readConsitency);
    if(retryPolicy != null)
        cfq.withRetryPolicy(retryPolicy);
    return cfq;
}
/**
 * Creates the backing column family, treating "already exists" as success.
 * NOTE(review): existence is detected by matching the exception message
 * text, which is fragile across driver/server versions — confirm.
 */
@Override
public void createStorage(Map<String, Object> options) throws PersistenceException {
    try {
        keyspace.createColumnFamily(this.columnFamily, options);
    } catch (ConnectionException e) {
        if (e.getMessage().contains("already exist"))
            return;
        throw new PersistenceException("Unable to create column family " + this.columnFamily.getName(), e);
    }
}
/** Drops the backing column family entirely (schema change). */
@Override
public void deleteStorage() throws PersistenceException {
    try {
        keyspace.dropColumnFamily(this.columnFamily);
    } catch (ConnectionException e) {
        throw new PersistenceException("Unable to drop column family " + this.columnFamily.getName(), e);
    }
}
/**
 * Removes all rows from the backing column family while keeping its schema.
 */
@Override
public void truncate() throws PersistenceException {
    try {
        keyspace.truncateColumnFamily(this.columnFamily);
    } catch (ConnectionException e) {
        // Message fixed: this path truncates — the old text said "drop",
        // a copy-paste from deleteStorage() that misled operators.
        throw new PersistenceException("Unable to truncate column family " + this.columnFamily.getName(), e);
    }
}
/**
 * Executes the thread-local mutation batch accumulated while autoCommit is
 * disabled; a no-op when no batch exists for the calling thread.
 * NOTE(review): the batch is not cleared from the thread-local after
 * execution — confirm whether MutationBatch.execute() resets it for reuse.
 */
@Override
public void commit() throws PersistenceException {
    MutationBatch mb = tlMutation.get();
    if (mb != null) {
        try {
            mb.execute();
        } catch (ConnectionException e) {
            throw new PersistenceException("Failed to commit mutation batch", e);
        }
    }
}
/**
 * Native queries are not supported by this entity manager implementation.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public NativeQuery<T, K> createNativeQuery() {
    throw new UnsupportedOperationException("Not implemented yet");
}
}
| 8,129 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/SetColumnMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.util.Iterator;
import java.util.Set;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.serializers.SerializerTypeInferer;
/**
 * Maps a {@code Set<?>} entity field to a group of empty columns named
 * {@code <prefix><field>.<element>} — one column per set element, with the
 * element value encoded into the column name and no column value.
 *
 * @author elandau
 */
public class SetColumnMapper extends AbstractColumnMapper {
    private final Class<?> clazz;
    private final Serializer<?> serializer;

    public SetColumnMapper(Field field) {
        super(field);
        // The element type is the single type argument of the Set<> declaration
        ParameterizedType setType = (ParameterizedType) field.getGenericType();
        this.clazz = (Class<?>) setType.getActualTypeArguments()[0];
        this.serializer = SerializerTypeInferer.getSerializer(this.clazz);
    }

    @Override
    public String getColumnName() {
        return this.columnName;
    }

    @Override
    public boolean fillMutationBatch(Object entity, ColumnListMutation<String> clm, String prefix) throws Exception {
        Set<?> set = (Set<?>) field.get(entity);
        if (set == null) {
            if (columnAnnotation.nullable())
                return false; // skip
            throw new IllegalArgumentException("cannot write non-nullable column with null value: " + columnName);
        }
        // One empty column per element; the element lives in the column name
        for (Object entry : set) {
            clm.putEmptyColumn(prefix + columnName + "." + entry.toString(), null);
        }
        return true;
    }

    @Override
    @SuppressWarnings("unchecked") // mapped field is declared as a Set on the entity
    public boolean setField(Object entity, Iterator<String> name, com.netflix.astyanax.model.Column<String> column) throws Exception {
        // The element is the final name component. Check this BEFORE touching
        // the entity: previously a mismatching column still installed an
        // empty Set on the entity as a side effect of a failed match.
        String value = name.next();
        if (name.hasNext())
            return false;
        Set<Object> set = (Set<Object>) field.get(entity);
        if (set == null) {
            set = Sets.newHashSet();
            field.set(entity, set);
        }
        set.add(serializer.fromByteBuffer(serializer.fromString(value)));
        return true;
    }

    @Override
    public void validate(Object entity) throws Exception {
        // TODO: no validation implemented for set fields yet
    }
}
| 8,130 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/CompositeEntityManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.persistence.PersistenceException;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.MutationBatchManager;
import com.netflix.astyanax.ThreadLocalMutationBatchManager;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.recipes.reader.AllRowsReader;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.util.RangeBuilder;
/**
* Entity manager for a composite column family. This entity manager expects
* the entity to have a single @Id which corresponds to the row key. It will then
* have at least 3 columns, with all but the last being parts of the composite.
*
* @Entity
* class Entity {
* @Id String rowKey
* @Column String firstCompositePart;
* @Column Long secondCompositePart;
* @Column String valuePart;
* }
*
*
* @author elandau
*
* @param <T> Entity type
* @param <K> Partition key
*/
public class CompositeEntityManager<T, K> implements EntityManager<T, K> {
private static final Logger LOG = LoggerFactory.getLogger(CompositeEntityManager.class);
private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.CL_ONE;
public static class Builder<T, K> {
private Keyspace keyspace;
private Class<T> clazz;
private ColumnFamily<K, ByteBuffer> columnFamily = null;
private ConsistencyLevel readConsitency = DEFAULT_CONSISTENCY_LEVEL;
private ConsistencyLevel writeConsistency = DEFAULT_CONSISTENCY_LEVEL;
private CompositeEntityMapper<T, K> entityMapper;
private Integer ttl = null;
private RetryPolicy retryPolicy = null;
private LifecycleEvents<T> lifecycleHandler = null;
private String columnFamilyName = null;
private boolean autoCommit = true;
private MutationBatchManager batchManager = null;
private boolean verbose = false;
private ByteBuffer prefix = null;
/**
* mandatory
* @param clazz entity class type
*/
public Builder<T, K> withEntityType(Class<T> clazz) {
Preconditions.checkNotNull(clazz);
this.clazz = clazz;
return this;
}
/**
* mandatory
* @param keyspace
*/
public Builder<T, K> withKeyspace(Keyspace keyspace) {
Preconditions.checkNotNull(keyspace);
this.keyspace = keyspace;
return this;
}
/**
* optional
* @param columnFamilyName Name of column family to use.
*/
public Builder<T, K> withColumnFamily(String columnFamilyName) {
Preconditions.checkState(this.columnFamilyName == null && columnFamily == null , "withColumnFamily called multiple times");
Preconditions.checkNotNull(columnFamilyName);
this.columnFamilyName = columnFamilyName; // .toLowerCase();
return this;
}
/**
* optional
* @param level
*/
public Builder<T, K> withReadConsistency(ConsistencyLevel level) {
Preconditions.checkNotNull(level);
this.readConsitency = level;
return this;
}
/**
* optional
* @param level
*/
public Builder<T, K> withWriteConsistency(ConsistencyLevel level) {
Preconditions.checkNotNull(level);
this.writeConsistency = level;
return this;
}
/**
* set both read and write consistency
* optional
* @param level
*/
public Builder<T, K> withConsistency(ConsistencyLevel level) {
Preconditions.checkNotNull(level);
this.readConsitency = level;
this.writeConsistency = level;
return this;
}
/**
* default TTL for all columns written to cassandra
* optional
* @return
*/
public Builder<T, K> withTTL(Integer ttl) {
this.ttl = ttl;
return this;
}
/**
* optional
* @param level
*/
public Builder<T, K> withRetryPolicy(RetryPolicy policy) {
Preconditions.checkNotNull(policy);
this.retryPolicy = policy;
return this;
}
/**
 * Enables or disables automatic execution of the mutation batch on every
 * operation. Enabling autoCommit is not allowed once an external
 * MutationBatchManager has been supplied (withMutationBatchManager forces
 * autoCommit off).
 *
 * @param autoCommit true to execute each mutation immediately
 */
public Builder<T, K> withAutoCommit(boolean autoCommit) {
    // Bug fix: the old check 'autoCommit == false && this.batchManager == null'
    // rejected EVERY withAutoCommit(true) call, even with no batch manager
    // set. The intended constraint is only that autoCommit cannot be
    // enabled together with an externally supplied manager.
    Preconditions.checkArgument(!(autoCommit && this.batchManager != null), "Cannot use autoCommit with an externally supplied MutationBatchManager");
    this.autoCommit = autoCommit;
    return this;
}
/**
* If set to true log every action
* @param verbose
* @return
*/
public Builder<T, K> withVerboseTracing(boolean verbose) {
this.verbose = verbose;
return this;
}
/**
* Specify a mutation manager to use. The mutation manager makes it possible to share
* the same mutation across multiple calls to multiple entity managers and only
* commit when all the mutations has been created.
* @param batchManager
* @return
*/
public Builder<T, K> withMutationBatchManager(MutationBatchManager batchManager) {
this.batchManager = batchManager;
this.autoCommit = false;
return this;
}
public Builder<T, K> withKeyPrefix(String prefix) {
this.prefix = StringSerializer.get().toByteBuffer(prefix);
return this;
}
@SuppressWarnings("unchecked")
public CompositeEntityManager<T, K> build() {
// check mandatory fields
Preconditions.checkNotNull(clazz, "withEntityType(...) is not set");
Preconditions.checkNotNull(keyspace, "withKeyspace(...) is not set");
// TODO: check @Id type compatibility
// TODO: do we need to require @Entity annotation
this.entityMapper = new CompositeEntityMapper<T,K>(clazz, ttl, prefix);
this.lifecycleHandler = new LifecycleEvents<T>(clazz);
if (columnFamily == null) {
if (columnFamilyName == null)
columnFamilyName = entityMapper.getEntityName();
columnFamily = new ColumnFamily<K, ByteBuffer>(
columnFamilyName,
(com.netflix.astyanax.Serializer<K>)MappingUtils.getSerializerForField(this.entityMapper.getId()),
ByteBufferSerializer.get());
}
if (batchManager == null) {
batchManager = new ThreadLocalMutationBatchManager(this.keyspace, this.writeConsistency, this.retryPolicy);
}
// build object
return new CompositeEntityManager<T, K>(this);
}
}
/**
 * @return a fresh builder for constructing a CompositeEntityManager
 */
public static <T,K> Builder<T,K> builder() {
    return new Builder<T, K>();
}
// Immutable configuration captured from the Builder at construction time.
private final Keyspace keyspace;
private final CompositeEntityMapper<T,K> entityMapper;    // maps entity fields <-> composite columns
private final RetryPolicy retryPolicy;
private final LifecycleEvents<T> lifecycleHandler;        // dispatches JPA lifecycle callbacks
private final boolean autoCommit;                         // when true, each operation executes its batch immediately
private final ColumnFamily<K, ByteBuffer> columnFamily;
// NOTE(review): field name is a misspelling of "readConsistency"; kept as-is
// because it is referenced throughout this class (and mirrors the Builder field).
private final ConsistencyLevel readConsitency;
private final MutationBatchManager batchManager;          // source of the (possibly shared) mutation batch
private final boolean verbose;                            // when true, every action is logged at INFO
/**
 * Copies all settings from the builder; intended to be invoked only via
 * {@code Builder.build()}.
 */
public CompositeEntityManager(Builder<T,K> builder) {
    entityMapper = builder.entityMapper;
    keyspace = builder.keyspace;
    columnFamily = builder.columnFamily;
    readConsitency = builder.readConsitency;
    retryPolicy = builder.retryPolicy;
    lifecycleHandler = builder.lifecycleHandler;
    autoCommit = builder.autoCommit;
    batchManager = builder.batchManager;
    verbose = builder.verbose;
}
//////////////////////////////////////////////////////////////////
// public APIs
/**
 * @inheritDoc
 *
 * Persist a single entity, firing @PrePersist and @PostPersist callbacks
 * around the write. The mutation executes immediately only when autoCommit
 * is enabled; otherwise it accumulates in the shared batch until commit().
 */
public void put(T entity) throws PersistenceException {
    try {
        if (verbose) {
            LOG.info(String.format("%s : Adding entity '%s'", columnFamily.getName(), entity));
        }
        lifecycleHandler.onPrePersist(entity);
        final MutationBatch batch = getMutationBatch();
        entityMapper.fillMutationBatch(batch, columnFamily, entity);
        if (autoCommit) {
            batch.execute();
        }
        lifecycleHandler.onPostPersist(entity);
    } catch (Exception e) {
        throw new PersistenceException("failed to put entity ", e);
    }
}
/**
 * @inheritDoc
 *
 * Not supported for composite entities: a single row may contain many
 * entities, so a plain id lookup is ambiguous. Use the native query API.
 */
public T get(K id) throws PersistenceException {
    throw new UnsupportedOperationException("Call newNativeQuery().withId().equal({id}) instead");
}
/**
 * @inheritDoc
 *
 * Delete the entire row for {@code id}. Executes immediately only when
 * autoCommit is enabled; otherwise the delete is queued in the shared
 * mutation batch until commit().
 */
@Override
public void delete(K id) throws PersistenceException {
    try {
        if (verbose) {
            LOG.info(String.format("%s : Deleting id '%s'", columnFamily.getName(), id));
        }
        final MutationBatch batch = getMutationBatch();
        batch.withRow(columnFamily, id).delete();
        if (autoCommit) {
            batch.execute();
        }
    } catch (Exception e) {
        throw new PersistenceException("failed to delete entity " + id, e);
    }
}
/**
 * Remove a single entity, firing @PreRemove and @PostRemove callbacks.
 * The row-key is extracted from the entity's @Id field. The delete executes
 * immediately only when autoCommit is enabled.
 */
@Override
public void remove(T entity) throws PersistenceException {
    K id = null;
    try {
        if (verbose) {
            LOG.info(String.format("%s : Removing entity '%s'", columnFamily.getName(), entity));
        }
        lifecycleHandler.onPreRemove(entity);
        id = entityMapper.getEntityId(entity);
        final MutationBatch batch = getMutationBatch();
        entityMapper.fillMutationBatchForDelete(batch, columnFamily, entity);
        if (autoCommit) {
            batch.execute();
        }
        lifecycleHandler.onPostRemove(entity);
    } catch (Exception e) {
        throw new PersistenceException("failed to delete entity " + id, e);
    }
}
/**
 * @inheritDoc
 *
 * Streams every row via visitAll() and collects the resulting entities.
 *
 * Fix: the previous implementation invoked {@code lifecycleHandler.onPostLoad}
 * inside this callback even though visitAll() already fires @PostLoad for
 * every entity before handing it to the callback, so the callback ran twice
 * per entity. The duplicate invocation has been removed.
 */
@Override
public List<T> getAll() throws PersistenceException {
    final List<T> entities = Lists.newArrayList();
    visitAll(new Function<T, Boolean>() {
        @Override
        public synchronized Boolean apply(T entity) {
            // synchronized: the reader may invoke this callback from multiple
            // threads. NOTE(review): confirm AllRowsReader's threading model.
            entities.add(entity);
            return true; // keep iterating
        }
    });
    return entities;
}
/**
 * @inheritDoc
 *
 * Multi-get: fetch all rows for the given ids and expand each column of
 * each row into an entity.
 */
@Override
public List<T> get(Collection<K> ids) throws PersistenceException {
    try {
        if (verbose) {
            LOG.info(String.format("%s : Reading entities '%s'", columnFamily.getName(), ids.toString()));
        }
        // Query for rows
        final ColumnFamilyQuery<K, ByteBuffer> query = newQuery();
        final Rows<K, ByteBuffer> rows = query.getRowSlice(ids).execute().getResult();
        return convertRowsToEntities(rows);
    } catch (Exception e) {
        throw new PersistenceException("failed to get entities " + ids, e);
    }
}
/**
 * Expand raw rows into entities. In the composite layout each column of a
 * non-empty row represents one entity; @PostLoad fires per entity.
 */
private List<T> convertRowsToEntities(Rows<K, ByteBuffer> rows) throws Exception {
    final List<T> entities = Lists.newArrayList();
    for (Row<K, ByteBuffer> row : rows) {
        final ColumnList<ByteBuffer> columns = row.getColumns();
        // when a row is deleted in cassandra, the row key remains (without
        // any columns) until the next compaction; treat it as non-existent.
        if (columns.isEmpty()) {
            continue;
        }
        for (Column<ByteBuffer> column : columns) {
            final T entity = entityMapper.constructEntity(row.getKey(), column);
            lifecycleHandler.onPostLoad(entity);
            entities.add(entity);
        }
    }
    return entities;
}
/**
 * @inheritDoc
 *
 * Queue a full-row delete for every id. The batch executes immediately
 * only when autoCommit is enabled.
 */
@Override
public void delete(Collection<K> ids) throws PersistenceException {
    final MutationBatch batch = getMutationBatch();
    try {
        if (verbose) {
            LOG.info(String.format("%s : Delete ids '%s'", columnFamily.getName(), ids.toString()));
        }
        for (K id : ids) {
            batch.withRow(columnFamily, id).delete();
        }
        if (autoCommit) {
            batch.execute();
        }
    } catch (Exception e) {
        throw new PersistenceException("failed to delete entities " + ids, e);
    }
}
/**
 * Remove a collection of entities, firing @PreRemove per entity before the
 * batch is filled and @PostRemove per entity afterwards.
 *
 * Fix: the batch was previously executed unconditionally, unlike
 * remove(T), delete(Collection) and put(Collection) which only execute when
 * autoCommit is enabled. With an externally supplied MutationBatchManager
 * (autoCommit=false) that committed the shared batch prematurely. The
 * execute is now guarded by autoCommit for consistency.
 */
@Override
public void remove(Collection<T> entities) throws PersistenceException {
    MutationBatch mb = getMutationBatch();
    try {
        for (T entity : entities) {
            lifecycleHandler.onPreRemove(entity);
            if (verbose)
                LOG.info(String.format("%s : Deleting '%s'", columnFamily.getName(), entity));
            entityMapper.fillMutationBatchForDelete(mb, columnFamily, entity);
        }
        // Defer execution to commit() when autoCommit is disabled.
        if (autoCommit)
            mb.execute();
        for (T entity : entities) {
            lifecycleHandler.onPostRemove(entity);
        }
    } catch (Exception e) {
        throw new PersistenceException("failed to delete entities ", e);
    }
}
/**
 * @inheritDoc
 *
 * Bulk persist: fires @PrePersist per entity, accumulates every write into
 * one batch, then fires @PostPersist per entity. The batch executes
 * immediately only when autoCommit is enabled.
 */
@Override
public void put(Collection<T> entities) throws PersistenceException {
    final MutationBatch batch = getMutationBatch();
    try {
        for (T entity : entities) {
            lifecycleHandler.onPrePersist(entity);
            if (verbose) {
                LOG.info(String.format("%s : Writing '%s'", columnFamily.getName(), entity));
            }
            entityMapper.fillMutationBatch(batch, columnFamily, entity);
        }
        if (autoCommit) {
            batch.execute();
        }
        for (T entity : entities) {
            lifecycleHandler.onPostPersist(entity);
        }
    } catch (Exception e) {
        throw new PersistenceException("failed to put entities ", e);
    }
}
/**
 * @inheritDoc
 *
 * Scan every row of the column family with AllRowsReader, turning each
 * column into an entity and handing it to {@code callback}. Iteration stops
 * early when the callback returns false. @PostLoad fires for every entity
 * before the callback sees it.
 */
@Override
public void visitAll(final Function<T, Boolean> callback) throws PersistenceException {
    try {
        new AllRowsReader.Builder<K, ByteBuffer>(keyspace, columnFamily)
            .withIncludeEmptyRows(false)
            .forEachRow(new Function<Row<K,ByteBuffer>, Boolean>() {
                @Override
                public Boolean apply(Row<K, ByteBuffer> row) {
                    // Tombstoned rows may still surface with no columns; skip.
                    if (row.getColumns().isEmpty())
                        return true;
                    for (Column column : row.getColumns()) {
                        T entity = (T) entityMapper.constructEntity(row.getKey(), column);
                        try {
                            lifecycleHandler.onPostLoad(entity);
                        } catch (Exception e) {
                            // TODO: @PostLoad failures are deliberately swallowed here;
                            // the scan continues with the entity as constructed.
                        }
                        if (!callback.apply(entity))
                            return false; // callback requested the scan stop
                    }
                    return true;
                }
            })
            .build()
            .call();
    } catch (Exception e) {
        throw new PersistenceException("Failed to fetch all entites", e);
    }
}
/**
 * Execute a raw CQL SELECT and map each returned row to an entity.
 * Only SELECT statements are accepted; @PostLoad fires per entity.
 */
@Override
public List<T> find(String cql) throws PersistenceException {
    Preconditions.checkArgument(StringUtils.left(cql, 6).equalsIgnoreCase("SELECT"), "CQL must be SELECT statement");
    try {
        final CqlResult<K, ByteBuffer> result = newQuery().withCql(cql).execute().getResult();
        final List<T> entities = Lists.newArrayListWithExpectedSize(result.getRows().size());
        for (Row<K, ByteBuffer> row : result.getRows()) {
            if (row.getColumns().isEmpty()) {
                continue; // tombstoned row: key present, no data
            }
            final T entity = entityMapper.constructEntityFromCql(row.getColumns());
            lifecycleHandler.onPostLoad(entity);
            entities.add(entity);
        }
        return entities;
    } catch (Exception e) {
        throw new PersistenceException("Failed to execute cql query", e);
    }
}
/**
 * @return the mutation batch supplied by the configured batch manager;
 *         with the default ThreadLocalMutationBatchManager this is a
 *         per-thread batch
 */
private MutationBatch getMutationBatch() {
    return batchManager.getSharedMutationBatch();
}
/**
 * Build a column-family query preconfigured with the manager's read
 * consistency level and retry policy, when those were set.
 */
private ColumnFamilyQuery<K, ByteBuffer> newQuery() {
    final ColumnFamilyQuery<K, ByteBuffer> query = keyspace.prepareQuery(columnFamily);
    if (readConsitency != null) {
        query.setConsistencyLevel(readConsitency);
    }
    if (retryPolicy != null) {
        query.withRetryPolicy(retryPolicy);
    }
    return query;
}
/**
 * @inheritDoc
 *
 * Create the backing column family. Key/value validation classes and the
 * comparator type are derived from the entity mapping; the {@code options}
 * parameter is currently ignored.
 */
@Override
public void createStorage(Map<String, Object> options) throws PersistenceException {
    try {
        Properties props = new Properties();
        props.put("key_validation_class", this.entityMapper.getKeyType());
        props.put("default_validation_class", this.entityMapper.getValueType());
        props.put("comparator_type", this.entityMapper.getComparatorType());
        props.put("name", this.columnFamily.getName());
        LOG.info("Creating column family : " + props.toString());
        keyspace.createColumnFamily(props);
    } catch (ConnectionException e) {
        throw new PersistenceException("Unable to create column family " + this.columnFamily.getName(), e);
    }
}
/**
 * @inheritDoc
 *
 * Drop the backing column family entirely (schema and data).
 */
@Override
public void deleteStorage() throws PersistenceException {
    try {
        LOG.info(String.format("%s : Deleting storage", columnFamily.getName()));
        keyspace.dropColumnFamily(this.columnFamily);
    } catch (ConnectionException e) {
        throw new PersistenceException("Unable to drop column family " + this.columnFamily.getName(), e);
    }
}
/**
 * @inheritDoc
 *
 * Truncate (empty) the column family without dropping its definition.
 */
@Override
public void truncate() throws PersistenceException {
    try {
        LOG.info(String.format("%s : Truncating", columnFamily.getName()));
        keyspace.truncateColumnFamily(this.columnFamily);
    } catch (ConnectionException e) {
        // Fixed copy-paste error: the message previously said "drop".
        throw new PersistenceException("Unable to truncate column family " + this.columnFamily.getName(), e);
    }
}
/**
 * @inheritDoc
 *
 * Flush the shared mutation batch. Intended for use with autoCommit
 * disabled, where individual operations only accumulate mutations.
 */
@Override
public void commit() throws PersistenceException {
    if (verbose) {
        LOG.info(String.format("%s : Commit mutation", columnFamily.getName()));
    }
    final MutationBatch batch = getMutationBatch();
    if (batch == null) {
        if (verbose) {
            LOG.info(String.format("%s : Nothing to commit", columnFamily.getName()));
        }
        return;
    }
    try {
        batch.execute();
    } catch (ConnectionException e) {
        throw new PersistenceException("Failed to commit mutation batch", e);
    }
}
/**
 * Create a fluent query against this entity's column family: row keys are
 * chosen via {@code withId()} and column ranges are derived from the
 * predicates through the composite mapping.
 */
@Override
public NativeQuery<T, K> createNativeQuery() {
    return new NativeQuery<T, K>() {
        /** @return the first matching entity, or null when nothing matches */
        @Override
        public T getSingleResult() throws PersistenceException {
            return Iterables.getFirst(getResultSet(), null);
        }
        /** Fetch all matching entities; at least one id must have been set. */
        @Override
        public Collection<T> getResultSet() throws PersistenceException {
            Preconditions.checkArgument(!ids.isEmpty(), "Must specify at least one row key (ID) to fetch");
            // if (verbose)
            // LOG.info(String.format("%s : Query ids '%s' with predicates '%s'", columnFamily.getName(), ids, predicates));
            RowSliceQuery<K, ByteBuffer> rowQuery = prepareQuery();
            try {
                List<T> entities = convertRowsToEntities(rowQuery.execute().getResult());
                // if (verbose)
                // LOG.info(String.format("%s : Query ids '%s' with predicates '%s' result='%s'", columnFamily.getName(), ids, predicates, entities));
                return entities;
            } catch (Exception e) {
                throw new PersistenceException("Error executing query", e);
            }
        }
        /**
         * Group the result set by the entity's @Id value, preserving the
         * order entities come back in. Useful for multi-row fetches.
         */
        @Override
        public Map<K, Collection<T>> getResultSetById() throws Exception {
            Map<K, Collection<T>> result = Maps.newLinkedHashMap();
            for (T entity : getResultSet()) {
                // Unchecked: idMapper's declared type does not carry K.
                K id = (K)entityMapper.idMapper.getValue(entity);
                Collection<T> children = result.get(id);
                if (children == null) {
                    children = Lists.newArrayListWithCapacity(1);
                    result.put(id, children);
                }
                children.add(entity);
            }
            return result;
        }
        /**
         * Count matching columns per row server-side, without transferring
         * column data back to the client.
         */
        @Override
        public Map<K, Integer> getResultSetCounts() throws Exception {
            Preconditions.checkArgument(!ids.isEmpty(), "Must specify at least one row key (ID) to fetch");
            // if (verbose)
            // LOG.info(String.format("%s : Query ids '%s' with predicates '%s'", columnFamily.getName(), ids, predicates));
            RowSliceQuery<K, ByteBuffer> rowQuery = prepareQuery();
            try {
                Map<K, Integer> counts = rowQuery.getColumnCounts().execute().getResult();
                // if (verbose)
                // LOG.info(String.format("%s : Query ids '%s' with predicates '%s' result='%s'", columnFamily.getName(), ids, predicates, counts));
                return counts;
            } catch (Exception e) {
                throw new PersistenceException("Error executing query", e);
            }
        }
        /**
         * Build the row-slice query, translating predicates into a column
         * range through the composite mapping.
         * NOTE(review): unlike newQuery(), readConsitency is passed without a
         * null check here — confirm it is always configured.
         */
        private RowSliceQuery<K, ByteBuffer> prepareQuery() {
            RowSliceQuery<K, ByteBuffer> rowQuery = keyspace.prepareQuery(columnFamily).setConsistencyLevel(readConsitency)
                .getRowSlice(ids);
            if (predicates != null && !predicates.isEmpty()) {
                ByteBuffer[] endpoints = entityMapper.getQueryEndpoints(predicates);
                rowQuery = rowQuery.withColumnRange(
                    new RangeBuilder()
                        .setStart(endpoints[0])
                        .setEnd(endpoints[1])
                        .setLimit(columnLimit)
                        .build());
            }
            return rowQuery;
        }
    };
}
}
| 8,131 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/ColumnMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.util.Iterator;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.model.Column;
/**
 * Strategy for mapping a single entity field to and from Cassandra columns.
 * Implementations visible elsewhere in this package handle leaf values,
 * nested composites, maps and sets.
 */
public interface ColumnMapper {
    /**
     * @return the column name used to store this field
     */
    public String getColumnName();
    /**
     * Write this field's value for {@code entity} into the mutation.
     *
     * @param prefix dotted path prefix for nested (composite) fields
     * @return true if set, false if skipped due to null value for nullable field
     * @throws IllegalArgumentException if value is null and field is NOT nullable
     */
    public boolean fillMutationBatch(Object entity, ColumnListMutation<String> clm, String prefix) throws Exception;
    /**
     * Populate this field on {@code entity} from a stored column.
     *
     * @param name remaining components of the dotted column-name path
     * @return true if set, false if skipped due to non-existent column for nullable field
     * @throws IllegalArgumentException if value is null and field is NOT nullable
     */
    public boolean setField(Object entity, Iterator<String> name, Column<String> column) throws Exception;
    /**
     * Perform a validation step either before persisting or after loading
     * @throws Exception
     */
    public void validate(Object entity) throws Exception;
    /**
     * Return the reflective field associated with this mapper
     */
    public Field getField();
}
| 8,132 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/LifecycleEvents.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Method;
import javax.persistence.PostLoad;
import javax.persistence.PostPersist;
import javax.persistence.PostRemove;
import javax.persistence.PrePersist;
import javax.persistence.PreRemove;
import com.google.common.base.Preconditions;
/**
 * Discovers and dispatches JPA-style lifecycle callback methods
 * (@PrePersist, @PostPersist, @PreRemove, @PostRemove, @PostLoad) declared
 * on an entity class. At most one method may carry each annotation; the
 * on*() dispatchers are no-ops when no method is registered.
 *
 * Refactored: the five byte-identical registration snippets in the
 * constructor are collapsed into a single claim() helper; behavior and
 * error messages are unchanged.
 *
 * @param <T> entity type being inspected
 */
public class LifecycleEvents<T> {
    private final Class<T> clazz;
    private Method prePersist;
    private Method postPersist;
    private Method postRemove;
    private Method preRemove;
    private Method postLoad;
    public LifecycleEvents(Class<T> clazz) {
        this.clazz = clazz;
        // One pass over declared methods; claim() rejects duplicates and
        // makes each callback accessible for reflective invocation.
        for (Method method : this.clazz.getDeclaredMethods()) {
            if (method.isAnnotationPresent(PrePersist.class)) {
                prePersist = claim(prePersist, method, "PrePersist");
            }
            if (method.isAnnotationPresent(PostPersist.class)) {
                postPersist = claim(postPersist, method, "PostPersist");
            }
            if (method.isAnnotationPresent(PostRemove.class)) {
                postRemove = claim(postRemove, method, "PostRemove");
            }
            if (method.isAnnotationPresent(PreRemove.class)) {
                preRemove = claim(preRemove, method, "PreRemove");
            }
            if (method.isAnnotationPresent(PostLoad.class)) {
                postLoad = claim(postLoad, method, "PostLoad");
            }
        }
    }
    /**
     * Verify the slot for an annotation is still empty, make the method
     * invocable, and return it for assignment.
     *
     * @throws IllegalStateException if a method was already registered
     */
    private static Method claim(Method existing, Method method, String annotationName) {
        Preconditions.checkState(existing == null, "Duplicate " + annotationName + " annotation on " + method.getName());
        method.setAccessible(true);
        return method;
    }
    /** Invoke the @PrePersist callback, if one is declared. */
    public void onPrePersist(T obj) throws Exception {
        if (prePersist != null) {
            prePersist.invoke(obj);
        }
    }
    /** Invoke the @PostPersist callback, if one is declared. */
    public void onPostPersist(T obj) throws Exception {
        if (postPersist != null) {
            postPersist.invoke(obj);
        }
    }
    /** Invoke the @PreRemove callback, if one is declared. */
    public void onPreRemove(T obj) throws Exception {
        if (preRemove != null) {
            preRemove.invoke(obj);
        }
    }
    /** Invoke the @PostRemove callback, if one is declared. */
    public void onPostRemove(T obj) throws Exception {
        if (postRemove != null) {
            postRemove.invoke(obj);
        }
    }
    /** Invoke the @PostLoad callback, if one is declared. */
    public void onPostLoad(T obj) throws Exception {
        if (postLoad != null) {
            postLoad.invoke(obj);
        }
    }
}
| 8,133 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/NativeQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;
import com.netflix.astyanax.model.Equality;
import com.netflix.astyanax.query.ColumnPredicate;
/**
 * SQL'ish fluent API for defining a query. This is mainly used by the various
 * entity mappers to query for a subset of columns. Each entity mapper stores
 * data differently and will use the predicates accumulated here to make the
 * correct lower level query.
 *
 * The protected fields (ids, columnNames, predicates, columnLimit) form the
 * contract consumed by concrete subclasses.
 *
 * @author elandau
 *
 * @param <T> entity type returned by the query
 * @param <K> row key type
 */
public abstract class NativeQuery<T, K> {
    protected List<K> ids = Lists.newArrayList();           // row keys to fetch
    protected Collection<Object> columnNames;               // explicit column slice, if any
    protected List<ColumnPredicate> predicates;             // column range predicates (lazily created)
    protected int columnLimit = Integer.MAX_VALUE;          // max columns per row
    /**
     * Refine query for row key
     * @author elandau
     *
     */
    public class NativeIdQuery {
        /** Add all keys in the collection to the id set. */
        public NativeQuery<T, K> in(Collection<K> keys) {
            ids.addAll(keys);
            return NativeQuery.this;
        }
        /** Add the given keys to the id set. */
        public NativeQuery<T, K> in(K... keys) {
            ids.addAll(Lists.newArrayList(keys));
            return NativeQuery.this;
        }
        /** Add a single key to the id set. */
        public NativeQuery<T, K> equal(K key) {
            ids.add(key);
            return NativeQuery.this;
        }
    }
    /**
     * Refine query for column range or slice
     * @author elandau
     *
     */
    public class NativeColumnQuery {
        private ColumnPredicate predicate = new ColumnPredicate();
        public NativeColumnQuery(String name) {
            predicate.setName(name);
        }
        /** Select an explicit set of column names instead of a range. */
        public NativeQuery<T, K> in(Collection<Object> names) {
            columnNames = names;
            return NativeQuery.this;
        }
        public NativeQuery<T, K> equal(Object value) {
            return addPredicate(predicate.setOp(Equality.EQUAL).setValue(value));
        }
        public NativeQuery<T, K> greaterThan(Object value) {
            return addPredicate(predicate.setOp(Equality.GREATER_THAN).setValue(value));
        }
        public NativeQuery<T, K> lessThan(Object value) {
            return addPredicate(predicate.setOp(Equality.LESS_THAN).setValue(value));
        }
        public NativeQuery<T, K> greaterThanEqual(Object value) {
            return addPredicate(predicate.setOp(Equality.GREATER_THAN_EQUALS).setValue(value));
        }
        public NativeQuery<T, K> lessThanEqual(Object value) {
            return addPredicate(predicate.setOp(Equality.LESS_THAN_EQUALS).setValue(value));
        }
    }
    /** Begin refining the row-key portion of the query. */
    public NativeIdQuery whereId() {
        return new NativeIdQuery();
    }
    /** Begin refining a predicate on the named column. */
    public NativeColumnQuery whereColumn(String name) {
        return new NativeColumnQuery(name);
    }
    /** Cap the number of columns returned per row. */
    public NativeQuery<T,K> limit(int columnLimit) {
        this.columnLimit = columnLimit;
        return this;
    }
    // Lazily create the predicate list on first use.
    private NativeQuery<T, K> addPredicate(ColumnPredicate predicate) {
        if (predicates == null) {
            predicates = Lists.newArrayList();
        }
        predicates.add(predicate);
        return this;
    }
    /**
     * Return a single entity (or first) response
     * @return
     * @throws Exception
     */
    public abstract T getSingleResult() throws Exception;
    /**
     * Return a result set of entities
     * @return
     * @throws Exception
     */
    public abstract Collection<T> getResultSet() throws Exception;
    /**
     * Get the result set as a mapping of the id field to a collection of entities. This
     * is useful for a multi-get scenario where it is desirable to group all the 'entities'
     * within a row.
     *
     * @return
     * @throws Exception
     */
    public abstract Map<K, Collection<T>> getResultSetById() throws Exception;
    /**
     * Get the column count for each id in the query without sending data back
     * to the client.
     * @return
     * @throws Exception
     */
    public abstract Map<K, Integer> getResultSetCounts() throws Exception;
}
| 8,134 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/EntityMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.PersistenceException;
import org.apache.commons.lang.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
/**
 * Utility class to map between a root @Entity class and the Cassandra data
 * model: one column per mapped field, with dotted column names for nesting.
 *
 * @param <T> entity type
 * @param <K> rowKey type
 */
public class EntityMapper<T, K> {
    private final Class<T> clazz;
    private final Integer ttl;                          // default TTL (ctor arg or class-level @TTL); may be null
    private final Method ttlMethod;                     // optional per-entity @TTL method; overrides the default
    private final Field idField;                        // the single @Id field
    private final Map<String, ColumnMapper> columnList; // column name -> field mapper
    private final ColumnMapper uniqueColumn;            // at most one @Column(unique=true) field
    private final String entityName;
    /**
     * Inspect the entity class via reflection and build one ColumnMapper per
     * annotated field.
     *
     * @param clazz entity class; must be annotated with @Entity
     * @param ttl   default TTL in seconds, or null; overridden by class-level @TTL
     * @throws IllegalArgumentException
     *      if clazz is NOT annotated with @Entity
     *      if column name contains illegal char (like dot)
     */
    public EntityMapper(Class<T> clazz, Integer ttl) {
        this.clazz = clazz;
        // clazz should be annotated with @Entity
        Entity entityAnnotation = clazz.getAnnotation(Entity.class);
        if(entityAnnotation == null)
            throw new IllegalArgumentException("class is NOT annotated with @javax.persistence.Entity: " + clazz.getName());
        entityName = MappingUtils.getEntityName(entityAnnotation, clazz);
        // TTL value from constructor or class-level annotation
        Integer tmpTtlValue = ttl;
        if(tmpTtlValue == null) {
            // constructor value has higher priority
            // try @TTL annotation at entity/class level.
            // it doesn't make sense to support @TTL annotation at individual column level.
            TTL ttlAnnotation = clazz.getAnnotation(TTL.class);
            if(ttlAnnotation != null) {
                int ttlAnnotationValue = ttlAnnotation.value();
                Preconditions.checkState(ttlAnnotationValue > 0, "cannot define non-positive value for TTL annotation at class level: " + ttlAnnotationValue);
                tmpTtlValue = ttlAnnotationValue;
            }
        }
        this.ttl = tmpTtlValue;
        // TTL method: at most one declared method may carry @TTL
        Method tmpTtlMethod = null;
        for (Method method : this.clazz.getDeclaredMethods()) {
            if (method.isAnnotationPresent(TTL.class)) {
                Preconditions.checkState(tmpTtlMethod == null, "Duplicate TTL method annotation on " + method.getName());
                tmpTtlMethod = method;
                tmpTtlMethod.setAccessible(true);
            }
        }
        this.ttlMethod = tmpTtlMethod;
        Field[] declaredFields = clazz.getDeclaredFields();
        columnList = Maps.newHashMapWithExpectedSize(declaredFields.length);
        Set<String> usedColumnNames = Sets.newHashSet();
        Field tmpIdField = null;
        ColumnMapper tempUniqueMapper = null;
        for (Field field : declaredFields) {
            Id idAnnotation = field.getAnnotation(Id.class);
            if(idAnnotation != null) {
                Preconditions.checkArgument(tmpIdField == null, "there are multiple fields with @Id annotation");
                field.setAccessible(true);
                tmpIdField = field;
            }
            Column columnAnnotation = field.getAnnotation(Column.class);
            if ((columnAnnotation != null)) {
                field.setAccessible(true);
                ColumnMapper columnMapper = null;
                // A field whose own type is @Entity-annotated nests as a composite.
                Entity compositeAnnotation = field.getType().getAnnotation(Entity.class);
                if (Map.class.isAssignableFrom(field.getType())) {
                    columnMapper = new MapColumnMapper(field);
                } else if (Set.class.isAssignableFrom(field.getType())) {
                    columnMapper = new SetColumnMapper(field);
                } else if(compositeAnnotation == null) {
                    if (columnAnnotation.unique()) {
                        Preconditions.checkArgument(tempUniqueMapper == null, "can't have multiple unique columns '" + field.getName() + "'");
                        // NOTE(review): columnMapper stays null in this branch, so the
                        // columnMapper.getColumnName() calls below would NPE for a
                        // @Column(unique=true) field — confirm intended handling.
                        tempUniqueMapper = new LeafColumnMapper(field);
                    }
                    else {
                        columnMapper = new LeafColumnMapper(field);
                    }
                } else {
                    columnMapper = new CompositeColumnMapper(field);
                }
                // NOTE(review): names are stored lowercased but the contains() lookup
                // uses the original casing, so the "case-insensitive" duplicate check
                // only catches exact-case collisions against the lowercased form.
                Preconditions.checkArgument(!usedColumnNames.contains(columnMapper.getColumnName()),
                    String.format("duplicate case-insensitive column name: %s", columnMapper.getColumnName().toLowerCase()));
                columnList.put(columnMapper.getColumnName(), columnMapper);
                usedColumnNames.add(columnMapper.getColumnName().toLowerCase());
            }
        }
        Preconditions.checkNotNull(tmpIdField, "there are no field with @Id annotation");
        //Preconditions.checkArgument(tmpIdField.getClass().equals(K.getClass()), String.format("@Id field type (%s) doesn't match generic type K (%s)", tmpIdField.getClass(), K.getClass()));
        idField = tmpIdField;
        uniqueColumn = tempUniqueMapper;
    }
    /**
     * Serialize {@code entity} into the mutation batch: one column per mapped
     * field under the row keyed by the @Id value, using the resolved TTL.
     */
    public void fillMutationBatch(MutationBatch mb, ColumnFamily<K, String> columnFamily, T entity) {
        try {
            @SuppressWarnings("unchecked")
            K rowKey = (K) idField.get(entity);
            ColumnListMutation<String> clm = mb.withRow(columnFamily, rowKey);
            clm.setDefaultTtl(getTtl(entity));
            for (ColumnMapper mapper : columnList.values()) {
                mapper.fillMutationBatch(entity, clm, "");
            }
        } catch(Exception e) {
            throw new PersistenceException("failed to fill mutation batch", e);
        }
    }
    // Resolve the effective TTL: the @TTL method (if any) wins over the default.
    private Integer getTtl(T entity) throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
        Integer retTtl = this.ttl;
        // TTL method has higher priority
        if(ttlMethod != null) {
            Object retobj = ttlMethod.invoke(entity);
            retTtl = (Integer) retobj;
        }
        return retTtl;
    }
    /**
     * Instantiate the entity class, assign its @Id, populate each mapped field
     * from the stored columns (dotted names route to nested mappers), then run
     * per-column validation. Requires a no-arg constructor on the entity.
     */
    public T constructEntity(K id, ColumnList<String> cl) {
        try {
            T entity = clazz.newInstance();
            idField.set(entity, id);
            for (com.netflix.astyanax.model.Column<String> column : cl) {
                List<String> name = Lists.newArrayList(StringUtils.split(column.getName(), "."));
                setField(entity, name.iterator(), column);
            }
            for (ColumnMapper column : columnList.values()) {
                column.validate(entity);
            }
            return entity;
        } catch(Exception e) {
            throw new PersistenceException("failed to construct entity", e);
        }
    }
    // Route one stored column to its field mapper by the head of the dotted path;
    // unknown names are silently ignored.
    void setField(T entity, Iterator<String> name, com.netflix.astyanax.model.Column<String> column) throws Exception {
        String fieldName = name.next();
        ColumnMapper mapper = this.columnList.get(fieldName);
        if (mapper != null)
            mapper.setField(entity, name, column);
    }
    /** Read the @Id value from an entity instance. */
    @SuppressWarnings("unchecked")
    public K getEntityId(T entity) throws Exception {
        return (K)idField.get(entity);
    }
    @VisibleForTesting
    Field getId() {
        return idField;
    }
    @VisibleForTesting
    Collection<ColumnMapper> getColumnList() {
        return columnList.values();
    }
    /** @return the entity name (from @Entity.name or the lowercased class name) */
    public String getEntityName() {
        return entityName;
    }
    @Override
    public String toString() {
        return String.format("EntityMapper(%s)", clazz);
    }
}
| 8,135 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/MappingUtils.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import javax.persistence.Entity;
import org.apache.commons.lang.StringUtils;
import com.netflix.astyanax.serializers.SerializerTypeInferer;
/**
 * Reflection helpers shared by the entity mappers: serializer resolution for
 * fields and entity-name derivation from the @Entity annotation.
 */
public class MappingUtils {
    /**
     * Resolve the Astyanax serializer for a field: an explicit @Serializer
     * annotation wins (its class must implement com.netflix.astyanax.Serializer
     * and expose a public static get() factory); otherwise the serializer is
     * inferred from the field's declared type.
     */
    static com.netflix.astyanax.Serializer<?> getSerializerForField(Field field) {
        Serializer serializerAnnotation = field.getAnnotation(Serializer.class);
        if (serializerAnnotation == null) {
            // no explicit annotation: infer from the field object type
            return SerializerTypeInferer.getSerializer(field.getType());
        }
        final Class<?> serializerClazz = serializerAnnotation.value();
        // the annotated class must actually be a serializer
        if (!(com.netflix.astyanax.Serializer.class.isAssignableFrom(serializerClazz))) {
            throw new RuntimeException("annotated serializer class is not a subclass of com.netflix.astyanax.Serializer. " + serializerClazz.getCanonicalName());
        }
        // obtain the singleton via the conventional public static get() method
        try {
            Method getInstanceMethod = serializerClazz.getMethod("get");
            return (com.netflix.astyanax.Serializer<?>) getInstanceMethod.invoke(null);
        } catch (Exception e) {
            throw new RuntimeException("Failed to get or invoke public static get() method", e);
        }
    }
    /**
     * Derive the entity name: @Entity.name when present and non-empty,
     * otherwise the lowercased simple class name.
     */
    static String getEntityName(Entity entityAnnotation, Class<?> clazz) {
        String name = entityAnnotation.name();
        if (name != null && !name.isEmpty()) {
            return name;
        }
        return StringUtils.substringAfterLast(clazz.getName(), ".").toLowerCase();
    }
}
| 8,136 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/LeafColumnMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.util.Iterator;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
/**
 * ColumnMapper for a simple (non-composite, non-collection) field: the field
 * value is written as a single column using a serializer resolved at
 * construction time (explicit @Serializer annotation or inferred from the
 * field type).
 */
class LeafColumnMapper extends AbstractColumnMapper {
    private final Serializer<?> serializer;

    LeafColumnMapper(final Field field) {
        super(field);
        this.serializer = MappingUtils.getSerializerForField(field);
    }

    @Override
    public String getColumnName() {
        return columnName;
    }

    Serializer<?> getSerializer() {
        return serializer;
    }

    @SuppressWarnings("unchecked")
    @Override
    public boolean fillMutationBatch(Object entity, ColumnListMutation<String> clm, String prefix) throws Exception {
        final Object value = field.get(entity);
        if (value != null) {
            // TODO: suppress the unchecked raw type now.
            // we have to use the raw type to avoid compiling error
            @SuppressWarnings("rawtypes")
            final Serializer rawSerializer = serializer;
            clm.putColumn(prefix + columnName, value, rawSerializer, null);
            return true;
        }
        if (!columnAnnotation.nullable()) {
            throw new IllegalArgumentException("cannot write non-nullable column with null value: " + columnName);
        }
        return false; // nullable and null: skip the column
    }

    @Override
    public boolean setField(Object entity, Iterator<String> name, com.netflix.astyanax.model.Column<String> column) throws Exception {
        if (name.hasNext()) {
            // dotted path has more components: this leaf is not the target
            return false;
        }
        field.set(entity, column.getValue(serializer));
        return true;
    }

    @Override
    public void validate(Object entity) throws Exception {
        final boolean missingRequired = (field.get(entity) == null) && !columnAnnotation.nullable();
        if (missingRequired) {
            throw new IllegalArgumentException("cannot find non-nullable column: " + columnName);
        }
    }
}
| 8,137 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/Serializer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Field-level annotation that selects a custom serializer for the annotated
 * entity field, overriding the serializer that would otherwise be inferred
 * from the field's Java type.
 */
@Documented
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
public @interface Serializer {
    /**
     * The custom Serializer implementation class to use for this field.
     * (Note: the value is a {@code Class} object, not a fully qualified name string.)
     */
    Class<?> value();
}
| 8,138 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/CompositeEntityMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.PersistenceException;
import org.apache.commons.lang.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Collections2;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Equality;
import com.netflix.astyanax.query.ColumnPredicate;
/**
* The composite entity mapper maps a Pojo to a composite column structure where
* the row key represents the Pojo ID, each @Column is a component of the composite
* and the final @Column is the column value.
* @Column for the value.
*
* @author elandau
*
* @param <T>
* @param <K>
*/
/**
 * The composite entity mapper maps a Pojo to a composite column structure where
 * the row key represents the Pojo ID, each @Column is a component of the composite
 * and the final @Column is the column value.
 *
 * @author elandau
 *
 * @param <T> entity type
 * @param <K> row key type
 */
public class CompositeEntityMapper<T, K> {
    /** Entity class being mapped. */
    private final Class<T> clazz;

    /** Default TTL in seconds (from constructor or class-level @TTL); null means none. */
    private final Integer ttl;

    /** Optional @TTL-annotated method supplying a per-entity TTL; overrides {@link #ttl}. */
    private final Method ttlMethod;

    /** ID field mapper (maps to the row key). */
    final FieldMapper<?> idMapper;

    /** Entity name derived from the @Entity annotation or the class name. */
    private final String entityName;

    /** Mappers for the composite components, in declaration order (value mapper excluded). */
    private List<FieldMapper<?>> components = Lists.newArrayList();

    /** Names of all @Column fields; used to validate query predicates. */
    private Set<String> validNames = Sets.newHashSet();

    /** Mapper for the value part of the entity (the last declared @Column). */
    private FieldMapper<?> valueMapper;

    /** Initial buffer size used when building composite byte buffers. */
    private int bufferSize = 64;

    /**
     * @param clazz entity class; must be annotated with @Entity
     * @param ttl   default TTL in seconds, or null to fall back to the class-level @TTL annotation
     * @param prefix serialized prefix applied to the row key
     * @throws IllegalArgumentException
     *             if clazz is NOT annotated with @Entity,
     *             if there is no (or more than one) @Id field,
     *             or if there are too few @Column fields to form components plus a value
     */
    public CompositeEntityMapper(Class<T> clazz, Integer ttl, ByteBuffer prefix) {
        this.clazz = clazz;

        // clazz should be annotated with @Entity
        Entity entityAnnotation = clazz.getAnnotation(Entity.class);
        if (entityAnnotation == null)
            throw new IllegalArgumentException("class is NOT annotated with @javax.persistence.Entity: " + clazz.getName());

        entityName = MappingUtils.getEntityName(entityAnnotation, clazz);

        // TTL value from constructor or class-level annotation; the constructor value wins.
        Integer tmpTtlValue = ttl;
        if (tmpTtlValue == null) {
            // try @TTL annotation at entity/class level.
            // it doesn't make sense to support @TTL annotation at individual column level.
            TTL ttlAnnotation = clazz.getAnnotation(TTL.class);
            if (ttlAnnotation != null) {
                int ttlAnnotationValue = ttlAnnotation.value();
                Preconditions.checkState(ttlAnnotationValue > 0, "cannot define non-positive value for TTL annotation at class level: " + ttlAnnotationValue);
                tmpTtlValue = ttlAnnotationValue;
            }
        }
        this.ttl = tmpTtlValue;

        // Locate the (at most one) @TTL-annotated method that supplies a per-entity TTL.
        Method tmpTtlMethod = null;
        for (Method method : this.clazz.getDeclaredMethods()) {
            if (method.isAnnotationPresent(TTL.class)) {
                Preconditions.checkState(tmpTtlMethod == null, "Duplicate TTL method annotation on " + method.getName());
                tmpTtlMethod = method;
                tmpTtlMethod.setAccessible(true);
            }
        }
        this.ttlMethod = tmpTtlMethod;

        Field[] declaredFields = clazz.getDeclaredFields();
        FieldMapper tempIdMapper = null;
        for (Field field : declaredFields) {
            // Should only have one id field and it should map to the row key
            Id idAnnotation = field.getAnnotation(Id.class);
            if (idAnnotation != null) {
                Preconditions.checkArgument(tempIdMapper == null, "there are multiple fields with @Id annotation");
                field.setAccessible(true);
                tempIdMapper = new FieldMapper(field, prefix);
            }

            // Composite part or the value
            Column columnAnnotation = field.getAnnotation(Column.class);
            if (columnAnnotation != null) {
                field.setAccessible(true);
                FieldMapper fieldMapper = new FieldMapper(field);
                components.add(fieldMapper);
                validNames.add(fieldMapper.getName());
            }
        }
        Preconditions.checkNotNull(tempIdMapper, "there are no field with @Id annotation");
        idMapper = tempIdMapper;

        // Bug fix: the original code called Preconditions.checkNotNull on a boolean,
        // which can never fail; checkArgument actually enforces the constraint.
        Preconditions.checkArgument(components.size() > 2, "there should be at least 2 component columns and a value");

        // Last one is always treated as the 'value'
        valueMapper = components.remove(components.size() - 1);
    }

    /**
     * Add a single put mutation for this entity: row key from the @Id field, column
     * name built from the component fields, column value from the value field.
     */
    void fillMutationBatch(MutationBatch mb, ColumnFamily<K, ByteBuffer> columnFamily, T entity) {
        try {
            @SuppressWarnings("unchecked")
            ColumnListMutation<ByteBuffer> clm = mb.withRow(columnFamily, (K) idMapper.getValue(entity));
            clm.setDefaultTtl(getTtl(entity));

            // The original code had a redundant nested try/catch here that double-wrapped
            // failures in PersistenceException; a single wrap is sufficient.
            ByteBuffer columnName = toColumnName(entity);
            ByteBuffer value = valueMapper.toByteBuffer(entity);
            clm.putColumn(columnName, value);
        } catch (Exception e) {
            throw new PersistenceException("failed to fill mutation batch", e);
        }
    }

    /** Add a delete mutation for the column represented by this entity. */
    void fillMutationBatchForDelete(MutationBatch mb, ColumnFamily<K, ByteBuffer> columnFamily, T entity) {
        try {
            @SuppressWarnings("unchecked")
            ColumnListMutation<ByteBuffer> clm = mb.withRow(columnFamily, (K) idMapper.getValue(entity));
            clm.deleteColumn(toColumnName(entity));
        } catch (Exception e) {
            throw new PersistenceException("failed to fill mutation batch", e);
        }
    }

    /**
     * @return the effective TTL for this entity; the @TTL method, if present,
     *         has priority over the configured default.
     */
    private Integer getTtl(T entity) throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
        Integer retTtl = this.ttl;
        // TTL method has higher priority
        if (ttlMethod != null) {
            Object retobj = ttlMethod.invoke(entity);
            retTtl = (Integer) retobj;
        }
        return retTtl;
    }

    /**
     * Return the column name byte buffer for this entity, built by concatenating
     * each component field into a CompositeType structure.
     *
     * @param obj entity instance
     * @return serialized composite column name
     */
    private ByteBuffer toColumnName(Object obj) {
        SimpleCompositeBuilder composite = new SimpleCompositeBuilder(bufferSize, Equality.EQUAL);

        // Iterate through each component and add to a CompositeType structure
        for (FieldMapper<?> mapper : components) {
            try {
                composite.addWithoutControl(mapper.toByteBuffer(obj));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        return composite.get();
    }

    /**
     * Construct an entity object from a row key and a single composite column.
     *
     * @param id     row key
     * @param column composite column holding the component values and the value part
     * @return the reconstructed entity
     */
    T constructEntity(K id, com.netflix.astyanax.model.Column<ByteBuffer> column) {
        try {
            // First, construct the parent class and give it an id
            T entity = clazz.newInstance();
            idMapper.setValue(entity, id);
            setEntityFieldsFromColumnName(entity, column.getRawName().duplicate());
            valueMapper.setField(entity, column.getByteBufferValue().duplicate());
            return entity;
        } catch (Exception e) {
            throw new PersistenceException("failed to construct entity", e);
        }
    }

    /**
     * Construct an entity from a CQL result row whose columns are, in order:
     * the row key, each component, then the value.
     */
    @SuppressWarnings("unchecked")
    T constructEntityFromCql(ColumnList<ByteBuffer> cl) {
        try {
            T entity = clazz.newInstance();

            // First, construct the parent class and give it an id
            K id = (K) idMapper.fromByteBuffer(Iterables.getFirst(cl, null).getByteBufferValue());
            idMapper.setValue(entity, id);

            Iterator<com.netflix.astyanax.model.Column<ByteBuffer>> columnIter = cl.iterator();
            columnIter.next(); // skip the id column; it was consumed above

            for (FieldMapper<?> component : components) {
                component.setField(entity, columnIter.next().getByteBufferValue());
            }

            valueMapper.setField(entity, columnIter.next().getByteBufferValue());
            return entity;
        } catch (Exception e) {
            throw new PersistenceException("failed to construct entity", e);
        }
    }

    @SuppressWarnings("unchecked")
    public K getEntityId(T entity) throws Exception {
        return (K) idMapper.getValue(entity);
    }

    @VisibleForTesting
    Field getId() {
        return idMapper.field;
    }

    public String getEntityName() {
        return entityName;
    }

    @Override
    public String toString() {
        return String.format("EntityMapper(%s)", clazz);
    }

    /** @return the Cassandra comparator type name of the row key. */
    public String getKeyType() {
        return idMapper.getSerializer().getComparatorType().getTypeName();
    }

    /**
     * Return an entity object reconstructed from the given row key and column.
     *
     * @param id row key
     * @param c  composite column
     * @return the reconstructed entity (as Object)
     */
    Object fromColumn(K id, com.netflix.astyanax.model.Column<ByteBuffer> c) {
        try {
            // Allocate a new entity
            Object entity = clazz.newInstance();
            idMapper.setValue(entity, id);
            setEntityFieldsFromColumnName(entity, c.getRawName().duplicate());
            valueMapper.setField(entity, c.getByteBufferValue().duplicate());
            return entity;
        } catch (Exception e) {
            throw new PersistenceException("failed to construct entity", e);
        }
    }

    /**
     * Decode a serialized composite column name and populate each component field
     * of the entity. The wire format per component is: 2-byte length, data bytes,
     * then one end-of-component control byte.
     *
     * @param entity     entity whose component fields are set
     * @param columnName serialized composite column name (consumed)
     * @throws IllegalArgumentException
     * @throws IllegalAccessException
     */
    void setEntityFieldsFromColumnName(Object entity, ByteBuffer columnName) throws IllegalArgumentException, IllegalAccessException {
        // Iterate through components in order and set fields
        for (FieldMapper<?> component : components) {
            ByteBuffer data = getWithShortLength(columnName);
            if (data != null) {
                if (data.remaining() > 0) {
                    component.setField(entity, data);
                }
                byte end_of_component = columnName.get();
                if (end_of_component != Equality.EQUAL.toByte()) {
                    throw new RuntimeException("Invalid composite column.  Expected END_OF_COMPONENT.");
                }
            } else {
                throw new RuntimeException("Missing component data in composite type");
            }
        }
    }

    /**
     * Return the cassandra comparator type for this composite structure,
     * e.g. {@code CompositeType(UTF8Type,LongType)}.
     *
     * @return comparator type declaration string
     */
    public String getComparatorType() {
        StringBuilder sb = new StringBuilder();
        sb.append("CompositeType(");
        sb.append(StringUtils.join(
                Collections2.transform(components, new Function<FieldMapper<?>, String>() {
                    public String apply(FieldMapper<?> input) {
                        return input.serializer.getComparatorType().getTypeName();
                    }
                }),
                ","));
        sb.append(")");
        return sb.toString();
    }

    /** Read an unsigned 16-bit big-endian length from the buffer. */
    public static int getShortLength(ByteBuffer bb) {
        int length = (bb.get() & 0xFF) << 8;
        return length | (bb.get() & 0xFF);
    }

    /** Read a 16-bit length then return a view over that many following bytes. */
    public static ByteBuffer getWithShortLength(ByteBuffer bb) {
        int length = getShortLength(bb);
        return getBytes(bb, length);
    }

    /**
     * Return a view over the next {@code length} bytes of {@code bb} and advance
     * {@code bb}'s position past them. No data is copied.
     */
    public static ByteBuffer getBytes(ByteBuffer bb, int length) {
        ByteBuffer copy = bb.duplicate();
        copy.limit(copy.position() + length);
        bb.position(bb.position() + length);
        return copy;
    }

    /** @return the Cassandra comparator type name of the value part. */
    String getValueType() {
        return valueMapper.getSerializer().getComparatorType().getTypeName();
    }

    /**
     * Build the [start, end] composite range endpoints for a query constrained by
     * the given predicates. Components are applied in declaration order.
     *
     * @param predicates predicates on component fields (names must exist on the entity)
     * @return a two-element array: { start, end }
     */
    ByteBuffer[] getQueryEndpoints(Collection<ColumnPredicate> predicates) {
        // Convert to multimap for easy lookup
        ArrayListMultimap<Object, ColumnPredicate> lookup = ArrayListMultimap.create();
        for (ColumnPredicate predicate : predicates) {
            Preconditions.checkArgument(validNames.contains(predicate.getName()), "Field '" + predicate.getName() + "' does not exist in the entity " + clazz.getCanonicalName());
            lookup.put(predicate.getName(), predicate);
        }

        SimpleCompositeBuilder start = new SimpleCompositeBuilder(bufferSize, Equality.GREATER_THAN_EQUALS);
        SimpleCompositeBuilder end = new SimpleCompositeBuilder(bufferSize, Equality.LESS_THAN_EQUALS);

        // Iterate through components in order while applying predicate to 'start' and 'end'
        for (FieldMapper<?> mapper : components) {
            for (ColumnPredicate p : lookup.get(mapper.getName())) {
                try {
                    applyPredicate(mapper, start, end, p);
                } catch (Exception e) {
                    throw new RuntimeException(String.format("Failed to serialize predicate '%s'", p.toString()), e);
                }
            }
        }

        return new ByteBuffer[]{start.get(), end.get()};
    }

    /**
     * Apply a single predicate to the range endpoints, honoring the component's
     * sort direction (for descending components, greater-than bounds the end and
     * less-than bounds the start).
     */
    void applyPredicate(FieldMapper<?> mapper, SimpleCompositeBuilder start, SimpleCompositeBuilder end, ColumnPredicate predicate) {
        ByteBuffer bb = mapper.valueToByteBuffer(predicate.getValue());

        switch (predicate.getOp()) {
        case EQUAL:
            start.addWithoutControl(bb);
            end.addWithoutControl(bb);
            break;
        case GREATER_THAN:
        case GREATER_THAN_EQUALS:
            if (mapper.isAscending())
                start.add(bb, predicate.getOp());
            else
                end.add(bb, predicate.getOp());
            break;
        case LESS_THAN:
        case LESS_THAN_EQUALS:
            if (mapper.isAscending())
                end.add(bb, predicate.getOp());
            else
                start.add(bb, predicate.getOp());
            break;
        }
    }
}
| 8,139 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/TTL.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the time-to-live (TTL), in seconds, applied when persisting an entity.
 * Usable on a class (entity-wide default), on a no-arg method whose return value
 * supplies a per-instance TTL, or on a field.
 */
@Documented
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
public @interface TTL {
    /**
     * The time-to-live in seconds with which this particular field should be persisted.
     * The default is negative (i.e. "not set") so that the same annotation can also be
     * used at method level, where the annotated method's return value supplies the TTL.
     */
    int value() default -1;
}
| 8,140 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/MapColumnMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import com.google.common.collect.Maps;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.serializers.SerializerTypeInferer;
public class MapColumnMapper extends AbstractColumnMapper {
private final Class<?> keyClazz;
private final Class<?> valueClazz;
private final Serializer<?> keySerializer;
private final Serializer<Object> valueSerializer;
public MapColumnMapper(Field field) {
super(field);
ParameterizedType stringListType = (ParameterizedType) field.getGenericType();
this.keyClazz = (Class<?>) stringListType.getActualTypeArguments()[0];
this.keySerializer = SerializerTypeInferer.getSerializer(this.keyClazz);
this.valueClazz = (Class<?>) stringListType.getActualTypeArguments()[1];
this.valueSerializer = SerializerTypeInferer.getSerializer(this.valueClazz);
}
@Override
public String getColumnName() {
return this.columnName;
}
@Override
public boolean fillMutationBatch(Object entity, ColumnListMutation<String> clm, String prefix) throws Exception {
Map<?, ?> map = (Map<?, ?>) field.get(entity);
if (map == null) {
if (columnAnnotation.nullable())
return false; // skip
else
throw new IllegalArgumentException("cannot write non-nullable column with null value: " + columnName);
}
for (Entry<?, ?> entry : map.entrySet()) {
clm.putColumn(prefix + columnName + "." + entry.getKey().toString(), entry.getValue(), valueSerializer, null);
}
return true;
}
@Override
public boolean setField(Object entity, Iterator<String> name, com.netflix.astyanax.model.Column<String> column) throws Exception {
Map<Object, Object> map = (Map<Object, Object>) field.get(entity);
if (map == null) {
map = Maps.newLinkedHashMap();
field.set(entity, map);
}
String key = name.next();
if (name.hasNext())
return false;
map.put(keySerializer.fromByteBuffer(keySerializer.fromString(key)),
valueSerializer.fromByteBuffer(column.getByteBufferValue()));
return true;
}
@Override
public void validate(Object entity) throws Exception {
// TODO Auto-generated method stub
}
}
| 8,141 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/EntityManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import javax.persistence.PersistenceException;
import com.google.common.base.Function;
/**
* @param <T> entity type
* @param <K> rowKey type
*/
public interface EntityManager<T, K> {
    /**
     * write entity to cassandra with mapped rowId and columns
     * @param entity entity object
     */
    public void put(T entity) throws PersistenceException;
    /**
     * fetch whole row and construct entity object mapping from columns
     * @param id row key
     * @return entity object. null if not exist
     */
    public T get(K id) throws PersistenceException;
    /**
     * delete the whole row by id
     * @param id row key
     */
    public void delete(K id) throws PersistenceException;
    /**
     * remove an entire entity
     * @param entity the entity to remove; its mapped id determines the row
     */
    public void remove(T entity) throws PersistenceException;
    /**
     * @return Return all entities.
     *
     * @throws PersistenceException
     */
    public List<T> getAll() throws PersistenceException;
    /**
     * @return Get a set of entities
     * @param ids row keys of the entities to fetch
     * @throws PersistenceException
     */
    public List<T> get(Collection<K> ids) throws PersistenceException;
    /**
     * Delete a set of entities by their id
     * @param ids row keys of the entities to delete
     * @throws PersistenceException
     */
    public void delete(Collection<K> ids) throws PersistenceException;
    /**
     * Delete a set of entities
     * @param entities the entities to remove
     * @throws PersistenceException
     */
    public void remove(Collection<T> entities) throws PersistenceException;
    /**
     * Store a set of entities.
     * @param entities the entities to store
     * @throws PersistenceException
     */
    public void put(Collection<T> entities) throws PersistenceException;
    /**
     * Visit all entities.
     *
     * @param callback Callback when an entity is read. Note that the callback
     * may be called from multiple threads.
     * @throws PersistenceException
     */
    public void visitAll(Function<T, Boolean> callback) throws PersistenceException;
    /**
     * Execute a CQL query and return the found entities
     * @param cql the CQL query string to execute
     * @throws PersistenceException
     */
    public List<T> find(String cql) throws PersistenceException;
    /**
     * Execute a 'native' query using a simple API that adheres to cassandra's native
     * model of rows and columns.
     * @return a new native query builder
     */
    public NativeQuery<T, K> createNativeQuery();
    /**
     * Create the underlying storage for this entity. This should only be called
     * once when first creating store and not part of the normal startup sequence.
     * @param options storage creation options (implementation specific); may be null
     * @throws PersistenceException
     */
    public void createStorage(Map<String, Object> options) throws PersistenceException;
    /**
     * Delete the underlying storage for this entity.
     * @throws PersistenceException
     */
    public void deleteStorage() throws PersistenceException;
    /**
     * Truncate all data in the underlying storage
     * @throws PersistenceException
     */
    public void truncate() throws PersistenceException;
    /**
     * Commit the internal batch after multiple operations. Note that an entity
     * manager implementation may autocommit after each operation.
     * @throws PersistenceException
     */
    public void commit() throws PersistenceException;
}
| 8,142 |
0 | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-entity-mapper/src/main/java/com/netflix/astyanax/entitystore/CompositeColumnMapper.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.entitystore;
import java.lang.reflect.Field;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
/**
 * Column mapper for a nested @Entity field. The nested entity's own @Column fields
 * are flattened into dotted column names under this field's column name, recursing
 * into further nested entities as needed.
 */
class CompositeColumnMapper extends AbstractColumnMapper {

    /** The nested entity's class. */
    private final Class<?> clazz;
    /** Child mappers keyed by (case-sensitive) column name. */
    private final Map<String, ColumnMapper> columnList;
    /** Child mappers whose columns are declared non-nullable. */
    private final List<ColumnMapper> nonNullableFields;

    CompositeColumnMapper(final Field field) {
        super(field);
        this.clazz = field.getType();

        // The nested type must itself be annotated with @Entity.
        Entity entityAnnotation = clazz.getAnnotation(Entity.class);
        if (entityAnnotation == null) {
            throw new IllegalArgumentException("class is NOT annotated with @javax.persistence.Entity: " + clazz.getName());
        }

        columnList = Maps.newHashMapWithExpectedSize(clazz.getDeclaredFields().length);
        nonNullableFields = Lists.newArrayList();

        Set<String> seenLowerCaseNames = Sets.newHashSet();
        for (Field member : clazz.getDeclaredFields()) {
            Column memberAnnotation = member.getAnnotation(Column.class);
            if (memberAnnotation == null) {
                continue;
            }
            member.setAccessible(true);

            // A member that is itself an @Entity becomes a nested composite; otherwise a leaf.
            ColumnMapper mapper = (member.getType().getAnnotation(Entity.class) != null)
                    ? new CompositeColumnMapper(member)
                    : new LeafColumnMapper(member);

            String lowered = mapper.getColumnName().toLowerCase();
            Preconditions.checkArgument(!seenLowerCaseNames.contains(lowered),
                    String.format("duplicate case-insensitive column name: %s", mapper.getColumnName()));
            columnList.put(mapper.getColumnName(), mapper);
            seenLowerCaseNames.add(lowered);

            if (!memberAnnotation.nullable()) {
                nonNullableFields.add(mapper);
            }
        }
    }

    @Override
    public String toString() {
        return String.format("CompositeColumnMapper(%s)", clazz);
    }

    @Override
    public String getColumnName() {
        return columnName;
    }

    @Override
    public boolean fillMutationBatch(Object entity, ColumnListMutation<String> clm, String prefix) throws Exception {
        Object child = field.get(entity);
        if (child == null) {
            if (!columnAnnotation.nullable()) {
                throw new IllegalArgumentException("cannot write non-nullable column with null value: " + columnName);
            }
            return false; // skip. cannot write null column
        }

        String childPrefix = prefix + getColumnName() + ".";
        boolean wroteAny = false;
        for (ColumnMapper mapper : columnList.values()) {
            // A child counts as written only if it actually produced at least one column.
            wroteAny |= mapper.fillMutationBatch(child, clm, childPrefix);
        }
        return wroteAny;
    }

    @Override
    public boolean setField(Object entity, Iterator<String> name, com.netflix.astyanax.model.Column<String> column) throws Exception {
        Object child = field.get(entity);
        if (child == null) {
            // Lazily instantiate the nested entity the first time one of its columns arrives.
            child = clazz.newInstance();
            field.set(entity, child);
        }
        ColumnMapper mapper = this.columnList.get(name.next());
        return mapper != null && mapper.setField(child, name, column);
    }

    @Override
    public void validate(Object entity) throws Exception {
        Object child = field.get(entity);
        if (child == null) {
            if (!columnAnnotation.nullable()) {
                throw new IllegalArgumentException("cannot find non-nullable column: " + columnName);
            }
            return;
        }
        // Recurse only into the children that must be non-null.
        for (ColumnMapper required : this.nonNullableFields) {
            required.validate(child);
        }
    }
}
| 8,143 |
0 | Create_ds/conductor-community/metrics/src/test/java/com/netflix/conductor/contribs | Create_ds/conductor-community/metrics/src/test/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfigurationTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Primary;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.micrometer.MicrometerRegistry;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.prometheus.PrometheusConfig;
import io.micrometer.prometheus.PrometheusMeterRegistry;
import static org.junit.Assert.assertTrue;
@RunWith(SpringRunner.class)
@Import({PrometheusMetricsConfiguration.class})
@TestPropertySource(properties = {"conductor.metrics-prometheus.enabled=true"})
public class PrometheusMetricsConfigurationTest {

    /**
     * Verifies that enabling the Prometheus module registers a MicrometerRegistry
     * with Spectator's global registry. The global registry does not expose its
     * members, so we reflect on its private "registries" field.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testCollector() throws IllegalAccessException {
        Field registriesField = null;
        for (Field candidate : Spectator.globalRegistry().getClass().getDeclaredFields()) {
            if ("registries".equals(candidate.getName())) {
                registriesField = candidate;
                break;
            }
        }
        assertTrue(registriesField != null);

        registriesField.setAccessible(true);
        List<Registry> registries = (List<Registry>) registriesField.get(Spectator.globalRegistry());
        assertTrue(registries.size() > 0);

        boolean foundMicrometerRegistry = false;
        for (Registry registry : registries) {
            if (registry.getClass().equals(MicrometerRegistry.class)) {
                foundMicrometerRegistry = true;
                break;
            }
        }
        assertTrue(foundMicrometerRegistry);
    }

    @TestConfiguration
    public static class TestConfig {

        /**
         * This bean will be injected in PrometheusMetricsConfiguration, which wraps it with a
         * MicrometerRegistry, and appends it to the global registry.
         *
         * @return a Prometheus registry instance
         */
        @Bean
        @Primary
        public MeterRegistry meterRegistry() {
            return new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
        }
    }
}
| 8,144 |
0 | Create_ds/conductor-community/metrics/src/test/java/com/netflix/conductor/contribs | Create_ds/conductor-community/metrics/src/test/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfigurationTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Import;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.contribs.metrics.LoggingMetricsConfiguration.Slf4jReporterProvider;
import com.codahale.metrics.MetricRegistry;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
@RunWith(SpringRunner.class)
@Import({LoggingMetricsConfiguration.class, MetricsRegistryConfiguration.class})
@TestPropertySource(properties = {"conductor.metrics-logger.enabled=true"})
public class LoggingMetricsConfigurationTest {

    @Autowired MetricRegistry metricRegistry;

    /**
     * Verifies that the Slf4j reporter periodically polls the logger: within one
     * reporting interval the reporter must have asked the logger whether INFO
     * logging is enabled.
     */
    @Test
    public void testCollector() {
        Logger mockLogger = spy(Logger.class);
        doReturn(true).when(mockLogger).isInfoEnabled(any());

        Slf4jReporterProvider provider = new Slf4jReporterProvider(metricRegistry, mockLogger, 1);
        metricRegistry.counter("test").inc();
        provider.getReporter();

        verify(mockLogger, timeout(TimeUnit.SECONDS.toMillis(10))).isInfoEnabled(null);
    }
}
| 8,145 |
0 | Create_ds/conductor-community/metrics/src/main/java/com/netflix/conductor/contribs | Create_ds/conductor-community/metrics/src/main/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfiguration.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.micrometer.MicrometerRegistry;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.prometheus.PrometheusRenameFilter;
/**
* Metrics prometheus module, sending all metrics to a Prometheus server.
*
* <p>Enable in config: conductor.metrics-prometheus.enabled=true
*
* <p>Make sure your dependencies include both spectator-reg-micrometer &
* spring-boot-starter-actuator
*/
@ConditionalOnProperty(value = "conductor.metrics-prometheus.enabled", havingValue = "true")
@Configuration
public class PrometheusMetricsConfiguration {
private static final Logger LOGGER =
LoggerFactory.getLogger(PrometheusMetricsConfiguration.class);
public PrometheusMetricsConfiguration(MeterRegistry meterRegistry) {
LOGGER.info("Prometheus metrics module initialized");
final MicrometerRegistry metricsRegistry = new MicrometerRegistry(meterRegistry);
meterRegistry.config().meterFilter(new PrometheusRenameFilter());
Spectator.globalRegistry().add(metricsRegistry);
}
}
| 8,146 |
0 | Create_ds/conductor-community/metrics/src/main/java/com/netflix/conductor/contribs | Create_ds/conductor-community/metrics/src/main/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfiguration.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import java.time.Duration;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Slf4jReporter;
/**
* Metrics logging reporter, dumping all metrics into an Slf4J logger.
*
* <p>Enable in config: conductor.metrics-logger.enabled=true
*
* <p>additional config: conductor.metrics-logger.reportInterval=15s
*/
@ConditionalOnProperty(value = "conductor.metrics-logger.enabled", havingValue = "true")
@Configuration
public class LoggingMetricsConfiguration {
private static final Logger LOGGER = LoggerFactory.getLogger(LoggingMetricsConfiguration.class);
// Dedicated logger for metrics
// This way one can cleanly separate the metrics stream from rest of the logs
private static final Logger METRICS_LOGGER = LoggerFactory.getLogger("ConductorMetrics");
@Value("${conductor.metrics-logger.reportInterval:#{T(java.time.Duration).ofSeconds(30)}}")
private Duration reportInterval;
@Bean
public Slf4jReporter getSl4jReporter(MetricRegistry metricRegistry) {
return new Slf4jReporterProvider(metricRegistry, reportInterval.getSeconds()).getReporter();
}
static class Slf4jReporterProvider {
private final long metricsReportInterval;
private final MetricRegistry metrics3Registry;
private final Logger logger;
Slf4jReporterProvider(MetricRegistry metricRegistry, long reportInterval) {
this(metricRegistry, METRICS_LOGGER, reportInterval);
}
Slf4jReporterProvider(
MetricRegistry metricRegistry, Logger outputLogger, long metricsReportInterval) {
this.metrics3Registry = metricRegistry;
this.logger = outputLogger;
this.metricsReportInterval = metricsReportInterval;
}
public Slf4jReporter getReporter() {
final Slf4jReporter reporter =
Slf4jReporter.forRegistry(metrics3Registry)
.outputTo(logger)
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.build();
reporter.start(metricsReportInterval, TimeUnit.SECONDS);
LOGGER.info(
"Logging metrics reporter started, reporting every {} seconds",
metricsReportInterval);
return reporter;
}
}
}
| 8,147 |
0 | Create_ds/conductor-community/metrics/src/main/java/com/netflix/conductor/contribs | Create_ds/conductor-community/metrics/src/main/java/com/netflix/conductor/contribs/metrics/MetricsRegistryConfiguration.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.spectator.api.Clock;
import com.netflix.spectator.api.Spectator;
import com.netflix.spectator.metrics3.MetricsRegistry;
import com.codahale.metrics.MetricRegistry;
@ConditionalOnProperty(value = "conductor.metrics-logger.enabled", havingValue = "true")
@Configuration
public class MetricsRegistryConfiguration {

    // Shared Dropwizard registry; exposed as a bean below and consumed by the
    // Slf4j logging reporter configuration.
    public static final MetricRegistry METRIC_REGISTRY = new MetricRegistry();

    // Spectator bridge that writes Spectator metrics into METRIC_REGISTRY.
    public static final MetricsRegistry METRICS_REGISTRY =
            new MetricsRegistry(Clock.SYSTEM, METRIC_REGISTRY);

    // Registered in a static block (not a constructor/bean method) so the bridge is
    // attached to Spectator's global registry exactly once per classloader, as soon
    // as this class is loaded.
    static {
        Spectator.globalRegistry().add(METRICS_REGISTRY);
    }

    /** The Dropwizard registry all metrics funnel into. */
    @Bean
    public MetricRegistry metricRegistry() {
        return METRIC_REGISTRY;
    }

    /** The Spectator-to-Dropwizard bridge registry. */
    @Bean
    public MetricsRegistry metricsRegistry() {
        return METRICS_REGISTRY;
    }
}
}
| 8,148 |
0 | Create_ds/conductor-community/lock/zookeeper-lock/src/test/java/com/netflix/conductor/zookeeper | Create_ds/conductor-community/lock/zookeeper-lock/src/test/java/com/netflix/conductor/zookeeper/lock/ZookeeperLockTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.zookeeper.lock;
import java.time.Duration;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.test.TestingServer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.sync.Lock;
import com.netflix.conductor.service.ExecutionLockService;
import com.netflix.conductor.zookeeper.config.ZookeeperProperties;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class ZookeeperLockTest {

    private static final Logger LOGGER = LoggerFactory.getLogger(ZookeeperLockTest.class);

    // Embedded ZooKeeper started fresh for every test on the default port.
    TestingServer zkServer;
    ZookeeperProperties properties;

    @Before
    public void setUp() throws Exception {
        zkServer = new TestingServer(2181);
        // Mocked properties pointing at the embedded server, using Curator's
        // default session/connection timeouts and an empty namespace.
        properties = mock(ZookeeperProperties.class);
        when(properties.getConnectionString()).thenReturn("localhost:2181");
        when(properties.getSessionTimeout())
                .thenReturn(
                        Duration.ofMillis(CuratorFrameworkFactory.builder().getSessionTimeoutMs()));
        when(properties.getConnectionTimeout())
                .thenReturn(
                        Duration.ofMillis(
                                CuratorFrameworkFactory.builder().getConnectionTimeoutMs()));
        when(properties.getNamespace()).thenReturn("");
    }

    @After
    public void tearDown() throws Exception {
        zkServer.stop();
    }

    /**
     * The underlying Curator InterProcessMutex is reentrant: the same ZookeeperLock
     * instance can acquire the same lockId twice, and must release twice.
     */
    @Test
    public void testLockReentrance() {
        Lock zkLock = new ZookeeperLock(properties);
        boolean hasLock = zkLock.acquireLock("reentrantLock1", 50, TimeUnit.MILLISECONDS);
        assertTrue(hasLock);
        hasLock = zkLock.acquireLock("reentrantLock1", 50, TimeUnit.MILLISECONDS);
        assertTrue(hasLock);
        zkLock.releaseLock("reentrantLock1");
        zkLock.releaseLock("reentrantLock1");
    }

    /**
     * Mutual-exclusion scenario across worker threads: worker2 contends with worker1
     * on lock1 and only proceeds after worker1 releases, while worker3 on lock2 is
     * never blocked.
     *
     * <p>NOTE(review): the fixed 30-second sleeps make this test take minutes and are
     * presumably compensating for ZK/Curator timing — consider replacing with
     * latches or polling with a deadline.
     */
    @Test
    public void testZkLock() throws InterruptedException {
        Lock zkLock = new ZookeeperLock(properties);
        String lock1 = "lock1";
        String lock2 = "lock2";

        Worker worker1 = new Worker(zkLock, lock1);
        worker1.start();
        worker1.lockNotify.acquire();
        assertTrue(worker1.isLocked);
        Thread.sleep(30000);

        Worker worker2 = new Worker(zkLock, lock1);
        worker2.start();
        assertTrue(worker2.isAlive());
        // worker2 must still be waiting on lock1 held by worker1.
        assertFalse(worker2.isLocked);
        Thread.sleep(30000);

        Worker worker3 = new Worker(zkLock, lock2);
        worker3.start();
        worker3.lockNotify.acquire();
        // lock2 is uncontended, so worker3 acquires immediately.
        assertTrue(worker3.isLocked);
        Thread.sleep(30000);

        worker1.unlockNotify.release();
        worker1.join();
        Thread.sleep(30000);

        // After worker1 released lock1, worker2 should have acquired it.
        worker2.lockNotify.acquire();
        assertTrue(worker2.isLocked);
        worker2.unlockNotify.release();
        worker2.join();

        worker3.unlockNotify.release();
        worker3.join();
    }

    /**
     * Thread that acquires a single lock, signals via {@code lockNotify}, then holds
     * the lock until {@code unlockNotify} is released.
     */
    private static class Worker extends Thread {

        private final Lock lock;
        private final String lockID;
        // Released by the test to tell the worker to unlock and exit.
        Semaphore unlockNotify = new Semaphore(0);
        // Released by the worker once it believes it holds the lock.
        Semaphore lockNotify = new Semaphore(0);
        boolean isLocked = false;

        Worker(Lock lock, String lockID) {
            super("TestWorker-" + lockID);
            this.lock = lock;
            this.lockID = lockID;
        }

        @Override
        public void run() {
            // NOTE(review): the boolean result of the timed acquire is ignored, so
            // isLocked is set true even when the 5 ms attempt times out. The
            // testZkLock assertions appear to rely on the timing of this quirk —
            // verify before "fixing" it.
            lock.acquireLock(lockID, 5, TimeUnit.MILLISECONDS);
            isLocked = true;
            lockNotify.release();
            try {
                unlockNotify.acquire();
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                isLocked = false;
                lock.releaseLock(lockID);
            }
        }
    }

    /**
     * Worker that cycles acquire/hold/release over a sequence of lock IDs via
     * {@link ExecutionLockService}.
     *
     * <p>NOTE(review): not referenced by any test in this file — presumably kept for
     * a stress test elsewhere or removed coverage; confirm before deleting.
     */
    private static class MultiLockWorker extends Thread {

        private final ExecutionLockService lock;
        private final Iterable<String> lockIDs;
        private boolean finishedSuccessfully = false;

        public MultiLockWorker(ExecutionLockService executionLock, Iterable<String> lockIDs) {
            super();
            this.lock = executionLock;
            this.lockIDs = lockIDs;
        }

        @Override
        public void run() {
            try {
                int iterations = 0;
                for (String lockID : lockIDs) {
                    lock.acquireLock(lockID);
                    Thread.sleep(100);
                    lock.releaseLock(lockID);
                    iterations++;
                    if (iterations % 10 == 0) {
                        LOGGER.info("Finished iterations: {}", iterations);
                    }
                }
                finishedSuccessfully = true;
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        public boolean isFinishedSuccessfully() {
            return finishedSuccessfully;
        }
    }
}
| 8,149 |
0 | Create_ds/conductor-community/lock/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper | Create_ds/conductor-community/lock/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.zookeeper.config;
import java.time.Duration;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.zookeeper-lock")
public class ZookeeperProperties {

    /** The connection string to be used to connect to the Zookeeper cluster */
    private String connectionString = "localhost:2181";

    /** The session timeout for the curator (defaults to Curator's builder default) */
    private Duration sessionTimeout =
            Duration.ofMillis(CuratorFrameworkFactory.builder().getSessionTimeoutMs());

    /** The connection timeout for the curator (defaults to Curator's builder default) */
    private Duration connectionTimeout =
            Duration.ofMillis(CuratorFrameworkFactory.builder().getConnectionTimeoutMs());

    /** The namespace to use within the zookeeper cluster */
    private String namespace = "";

    public String getConnectionString() {
        return connectionString;
    }

    public void setConnectionString(String connectionString) {
        this.connectionString = connectionString;
    }

    public Duration getSessionTimeout() {
        return sessionTimeout;
    }

    public void setSessionTimeout(Duration sessionTimeout) {
        this.sessionTimeout = sessionTimeout;
    }

    public Duration getConnectionTimeout() {
        return connectionTimeout;
    }

    public void setConnectionTimeout(Duration connectionTimeout) {
        this.connectionTimeout = connectionTimeout;
    }

    public String getNamespace() {
        return namespace;
    }

    public void setNamespace(String namespace) {
        this.namespace = namespace;
    }
}
}
| 8,150 |
0 | Create_ds/conductor-community/lock/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper | Create_ds/conductor-community/lock/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperLockConfiguration.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.zookeeper.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.sync.Lock;
import com.netflix.conductor.zookeeper.lock.ZookeeperLock;
@Configuration
@EnableConfigurationProperties(ZookeeperProperties.class)
@ConditionalOnProperty(name = "conductor.workflow-execution-lock.type", havingValue = "zookeeper")
public class ZookeeperLockConfiguration {

    /** Supplies the ZooKeeper-backed distributed {@link Lock} implementation. */
    @Bean
    public Lock provideLock(ZookeeperProperties properties) {
        return new ZookeeperLock(properties);
    }
}
}
| 8,151 |
0 | Create_ds/conductor-community/lock/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper | Create_ds/conductor-community/lock/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/lock/ZookeeperLock.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.zookeeper.lock;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.sync.Lock;
import com.netflix.conductor.zookeeper.config.ZookeeperProperties;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
@SuppressWarnings("UnstableApiUsage")
public class ZookeeperLock implements Lock {

    /** Maximum number of lock mutexes kept in the local cache. */
    public static final int CACHE_MAXSIZE = 20000;

    /** Minutes after last access before a cached mutex entry expires. */
    public static final int CACHE_EXPIRY_TIME = 10;

    private static final Logger LOGGER = LoggerFactory.getLogger(ZookeeperLock.class);

    private final CuratorFramework client;
    // One InterProcessMutex per lockId, created lazily; the same mutex instance must
    // be reused within a JVM because InterProcessMutex tracks the owning thread.
    private final LoadingCache<String, InterProcessMutex> zkLocks;
    // Base znode path under which all lock nodes are created.
    private final String zkPath;

    /**
     * Creates and starts a Curator client against the configured ZooKeeper cluster.
     *
     * @param properties connection string, timeouts and optional namespace
     */
    public ZookeeperLock(ZookeeperProperties properties) {
        String lockNamespace = properties.getNamespace();
        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
        client =
                CuratorFrameworkFactory.newClient(
                        properties.getConnectionString(),
                        (int) properties.getSessionTimeout().toMillis(),
                        (int) properties.getConnectionTimeout().toMillis(),
                        retryPolicy);
        client.start();
        // Assign zkPath BEFORE building the cache: the CacheLoader below captures this
        // field, and the previous ordering (cache first, zkPath second) only worked
        // because cache loading is lazy. Initializing it first removes that hazard.
        zkPath =
                StringUtils.isEmpty(lockNamespace)
                        ? ("/conductor/")
                        : ("/conductor/" + lockNamespace + "/");
        zkLocks =
                CacheBuilder.newBuilder()
                        .maximumSize(CACHE_MAXSIZE)
                        .expireAfterAccess(CACHE_EXPIRY_TIME, TimeUnit.MINUTES)
                        .build(
                                new CacheLoader<String, InterProcessMutex>() {
                                    @Override
                                    public InterProcessMutex load(String key) {
                                        return new InterProcessMutex(client, zkPath.concat(key));
                                    }
                                });
    }

    /**
     * Blocks until the lock is acquired.
     *
     * <p>NOTE(review): failures are swallowed at debug level, so on error the caller
     * proceeds WITHOUT holding the lock; kept as-is to preserve existing behavior.
     *
     * @throws IllegalArgumentException if lockId is null or empty
     */
    @Override
    public void acquireLock(String lockId) {
        if (StringUtils.isEmpty(lockId)) {
            throw new IllegalArgumentException("lockId cannot be NULL or empty: lockId=" + lockId);
        }
        try {
            InterProcessMutex mutex = zkLocks.get(lockId);
            mutex.acquire();
        } catch (Exception e) {
            LOGGER.debug("Failed in acquireLock: ", e);
        }
    }

    /**
     * Attempts to acquire the lock within the given time budget.
     *
     * @return true if the lock was acquired, false on timeout or error
     * @throws IllegalArgumentException if lockId is null or empty
     */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) {
        if (StringUtils.isEmpty(lockId)) {
            throw new IllegalArgumentException("lockId cannot be NULL or empty: lockId=" + lockId);
        }
        try {
            InterProcessMutex mutex = zkLocks.get(lockId);
            return mutex.acquire(timeToTry, unit);
        } catch (Exception e) {
            LOGGER.debug("Failed in acquireLock: ", e);
        }
        return false;
    }

    /**
     * Lease time is not supported by this implementation; the {@code leaseTime}
     * argument is ignored and the call delegates to the timed acquire.
     */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit) {
        return acquireLock(lockId, timeToTry, unit);
    }

    /**
     * Releases the lock if a mutex for this id is still cached; a no-op when the
     * cache entry has expired or was never created.
     *
     * @throws IllegalArgumentException if lockId is null or empty
     */
    @Override
    public void releaseLock(String lockId) {
        if (StringUtils.isEmpty(lockId)) {
            throw new IllegalArgumentException("lockId cannot be NULL or empty: lockId=" + lockId);
        }
        try {
            // getIfPresent: never create a mutex just to release it.
            InterProcessMutex lock = zkLocks.getIfPresent(lockId);
            if (lock != null) {
                lock.release();
            }
        } catch (Exception e) {
            LOGGER.debug("Failed in releaseLock: ", e);
        }
    }

    /** Deletes the lock's znode from ZooKeeper (guaranteed delete, best-effort). */
    @Override
    public void deleteLock(String lockId) {
        try {
            LOGGER.debug("Deleting lock {}", zkPath.concat(lockId));
            client.delete().guaranteed().forPath(zkPath.concat(lockId));
        } catch (Exception e) {
            LOGGER.debug("Failed to removeLock: ", e);
        }
    }
}
| 8,152 |
0 | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/test/java/com/netflix/conductor/azureblob | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/test/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorageTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.storage;
import java.time.Duration;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.netflix.conductor.azureblob.config.AzureBlobProperties;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.utils.IDGenerator;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AzureBlobPayloadStorageTest {

    private AzureBlobProperties properties;
    private IDGenerator idGenerator;

    @Before
    public void setUp() {
        // Default mock configuration: no credentials/endpoint, standard paths.
        // Individual tests override connectionString/endpoint as needed.
        properties = mock(AzureBlobProperties.class);
        idGenerator = new IDGenerator();
        when(properties.getConnectionString()).thenReturn(null);
        when(properties.getContainerName()).thenReturn("conductor-payloads");
        when(properties.getEndpoint()).thenReturn(null);
        when(properties.getSasToken()).thenReturn(null);
        when(properties.getSignedUrlExpirationDuration()).thenReturn(Duration.ofSeconds(5));
        when(properties.getWorkflowInputPath()).thenReturn("workflow/input/");
        when(properties.getWorkflowOutputPath()).thenReturn("workflow/output/");
        when(properties.getTaskInputPath()).thenReturn("task/input");
        when(properties.getTaskOutputPath()).thenReturn("task/output/");
    }

    /** Dummy credentials Azure SDK doesn't work with Azurite since it cleans parameters */
    private final String azuriteConnectionString =
            "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;EndpointSuffix=localhost";

    @Rule public ExpectedException expectedException = ExpectedException.none();

    /** Without a connection string, endpoint, or SAS token, construction must fail. */
    @Test
    public void testNoStorageAccount() {
        expectedException.expect(NonTransientException.class);
        new AzureBlobPayloadStorage(idGenerator, properties);
    }

    /** A connection string alone is sufficient to construct the storage. */
    @Test
    public void testUseConnectionString() {
        when(properties.getConnectionString()).thenReturn(azuriteConnectionString);
        new AzureBlobPayloadStorage(idGenerator, properties);
    }

    /** An endpoint alone is sufficient to construct the storage. */
    @Test
    public void testUseEndpoint() {
        String azuriteEndpoint = "http://127.0.0.1:10000/";
        when(properties.getEndpoint()).thenReturn(azuriteEndpoint);
        new AzureBlobPayloadStorage(idGenerator, properties);
    }

    /** When a path is supplied explicitly, getLocation must echo it back unchanged. */
    @Test
    public void testGetLocationFixedPath() {
        when(properties.getConnectionString()).thenReturn(azuriteConnectionString);
        AzureBlobPayloadStorage azureBlobPayloadStorage =
                new AzureBlobPayloadStorage(idGenerator, properties);
        String path = "somewhere";
        ExternalStorageLocation externalStorageLocation =
                azureBlobPayloadStorage.getLocation(
                        ExternalPayloadStorage.Operation.READ,
                        ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT,
                        path);
        assertNotNull(externalStorageLocation);
        assertEquals(path, externalStorageLocation.getPath());
        assertNotNull(externalStorageLocation.getUri());
    }

    /**
     * Helper: asserts that a generated location (null path => storage generates one)
     * starts with the expected configured prefix and that the prefix appears in the URI.
     */
    private void testGetLocation(
            AzureBlobPayloadStorage azureBlobPayloadStorage,
            ExternalPayloadStorage.Operation operation,
            ExternalPayloadStorage.PayloadType payloadType,
            String expectedPath) {
        ExternalStorageLocation externalStorageLocation =
                azureBlobPayloadStorage.getLocation(operation, payloadType, null);
        assertNotNull(externalStorageLocation);
        assertNotNull(externalStorageLocation.getPath());
        assertTrue(externalStorageLocation.getPath().startsWith(expectedPath));
        assertNotNull(externalStorageLocation.getUri());
        assertTrue(externalStorageLocation.getUri().contains(expectedPath));
    }

    /** Exercises every (operation, payload type) combination against its configured path. */
    @Test
    public void testGetAllLocations() {
        when(properties.getConnectionString()).thenReturn(azuriteConnectionString);
        AzureBlobPayloadStorage azureBlobPayloadStorage =
                new AzureBlobPayloadStorage(idGenerator, properties);
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.READ,
                ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT,
                properties.getWorkflowInputPath());
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.READ,
                ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT,
                properties.getWorkflowOutputPath());
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.READ,
                ExternalPayloadStorage.PayloadType.TASK_INPUT,
                properties.getTaskInputPath());
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.READ,
                ExternalPayloadStorage.PayloadType.TASK_OUTPUT,
                properties.getTaskOutputPath());
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.WRITE,
                ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT,
                properties.getWorkflowInputPath());
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.WRITE,
                ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT,
                properties.getWorkflowOutputPath());
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.WRITE,
                ExternalPayloadStorage.PayloadType.TASK_INPUT,
                properties.getTaskInputPath());
        testGetLocation(
                azureBlobPayloadStorage,
                ExternalPayloadStorage.Operation.WRITE,
                ExternalPayloadStorage.PayloadType.TASK_OUTPUT,
                properties.getTaskOutputPath());
    }
}
| 8,153 |
0 | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/main/java/com/netflix/conductor/azureblob | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobProperties.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
@ConfigurationProperties("conductor.external-payload-storage.azureblob")
public class AzureBlobProperties {

    /** The connection string to be used to connect to Azure Blob storage */
    private String connectionString = null;

    /** The name of the container where the payloads will be stored */
    private String containerName = "conductor-payloads";

    /** The endpoint to be used to connect to Azure Blob storage */
    private String endpoint = null;

    /** The sas token to be used for authenticating requests */
    private String sasToken = null;

    /** The time for which the shared access signature is valid */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration signedUrlExpirationDuration = Duration.ofSeconds(5);

    /** The path at which the workflow inputs will be stored */
    private String workflowInputPath = "workflow/input/";

    /** The path at which the workflow outputs will be stored */
    private String workflowOutputPath = "workflow/output/";

    /** The path at which the task inputs will be stored */
    private String taskInputPath = "task/input/";

    /** The path at which the task outputs will be stored */
    private String taskOutputPath = "task/output/";

    public String getConnectionString() {
        return connectionString;
    }

    public void setConnectionString(String connectionString) {
        this.connectionString = connectionString;
    }

    public String getContainerName() {
        return containerName;
    }

    public void setContainerName(String containerName) {
        this.containerName = containerName;
    }

    public String getEndpoint() {
        return endpoint;
    }

    public void setEndpoint(String endpoint) {
        this.endpoint = endpoint;
    }

    public String getSasToken() {
        return sasToken;
    }

    public void setSasToken(String sasToken) {
        this.sasToken = sasToken;
    }

    public Duration getSignedUrlExpirationDuration() {
        return signedUrlExpirationDuration;
    }

    public void setSignedUrlExpirationDuration(Duration signedUrlExpirationDuration) {
        this.signedUrlExpirationDuration = signedUrlExpirationDuration;
    }

    public String getWorkflowInputPath() {
        return workflowInputPath;
    }

    public void setWorkflowInputPath(String workflowInputPath) {
        this.workflowInputPath = workflowInputPath;
    }

    public String getWorkflowOutputPath() {
        return workflowOutputPath;
    }

    public void setWorkflowOutputPath(String workflowOutputPath) {
        this.workflowOutputPath = workflowOutputPath;
    }

    public String getTaskInputPath() {
        return taskInputPath;
    }

    public void setTaskInputPath(String taskInputPath) {
        this.taskInputPath = taskInputPath;
    }

    public String getTaskOutputPath() {
        return taskOutputPath;
    }

    public void setTaskOutputPath(String taskOutputPath) {
        this.taskOutputPath = taskOutputPath;
    }
}
}
| 8,154 |
0 | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/main/java/com/netflix/conductor/azureblob | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobConfiguration.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.azureblob.storage.AzureBlobPayloadStorage;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.utils.IDGenerator;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(AzureBlobProperties.class)
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "azureblob")
public class AzureBlobConfiguration {

    /** Supplies the Azure Blob-backed {@link ExternalPayloadStorage} implementation. */
    @Bean
    public ExternalPayloadStorage azureBlobExternalPayloadStorage(
            IDGenerator idGenerator, AzureBlobProperties properties) {
        return new AzureBlobPayloadStorage(idGenerator, properties);
    }
}
}
| 8,155 |
0 | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/main/java/com/netflix/conductor/azureblob | Create_ds/conductor-community/external-payload-storage/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorage.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.storage;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.azureblob.config.AzureBlobProperties;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.utils.IDGenerator;
import com.azure.core.exception.UnexpectedLengthException;
import com.azure.core.util.Context;
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobContainerClientBuilder;
import com.azure.storage.blob.models.BlobHttpHeaders;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.sas.BlobSasPermission;
import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;
import com.azure.storage.blob.specialized.BlockBlobClient;
import com.azure.storage.common.Utility;
import com.azure.storage.common.implementation.credentials.SasTokenCredential;
/**
* An implementation of {@link ExternalPayloadStorage} using Azure Blob for storing large JSON
* payload data.
*
* @see <a href="https://github.com/Azure/azure-sdk-for-java">Azure Java SDK</a>
*/
public class AzureBlobPayloadStorage implements ExternalPayloadStorage {

    private static final Logger LOGGER = LoggerFactory.getLogger(AzureBlobPayloadStorage.class);

    // Payloads are always JSON documents; set as the blob Content-Type on upload.
    private static final String CONTENT_TYPE = "application/json";

    private final IDGenerator idGenerator;
    // Base paths (pseudo-directories) under which each payload type is stored.
    private final String workflowInputPath;
    private final String workflowOutputPath;
    private final String taskInputPath;
    private final String taskOutputPath;
    private final BlobContainerClient blobContainerClient;
    // Lifetime, in seconds, of SAS URLs generated by getLocation() when no
    // pre-configured SAS token is available.
    private final long expirationSec;
    // Non-null only when the client was configured from an endpoint plus SAS token;
    // in that case getLocation() appends this token to blob URLs instead of signing.
    // NOTE(review): SasTokenCredential comes from an Azure SDK *implementation*
    // package (not public API) and may break on SDK upgrades — consider keeping the
    // raw token string instead; confirm before upgrading azure-storage-common.
    private final SasTokenCredential sasTokenCredential;

    /**
     * Builds the blob container client from the configured properties.
     *
     * <p>Exactly one of two configuration styles must be present: a full connection string, or
     * an endpoint (optionally with a SAS token). If neither is configured, construction fails.
     *
     * @param idGenerator generator used to create unique blob names
     * @param properties Azure Blob configuration (paths, credentials, container, expiration)
     * @throws NonTransientException if neither a connection string nor an endpoint is configured
     */
    public AzureBlobPayloadStorage(IDGenerator idGenerator, AzureBlobProperties properties) {
        this.idGenerator = idGenerator;
        workflowInputPath = properties.getWorkflowInputPath();
        workflowOutputPath = properties.getWorkflowOutputPath();
        taskInputPath = properties.getTaskInputPath();
        taskOutputPath = properties.getTaskOutputPath();
        expirationSec = properties.getSignedUrlExpirationDuration().getSeconds();
        String connectionString = properties.getConnectionString();
        String containerName = properties.getContainerName();
        String endpoint = properties.getEndpoint();
        String sasToken = properties.getSasToken();
        BlobContainerClientBuilder blobContainerClientBuilder = new BlobContainerClientBuilder();
        if (connectionString != null) {
            // Connection string carries its own credentials; no SAS token is kept.
            blobContainerClientBuilder.connectionString(connectionString);
            sasTokenCredential = null;
        } else if (endpoint != null) {
            blobContainerClientBuilder.endpoint(endpoint);
            if (sasToken != null) {
                // Keep the parsed token so getLocation() can reuse it on blob URLs.
                sasTokenCredential = SasTokenCredential.fromSasTokenString(sasToken);
                blobContainerClientBuilder.sasToken(sasTokenCredential.getSasToken());
            } else {
                sasTokenCredential = null;
            }
        } else {
            String msg = "Missing property for connectionString OR endpoint";
            LOGGER.error(msg);
            throw new NonTransientException(msg);
        }
        blobContainerClient = blobContainerClientBuilder.containerName(containerName).buildClient();
    }

    /**
     * @param operation the type of {@link Operation} to be performed
     * @param payloadType the {@link PayloadType} that is being accessed
     * @param path the blob name to use; when blank a new unique name is generated
     * @return a {@link ExternalStorageLocation} object which contains the pre-signed URL and the
     *     azure blob name for the json payload
     */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path) {
        try {
            ExternalStorageLocation externalStorageLocation = new ExternalStorageLocation();
            String objectKey;
            if (StringUtils.isNotBlank(path)) {
                objectKey = path;
            } else {
                objectKey = getObjectKey(payloadType);
            }
            externalStorageLocation.setPath(objectKey);
            BlockBlobClient blockBlobClient =
                    blobContainerClient.getBlobClient(objectKey).getBlockBlobClient();
            String blobUrl = Utility.urlDecode(blockBlobClient.getBlobUrl());
            if (sasTokenCredential != null) {
                // A pre-configured SAS token exists: append it verbatim to the blob URL.
                blobUrl = blobUrl + "?" + sasTokenCredential.getSasToken();
            } else {
                // Otherwise generate a short-lived SAS scoped to this blob, granting
                // only the permission(s) the requested operation needs.
                BlobSasPermission blobSASPermission = new BlobSasPermission();
                if (operation.equals(Operation.READ)) {
                    blobSASPermission.setReadPermission(true);
                } else if (operation.equals(Operation.WRITE)) {
                    blobSASPermission.setWritePermission(true);
                    blobSASPermission.setCreatePermission(true);
                }
                BlobServiceSasSignatureValues blobServiceSasSignatureValues =
                        new BlobServiceSasSignatureValues(
                                OffsetDateTime.now(ZoneOffset.UTC).plusSeconds(expirationSec),
                                blobSASPermission);
                blobUrl =
                        blobUrl + "?" + blockBlobClient.generateSas(blobServiceSasSignatureValues);
            }
            externalStorageLocation.setUri(blobUrl);
            return externalStorageLocation;
        } catch (BlobStorageException e) {
            String msg = "Error communicating with Azure";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Uploads the payload to the given azure blob name. It is expected that the caller retrieves
     * the blob name using {@link #getLocation(Operation, PayloadType, String)} before making this
     * call.
     *
     * @param path the name of the blob to be uploaded
     * @param payload an {@link InputStream} containing the json payload which is to be uploaded
     * @param payloadSize the size of the json payload in bytes
     * @throws NonTransientException if the upload to Azure fails
     */
    @Override
    public void upload(String path, InputStream payload, long payloadSize) {
        try {
            BlockBlobClient blockBlobClient =
                    blobContainerClient.getBlobClient(path).getBlockBlobClient();
            BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders().setContentType(CONTENT_TYPE);
            // Only the Content-Type header is supplied; the other optional
            // arguments of uploadWithResponse are left null (unused).
            blockBlobClient.uploadWithResponse(
                    payload,
                    payloadSize,
                    blobHttpHeaders,
                    null,
                    null,
                    null,
                    null,
                    null,
                    Context.NONE);
        } catch (BlobStorageException | UncheckedIOException | UnexpectedLengthException e) {
            String msg = "Error communicating with Azure";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Downloads the payload stored in an azure blob.
     *
     * <p>The whole blob is buffered in memory before being returned, so very large payloads
     * increase heap pressure proportionally.
     *
     * @param path the path of the blob
     * @return an input stream containing the contents of the object Caller is expected to close the
     *     input stream.
     * @throws NonTransientException if the download from Azure fails
     */
    @Override
    public InputStream download(String path) {
        try {
            BlockBlobClient blockBlobClient =
                    blobContainerClient.getBlobClient(path).getBlockBlobClient();
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
            // Avoid another call to the api to get the blob size
            // ByteArrayOutputStream outputStream = new
            // ByteArrayOutputStream(blockBlobClient.getProperties().value().blobSize());
            blockBlobClient.download(outputStream);
            return new ByteArrayInputStream(outputStream.toByteArray());
        } catch (BlobStorageException | UncheckedIOException | NullPointerException e) {
            String msg = "Error communicating with Azure";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Build path on external storage. Copied from S3PayloadStorage.
     *
     * @param payloadType the {@link PayloadType} which will determine the base path of the object
     * @return External Storage path
     */
    private String getObjectKey(PayloadType payloadType) {
        StringBuilder stringBuilder = new StringBuilder();
        switch (payloadType) {
            case WORKFLOW_INPUT:
                stringBuilder.append(workflowInputPath);
                break;
            case WORKFLOW_OUTPUT:
                stringBuilder.append(workflowOutputPath);
                break;
            case TASK_INPUT:
                stringBuilder.append(taskInputPath);
                break;
            case TASK_OUTPUT:
                stringBuilder.append(taskOutputPath);
                break;
        }
        // Resulting key: <base path><generated unique id>.json
        stringBuilder.append(idGenerator.generate()).append(".json");
        return stringBuilder.toString();
    }
}
| 8,156 |
0 | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/test/java/com/netflix/conductor/postgres | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorageTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.storage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import org.testcontainers.containers.PostgreSQLContainer;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.utils.IDGenerator;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class PostgresPayloadStorageTest {

    private PostgresPayloadTestUtil testPostgres;
    private PostgresPayloadStorage executionPostgres;
    public PostgreSQLContainer<?> postgreSQLContainer;

    private final String inputString =
            "Lorem Ipsum is simply dummy text of the printing and typesetting industry."
                    + " Lorem Ipsum has been the industry's standard dummy text ever since the 1500s.";
    private final String errorMessage = "{\"Error\": \"Data does not exist.\"}";
    private final InputStream inputData;
    private final String key = "dummyKey.json";

    public PostgresPayloadStorageTest() {
        inputData = new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8));
    }

    /** Starts a fresh PostgreSQL container and wires the storage under test against it. */
    @Before
    public void setup() {
        postgreSQLContainer =
                new PostgreSQLContainer<>(DockerImageName.parse("postgres"))
                        .withDatabaseName("conductor");
        postgreSQLContainer.start();
        testPostgres = new PostgresPayloadTestUtil(postgreSQLContainer);
        executionPostgres =
                new PostgresPayloadStorage(
                        testPostgres.getTestProperties(),
                        testPostgres.getDataSource(),
                        new IDGenerator(),
                        errorMessage);
    }

    @Test
    public void testWriteInputStreamToDb() throws IOException, SQLException {
        executionPostgres.upload(key, inputData, inputData.available());
        // try-with-resources: the original leaked the Connection, Statement and ResultSet.
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "SELECT data FROM external.external_payload WHERE id = 'dummyKey.json'");
                ResultSet rs = stmt.executeQuery()) {
            rs.next();
            assertEquals(
                    inputString,
                    new String(rs.getBinaryStream(1).readAllBytes(), StandardCharsets.UTF_8));
        }
    }

    @Test
    public void testReadInputStreamFromDb() throws IOException, SQLException {
        insertData();
        assertEquals(
                inputString,
                new String(executionPostgres.download(key).readAllBytes(), StandardCharsets.UTF_8));
    }

    /** Inserts the canonical test row directly, bypassing the storage implementation. */
    private void insertData() throws SQLException, IOException {
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "INSERT INTO external.external_payload VALUES (?, ?)")) {
            stmt.setString(1, key);
            stmt.setBinaryStream(2, inputData, inputData.available());
            stmt.executeUpdate();
        }
    }

    @Test(timeout = 60 * 1000)
    public void testMultithreadDownload()
            throws ExecutionException, InterruptedException, SQLException, IOException {
        AtomicInteger threadCounter = new AtomicInteger(0);
        insertData();
        int numberOfThread = 12;
        int taskInThread = 100;
        ArrayList<CompletableFuture<?>> completableFutures = new ArrayList<>();
        Executor executor = Executors.newFixedThreadPool(numberOfThread);
        IntStream.range(0, numberOfThread * taskInThread)
                .forEach(
                        i ->
                                createFutureForDownloadOperation(
                                        threadCounter, completableFutures, executor));
        for (CompletableFuture<?> completableFuture : completableFutures) {
            completableFuture.get();
        }
        assertCount(1);
        assertEquals(numberOfThread * taskInThread, threadCounter.get());
    }

    private void createFutureForDownloadOperation(
            AtomicInteger threadCounter,
            ArrayList<CompletableFuture<?>> completableFutures,
            Executor executor) {
        CompletableFuture<Void> objectCompletableFuture =
                CompletableFuture.supplyAsync(() -> downloadData(threadCounter), executor);
        completableFutures.add(objectCompletableFuture);
    }

    /** Downloads the test row, verifies its content, and bumps the success counter. */
    private Void downloadData(AtomicInteger threadCounter) {
        try {
            assertEquals(
                    inputString,
                    new String(
                            executionPostgres.download(key).readAllBytes(),
                            StandardCharsets.UTF_8));
            threadCounter.getAndIncrement();
            return null;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void testReadNonExistentInputStreamFromDb() throws IOException, SQLException {
        // Reuse the shared helper; the original duplicated the INSERT inline (and leaked
        // the connection doing so).
        insertData();
        assertEquals(
                errorMessage,
                new String(
                        executionPostgres.download("non_existent_key.json").readAllBytes(),
                        StandardCharsets.UTF_8));
    }

    @Test
    public void testMaxRowInTable() throws IOException, SQLException {
        // The migration caps the table at 5 rows; the 2 oldest of these 7 must be evicted.
        executionPostgres.upload(key, inputData, inputData.available());
        executionPostgres.upload("dummyKey2.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey3.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey4.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey5.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey6.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey7.json", inputData, inputData.available());
        assertCount(5);
    }

    @Test(timeout = 60 * 1000)
    public void testMultithreadInsert()
            throws SQLException, ExecutionException, InterruptedException {
        AtomicInteger threadCounter = new AtomicInteger(0);
        int numberOfThread = 12;
        int taskInThread = 100;
        ArrayList<CompletableFuture<?>> completableFutures = new ArrayList<>();
        Executor executor = Executors.newFixedThreadPool(numberOfThread);
        IntStream.range(0, numberOfThread * taskInThread)
                .forEach(
                        i ->
                                createFutureForUploadOperation(
                                        threadCounter, completableFutures, executor));
        for (CompletableFuture<?> completableFuture : completableFutures) {
            completableFuture.get();
        }
        // Identical payloads hash to the same key, so concurrent uploads collapse to 1 row.
        assertCount(1);
        assertEquals(numberOfThread * taskInThread, threadCounter.get());
    }

    private void createFutureForUploadOperation(
            AtomicInteger threadCounter,
            ArrayList<CompletableFuture<?>> completableFutures,
            Executor executor) {
        CompletableFuture<Void> objectCompletableFuture =
                CompletableFuture.supplyAsync(() -> uploadData(threadCounter), executor);
        completableFutures.add(objectCompletableFuture);
    }

    private Void uploadData(AtomicInteger threadCounter) {
        try {
            uploadData();
            threadCounter.getAndIncrement();
            return null;
        } catch (IOException | SQLException e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void testHashEnsuringNoDuplicates()
            throws IOException, SQLException, InterruptedException {
        final String createdOn = uploadData();
        Thread.sleep(500);
        final String createdOnAfterUpdate = uploadData();
        // Re-uploading the same payload keeps one row but refreshes its timestamp.
        assertCount(1);
        assertNotEquals(createdOnAfterUpdate, createdOn);
    }

    /** Uploads the canonical payload via the storage and returns the row's created_on value. */
    private String uploadData() throws SQLException, IOException {
        final String location = getKey(inputString);
        ByteArrayInputStream inputStream =
                new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8));
        executionPostgres.upload(location, inputStream, inputStream.available());
        return getCreatedOn(location);
    }

    @Test
    public void testDistinctHashedKey() {
        // Same content -> same hashed key; different content -> different key.
        final String location = getKey(inputString);
        final String location2 = getKey(inputString);
        final String location3 = getKey(inputString + "A");
        assertNotEquals(location3, location);
        assertEquals(location2, location);
    }

    /** Derives the content-hashed storage key for the given payload string. */
    private String getKey(String input) {
        return executionPostgres
                .getLocation(
                        ExternalPayloadStorage.Operation.READ,
                        ExternalPayloadStorage.PayloadType.TASK_INPUT,
                        "",
                        input.getBytes(StandardCharsets.UTF_8))
                .getUri();
    }

    private void assertCount(int expected) throws SQLException {
        // Connection included in try-with-resources; the original closed stmt/rs but
        // leaked the connection obtained inline via getConnection().
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "SELECT count(id) FROM external.external_payload");
                ResultSet rs = stmt.executeQuery()) {
            rs.next();
            assertEquals(expected, rs.getInt(1));
        }
    }

    private String getCreatedOn(String key) throws SQLException {
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "SELECT created_on FROM external.external_payload WHERE id = ?")) {
            stmt.setString(1, key);
            try (ResultSet rs = stmt.executeQuery()) {
                rs.next();
                return rs.getString(1);
            }
        }
    }

    @After
    public void teardown() {
        // Stop the per-test container. The original opened a brand-new connection only to
        // close it, which released nothing and left the container running.
        postgreSQLContainer.stop();
    }
}
| 8,157 |
0 | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/test/java/com/netflix/conductor/postgres | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadTestUtil.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.storage;
import java.nio.file.Paths;
import java.util.Map;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.flywaydb.core.api.configuration.FluentConfiguration;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.testcontainers.containers.PostgreSQLContainer;
import com.netflix.conductor.postgres.config.PostgresPayloadProperties;
public class PostgresPayloadTestUtil {

    private final DataSource dataSource;
    private final PostgresPayloadProperties properties = new PostgresPayloadProperties();

    /**
     * Creates a {@link DataSource} pointing at the given test container and applies the
     * external-payload Flyway migrations to it.
     */
    public PostgresPayloadTestUtil(PostgreSQLContainer<?> postgreSQLContainer) {
        dataSource =
                DataSourceBuilder.create()
                        .url(postgreSQLContainer.getJdbcUrl())
                        .username(postgreSQLContainer.getUsername())
                        .password(postgreSQLContainer.getPassword())
                        .build();
        flywayMigrate(dataSource);
    }

    /** Runs the external-payload migrations with fixed test placeholder values. */
    private void flywayMigrate(DataSource dataSource) {
        // Small retention limits (5 rows, 1 day/month/year) so eviction is testable.
        Map<String, String> placeholders =
                Map.of(
                        "tableName", "external.external_payload",
                        "maxDataRows", "5",
                        "maxDataDays", "'1'",
                        "maxDataMonths", "'1'",
                        "maxDataYears", "'1'");
        Flyway.configure()
                .schemas("external")
                .locations(Paths.get("db/migration_external_postgres").toString())
                .dataSource(dataSource)
                .placeholderReplacement(true)
                .placeholders(placeholders)
                .load()
                .migrate();
    }

    /** @return the data source connected to the test container */
    public DataSource getDataSource() {
        return dataSource;
    }

    /** @return default storage properties for tests */
    public PostgresPayloadProperties getTestProperties() {
        return properties;
    }
}
| 8,158 |
0 | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/test/java/com/netflix/conductor/postgres | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResourceTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.controller;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.junit.Before;
import org.junit.Test;
import org.springframework.core.io.InputStreamResource;
import org.springframework.http.ResponseEntity;
import com.netflix.conductor.postgres.storage.PostgresPayloadStorage;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class ExternalPostgresPayloadResourceTest {

    private PostgresPayloadStorage mockPayloadStorage;
    private ExternalPostgresPayloadResource postgresResource;

    @Before
    public void before() {
        mockPayloadStorage = mock(PostgresPayloadStorage.class);
        postgresResource = new ExternalPostgresPayloadResource(mockPayloadStorage);
    }

    /** The resource must stream back exactly the bytes the storage layer returns. */
    @Test
    public void testGetExternalStorageData() throws IOException {
        String expected = "Dummy data";
        when(mockPayloadStorage.download(anyString()))
                .thenReturn(new ByteArrayInputStream(expected.getBytes(StandardCharsets.UTF_8)));

        ResponseEntity<InputStreamResource> response =
                postgresResource.getExternalStorageData("dummyKey.json");

        InputStreamResource body = response.getBody();
        assertNotNull(body);
        String actual = new String(body.getInputStream().readAllBytes(), StandardCharsets.UTF_8);
        assertEquals(expected, actual);
    }
}
| 8,159 |
0 | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadProperties.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.external-payload-storage.postgres")
public class PostgresPayloadProperties {

    /** Schema-qualified PostgreSQL table that holds the payload rows. */
    private String tableName = "external.external_payload";

    public String getTableName() {
        return tableName;
    }

    public void setTableName(String tableName) {
        this.tableName = tableName;
    }

    /** Database user for the PostgreSQL connection. */
    private String username;

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    /** Database password for the PostgreSQL connection. */
    private String password;

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    /** JDBC URL of the PostgreSQL database. */
    private String url;

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    /**
     * Row-count cap for the payload table; once exceeded, the oldest rows are deleted.
     * Defaults to effectively unlimited.
     */
    private long maxDataRows = Long.MAX_VALUE;

    public long getMaxDataRows() {
        return maxDataRows;
    }

    public void setMaxDataRows(long maxDataRows) {
        this.maxDataRows = maxDataRows;
    }

    /** Age cap in days; rows older than this are deleted (0 = not day-limited). */
    private int maxDataDays = 0;

    public int getMaxDataDays() {
        return maxDataDays;
    }

    public void setMaxDataDays(int maxDataDays) {
        this.maxDataDays = maxDataDays;
    }

    /** Age cap in months; rows older than this are deleted (0 = not month-limited). */
    private int maxDataMonths = 0;

    public int getMaxDataMonths() {
        return maxDataMonths;
    }

    public void setMaxDataMonths(int maxDataMonths) {
        this.maxDataMonths = maxDataMonths;
    }

    /** Age cap in years; rows older than this are deleted. */
    private int maxDataYears = 1;

    public int getMaxDataYears() {
        return maxDataYears;
    }

    public void setMaxDataYears(int maxDataYears) {
        this.maxDataYears = maxDataYears;
    }

    /**
     * Base URL of the conductor server used to build the download URI for stored payloads,
     * e.g. "http://localhost:8080" for local development.
     */
    private String conductorUrl = "";

    public String getConductorUrl() {
        return conductorUrl;
    }

    public void setConductorUrl(String conductorUrl) {
        this.conductorUrl = conductorUrl;
    }
}
| 8,160 |
0 | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadConfiguration.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Import;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.postgres.storage.PostgresPayloadStorage;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(PostgresPayloadProperties.class)
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "postgres")
@Import(DataSourceAutoConfiguration.class)
public class PostgresPayloadConfiguration {

    /** JSON message returned to callers that request a payload id that is absent or purged. */
    private static final String DEFAULT_MESSAGE_TO_USER =
            "{\"Error\": \"Data with this ID does not exist or has been deleted from the external storage.\"}";

    private final PostgresPayloadProperties properties;
    private final DataSource dataSource;
    private final IDGenerator idGenerator;

    public PostgresPayloadConfiguration(
            PostgresPayloadProperties properties, DataSource dataSource, IDGenerator idGenerator) {
        this.properties = properties;
        this.dataSource = dataSource;
        this.idGenerator = idGenerator;
    }

    /**
     * Flyway instance that manages the external-payload schema; migration runs via the bean's
     * {@code initMethod}.
     *
     * <p>Fix: the original also annotated this factory method with {@code @PostConstruct}, which
     * is incorrect on a {@code @Bean} method — Spring would additionally invoke it as a lifecycle
     * callback on the configuration class, redundant with {@code initMethod = "migrate"} and
     * liable to create/migrate the Flyway instance prematurely.
     */
    @Bean(initMethod = "migrate")
    public Flyway flywayForExternalDb() {
        return Flyway.configure()
                .locations("classpath:db/migration_external_postgres")
                .schemas("external")
                .baselineOnMigrate(true)
                .placeholderReplacement(true)
                .placeholders(
                        Map.of(
                                "tableName",
                                properties.getTableName(),
                                "maxDataRows",
                                String.valueOf(properties.getMaxDataRows()),
                                "maxDataDays",
                                "'" + properties.getMaxDataDays() + "'",
                                "maxDataMonths",
                                "'" + properties.getMaxDataMonths() + "'",
                                "maxDataYears",
                                "'" + properties.getMaxDataYears() + "'"))
                .dataSource(dataSource)
                .load();
    }

    /** Storage bean; depends on the Flyway bean so migrations complete before first use. */
    @Bean
    @DependsOn({"flywayForExternalDb"})
    public ExternalPayloadStorage postgresExternalPayloadStorage(
            PostgresPayloadProperties properties) {
        return new PostgresPayloadStorage(
                properties, dataSource, idGenerator, DEFAULT_MESSAGE_TO_USER);
    }
}
| 8,161 |
0 | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/external-payload-storage/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorage.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.storage;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.function.Supplier;
import javax.sql.DataSource;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.postgres.config.PostgresPayloadProperties;
/**
* Store and pull the external payload which consists of key and stream of data in PostgreSQL
* database
*/
public class PostgresPayloadStorage implements ExternalPayloadStorage {

    private static final Logger LOGGER = LoggerFactory.getLogger(PostgresPayloadStorage.class);

    /** Suffix for keys derived from a SHA-256 hash of the payload bytes (deduplicates uploads). */
    public static final String URI_SUFFIX_HASHED = ".hashed.json";

    /** Suffix for keys derived from a freshly generated unique id. */
    public static final String URI_SUFFIX = ".json";

    /** REST path prefix under which stored payloads can be fetched back from the server. */
    public static final String URI_PREFIX_EXTERNAL = "/api/external/postgres/";

    // JSON message streamed back when a requested key is absent from the table.
    private final String defaultMessageToUser;
    private final DataSource postgresDataSource;
    private final IDGenerator idGenerator;
    // Schema-qualified table name from configuration. It is concatenated into SQL below;
    // this is operator-controlled configuration, not untrusted input.
    private final String tableName;
    // Base URL of the conductor server, used to build absolute payload URIs.
    private final String conductorUrl;

    public PostgresPayloadStorage(
            PostgresPayloadProperties properties,
            DataSource dataSource,
            IDGenerator idGenerator,
            String defaultMessageToUser) {
        tableName = properties.getTableName();
        conductorUrl = properties.getConductorUrl();
        this.postgresDataSource = dataSource;
        this.idGenerator = idGenerator;
        this.defaultMessageToUser = defaultMessageToUser;
        // Fix: corrected "Extenal" typo in the startup log message.
        LOGGER.info("PostgreSQL External Payload Storage initialized.");
    }

    /**
     * @param operation the type of {@link Operation} to be performed
     * @param payloadType the {@link PayloadType} that is being accessed
     * @param path the object key to use; when blank a fresh unique key is generated
     * @return a {@link ExternalStorageLocation} object which contains the pre-signed URL and the
     *     PostgreSQL object key for the json payload
     */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path) {
        return getLocationInternal(path, () -> idGenerator.generate() + URI_SUFFIX);
    }

    /**
     * Variant that derives the key from a SHA-256 hash of the payload bytes, so identical
     * payloads map to the same row.
     */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path, byte[] payloadBytes) {
        return getLocationInternal(
                path, () -> DigestUtils.sha256Hex(payloadBytes) + URI_SUFFIX_HASHED);
    }

    /**
     * Builds the storage location, preferring an explicit {@code path} over the computed key.
     *
     * @param path caller-supplied key, used verbatim when non-blank
     * @param calculateKey lazily computes a key when {@code path} is blank
     */
    private ExternalStorageLocation getLocationInternal(
            String path, Supplier<String> calculateKey) {
        ExternalStorageLocation externalStorageLocation = new ExternalStorageLocation();
        String objectKey;
        if (StringUtils.isNotBlank(path)) {
            objectKey = path;
        } else {
            objectKey = calculateKey.get();
        }
        String uri = conductorUrl + URI_PREFIX_EXTERNAL + objectKey;
        externalStorageLocation.setUri(uri);
        externalStorageLocation.setPath(objectKey);
        LOGGER.debug("External storage location URI: {}, location path: {}", uri, objectKey);
        return externalStorageLocation;
    }

    /**
     * Uploads the payload to the given PostgreSQL object key. It is expected that the caller
     * retrieves the object key using {@link #getLocation(Operation, PayloadType, String)} before
     * making this call.
     *
     * @param key the PostgreSQL key of the object to be uploaded
     * @param payload an {@link InputStream} containing the json payload which is to be uploaded
     * @param payloadSize the size of the json payload in bytes
     * @throws NonTransientException if the insert fails
     */
    @Override
    public void upload(String key, InputStream payload, long payloadSize) {
        try (Connection conn = postgresDataSource.getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "INSERT INTO "
                                        + tableName
                                        + " (id, data) VALUES (?, ?) ON CONFLICT(id) "
                                        + "DO UPDATE SET created_on=CURRENT_TIMESTAMP")) {
            stmt.setString(1, key);
            stmt.setBinaryStream(2, payload, payloadSize);
            // On duplicate key (e.g. identical hashed payload) only the timestamp is refreshed.
            stmt.executeUpdate();
            LOGGER.debug(
                    "External PostgreSQL uploaded key: {}, payload size: {}", key, payloadSize);
        } catch (SQLException e) {
            String msg = "Error uploading data into External PostgreSQL";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Downloads the payload stored in the PostgreSQL.
     *
     * @param key the PostgreSQL key of the object
     * @return an input stream containing the contents of the object. Caller is expected to close
     *     the input stream. When the key is absent, a stream over the configured
     *     default-message JSON is returned instead.
     * @throws NonTransientException if the query fails
     */
    @Override
    public InputStream download(String key) {
        InputStream inputStream;
        try (Connection conn = postgresDataSource.getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement("SELECT data FROM " + tableName + " WHERE id = ?")) {
            stmt.setString(1, key);
            try (ResultSet rs = stmt.executeQuery()) {
                if (!rs.next()) {
                    LOGGER.debug("External PostgreSQL data with this ID: {} does not exist", key);
                    // Fix: encode explicitly instead of relying on the platform default charset.
                    return new ByteArrayInputStream(
                            defaultMessageToUser.getBytes(StandardCharsets.UTF_8));
                }
                // NOTE(review): this stream is handed to the caller after the ResultSet and
                // Connection are closed. The PostgreSQL JDBC driver materializes bytea values
                // in memory so the stream stays readable there, but JDBC does not guarantee
                // this — confirm if the driver is ever swapped.
                inputStream = rs.getBinaryStream(1);
                LOGGER.debug("External PostgreSQL downloaded key: {}", key);
            }
        } catch (SQLException e) {
            String msg = "Error downloading data from external PostgreSQL";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
        return inputStream;
    }
}
/*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.controller;
import java.io.InputStream;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.core.io.InputStreamResource;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import io.swagger.v3.oas.annotations.Operation;
/**
 * REST controller for pulling payload stream of data by key (externalPayloadPath) from PostgreSQL
 * database
 */
@RestController
@RequestMapping(value = "/api/external/postgres")
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "postgres")
public class ExternalPostgresPayloadResource {

    // Storage backend qualified to the PostgreSQL implementation.
    private final ExternalPayloadStorage postgresService;

    public ExternalPostgresPayloadResource(
            @Qualifier("postgresExternalPayloadStorage") ExternalPayloadStorage postgresService) {
        this.postgresService = postgresService;
    }

    /** Streams the stored JSON payload identified by {@code externalPayloadPath}. */
    @GetMapping("/{externalPayloadPath}")
    @Operation(
            summary =
                    "Get task or workflow by externalPayloadPath from External PostgreSQL Storage")
    public ResponseEntity<InputStreamResource> getExternalStorageData(
            @PathVariable("externalPayloadPath") String externalPayloadPath) {
        InputStreamResource responseBody =
                new InputStreamResource(postgresService.download(externalPayloadPath));
        return ResponseEntity.ok().contentType(MediaType.APPLICATION_JSON).body(responseBody);
    }
}
/*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.grpc.postgres;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
import com.netflix.conductor.test.integration.grpc.AbstractGrpcEndToEndTest;
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {
            "conductor.db.type=postgres",
            "conductor.app.asyncIndexingEnabled=false",
            "conductor.elasticsearch.version=6",
            "conductor.grpc-server.port=8098",
            "conductor.indexing.type=elasticsearch",
            "spring.datasource.url=jdbc:tc:postgresql:11.15-alpine:///conductor", // "tc" prefix
            // starts the
            // Postgres container
            "spring.datasource.username=postgres",
            "spring.datasource.password=postgres",
            "spring.datasource.hikari.maximum-pool-size=8",
            "spring.datasource.hikari.minimum-idle=300000"
        })
public class PostgresGrpcEndToEndTest extends AbstractGrpcEndToEndTest {

    // Must match conductor.grpc-server.port above.
    private static final String GRPC_HOST = "localhost";
    private static final int GRPC_PORT = 8098;

    /** Points every gRPC client at the embedded server before each test. */
    @Before
    public void init() {
        taskClient = new TaskClient(GRPC_HOST, GRPC_PORT);
        workflowClient = new WorkflowClient(GRPC_HOST, GRPC_PORT);
        metadataClient = new MetadataClient(GRPC_HOST, GRPC_PORT);
        eventClient = new EventClient(GRPC_HOST, GRPC_PORT);
    }
}
/*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.jupiter.api.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@code PostgresIndexQueryBuilder}.
 *
 * <p>Each case asserts the exact SQL text produced by {@code getQuery()} and, where relevant,
 * the order in which {@code addParameters(Query)} binds values (verified with a Mockito
 * {@code InOrder}). The pagination parameters — LIMIT (15) and OFFSET (0) — are always bound
 * last.
 */
public class PostgresIndexQueryBuilderTest {

    // Empty query expression: no WHERE clause, only pagination parameters are bound.
    @Test
    void shouldGenerateQueryForEmptyString() throws SQLException {
        String inputQuery = "";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals("SELECT json_data::TEXT FROM table_name LIMIT ? OFFSET ?", generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // A null query expression is treated the same as an empty one.
    @Test
    void shouldGenerateQueryForNull() throws SQLException {
        String inputQuery = null;
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals("SELECT json_data::TEXT FROM table_name LIMIT ? OFFSET ?", generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // Equality on a known field maps to a snake_cased column comparison ("workflow_id = ?").
    @Test
    void shouldGenerateQueryForWorkflowId() throws SQLException {
        String inputQuery = "workflowId=\"abc123\"";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE workflow_id = ? LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter("abc123");
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // IN with several values becomes "= ANY(?)" with a single list-valued parameter.
    @Test
    void shouldGenerateQueryForMultipleInClause() throws SQLException {
        String inputQuery = "status IN (COMPLETED,RUNNING)";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE status = ANY(?) LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("COMPLETED", "RUNNING")));
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // IN with a single value degenerates to plain equality with a scalar parameter.
    @Test
    void shouldGenerateQueryForSingleInClause() throws SQLException {
        String inputQuery = "status IN (COMPLETED)";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE status = ? LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter("COMPLETED");
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // startTime > <epoch millis> becomes a TIMESTAMPTZ comparison; the bound value is the
    // equivalent ISO-8601 instant (1675702498000 ms == 2023-02-06T16:54:58Z).
    @Test
    void shouldGenerateQueryForStartTimeGt() throws SQLException {
        String inputQuery = "startTime>1675702498000";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE start_time > ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // Same as above but for the less-than operator.
    @Test
    void shouldGenerateQueryForStartTimeLt() throws SQLException {
        String inputQuery = "startTime<1675702498000";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE start_time < ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // updateTime comparisons map to the update_time column with the same millis-to-ISO
    // conversion.
    @Test
    void shouldGenerateQueryForUpdateTimeGt() throws SQLException {
        String inputQuery = "updateTime>1675702498000";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE update_time > ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    @Test
    void shouldGenerateQueryForUpdateTimeLt() throws SQLException {
        String inputQuery = "updateTime<1675702498000";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // ANDed conditions are emitted in a canonical (sorted-by-column) order, and the bound
    // parameters follow that same order rather than the order written in the input query.
    @Test
    void shouldGenerateQueryForMultipleConditions() throws SQLException {
        String inputQuery =
                "workflowId=\"abc123\" AND workflowType IN (one,two) AND status IN (COMPLETED,RUNNING) AND startTime>1675701498000 AND startTime<1675702498000";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String generatedQuery = builder.getQuery();
        assertEquals(
                "SELECT json_data::TEXT FROM table_name WHERE start_time < ?::TIMESTAMPTZ AND start_time > ?::TIMESTAMPTZ AND status = ANY(?) AND workflow_id = ? AND workflow_type = ANY(?) LIMIT ? OFFSET ?",
                generatedQuery);
        Query mockQuery = mock(Query.class);
        builder.addParameters(mockQuery);
        InOrder inOrder = Mockito.inOrder(mockQuery);
        inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
        inOrder.verify(mockQuery).addParameter("2023-02-06T16:38:18Z");
        inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("COMPLETED", "RUNNING")));
        inOrder.verify(mockQuery).addParameter("abc123");
        inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("one", "two")));
        inOrder.verify(mockQuery).addParameter(15);
        inOrder.verify(mockQuery).addParameter(0);
        verifyNoMoreInteractions(mockQuery);
    }

    // A "field:DIRECTION" sort option maps to an ORDER BY on the snake_cased column.
    @Test
    void shouldGenerateOrderBy() throws SQLException {
        String inputQuery = "updateTime<1675702498000";
        String[] query = {"updateTime:DESC"};
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, Arrays.asList(query));
        String expectedQuery =
                "SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ ORDER BY update_time DESC LIMIT ? OFFSET ?";
        assertEquals(expectedQuery, builder.getQuery());
    }

    // Multiple sort options keep their relative order in the ORDER BY clause.
    @Test
    void shouldGenerateOrderByMultiple() throws SQLException {
        String inputQuery = "updateTime<1675702498000";
        String[] query = {"updateTime:DESC", "correlationId:ASC"};
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, Arrays.asList(query));
        String expectedQuery =
                "SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ ORDER BY update_time DESC, correlation_id ASC LIMIT ? OFFSET ?";
        assertEquals(expectedQuery, builder.getQuery());
    }

    // Unknown fields in the query expression are silently dropped (SQL-injection guard).
    @Test
    void shouldNotAllowInvalidColumns() throws SQLException {
        String inputQuery = "sqlInjection<1675702498000";
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, new ArrayList<>());
        String expectedQuery = "SELECT json_data::TEXT FROM table_name LIMIT ? OFFSET ?";
        assertEquals(expectedQuery, builder.getQuery());
    }

    // Unknown sort fields are likewise dropped from the ORDER BY (SQL-injection guard).
    @Test
    void shouldNotAllowInvalidSortColumn() throws SQLException {
        String inputQuery = "updateTime<1675702498000";
        String[] query = {"sqlInjection:DESC"};
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", inputQuery, "", 0, 15, Arrays.asList(query));
        String expectedQuery =
                "SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ LIMIT ? OFFSET ?";
        assertEquals(expectedQuery, builder.getQuery());
    }

    // Plain free text searches the whole JSON document via to_tsvector/to_tsquery.
    @Test
    void shouldAllowFullTextSearch() throws SQLException {
        String freeText = "correlation-id";
        String[] query = {"sqlInjection:DESC"};
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", "", freeText, 0, 15, Arrays.asList(query));
        String expectedQuery =
                "SELECT json_data::TEXT FROM table_name WHERE to_tsvector(json_data::text) @@ to_tsquery(?) LIMIT ? OFFSET ?";
        assertEquals(expectedQuery, builder.getQuery());
    }

    // Free text that parses as JSON switches to a JSONB containment match instead.
    @Test
    void shouldAllowJsonSearch() throws SQLException {
        String freeText = "{\"correlationId\":\"not-the-id\"}";
        String[] query = {"sqlInjection:DESC"};
        PostgresIndexQueryBuilder builder =
                new PostgresIndexQueryBuilder(
                        "table_name", "", freeText, 0, 15, Arrays.asList(query));
        String expectedQuery =
                "SELECT json_data::TEXT FROM table_name WHERE json_data @> ?::JSONB LIMIT ? OFFSET ?";
        assertEquals(expectedQuery, builder.getQuery());
    }
}
/*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            PostgresConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest
/**
 * Integration tests for {@code PostgresQueueDAO} running against a real PostgreSQL schema
 * migrated by Flyway. Several tests are timing-sensitive (they rely on message offset delays
 * and {@code Thread.sleep}), so statement order matters throughout.
 */
public class PostgresQueueDAOTest {

    private static final Logger LOGGER = LoggerFactory.getLogger(PostgresQueueDAOTest.class);

    @Autowired private PostgresQueueDAO queueDAO;

    // Raw datasource used to verify queue_message rows directly via SQL.
    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;

    @Autowired private ObjectMapper objectMapper;

    @Rule public TestName name = new TestName();

    @Autowired Flyway flyway;

    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    // Exercises the full queue lifecycle: push, size/detail accounting, pushIfNotExists,
    // pop, ack, containsMessage, remove, and flush.
    @Test
    public void complexQueueTest() {
        String queueName = "TestQueue";
        long offsetTimeInSecond = 0;
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.push(queueName, messageId, offsetTimeInSecond);
        }
        int size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        Map<String, Long> details = queueDAO.queuesDetail();
        assertEquals(1, details.size());
        assertEquals(10L, details.get(queueName).longValue());
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        List<String> popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(10, popped.size());
        // After popping, the messages move from "size" to "uacked" in the verbose detail.
        Map<String, Map<String, Map<String, Long>>> verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        long shardSize = verbose.get(queueName).get("a").get("size");
        long unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(10, unackedSize);
        popped.forEach(messageId -> queueDAO.ack(queueName, messageId));
        verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        shardSize = verbose.get(queueName).get("a").get("size");
        unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(0, unackedSize);
        popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(0, popped.size());
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertTrue(queueDAO.containsMessage(queueName, messageId));
            queueDAO.remove(queueName, messageId);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        queueDAO.flush(queueName);
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);
    }

    /**
     * Test fix for https://github.com/Netflix/conductor/issues/399
     *
     * @since 1.8.2-rc5
     */
    @Test
    public void pollMessagesTest() {
        final List<Message> messages = new ArrayList<>();
        final String queueName = "issue399_testQueue";
        final int totalSize = 10;
        for (int i = 0; i < totalSize; i++) {
            String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}";
            Message m = new Message("testmsg-" + i, payload, "");
            if (i % 2 == 0) {
                // Set priority on message with pair id
                m.setPriority(99 - i);
            }
            messages.add(m);
        }
        // Populate the queue with our test message batch
        queueDAO.push(queueName, ImmutableList.copyOf(messages));
        // Assert that all messages were persisted and no extras are in there
        assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
        List<Message> zeroPoll = queueDAO.pollMessages(queueName, 0, 10_000);
        assertTrue("Zero poll should be empty", zeroPoll.isEmpty());
        final int firstPollSize = 3;
        List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 10_000);
        assertNotNull("First poll was null", firstPoll);
        assertFalse("First poll was empty", firstPoll.isEmpty());
        assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
        final int secondPollSize = 4;
        List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize, 10_000);
        assertNotNull("Second poll was null", secondPoll);
        assertFalse("Second poll was empty", secondPoll.isEmpty());
        assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
        // Assert that the total queue size hasn't changed
        assertEquals(
                "Total queue size should have remained the same",
                totalSize,
                queueDAO.getSize(queueName));
        // Assert that our un-popped messages match our expected size
        final long expectedSize = totalSize - firstPollSize - secondPollSize;
        try (Connection c = dataSource.getConnection()) {
            String UNPOPPED =
                    "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
            try (Query q = new Query(objectMapper, c, UNPOPPED)) {
                long count = q.addParameter(queueName).executeCount();
                assertEquals("Remaining queue size mismatch", expectedSize, count);
            }
        } catch (Exception ex) {
            fail(ex.getMessage());
        }
    }

    /** Test fix for https://github.com/Netflix/conductor/issues/1892 */
    @Test
    public void containsMessageTest() {
        String queueName = "TestQueue";
        long offsetTimeInSecond = 0;
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.push(queueName, messageId, offsetTimeInSecond);
        }
        int size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertTrue(queueDAO.containsMessage(queueName, messageId));
            queueDAO.remove(queueName, messageId);
        }
        // Once removed, the messages must no longer be reported as present.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertFalse(queueDAO.containsMessage(queueName, messageId));
        }
    }

    /**
     * Test fix for https://github.com/Netflix/conductor/issues/448
     *
     * @since 1.8.2-rc5
     */
    @Test
    public void pollDeferredMessagesTest() throws InterruptedException {
        final List<Message> messages = new ArrayList<>();
        final String queueName = "issue448_testQueue";
        final int totalSize = 10;
        for (int i = 0; i < totalSize; i++) {
            int offset = 0;
            if (i < 5) {
                offset = 0;
            } else if (i == 6 || i == 7) {
                // Purposefully skipping id:5 to test out of order deliveries
                // Set id:6 and id:7 for a 2s delay to be picked up in the second polling batch
                offset = 5;
            } else {
                // Set all other queue messages to have enough of a delay that they won't
                // accidentally
                // be picked up.
                offset = 10_000 + i;
            }
            String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}";
            Message m = new Message("testmsg-" + i, payload, "");
            messages.add(m);
            queueDAO.push(queueName, "testmsg-" + i, offset);
        }
        // Assert that all messages were persisted and no extras are in there
        assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
        final int firstPollSize = 4;
        List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 100);
        assertNotNull("First poll was null", firstPoll);
        assertFalse("First poll was empty", firstPoll.isEmpty());
        assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
        List<String> firstPollMessageIds =
                messages.stream()
                        .map(Message::getId)
                        .collect(Collectors.toList())
                        .subList(0, firstPollSize + 1);
        for (int i = 0; i < firstPollSize; i++) {
            String actual = firstPoll.get(i).getId();
            assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual));
        }
        final int secondPollSize = 3;
        // Sleep a bit to get the next batch of messages
        LOGGER.debug("Sleeping for second poll...");
        Thread.sleep(5_000);
        // Poll for many more messages than expected
        List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize + 10, 100);
        assertNotNull("Second poll was null", secondPoll);
        assertFalse("Second poll was empty", secondPoll.isEmpty());
        assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
        List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7");
        for (int i = 0; i < secondPollSize; i++) {
            String actual = secondPoll.get(i).getId();
            assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual));
        }
        // Assert that the total queue size hasn't changed
        assertEquals(
                "Total queue size should have remained the same",
                totalSize,
                queueDAO.getSize(queueName));
        // Assert that our un-popped messages match our expected size
        final long expectedSize = totalSize - firstPollSize - secondPollSize;
        try (Connection c = dataSource.getConnection()) {
            String UNPOPPED =
                    "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
            try (Query q = new Query(objectMapper, c, UNPOPPED)) {
                long count = q.addParameter(queueName).executeCount();
                assertEquals("Remaining queue size mismatch", expectedSize, count);
            }
        } catch (Exception ex) {
            fail(ex.getMessage());
        }
    }

    // processUnacks for a single named queue must not touch other queues.
    @Test
    public void processUnacksTest() {
        processUnacks(
                () -> {
                    // Process unacks
                    queueDAO.processUnacks("process_unacks_test");
                },
                "process_unacks_test");
    }

    // processAllUnacks sweeps every queue; shared scenario below verifies the accounting.
    @Test
    public void processAllUnacksTest() {
        processUnacks(
                () -> {
                    // Process all unacks
                    queueDAO.processAllUnacks();
                },
                "process_unacks_test");
    }

    // Shared scenario: push messages (some with huge offsets so they stay unpopped), pop and
    // ack a subset, run the supplied unack-processing action, then verify popped/unacked
    // counts for both the target queue and a control queue.
    private void processUnacks(Runnable unack, String queueName) {
        // Count of messages in the queue(s)
        final int count = 10;
        // Number of messages to process acks for
        final int unackedCount = 4;
        // A secondary queue to make sure we don't accidentally process other queues
        final String otherQueueName = "process_unacks_test_other_queue";
        // Create testing queue with some messages (but not all) that will be popped/acked.
        for (int i = 0; i < count; i++) {
            int offset = 0;
            if (i >= unackedCount) {
                offset = 1_000_000;
            }
            queueDAO.push(queueName, "unack-" + i, offset);
        }
        // Create a second queue to make sure that unacks don't occur for it
        for (int i = 0; i < count; i++) {
            queueDAO.push(otherQueueName, "other-" + i, 0);
        }
        // Poll for first batch of messages (should be equal to unackedCount)
        List<Message> polled = queueDAO.pollMessages(queueName, 100, 10_000);
        assertNotNull(polled);
        assertFalse(polled.isEmpty());
        assertEquals(unackedCount, polled.size());
        // Poll messages from the other queue so we know they don't get unacked later
        queueDAO.pollMessages(otherQueueName, 100, 10_000);
        // Ack one of the polled messages
        assertTrue(queueDAO.ack(queueName, "unack-1"));
        // Should have one less un-acked popped message in the queue
        Long uacked = queueDAO.queuesDetailVerbose().get(queueName).get("a").get("uacked");
        assertNotNull(uacked);
        assertEquals(uacked.longValue(), unackedCount - 1);
        unack.run();
        // Check uacks for both queues after processing
        Map<String, Map<String, Map<String, Long>>> details = queueDAO.queuesDetailVerbose();
        uacked = details.get(queueName).get("a").get("uacked");
        assertNotNull(uacked);
        assertEquals(
                "The messages that were polled should be unacked still",
                uacked.longValue(),
                unackedCount - 1);
        Long otherUacked = details.get(otherQueueName).get("a").get("uacked");
        assertNotNull(otherUacked);
        assertEquals(
                "Other queue should have all unacked messages", otherUacked.longValue(), count);
        Long size = queueDAO.queuesDetail().get(queueName);
        assertNotNull(size);
        assertEquals(size.longValue(), count - unackedCount);
    }
}
/*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@SpringBootTest
/**
 * Integration tests for {@link PostgresMetadataDAO}, exercising workflow-definition,
 * task-definition and event-handler CRUD operations against a real Postgres schema
 * that Flyway provisions per test.
 */
public class PostgresMetadataDAOTest {

    @Autowired private PostgresMetadataDAO metadataDAO;

    @Rule public TestName name = new TestName();

    @Autowired Flyway flyway;

    /** Reset the schema before every test so tests never observe each other's data. */
    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** Creating the same (name, version) workflow twice must fail with a clear message. */
    @Test
    public void testDuplicateWorkflowDef() {

        WorkflowDef def = new WorkflowDef();
        def.setName("testDuplicate");
        def.setVersion(1);

        metadataDAO.createWorkflowDef(def);

        NonTransientException applicationException =
                assertThrows(NonTransientException.class, () -> metadataDAO.createWorkflowDef(def));
        assertEquals(
                "Workflow with testDuplicate.1 already exists!", applicationException.getMessage());
    }

    /** Removing a workflow definition that was never created must fail, not no-op. */
    @Test
    public void testRemoveNotExistingWorkflowDef() {
        NonTransientException applicationException =
                assertThrows(
                        NonTransientException.class,
                        () -> metadataDAO.removeWorkflowDef("test", 1));
        assertEquals(
                "No such workflow definition: test version: 1", applicationException.getMessage());
    }

    /**
     * End-to-end lifecycle of workflow definitions: create, read back (all, latest,
     * by version), update, and remove — verifying that "latest" tracks the highest
     * remaining version after deletions.
     */
    @Test
    public void testWorkflowDefOperations() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        def.setVersion(1);
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setOwnerApp("ownerApp");
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);

        metadataDAO.createWorkflowDef(def);

        List<WorkflowDef> all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());

        // Round-trip: every field must survive persistence.
        WorkflowDef found = metadataDAO.getWorkflowDef("test", 1).get();
        assertTrue(EqualsBuilder.reflectionEquals(def, found));

        def.setVersion(3);
        metadataDAO.createWorkflowDef(def);

        all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());

        // Latest must be version 3 even though version 2 was skipped.
        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(def.getVersion(), found.getVersion());
        assertEquals(3, found.getVersion());

        all = metadataDAO.getAllLatest();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(3, all.get(0).getVersion());

        all = metadataDAO.getAllVersions(def.getName());
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals("test", all.get(1).getName());
        assertEquals(1, all.get(0).getVersion());
        assertEquals(3, all.get(1).getVersion());

        def.setDescription("updated");
        metadataDAO.updateWorkflowDef(def);
        found = metadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get();
        assertEquals(def.getDescription(), found.getDescription());

        List<String> allnames = metadataDAO.findAll();
        assertNotNull(allnames);
        assertEquals(1, allnames.size());
        assertEquals(def.getName(), allnames.get(0));

        // Inserting a lower version must not change the latest (still 3).
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(3, found.getVersion());

        // After deleting version 3, latest must fall back to version 2.
        metadataDAO.removeWorkflowDef("test", 3);
        Optional<WorkflowDef> deleted = metadataDAO.getWorkflowDef("test", 3);
        assertFalse(deleted.isPresent());

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());

        // Deleting version 1 leaves version 2 as the sole, latest version.
        metadataDAO.removeWorkflowDef("test", 1);
        deleted = metadataDAO.getWorkflowDef("test", 1);
        assertFalse(deleted.isPresent());

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());
    }

    /**
     * Lifecycle of task definitions: create with a fully populated definition,
     * update, list, and remove — leaving only the original definition at the end.
     */
    @Test
    public void testTaskDefOperations() {
        TaskDef def = new TaskDef("taskA");
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setInputKeys(Arrays.asList("a", "b", "c"));
        def.setOutputKeys(Arrays.asList("01", "o2"));
        def.setOwnerApp("ownerApp");
        def.setRetryCount(3);
        def.setRetryDelaySeconds(100);
        def.setRetryLogic(TaskDef.RetryLogic.FIXED);
        def.setTimeoutPolicy(TaskDef.TimeoutPolicy.ALERT_ONLY);
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);

        metadataDAO.createTaskDef(def);

        TaskDef found = metadataDAO.getTaskDef(def.getName());
        assertTrue(EqualsBuilder.reflectionEquals(def, found));

        def.setDescription("updated description");
        metadataDAO.updateTaskDef(def);
        found = metadataDAO.getTaskDef(def.getName());
        assertTrue(EqualsBuilder.reflectionEquals(def, found));
        assertEquals("updated description", found.getDescription());

        // Add nine more definitions: taskA0 .. taskA8.
        for (int i = 0; i < 9; i++) {
            TaskDef tdf = new TaskDef("taskA" + i);
            metadataDAO.createTaskDef(tdf);
        }

        List<TaskDef> all = metadataDAO.getAllTaskDefs();
        assertNotNull(all);
        assertEquals(10, all.size());
        Set<String> allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet());
        assertEquals(10, allnames.size());
        List<String> sorted = allnames.stream().sorted().collect(Collectors.toList());
        assertEquals(def.getName(), sorted.get(0));

        for (int i = 0; i < 9; i++) {
            assertEquals(def.getName() + i, sorted.get(i + 1));
        }

        // Remove the nine extras; only the original "taskA" should remain.
        for (int i = 0; i < 9; i++) {
            metadataDAO.removeTaskDef(def.getName() + i);
        }
        all = metadataDAO.getAllTaskDefs();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals(def.getName(), all.get(0).getName());
    }

    /** Removing a task definition that does not exist must fail, not no-op. */
    @Test
    public void testRemoveNotExistingTaskDef() {
        NonTransientException applicationException =
                assertThrows(
                        NonTransientException.class,
                        // String concatenation already invokes toString() on the UUID.
                        () -> metadataDAO.removeTaskDef("test" + UUID.randomUUID()));
        assertEquals("No such task definition", applicationException.getMessage());
    }

    /**
     * Event handler CRUD: an inactive handler is listed but excluded from
     * active-only event lookups; after activation and re-pointing to a new event,
     * lookups by the new event find it and lookups by the old event do not.
     */
    @Test
    public void testEventHandlers() {
        String event1 = "SQS::arn:account090:sqstest1";
        String event2 = "SQS::arn:account090:sqstest2";

        EventHandler eventHandler = new EventHandler();
        eventHandler.setName(UUID.randomUUID().toString());
        eventHandler.setActive(false);
        EventHandler.Action action = new EventHandler.Action();
        action.setAction(EventHandler.Action.Type.start_workflow);
        action.setStart_workflow(new EventHandler.StartWorkflow());
        action.getStart_workflow().setName("workflow_x");
        eventHandler.getActions().add(action);
        eventHandler.setEvent(event1);

        metadataDAO.addEventHandler(eventHandler);
        List<EventHandler> all = metadataDAO.getAllEventHandlers();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals(eventHandler.getName(), all.get(0).getName());
        assertEquals(eventHandler.getEvent(), all.get(0).getEvent());

        List<EventHandler> byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
        assertNotNull(byEvents);
        assertEquals(0, byEvents.size()); // event is marked as in-active

        eventHandler.setActive(true);
        eventHandler.setEvent(event2);
        metadataDAO.updateEventHandler(eventHandler);

        all = metadataDAO.getAllEventHandlers();
        assertNotNull(all);
        assertEquals(1, all.size());

        byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
        assertNotNull(byEvents);
        assertEquals(0, byEvents.size());

        byEvents = metadataDAO.getEventHandlersForEvent(event2, true);
        assertNotNull(byEvents);
        assertEquals(1, byEvents.size());
    }

    /**
     * getAllWorkflowDefsLatestVersions must return exactly one definition per name,
     * each at its highest created version.
     */
    @Test
    public void testGetAllWorkflowDefsLatestVersions() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test1");
        def.setVersion(1);
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setOwnerApp("ownerApp");
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);
        metadataDAO.createWorkflowDef(def);

        def.setName("test2");
        metadataDAO.createWorkflowDef(def);
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);

        def.setName("test3");
        def.setVersion(1);
        metadataDAO.createWorkflowDef(def);
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);
        def.setVersion(3);
        metadataDAO.createWorkflowDef(def);

        // Placed the values in a map because they might not be stored in order of defName.
        // To test, needed to confirm that the versions are correct for the definitions.
        Map<String, WorkflowDef> allMap =
                metadataDAO.getAllWorkflowDefsLatestVersions().stream()
                        .collect(Collectors.toMap(WorkflowDef::getName, Function.identity()));

        assertNotNull(allMap);
        assertEquals(3, allMap.size());
        assertEquals(1, allMap.get("test1").getVersion());
        assertEquals(2, allMap.get("test2").getVersion());
        assertEquals(3, allMap.get("test3").getVersion());
    }
}
| 8,167 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/test/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAOTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.util.List;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.ExecutionDAOTest;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.google.common.collect.Iterables;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@SpringBootTest
/**
 * Postgres-backed implementation of the shared {@link ExecutionDAOTest} suite,
 * plus a few Postgres-specific checks for correlation-id lookup and workflow removal.
 * Flyway rebuilds the schema before each test.
 */
public class PostgresExecutionDAOTest extends ExecutionDAOTest {

    @Autowired private PostgresExecutionDAO executionDAO;

    @Autowired Flyway flyway;

    /** Wipe and re-migrate the schema so each test starts from a clean database. */
    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** All workflows sharing a correlation id must be returned by the lookup. */
    @Test
    public void testPendingByCorrelationId() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("pending_count_correlation_jtest");

        WorkflowModel seed = createTestWorkflow();
        seed.setWorkflowDefinition(workflowDef);

        generateWorkflows(seed, 10);

        List<WorkflowModel> matches =
                getExecutionDAO()
                        .getWorkflowsByCorrelationId(
                                "pending_count_correlation_jtest", "corr001", true);
        assertNotNull(matches);
        assertEquals(10, matches.size());
    }

    /** Removing a workflow must drop it from the pending count immediately. */
    @Test
    public void testRemoveWorkflow() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("workflow");

        WorkflowModel seed = createTestWorkflow();
        seed.setWorkflowDefinition(workflowDef);

        List<String> workflowIds = generateWorkflows(seed, 1);

        assertEquals(1, getExecutionDAO().getPendingWorkflowCount("workflow"));
        for (String workflowId : workflowIds) {
            getExecutionDAO().removeWorkflow(workflowId);
        }
        assertEquals(0, getExecutionDAO().getPendingWorkflowCount("workflow"));
    }

    /** A TTL-based removal must eventually invoke removeWorkflow for the workflow. */
    @Test
    public void testRemoveWorkflowWithExpiry() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("workflow");

        WorkflowModel seed = createTestWorkflow();
        seed.setWorkflowDefinition(workflowDef);

        List<String> workflowIds = generateWorkflows(seed, 1);

        final ExecutionDAO spyDao = Mockito.spy(getExecutionDAO());

        assertEquals(1, spyDao.getPendingWorkflowCount("workflow"));
        for (String workflowId : workflowIds) {
            spyDao.removeWorkflowWithExpiry(workflowId, 1);
        }
        // The expiry is asynchronous; wait up to 10s for the delegated removal.
        Mockito.verify(spyDao, Mockito.timeout(10 * 1000)).removeWorkflow(Iterables.getLast(workflowIds));
    }

    @Override
    public ExecutionDAO getExecutionDAO() {
        return executionDAO;
    }
}
| 8,168 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/test/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.*;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {
"conductor.app.asyncIndexingEnabled=false",
"conductor.elasticsearch.version=0",
"conductor.indexing.type=postgres"
})
@SpringBootTest
/**
 * Integration tests for {@link PostgresIndexDAO}: workflow/task indexing (insert and
 * upsert), task execution logs, and the query / free-text / JSON search paths with
 * pagination. Each test runs against a Flyway-provisioned Postgres schema and
 * verifies the persisted rows by querying the index tables directly.
 */
public class PostgresIndexDAOTest {

    @Autowired private PostgresIndexDAO indexDAO;

    @Autowired private ObjectMapper objectMapper;

    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;

    @Autowired Flyway flyway;

    /** Reset the schema before each test so index tables start empty. */
    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** Builds a completed workflow summary with fixed timestamps for row comparison. */
    private WorkflowSummary getMockWorkflowSummary(String id) {
        WorkflowSummary wfs = new WorkflowSummary();
        wfs.setWorkflowId(id);
        wfs.setCorrelationId("correlation-id");
        wfs.setWorkflowType("workflow-type");
        wfs.setStartTime("2023-02-07T08:42:45Z");
        wfs.setStatus(Workflow.WorkflowStatus.COMPLETED);
        return wfs;
    }

    /** Builds a completed task summary with fixed timestamps for row comparison. */
    private TaskSummary getMockTaskSummary(String taskId) {
        TaskSummary ts = new TaskSummary();
        ts.setTaskId(taskId);
        ts.setTaskType("task-type");
        ts.setTaskDefName("task-def-name");
        ts.setStatus(Task.Status.COMPLETED);
        ts.setStartTime("2023-02-07T09:41:45Z");
        ts.setUpdateTime("2023-02-07T09:42:45Z");
        ts.setWorkflowType("workflow-type");
        return ts;
    }

    /** Builds an execution log entry for the fixed task id "task-id". */
    private TaskExecLog getMockTaskExecutionLog(long createdTime, String log) {
        TaskExecLog tse = new TaskExecLog();
        tse.setTaskId("task-id");
        tse.setLog(log);
        tse.setCreatedTime(createdTime);
        return tse;
    }

    /** Asserts that exactly one workflow_index row exists and matches the summary. */
    private void compareWorkflowSummary(WorkflowSummary wfs) throws SQLException {
        List<Map<String, Object>> result =
                queryDb(
                        String.format(
                                "SELECT * FROM workflow_index WHERE workflow_id = '%s'",
                                wfs.getWorkflowId()));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals(
                "Workflow id does not match",
                wfs.getWorkflowId(),
                result.get(0).get("workflow_id"));
        assertEquals(
                "Correlation id does not match",
                wfs.getCorrelationId(),
                result.get(0).get("correlation_id"));
        assertEquals(
                "Workflow type does not match",
                wfs.getWorkflowType(),
                result.get(0).get("workflow_type"));
        TemporalAccessor ta = DateTimeFormatter.ISO_INSTANT.parse(wfs.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(ta));
        assertEquals("Start time does not match", startTime, result.get(0).get("start_time"));
        assertEquals(
                "Status does not match", wfs.getStatus().toString(), result.get(0).get("status"));
    }

    /** Runs a raw SQL query against the test datasource and returns the rows as maps. */
    private List<Map<String, Object>> queryDb(String query) throws SQLException {
        try (Connection c = dataSource.getConnection()) {
            try (Query q = new Query(objectMapper, c, query)) {
                return q.executeAndFetchMap();
            }
        }
    }

    /** Asserts that exactly one task_index row exists and matches the summary. */
    private void compareTaskSummary(TaskSummary ts) throws SQLException {
        List<Map<String, Object>> result =
                queryDb(
                        String.format(
                                "SELECT * FROM task_index WHERE task_id = '%s'", ts.getTaskId()));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals("Task id does not match", ts.getTaskId(), result.get(0).get("task_id"));
        assertEquals("Task type does not match", ts.getTaskType(), result.get(0).get("task_type"));
        assertEquals(
                "Task def name does not match",
                ts.getTaskDefName(),
                result.get(0).get("task_def_name"));
        TemporalAccessor startTa = DateTimeFormatter.ISO_INSTANT.parse(ts.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(startTa));
        assertEquals("Start time does not match", startTime, result.get(0).get("start_time"));
        TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(ts.getUpdateTime());
        Timestamp updateTime = Timestamp.from(Instant.from(updateTa));
        assertEquals("Update time does not match", updateTime, result.get(0).get("update_time"));
        assertEquals(
                "Status does not match", ts.getStatus().toString(), result.get(0).get("status"));
        assertEquals(
                "Workflow type does not match",
                // getWorkflowType() already returns a String; no toString() needed.
                ts.getWorkflowType(),
                result.get(0).get("workflow_type"));
    }

    /** Indexing a new workflow inserts one row. */
    @Test
    public void testIndexNewWorkflow() throws SQLException {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");

        indexDAO.indexWorkflow(wfs);

        compareWorkflowSummary(wfs);
    }

    /** Re-indexing an existing workflow updates the row in place (upsert). */
    @Test
    public void testIndexExistingWorkflow() throws SQLException {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");

        indexDAO.indexWorkflow(wfs);

        compareWorkflowSummary(wfs);

        wfs.setStatus(Workflow.WorkflowStatus.FAILED);

        indexDAO.indexWorkflow(wfs);

        compareWorkflowSummary(wfs);
    }

    /** Indexing a new task inserts one row. */
    @Test
    public void testIndexNewTask() throws SQLException {
        TaskSummary ts = getMockTaskSummary("task-id");

        indexDAO.indexTask(ts);

        compareTaskSummary(ts);
    }

    /** Re-indexing an existing task updates the row in place (upsert). */
    @Test
    public void testIndexExistingTask() throws SQLException {
        TaskSummary ts = getMockTaskSummary("task-id");

        indexDAO.indexTask(ts);

        compareTaskSummary(ts);

        ts.setStatus(Task.Status.FAILED);

        indexDAO.indexTask(ts);

        compareTaskSummary(ts);
    }

    /** Execution logs are stored with their log text and created time intact. */
    @Test
    public void testAddTaskExecutionLogs() throws SQLException {
        List<TaskExecLog> logs = new ArrayList<>();
        logs.add(getMockTaskExecutionLog(1675845986000L, "Log 1"));
        logs.add(getMockTaskExecutionLog(1675845987000L, "Log 2"));

        indexDAO.addTaskExecutionLogs(logs);

        List<Map<String, Object>> records =
                queryDb("SELECT * FROM task_execution_logs ORDER BY created_time ASC");
        assertEquals("Wrong number of logs returned", 2, records.size());
        assertEquals(logs.get(0).getLog(), records.get(0).get("log"));
        assertEquals(new Date(1675845986000L), records.get(0).get("created_time"));
        assertEquals(logs.get(1).getLog(), records.get(1).get("log"));
        assertEquals(new Date(1675845987000L), records.get(1).get("created_time"));
    }

    /** An exact-match query on workflowId returns the indexed workflow. */
    @Test
    public void testSearchWorkflowSummary() {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
        indexDAO.indexWorkflow(wfs);

        String query = String.format("workflowId=\"%s\"", wfs.getWorkflowId());
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary(query, "*", 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong workflow returned",
                wfs.getWorkflowId(),
                results.getResults().get(0).getWorkflowId());
    }

    /** Free-text search matches only on the actual workflow id, not near-misses. */
    @Test
    public void testFullTextSearchWorkflowSummary() {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
        indexDAO.indexWorkflow(wfs);

        String freeText = "notworkflow-id";
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("Wrong number of results returned", 0, results.getResults().size());

        freeText = "workflow-id";
        results = indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong workflow returned",
                wfs.getWorkflowId(),
                results.getResults().get(0).getWorkflowId());
    }

    /** JSON free-text search matches all supplied fields, not just one. */
    @Test
    public void testJsonSearchWorkflowSummary() {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
        wfs.setVersion(3);
        indexDAO.indexWorkflow(wfs);

        String freeText = "{\"correlationId\":\"not-the-id\"}";
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("Wrong number of results returned", 0, results.getResults().size());

        freeText = "{\"correlationId\":\"correlation-id\", \"version\":3}";
        results = indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong workflow returned",
                wfs.getWorkflowId(),
                results.getResults().get(0).getWorkflowId());
    }

    /**
     * Pagination over workflow search: pages are ordered and sized as requested.
     * Note: totalHits reflects the DAO's "start + fetched (+1 if more)" semantics,
     * so the first page of 2 out of 5 reports 3.
     */
    @Test
    public void testSearchWorkflowSummaryPagination() {
        for (int i = 0; i < 5; i++) {
            WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-" + i);
            indexDAO.indexWorkflow(wfs);
        }

        List<String> orderBy = List.of("workflowId:DESC");
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary("", "*", 0, 2, orderBy);
        assertEquals("Wrong totalHits returned", 3, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-4",
                results.getResults().get(0).getWorkflowId());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-3",
                results.getResults().get(1).getWorkflowId());
        results = indexDAO.searchWorkflowSummary("", "*", 2, 2, orderBy);
        assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-2",
                results.getResults().get(0).getWorkflowId());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-1",
                results.getResults().get(1).getWorkflowId());
        results = indexDAO.searchWorkflowSummary("", "*", 4, 2, orderBy);
        assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
        assertEquals("Wrong number of results returned", 1, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-0",
                results.getResults().get(0).getWorkflowId());
    }

    /** An exact-match query on taskId returns the indexed task. */
    @Test
    public void testSearchTaskSummary() {
        TaskSummary ts = getMockTaskSummary("task-id");
        indexDAO.indexTask(ts);

        String query = String.format("taskId=\"%s\"", ts.getTaskId());
        SearchResult<TaskSummary> results =
                indexDAO.searchTaskSummary(query, "*", 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong task returned", ts.getTaskId(), results.getResults().get(0).getTaskId());
    }

    /** Pagination over task search; see workflow pagination test for totalHits semantics. */
    @Test
    public void testSearchTaskSummaryPagination() {
        for (int i = 0; i < 5; i++) {
            TaskSummary ts = getMockTaskSummary("task-id-" + i);
            indexDAO.indexTask(ts);
        }

        List<String> orderBy = List.of("taskId:DESC");
        SearchResult<TaskSummary> results = indexDAO.searchTaskSummary("", "*", 0, 2, orderBy);
        assertEquals("Wrong totalHits returned", 3, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "task-id-4",
                results.getResults().get(0).getTaskId());
        assertEquals(
                "Results returned in wrong order",
                "task-id-3",
                results.getResults().get(1).getTaskId());
        results = indexDAO.searchTaskSummary("", "*", 2, 2, orderBy);
        assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "task-id-2",
                results.getResults().get(0).getTaskId());
        assertEquals(
                "Results returned in wrong order",
                "task-id-1",
                results.getResults().get(1).getTaskId());
        results = indexDAO.searchTaskSummary("", "*", 4, 2, orderBy);
        assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
        assertEquals("Wrong number of results returned", 1, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "task-id-0",
                results.getResults().get(0).getTaskId());
    }

    /**
     * Fetched execution logs must carry the stored log text and created time.
     * Fixed: the created-time assertions previously checked the input {@code logs}
     * list (a tautology) instead of the fetched {@code records}.
     */
    @Test
    public void testGetTaskExecutionLogs() throws SQLException {
        List<TaskExecLog> logs = new ArrayList<>();
        logs.add(getMockTaskExecutionLog(1675845986000L, "Log 1"));
        logs.add(getMockTaskExecutionLog(1675845987000L, "Log 2"));

        indexDAO.addTaskExecutionLogs(logs);

        List<TaskExecLog> records = indexDAO.getTaskExecutionLogs(logs.get(0).getTaskId());
        assertEquals("Wrong number of logs returned", 2, records.size());
        assertEquals(logs.get(0).getLog(), records.get(0).getLog());
        assertEquals(1675845986000L, records.get(0).getCreatedTime());
        assertEquals(logs.get(1).getLog(), records.get(1).getLog());
        assertEquals(1675845987000L, records.get(1).getCreatedTime());
    }
}
| 8,169 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/test/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/test/java/com/netflix/conductor/postgres/performance/PerformanceTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.performance;
// SBMTODO: this test needs to be migrated
// reference - https://github.com/Netflix/conductor/pull/1940
// @Ignore("This test cannot be automated")
// public class PerformanceTest {
//
// public static final int MSGS = 1000;
// public static final int PRODUCER_BATCH = 10; // make sure MSGS % PRODUCER_BATCH == 0
// public static final int PRODUCERS = 4;
// public static final int WORKERS = 8;
// public static final int OBSERVERS = 4;
// public static final int OBSERVER_DELAY = 5000;
// public static final int UNACK_RUNNERS = 10;
// public static final int UNACK_DELAY = 500;
// public static final int WORKER_BATCH = 10;
// public static final int WORKER_BATCH_TIMEOUT = 500;
// public static final int COMPLETION_MONITOR_DELAY = 1000;
//
// private DataSource dataSource;
// private QueueDAO Q;
// private ExecutionDAO E;
//
// private final ExecutorService threadPool = Executors.newFixedThreadPool(PRODUCERS + WORKERS +
// OBSERVERS + UNACK_RUNNERS);
// private static final Logger LOGGER = LoggerFactory.getLogger(PerformanceTest.class);
//
// @Before
// public void setUp() {
// TestConfiguration testConfiguration = new TestConfiguration();
// configuration = new TestPostgresConfiguration(testConfiguration,
//
// "jdbc:postgresql://localhost:54320/conductor?charset=utf8&parseTime=true&interpolateParams=true",
// 10, 2);
// PostgresDataSourceProvider dataSource = new PostgresDataSourceProvider(configuration);
// this.dataSource = dataSource.get();
// resetAllData(this.dataSource);
// flywayMigrate(this.dataSource);
//
// final ObjectMapper objectMapper = new JsonMapperProvider().get();
// Q = new PostgresQueueDAO(objectMapper, this.dataSource);
// E = new PostgresExecutionDAO(objectMapper, this.dataSource);
// }
//
// @After
// public void tearDown() throws Exception {
// resetAllData(dataSource);
// }
//
// public static final String QUEUE = "task_queue";
//
// @Test
// public void testQueueDaoPerformance() throws InterruptedException {
// AtomicBoolean stop = new AtomicBoolean(false);
// Stopwatch start = Stopwatch.createStarted();
// AtomicInteger poppedCoutner = new AtomicInteger(0);
// HashMultiset<String> allPopped = HashMultiset.create();
//
// // Consumers - workers
// for (int i = 0; i < WORKERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// List<Message> pop = Q.pollMessages(QUEUE, WORKER_BATCH, WORKER_BATCH_TIMEOUT);
// LOGGER.info("Popped {} messages", pop.size());
// poppedCoutner.accumulateAndGet(pop.size(), Integer::sum);
//
// if (pop.size() == 0) {
// try {
// Thread.sleep(200);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// } else {
// LOGGER.info("Popped {}",
// pop.stream().map(Message::getId).collect(Collectors.toList()));
// }
//
// pop.forEach(popped -> {
// synchronized (allPopped) {
// allPopped.add(popped.getId());
// }
// boolean exists = Q.containsMessage(QUEUE, popped.getId());
// boolean ack = Q.ack(QUEUE, popped.getId());
//
// if (ack && exists) {
// // OK
// } else {
// LOGGER.error("Exists & Ack did not succeed for msg: {}", popped);
// }
// });
// }
// });
// }
//
// // Producers
// List<Future<?>> producers = Lists.newArrayList();
// for (int i = 0; i < PRODUCERS; i++) {
// Future<?> producer = threadPool.submit(() -> {
// try {
// // N messages
// for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) {
// List<Message> randomMessages = getRandomMessages(PRODUCER_BATCH);
// Q.push(QUEUE, randomMessages);
// LOGGER.info("Pushed {} messages", PRODUCER_BATCH);
// LOGGER.info("Pushed {}",
// randomMessages.stream().map(Message::getId).collect(Collectors.toList()));
// }
// LOGGER.info("Pushed ALL");
// } catch (Exception e) {
// LOGGER.error("Something went wrong with producer", e);
// throw new RuntimeException(e);
// }
// });
//
// producers.add(producer);
// }
//
// // Observers
// for (int i = 0; i < OBSERVERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// try {
// int size = Q.getSize(QUEUE);
// Q.queuesDetail();
// LOGGER.info("Size {} messages", size);
// } catch (Exception e) {
// LOGGER.info("Queue size failed, nevermind");
// }
//
// try {
// Thread.sleep(OBSERVER_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
// });
// }
//
// // Consumers - unack processor
// for (int i = 0; i < UNACK_RUNNERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// try {
// Q.processUnacks(QUEUE);
// } catch (Exception e) {
// LOGGER.info("Unack failed, nevermind", e);
// continue;
// }
// LOGGER.info("Unacked");
// try {
// Thread.sleep(UNACK_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
// });
// }
//
// long elapsed;
// while (true) {
// try {
// Thread.sleep(COMPLETION_MONITOR_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
//
// int size = Q.getSize(QUEUE);
// LOGGER.info("MONITOR SIZE : {}", size);
//
// if (size == 0 && producers.stream().map(Future::isDone).reduce(true, (b1, b2) -> b1 &&
// b2)) {
// elapsed = start.elapsed(TimeUnit.MILLISECONDS);
// stop.set(true);
// break;
// }
// }
//
// threadPool.awaitTermination(10, TimeUnit.SECONDS);
// threadPool.shutdown();
// LOGGER.info("Finished in {} ms", elapsed);
// LOGGER.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000);
// LOGGER.info("Threads finished");
// if (poppedCoutner.get() != MSGS * PRODUCERS) {
// synchronized (allPopped) {
// List<String> duplicates = allPopped.entrySet().stream()
// .filter(stringEntry -> stringEntry.getCount() > 1)
// .map(stringEntry -> stringEntry.getElement() + ": " + stringEntry.getCount())
// .collect(Collectors.toList());
//
// LOGGER.error("Found duplicate pops: " + duplicates);
// }
// throw new RuntimeException("Popped " + poppedCoutner.get() + " != produced: " + MSGS *
// PRODUCERS);
// }
// }
//
// @Test
// public void testExecDaoPerformance() throws InterruptedException {
// AtomicBoolean stop = new AtomicBoolean(false);
// Stopwatch start = Stopwatch.createStarted();
// BlockingDeque<Task> msgQueue = new LinkedBlockingDeque<>(1000);
// HashMultiset<String> allPopped = HashMultiset.create();
//
// // Consumers - workers
// for (int i = 0; i < WORKERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// List<Task> popped = new ArrayList<>();
// while (true) {
// try {
// Task poll;
// poll = msgQueue.poll(10, TimeUnit.MILLISECONDS);
//
// if (poll == null) {
// // poll timed out
// continue;
// }
// synchronized (allPopped) {
// allPopped.add(poll.getTaskId());
// }
// popped.add(poll);
// if (stop.get() || popped.size() == WORKER_BATCH) {
// break;
// }
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
//
// LOGGER.info("Popped {} messages", popped.size());
// LOGGER.info("Popped {}",
// popped.stream().map(Task::getTaskId).collect(Collectors.toList()));
//
// // Polling
// popped.stream()
// .peek(task -> {
// task.setWorkerId("someWorker");
// task.setPollCount(task.getPollCount() + 1);
// task.setStartTime(System.currentTimeMillis());
// })
// .forEach(task -> {
// try {
// // should always be false
// boolean concurrentLimit = E.exceedsInProgressLimit(task);
// task.setStartTime(System.currentTimeMillis());
// E.updateTask(task);
// LOGGER.info("Polled {}", task.getTaskId());
// } catch (Exception e) {
// LOGGER.error("Something went wrong with worker during poll", e);
// throw new RuntimeException(e);
// }
// });
//
// popped.forEach(task -> {
// try {
//
// String wfId = task.getWorkflowInstanceId();
// Workflow workflow = E.getWorkflow(wfId, true);
// E.getTask(task.getTaskId());
//
// task.setStatus(Task.Status.COMPLETED);
// task.setWorkerId("someWorker");
// task.setOutputData(Collections.singletonMap("a", "b"));
// E.updateTask(task);
// E.updateWorkflow(workflow);
// LOGGER.info("Updated {}", task.getTaskId());
// } catch (Exception e) {
// LOGGER.error("Something went wrong with worker during update", e);
// throw new RuntimeException(e);
// }
// });
//
// }
// });
// }
//
// Multiset<String> pushedTasks = HashMultiset.create();
//
// // Producers
// List<Future<?>> producers = Lists.newArrayList();
// for (int i = 0; i < PRODUCERS; i++) {
// Future<?> producer = threadPool.submit(() -> {
// // N messages
// for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) {
// List<Task> randomTasks = getRandomTasks(PRODUCER_BATCH);
//
// Workflow wf = getWorkflow(randomTasks);
// E.createWorkflow(wf);
//
// E.createTasks(randomTasks);
// randomTasks.forEach(t -> {
// try {
// boolean offer = false;
// while (!offer) {
// offer = msgQueue.offer(t, 10, TimeUnit.MILLISECONDS);
// }
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// });
// LOGGER.info("Pushed {} messages", PRODUCER_BATCH);
// List<String> collect =
// randomTasks.stream().map(Task::getTaskId).collect(Collectors.toList());
// synchronized (pushedTasks) {
// pushedTasks.addAll(collect);
// }
// LOGGER.info("Pushed {}", collect);
// }
// LOGGER.info("Pushed ALL");
// });
//
// producers.add(producer);
// }
//
// // Observers
// for (int i = 0; i < OBSERVERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// try {
// List<Task> size = E.getPendingTasksForTaskType("taskType");
// LOGGER.info("Size {} messages", size.size());
// LOGGER.info("Size q {} messages", msgQueue.size());
// synchronized (allPopped) {
// LOGGER.info("All pp {} messages", allPopped.size());
// }
// LOGGER.info("Workflows by correlation id size: {}",
// E.getWorkflowsByCorrelationId("abcd", "1", true).size());
// LOGGER.info("Workflows by correlation id size: {}",
// E.getWorkflowsByCorrelationId("abcd", "2", true).size());
// LOGGER.info("Workflows running ids: {}", E.getRunningWorkflowIds("abcd",
// 1));
// LOGGER.info("Workflows pending count: {}",
// E.getPendingWorkflowCount("abcd"));
// } catch (Exception e) {
// LOGGER.warn("Observer failed ", e);
// }
// try {
// Thread.sleep(OBSERVER_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
// });
// }
//
// long elapsed;
// while (true) {
// try {
// Thread.sleep(COMPLETION_MONITOR_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
//
// int size;
// try {
// size = E.getPendingTasksForTaskType("taskType").size();
// } catch (Exception e) {
// LOGGER.warn("Monitor failed", e);
// continue;
// }
// LOGGER.info("MONITOR SIZE : {}", size);
//
// if (size == 0 && producers.stream().map(Future::isDone).reduce(true, (b1, b2) -> b1 &&
// b2)) {
// elapsed = start.elapsed(TimeUnit.MILLISECONDS);
// stop.set(true);
// break;
// }
// }
//
// threadPool.awaitTermination(10, TimeUnit.SECONDS);
// threadPool.shutdown();
// LOGGER.info("Finished in {} ms", elapsed);
// LOGGER.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000);
// LOGGER.info("Threads finished");
//
// List<String> duplicates = pushedTasks.entrySet().stream()
// .filter(stringEntry -> stringEntry.getCount() > 1)
// .map(stringEntry -> stringEntry.getElement() + ": " + stringEntry.getCount())
// .collect(Collectors.toList());
//
// LOGGER.error("Found duplicate pushes: " + duplicates);
// }
//
// private Workflow getWorkflow(List<Task> randomTasks) {
// Workflow wf = new Workflow();
// wf.setWorkflowId(randomTasks.get(0).getWorkflowInstanceId());
// wf.setCorrelationId(wf.getWorkflowId());
// wf.setTasks(randomTasks);
// WorkflowDef workflowDefinition = new WorkflowDef();
// workflowDefinition.setName("abcd");
// wf.setWorkflowDefinition(workflowDefinition);
// wf.setStartTime(System.currentTimeMillis());
// return wf;
// }
//
// private List<Task> getRandomTasks(int i) {
// String timestamp = Long.toString(System.nanoTime());
// return IntStream.range(0, i).mapToObj(j -> {
// String id = Thread.currentThread().getId() + "_" + timestamp + "_" + j;
// Task task = new Task();
// task.setTaskId(id);
// task.setCorrelationId(Integer.toString(j));
// task.setTaskType("taskType");
// task.setReferenceTaskName("refName" + j);
// task.setWorkflowType("task_wf");
// task.setWorkflowInstanceId(Thread.currentThread().getId() + "_" + timestamp);
// return task;
// }).collect(Collectors.toList());
// }
//
// private List<Message> getRandomMessages(int i) {
// String timestamp = Long.toString(System.nanoTime());
// return IntStream.range(0, i).mapToObj(j -> {
// String id = Thread.currentThread().getId() + "_" + timestamp + "_" + j;
// return new Message(id, "{ \"a\": \"b\", \"timestamp\": \" " + timestamp + " \"}",
// "receipt");
// }).collect(Collectors.toList());
// }
//
// private void flywayMigrate(DataSource dataSource) {
// FluentConfiguration flywayConfiguration = Flyway.configure()
// .table(configuration.getFlywayTable())
// .locations(Paths.get("db","migration_postgres").toString())
// .dataSource(dataSource)
// .placeholderReplacement(false);
//
// Flyway flyway = flywayConfiguration.load();
// try {
// flyway.migrate();
// } catch (FlywayException e) {
// if (e.getMessage().contains("non-empty")) {
// return;
// }
// throw e;
// }
// }
//
// public void resetAllData(DataSource dataSource) {
// // TODO
// }
// }
| 8,170 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecuteFunction.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions with no expected result.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface ExecuteFunction {

    /**
     * Executes the given query for its side effects; any produced result is ignored.
     *
     * @param query the prepared {@link Query} to execute
     * @throws SQLException if the underlying JDBC operation fails
     */
    void apply(Query query) throws SQLException;
}
| 8,171 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/TransactionalFunction.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.Connection;
import java.sql.SQLException;
/**
 * Functional interface for operations within a transactional context.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface TransactionalFunction<R> {

    /**
     * Performs work on the supplied connection. The transaction boundary (commit/rollback)
     * is presumably managed by the caller that opened it — confirm against the invoking DAO.
     *
     * @param tx the connection participating in the enclosing transaction
     * @return the result of the transactional work
     * @throws SQLException if a database access error occurs
     */
    R apply(Connection tx) throws SQLException;
}
| 8,172 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueryFunction.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions that return results.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface QueryFunction<R> {

    /**
     * Executes the given query and produces a result.
     *
     * @param query the prepared {@link Query} to execute
     * @return the result of the execution
     * @throws SQLException if the underlying JDBC operation fails
     */
    R apply(Query query) throws SQLException;
}
| 8,173 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ResultSetHandler.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query#executeAndFetch(ResultSetHandler)}.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface ResultSetHandler<R> {

    /**
     * Maps an open {@link ResultSet} to a result value. The caller is responsible for
     * closing the result set.
     *
     * @param resultSet the result set to read from
     * @return the mapped result
     * @throws SQLException if a database access error occurs while reading
     */
    R apply(ResultSet resultSet) throws SQLException;
}
| 8,174 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilder.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
/**
 * Builds parameterized {@code SELECT json_data} queries against a Postgres index table from a
 * Conductor search expression (e.g. {@code "workflowType IN (a,b) AND startTime>123"}), an
 * optional free-text term, paging bounds, and sort specifications.
 *
 * <p>Only whitelisted columns/expressions ({@link #VALID_FIELDS}) may appear in WHERE or
 * ORDER BY clauses. Clauses that fail to parse or reference other fields are discarded up
 * front, which also keeps the generated '?' placeholders in lock-step with the values bound
 * by {@link #addParameters(Query)}. (Previously invalid clauses were excluded from the SQL
 * text but still bound as parameters, shifting every subsequent placeholder index; an
 * unparseable clause could also NPE while sorting conditions.)
 */
public class PostgresIndexQueryBuilder {

    private final String table;
    private final String freeText;
    private final int start;
    private final int count;
    private final List<String> sort;
    private final List<Condition> conditions = new ArrayList<>();

    /** Whitelist of columns/expressions allowed in WHERE and ORDER BY clauses. */
    private static final String[] VALID_FIELDS = {
        "workflow_id",
        "correlation_id",
        "workflow_type",
        "start_time",
        "status",
        "task_id",
        "task_type",
        "task_def_name",
        "update_time",
        "json_data",
        "to_tsvector(json_data::text)"
    };

    private static final String[] VALID_SORT_ORDER = {"ASC", "DESC"};

    /** A single parsed predicate, e.g. {@code workflow_type = ANY(?)}. */
    private static class Condition {

        // Grammar: <camelCaseField> <'=' | '>' | '<' | 'IN'> <value | (v1,v2,...)>.
        // Compiled once instead of per-instance, since the pattern is constant.
        private static final Pattern CONDITION_PATTERN =
                Pattern.compile("([a-zA-Z]+)\\s?(=|>|<|IN)\\s?(.*)");

        private String attribute;
        private String operator;
        private List<String> values;

        public Condition() {}

        public Condition(String query) {
            Matcher conditionMatcher = CONDITION_PATTERN.matcher(query);
            if (conditionMatcher.find()) {
                String[] valueArr = conditionMatcher.group(3).replaceAll("[\"()]", "").split(",");
                ArrayList<String> values = new ArrayList<>(Arrays.asList(valueArr));
                this.attribute = camelToSnake(conditionMatcher.group(1));
                this.values = values;
                this.operator = getOperator(conditionMatcher.group(2));
                if (this.attribute.endsWith("_time")) {
                    // *_time attributes arrive as epoch millis; convert to an ISO
                    // timestamp so the fragment can cast it to TIMESTAMPTZ.
                    // NOTE(review): only the first value is converted; a multi-valued
                    // IN over a *_time field would pass the rest through unconverted.
                    values.set(0, millisToUtc(values.get(0)));
                }
            }
            // If the regex did not match, attribute/operator stay null and isValid()
            // reports false, so the condition is discarded by the caller.
        }

        /** Returns the SQL fragment (with '?' placeholders) for this predicate. */
        public String getQueryFragment() {
            if (operator.equals("IN")) {
                return attribute + " = ANY(?)";
            } else if (operator.equals("@@")) {
                return attribute + " @@ to_tsquery(?)";
            } else if (operator.equals("@>")) {
                return attribute + " @> ?::JSONB";
            } else {
                if (attribute.endsWith("_time")) {
                    return attribute + " " + operator + " ?::TIMESTAMPTZ";
                } else {
                    return attribute + " " + operator + " ?";
                }
            }
        }

        private String getOperator(String op) {
            // A single-element IN collapses to plain equality.
            if (op.equals("IN") && values.size() == 1) {
                return "=";
            }
            return op;
        }

        /** Binds this condition's value(s) onto {@code q}, mirroring getQueryFragment(). */
        public void addParameter(Query q) throws SQLException {
            if (values.size() > 1) {
                q.addParameter(values);
            } else {
                q.addParameter(values.get(0));
            }
        }

        private String millisToUtc(String millis) {
            long epochMillis = Long.parseLong(millis);
            ZonedDateTime utc =
                    ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMillis), ZoneOffset.UTC);
            return DateTimeFormatter.ISO_DATE_TIME.format(utc);
        }

        private boolean isValid() {
            // attribute/operator remain null when the input failed to parse.
            return attribute != null
                    && operator != null
                    && Arrays.asList(VALID_FIELDS).contains(attribute);
        }

        public void setAttribute(String attribute) {
            this.attribute = attribute;
        }

        public void setOperator(String operator) {
            this.operator = operator;
        }

        public void setValues(List<String> values) {
            this.values = values;
        }
    }

    public PostgresIndexQueryBuilder(
            String table, String query, String freeText, int start, int count, List<String> sort) {
        this.table = table;
        this.freeText = freeText;
        this.start = start;
        this.count = count;
        this.sort = sort;
        this.parseQuery(query);
        this.parseFreeText(freeText);
    }

    /** Returns the full parameterized SELECT statement, including LIMIT/OFFSET placeholders. */
    public String getQuery() {
        // conditions only ever holds valid entries (see parseQuery/parseFreeText),
        // but filter defensively so the fragments always pair up with the values
        // bound in addParameters().
        List<Condition> validConditions =
                conditions.stream().filter(Condition::isValid).collect(Collectors.toList());
        String queryString = "";
        if (!validConditions.isEmpty()) {
            queryString =
                    " WHERE "
                            + validConditions.stream()
                                    .map(Condition::getQueryFragment)
                                    .collect(Collectors.joining(" AND "));
        }
        return "SELECT json_data::TEXT FROM "
                + table
                + queryString
                + getSort()
                + " LIMIT ? OFFSET ?";
    }

    /** Binds every condition's value(s) plus the LIMIT/OFFSET bounds onto {@code q}. */
    public void addParameters(Query q) throws SQLException {
        for (Condition condition : conditions) {
            // Bind only conditions that contributed a fragment in getQuery();
            // binding others would shift every subsequent placeholder index.
            if (condition.isValid()) {
                condition.addParameter(q);
            }
        }
        q.addParameter(count);
        q.addParameter(start);
    }

    private void parseQuery(String query) {
        if (query == null || query.isEmpty()) {
            return;
        }
        for (String clause : query.split(" AND ")) {
            Condition condition = new Condition(clause);
            // Drop clauses that failed to parse or reference non-whitelisted fields.
            if (condition.isValid()) {
                conditions.add(condition);
            }
        }
        // Deterministic ordering keeps the generated SQL stable for identical inputs.
        conditions.sort(Comparator.comparing(Condition::getQueryFragment));
    }

    private void parseFreeText(String freeText) {
        if (freeText == null || freeText.isEmpty() || freeText.equals("*")) {
            return;
        }
        Condition cond = new Condition();
        if (freeText.startsWith("{") && freeText.endsWith("}")) {
            // JSON-shaped input becomes a JSONB containment (@>) predicate.
            cond.setAttribute("json_data");
            cond.setOperator("@>");
        } else {
            // Anything else becomes a full-text search over the JSON document.
            cond.setAttribute("to_tsvector(json_data::text)");
            cond.setOperator("@@");
        }
        cond.setValues(Collections.singletonList(freeText));
        conditions.add(cond);
    }

    /** Builds the ORDER BY clause from whitelisted {@code "field:ORDER"} specs, or "". */
    private String getSort() {
        ArrayList<String> sortConds = new ArrayList<>();
        for (String s : sort) {
            String[] splitCond = s.split(":");
            if (splitCond.length == 2) {
                String attribute = camelToSnake(splitCond[0]);
                String order = splitCond[1].toUpperCase();
                if (Arrays.asList(VALID_FIELDS).contains(attribute)
                        && Arrays.asList(VALID_SORT_ORDER).contains(order)) {
                    sortConds.add(attribute + " " + order);
                }
            }
        }
        if (!sortConds.isEmpty()) {
            return " ORDER BY " + String.join(", ", sortConds);
        }
        return "";
    }

    /** Converts camelCase identifiers to the snake_case column names used in the table. */
    private static String camelToSnake(String camel) {
        return camel.replaceAll("\\B([A-Z])", "_$1").toLowerCase();
    }
}
| 8,175 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/LazyToString.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.util.function.Supplier;
/** Defers computation of a string value until {@link #toString()} is invoked. */
public class LazyToString {

    private final Supplier<String> valueSupplier;

    /**
     * @param supplier invoked lazily on each {@link #toString()} call to produce the value
     */
    public LazyToString(Supplier<String> supplier) {
        this.valueSupplier = supplier;
    }

    /** Evaluates the supplier and returns its result. */
    @Override
    public String toString() {
        return valueSupplier.get();
    }
}
| 8,176 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecutorsUtil.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
public class ExecutorsUtil {
private ExecutorsUtil() {}
public static ThreadFactory newNamedThreadFactory(final String threadNamePrefix) {
return new ThreadFactory() {
private final AtomicInteger counter = new AtomicInteger();
@SuppressWarnings("NullableProblems")
@Override
public Thread newThread(Runnable r) {
Thread thread = Executors.defaultThreadFactory().newThread(r);
thread.setName(threadNamePrefix + counter.getAndIncrement());
return thread;
}
};
}
}
| 8,177 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/Query.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.io.IOException;
import java.sql.*;
import java.sql.Date;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.exception.NonTransientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities.
*
* <p>This class simulates a parameter building pattern and all {@literal addParameter(*)} methods
* must be called in the proper order of their expected binding sequence.
*
* @author mustafa
*/
public class Query implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(getClass());
/** The {@link ObjectMapper} instance to use for serializing/deserializing JSON. */
protected final ObjectMapper objectMapper;
/** The initial supplied query String that was used to prepare {@link #statement}. */
private final String rawQuery;
/**
* Parameter index for the {@code ResultSet#set*(*)} methods, gets incremented every time a
* parameter is added to the {@code PreparedStatement} {@link #statement}.
*/
private final AtomicInteger index = new AtomicInteger(1);
/** The {@link PreparedStatement} that will be managed and executed by this class. */
private final PreparedStatement statement;
private final Connection connection;
/**
 * Prepares {@code query} as a {@link PreparedStatement} on the given connection.
 *
 * @param objectMapper mapper used for JSON serialization of parameters/results
 * @param connection an open JDBC connection, also used to create SQL ARRAY values
 * @param query the raw SQL text, possibly containing '?' placeholders
 * @throws NonTransientException if the statement cannot be prepared
 */
public Query(ObjectMapper objectMapper, Connection connection, String query) {
    this.rawQuery = query;
    this.objectMapper = objectMapper;
    this.connection = connection;
    try {
        this.statement = connection.prepareStatement(query);
    } catch (SQLException ex) {
        // Wrap as the unchecked module exception so callers need not handle
        // checked SQLExceptions at construction time.
        throw new NonTransientException(
                "Cannot prepare statement for query: " + ex.getMessage(), ex);
    }
}
/**
 * Generate a String with {@literal count} number of '?' placeholders for {@link
 * PreparedStatement} queries.
 *
 * @param count The number of '?' chars to generate; {@code 0} yields an empty string.
 * @return a comma delimited string of {@literal count} '?' binding placeholders.
 */
public static String generateInBindings(int count) {
    // Collections.nCopies produces an O(1) immutable view, and String.join
    // handles the empty case — replaces the hand-rolled array fill loop.
    return String.join(", ", Collections.nCopies(count, "?"));
}
/** Binds {@code value} as a string at the next placeholder index. */
public Query addParameter(final String value) {
    return addParameterInternal((ps, idx) -> ps.setString(idx, value));
}

/**
 * Binds {@code value} as a SQL {@code VARCHAR[]} array at the next placeholder index,
 * e.g. for {@code col = ANY(?)} predicates.
 */
public Query addParameter(final List<String> value) throws SQLException {
    String[] valueStringArray = value.toArray(new String[0]);
    Array valueArray = this.connection.createArrayOf("VARCHAR", valueStringArray);
    return addParameterInternal((ps, idx) -> ps.setArray(idx, valueArray));
}

/** Binds {@code value} as an int at the next placeholder index. */
public Query addParameter(final int value) {
    return addParameterInternal((ps, idx) -> ps.setInt(idx, value));
}

/** Binds {@code value} as a boolean at the next placeholder index. */
public Query addParameter(final boolean value) {
    return addParameterInternal(((ps, idx) -> ps.setBoolean(idx, value)));
}

/** Binds {@code value} as a long at the next placeholder index. */
public Query addParameter(final long value) {
    return addParameterInternal((ps, idx) -> ps.setLong(idx, value));
}

/** Binds {@code value} as a double at the next placeholder index. */
public Query addParameter(final double value) {
    return addParameterInternal((ps, idx) -> ps.setDouble(idx, value));
}

/** Binds {@code date} as a SQL DATE at the next placeholder index. */
public Query addParameter(Date date) {
    return addParameterInternal((ps, idx) -> ps.setDate(idx, date));
}

/** Binds {@code timestamp} as a SQL TIMESTAMP at the next placeholder index. */
public Query addParameter(Timestamp timestamp) {
    return addParameterInternal((ps, idx) -> ps.setTimestamp(idx, timestamp));
}
/**
 * Serializes {@literal value} to a JSON string for persistence.
 *
 * @param value The value to serialize.
 * @return {@literal this}
 */
public Query addJsonParameter(Object value) {
    // toJson (defined elsewhere in this class) performs the serialization,
    // presumably via the injected objectMapper.
    return addParameter(toJson(value));
}

/**
 * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Date}.
 *
 * @param date The {@literal java.util.Date} to bind.
 * @return {@literal this}
 */
public Query addDateParameter(java.util.Date date) {
    return addParameter(new Date(date.getTime()));
}

/**
 * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Timestamp}.
 *
 * @param date The {@literal java.util.Date} to bind.
 * @return {@literal this}
 */
public Query addTimestampParameter(java.util.Date date) {
    return addParameter(new Timestamp(date.getTime()));
}

/**
 * Bind the given epoch millis to the PreparedStatement as a {@link Timestamp}.
 *
 * @param epochMillis The epoch ms to create a new {@literal Timestamp} from.
 * @return {@literal this}
 */
public Query addTimestampParameter(long epochMillis) {
    return addParameter(new Timestamp(epochMillis));
}
/**
 * Add a collection of primitive values at once, in the order of the collection.
 *
 * @param values The values to bind to the prepared statement.
 * @return {@literal this}
 * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered in the
 *     collection.
 * @see #addParameters(Object...)
 */
public Query addParameters(Collection<?> values) {
    // Wildcard instead of the previous raw Collection type: same erasure, so
    // existing callers and overrides are unaffected, but unchecked raw usage
    // no longer compiles silently.
    return addParameters(values.toArray());
}
/**
 * Add many primitive values at once.
 *
 * <p>Note: {@code Date} resolves to {@code java.sql.Date} here (explicit import), so a plain
 * {@code java.util.Date} argument is rejected by the final branch.
 *
 * @param values The values to bind to the prepared statement.
 * @return {@literal this}
 * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered.
 */
public Query addParameters(Object... values) {
    // Order-sensitive instanceof dispatch: each supported wrapper type routes
    // to the matching addParameter overload.
    for (Object v : values) {
        if (v instanceof String) {
            addParameter((String) v);
        } else if (v instanceof Integer) {
            addParameter((Integer) v);
        } else if (v instanceof Long) {
            addParameter((Long) v);
        } else if (v instanceof Double) {
            addParameter((Double) v);
        } else if (v instanceof Boolean) {
            addParameter((Boolean) v);
        } else if (v instanceof Date) {
            addParameter((Date) v);
        } else if (v instanceof Timestamp) {
            addParameter((Timestamp) v);
        } else {
            throw new IllegalArgumentException(
                    "Type "
                            + v.getClass().getName()
                            + " is not supported by automatic property assignment");
        }
    }
    return this;
}
/**
 * Utility method for evaluating the prepared statement as a query to check the existence of a
 * record using a numeric count or boolean return value.
 *
 * <p>The {@link #rawQuery} provided must result in a {@link Number} or {@link Boolean} result.
 *
 * @return {@literal true} If a count query returned more than 0 or an exists query returns
 *     {@literal true}.
 * @throws NonTransientException If an unexpected return type cannot be evaluated to a {@code
 *     Boolean} result.
 */
public boolean exists() {
    Object val = executeScalar();
    if (null == val) {
        // No row at all: treat as "does not exist".
        return false;
    }
    if (val instanceof Number) {
        // COUNT(*)-style queries: any positive count means existence.
        return convertLong(val) > 0;
    }
    if (val instanceof Boolean) {
        return (Boolean) val;
    }
    if (val instanceof String) {
        // convertLong/convertBoolean are coercion helpers defined elsewhere in
        // this class (not visible here).
        return convertBoolean(val);
    }
    throw new NonTransientException(
            "Expected a Numeric or Boolean scalar return value from the query, received "
                    + val.getClass().getName());
}
/**
 * Executes the statement as a delete and reports whether any rows were removed.
 *
 * @return {@literal true} if the statement affected 1 or more rows.
 * @see #executeUpdate()
 */
public boolean executeDelete() {
    final int affectedRows = executeUpdate();
    if (affectedRows > 1) {
        logger.trace("Removed {} row(s) for query {}", affectedRows, rawQuery);
    }
    return affectedRows > 0;
}
/**
 * Convenience method for executing statements that return a single numeric value, typically
 * {@literal SELECT COUNT...} style queries.
 *
 * @return The result of the query as a {@literal long}; {@code 0} when the query returns no
 *     rows (see {@link #executeScalar(Class)}).
 */
public long executeCount() {
    return executeScalar(Long.class);
}
/**
 * Executes the prepared statement as an update (INSERT/UPDATE/DELETE).
 *
 * @return The result of {@link PreparedStatement#executeUpdate()}
 * @throws NonTransientException if the underlying JDBC call fails
 */
public int executeUpdate() {
    try {
        // Capture a start time only when trace logging is enabled, so the
        // common path pays no timing overhead.
        Long start = null;
        if (logger.isTraceEnabled()) {
            start = System.currentTimeMillis();
        }
        final int val = this.statement.executeUpdate();
        if (null != start && logger.isTraceEnabled()) {
            long end = System.currentTimeMillis();
            logger.trace("[{}ms] {}: {}", (end - start), val, rawQuery);
        }
        return val;
    } catch (SQLException ex) {
        throw new NonTransientException(ex.getMessage(), ex);
    }
}
/**
 * Execute a query from the PreparedStatement and return the ResultSet.
 *
 * <p><em>NOTE:</em> The returned ResultSet must be closed/managed by the calling methods.
 *
 * @return {@link PreparedStatement#executeQuery()}
 * @throws NonTransientException If any SQL errors occur.
 */
public ResultSet executeQuery() {
    // Timing is captured only when trace logging is enabled; the finally block
    // logs the elapsed time whether the query succeeded or threw.
    Long start = null;
    if (logger.isTraceEnabled()) {
        start = System.currentTimeMillis();
    }
    try {
        return this.statement.executeQuery();
    } catch (SQLException ex) {
        throw new NonTransientException(ex.getMessage(), ex);
    } finally {
        if (null != start && logger.isTraceEnabled()) {
            long end = System.currentTimeMillis();
            logger.trace("[{}ms] {}", (end - start), rawQuery);
        }
    }
}
/**
 * Executes the prepared statement and returns the first column of the first row.
 *
 * @return The single result of the query as an Object, or {@code null} when the query
 *     produced no rows.
 */
public Object executeScalar() {
    try (ResultSet rs = executeQuery()) {
        return rs.next() ? rs.getObject(1) : null;
    } catch (SQLException ex) {
        throw new NonTransientException(ex.getMessage(), ex);
    }
}
/**
 * Execute the PreparedStatement and return a single 'primitive' value from the ResultSet.
 *
 * @param returnType The type to return.
 * @param <V> The type parameter to return a List of.
 * @return A single result from the execution of the statement, as a type of {@literal
 *     returnType}; for an empty result set this is {@code 0}/{@code 0L}/{@code false} for
 *     the Integer/Long/Boolean wrappers and {@code null} for any other type.
 * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
 *     the result, or any SQL errors occur.
 */
public <V> V executeScalar(Class<V> returnType) {
    try (ResultSet rs = executeQuery()) {
        if (!rs.next()) {
            // Empty result: fall back to a zero-like default for the common
            // numeric/boolean wrapper types, null otherwise.
            Object value = null;
            if (Integer.class == returnType) {
                value = 0;
            } else if (Long.class == returnType) {
                value = 0L;
            } else if (Boolean.class == returnType) {
                value = false;
            }
            return returnType.cast(value);
        } else {
            // getScalarFromResultSet (defined elsewhere in this class) maps the
            // first column to the requested wrapper type.
            return getScalarFromResultSet(rs, returnType);
        }
    } catch (SQLException ex) {
        throw new NonTransientException(ex.getMessage(), ex);
    }
}
/**
* Execute the PreparedStatement and return a List of 'primitive' values from the ResultSet.
*
* @param returnType The type Class return a List of.
* @param <V> The type parameter to return a List of.
* @return A {@code List<returnType>}.
* @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
* the result, or any SQL errors occur.
*/
public <V> List<V> executeScalarList(Class<V> returnType) {
try (ResultSet rs = executeQuery()) {
List<V> values = new ArrayList<>();
while (rs.next()) {
values.add(getScalarFromResultSet(rs, returnType));
}
return values;
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
/**
* Execute the statement and return only the first record from the result set.
*
* @param returnType The Class to return.
* @param <V> The type parameter.
* @return An instance of {@literal <V>} from the result set.
*/
public <V> V executeAndFetchFirst(Class<V> returnType) {
Object o = executeScalar();
if (null == o) {
return null;
}
return convert(o, returnType);
}
/**
* Execute the PreparedStatement and return a List of {@literal returnType} values from the
* ResultSet.
*
* @param returnType The type Class return a List of.
* @param <V> The type parameter to return a List of.
* @return A {@code List<returnType>}.
* @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
* the result, or any SQL errors occur.
*/
public <V> List<V> executeAndFetch(Class<V> returnType) {
try (ResultSet rs = executeQuery()) {
List<V> list = new ArrayList<>();
while (rs.next()) {
list.add(convert(rs.getObject(1), returnType));
}
return list;
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
    /**
     * Execute the PreparedStatement and return a List of {@literal Map} values from the ResultSet.
     *
     * <p>Each row becomes a Map keyed by the column label taken from the ResultSet metadata.
     *
     * @return A {@code List<Map>}; empty when the query returns no rows.
     * @throws NonTransientException if any SQL errors occur.
     */
    public List<Map<String, Object>> executeAndFetchMap() {
        try (ResultSet rs = executeQuery()) {
            List<Map<String, Object>> result = new ArrayList<>();
            ResultSetMetaData metadata = rs.getMetaData();
            int columnCount = metadata.getColumnCount();
            while (rs.next()) {
                HashMap<String, Object> row = new HashMap<>();
                for (int i = 1; i <= columnCount; i++) {
                    row.put(metadata.getColumnLabel(i), rs.getObject(i));
                }
                result.add(row);
            }
            return result;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }
/**
* Execute the query and pass the {@link ResultSet} to the given handler.
*
* @param handler The {@link ResultSetHandler} to execute.
* @param <V> The return type of this method.
* @return The results of {@link ResultSetHandler#apply(ResultSet)}.
*/
public <V> V executeAndFetch(ResultSetHandler<V> handler) {
try (ResultSet rs = executeQuery()) {
return handler.apply(rs);
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
@Override
public void close() {
try {
if (null != statement && !statement.isClosed()) {
statement.close();
}
} catch (SQLException ex) {
logger.warn("Error closing prepared statement: {}", ex.getMessage());
}
}
protected final Query addParameterInternal(InternalParameterSetter setter) {
int index = getAndIncrementIndex();
try {
setter.apply(this.statement, index);
return this;
} catch (SQLException ex) {
throw new NonTransientException("Could not apply bind parameter at index " + index, ex);
}
}
    /**
     * Reads column 1 of the current row as {@code returnType}, using the matching
     * type-specific JDBC getter and falling back to {@link ResultSet#getObject(int)}.
     *
     * <p>NOTE(review): for Integer/Long/Boolean/Double the primitive JDBC getters map SQL
     * NULL to 0/false (auto-boxed, so never null here), meaning the null check below can
     * only trip on the String/Date/Timestamp/Object paths. If NULL columns must be
     * distinguished from zero, {@link ResultSet#wasNull()} would be needed — confirm
     * callers before changing that behavior.
     *
     * @param rs a ResultSet already positioned on a row
     * @param returnType the scalar type to read
     * @return the column value cast to {@code returnType}
     * @throws SQLException if reading the column fails
     */
    protected <V> V getScalarFromResultSet(ResultSet rs, Class<V> returnType) throws SQLException {
        Object value = null;
        if (Integer.class == returnType) {
            value = rs.getInt(1);
        } else if (Long.class == returnType) {
            value = rs.getLong(1);
        } else if (String.class == returnType) {
            value = rs.getString(1);
        } else if (Boolean.class == returnType) {
            value = rs.getBoolean(1);
        } else if (Double.class == returnType) {
            value = rs.getDouble(1);
        } else if (Date.class == returnType) {
            value = rs.getDate(1);
        } else if (Timestamp.class == returnType) {
            value = rs.getTimestamp(1);
        } else {
            value = rs.getObject(1);
        }
        // Deliberately throws (an NPE) rather than returning null for nullable column types.
        if (null == value) {
            throw new NullPointerException(
                    "Cannot get value from ResultSet of type " + returnType.getName());
        }
        return returnType.cast(value);
    }
protected <V> V convert(Object value, Class<V> returnType) {
if (Boolean.class == returnType) {
return returnType.cast(convertBoolean(value));
} else if (Integer.class == returnType) {
return returnType.cast(convertInt(value));
} else if (Long.class == returnType) {
return returnType.cast(convertLong(value));
} else if (Double.class == returnType) {
return returnType.cast(convertDouble(value));
} else if (String.class == returnType) {
return returnType.cast(convertString(value));
} else if (value instanceof String) {
return fromJson((String) value, returnType);
}
final String vName = value.getClass().getName();
final String rName = returnType.getName();
throw new NonTransientException("Cannot convert type " + vName + " to " + rName);
}
protected Integer convertInt(Object value) {
if (null == value) {
return null;
}
if (value instanceof Integer) {
return (Integer) value;
}
if (value instanceof Number) {
return ((Number) value).intValue();
}
return NumberUtils.toInt(value.toString());
}
protected Double convertDouble(Object value) {
if (null == value) {
return null;
}
if (value instanceof Double) {
return (Double) value;
}
if (value instanceof Number) {
return ((Number) value).doubleValue();
}
return NumberUtils.toDouble(value.toString());
}
protected Long convertLong(Object value) {
if (null == value) {
return null;
}
if (value instanceof Long) {
return (Long) value;
}
if (value instanceof Number) {
return ((Number) value).longValue();
}
return NumberUtils.toLong(value.toString());
}
protected String convertString(Object value) {
if (null == value) {
return null;
}
if (value instanceof String) {
return (String) value;
}
return value.toString().trim();
}
protected Boolean convertBoolean(Object value) {
if (null == value) {
return null;
}
if (value instanceof Boolean) {
return (Boolean) value;
}
if (value instanceof Number) {
return ((Number) value).intValue() != 0;
}
String text = value.toString().trim();
return "Y".equalsIgnoreCase(text)
|| "YES".equalsIgnoreCase(text)
|| "TRUE".equalsIgnoreCase(text)
|| "T".equalsIgnoreCase(text)
|| "1".equalsIgnoreCase(text);
}
protected String toJson(Object value) {
if (null == value) {
return null;
}
try {
return objectMapper.writeValueAsString(value);
} catch (JsonProcessingException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
protected <V> V fromJson(String value, Class<V> returnType) {
if (null == value) {
return null;
}
try {
return objectMapper.readValue(value, returnType);
} catch (IOException ex) {
throw new NonTransientException(
"Could not convert JSON '" + value + "' to " + returnType.getName(), ex);
}
}
    /** Returns the current bind-parameter position without advancing it. */
    protected final int getIndex() {
        return index.get();
    }

    /** Returns the current bind-parameter position, then advances it by one. */
    protected final int getAndIncrementIndex() {
        return index.getAndIncrement();
    }
    /** Internal callback that binds one positional parameter on the PreparedStatement. */
    @FunctionalInterface
    private interface InternalParameterSetter {
        void apply(PreparedStatement ps, int idx) throws SQLException;
    }
}
| 8,178 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import java.sql.SQLException;
import java.util.Optional;
import javax.annotation.PostConstruct;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.*;
import org.springframework.retry.RetryContext;
import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.postgres.dao.PostgresExecutionDAO;
import com.netflix.conductor.postgres.dao.PostgresIndexDAO;
import com.netflix.conductor.postgres.dao.PostgresMetadataDAO;
import com.netflix.conductor.postgres.dao.PostgresQueueDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(PostgresProperties.class)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "postgres")
// Import the DataSourceAutoConfiguration when postgres database is selected.
// By default, the datasource configuration is excluded in the main module.
@Import(DataSourceAutoConfiguration.class)
public class PostgresConfiguration {

    /** Datasource against which Flyway migrations run and all DAOs operate. */
    DataSource dataSource;

    private final PostgresProperties properties;

    public PostgresConfiguration(DataSource dataSource, PostgresProperties properties) {
        this.dataSource = dataSource;
        this.properties = properties;
    }

    // NOTE(review): @PostConstruct on a @Bean factory method is unusual — Spring also
    // invokes it as a lifecycle callback on the configuration class, producing an extra
    // Flyway instance whose initMethod ("migrate") is never applied. Confirm whether it
    // is intentional before removing.
    @Bean(initMethod = "migrate")
    @PostConstruct
    public Flyway flywayForPrimaryDb() {
        return Flyway.configure()
                .locations("classpath:db/migration_postgres")
                .schemas(properties.getSchema())
                .dataSource(dataSource)
                .baselineOnMigrate(true)
                .load();
    }

    /** Metadata DAO; depends on Flyway so the schema exists before first use. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    public PostgresMetadataDAO postgresMetadataDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            PostgresProperties properties) {
        return new PostgresMetadataDAO(retryTemplate, objectMapper, dataSource, properties);
    }

    /** Execution DAO; depends on Flyway so the schema exists before first use. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    public PostgresExecutionDAO postgresExecutionDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper) {
        return new PostgresExecutionDAO(retryTemplate, objectMapper, dataSource);
    }

    /** Queue DAO; depends on Flyway so the schema exists before first use. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    public PostgresQueueDAO postgresQueueDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper) {
        return new PostgresQueueDAO(retryTemplate, objectMapper, dataSource);
    }

    /** Index DAO; only active when Postgres is also chosen as the indexing backend. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    @ConditionalOnProperty(name = "conductor.indexing.type", havingValue = "postgres")
    public PostgresIndexDAO postgresIndexDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper) {
        return new PostgresIndexDAO(retryTemplate, objectMapper, dataSource);
    }

    /** Retry template that retries only deadlock/serialization failures, without backoff. */
    @Bean
    public RetryTemplate postgresRetryTemplate(PostgresProperties properties) {
        SimpleRetryPolicy retryPolicy = new CustomRetryPolicy();
        // Honor the configured limit instead of a hard-coded 3; the property's default
        // is 3, so out-of-the-box behavior is unchanged.
        retryPolicy.setMaxAttempts(properties.getDeadlockRetryMax());

        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy);
        retryTemplate.setBackOffPolicy(new NoBackOffPolicy());
        return retryTemplate;
    }

    /** Retry policy that only retries Postgres deadlock and serialization-failure errors. */
    public static class CustomRetryPolicy extends SimpleRetryPolicy {

        // Postgres SQLSTATE codes: deadlock_detected and serialization_failure.
        private static final String ER_LOCK_DEADLOCK = "40P01";
        private static final String ER_SERIALIZATION_FAILURE = "40001";

        @Override
        public boolean canRetry(final RetryContext context) {
            final Optional<Throwable> lastThrowable =
                    Optional.ofNullable(context.getLastThrowable());
            return lastThrowable
                    .map(throwable -> super.canRetry(context) && isDeadLockError(throwable))
                    .orElseGet(() -> super.canRetry(context));
        }

        private boolean isDeadLockError(Throwable throwable) {
            SQLException sqlException = findCauseSQLException(throwable);
            if (sqlException == null) {
                return false;
            }
            return ER_LOCK_DEADLOCK.equals(sqlException.getSQLState())
                    || ER_SERIALIZATION_FAILURE.equals(sqlException.getSQLState());
        }

        // Walks the cause chain to the first SQLException, or null if none exists.
        private SQLException findCauseSQLException(Throwable throwable) {
            Throwable causeException = throwable;
            while (null != causeException && !(causeException instanceof SQLException)) {
                causeException = causeException.getCause();
            }
            return (SQLException) causeException;
        }
    }
}
| 8,179 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
@ConfigurationProperties("conductor.postgres")
public class PostgresProperties {

    /** The time in seconds after which the in-memory task definitions cache will be refreshed */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);

    // Maximum retry attempts for transactions failing on deadlock/serialization errors.
    private Integer deadlockRetryMax = 3;

    // Database schema the Flyway migrations and tables live in.
    // NOTE(review): public mutable field alongside a getter/setter — external code may
    // access it directly, so narrowing visibility would be a breaking change; confirm
    // before tidying.
    public String schema = "public";

    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }

    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }

    public Integer getDeadlockRetryMax() {
        return deadlockRetryMax;
    }

    public void setDeadlockRetryMax(Integer deadlockRetryMax) {
        this.deadlockRetryMax = deadlockRetryMax;
    }

    public String getSchema() {
        return schema;
    }

    public void setSchema(String schema) {
        this.schema = schema;
    }
}
| 8,180 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.annotation.PreDestroy;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.postgres.util.ExecutorsUtil;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Uninterruptibles;
public class PostgresQueueDAO extends PostgresBaseDAO implements QueueDAO {

    /** Initial delay and period, in ms, of the background job that un-pops stale messages. */
    private static final Long UNACK_SCHEDULE_MS = 60_000L;

    private final ScheduledExecutorService scheduledExecutorService;

    public PostgresQueueDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);

        this.scheduledExecutorService =
                Executors.newSingleThreadScheduledExecutor(
                        ExecutorsUtil.newNamedThreadFactory("postgres-queue-"));
        this.scheduledExecutorService.scheduleAtFixedRate(
                this::processAllUnacks,
                UNACK_SCHEDULE_MS,
                UNACK_SCHEDULE_MS,
                TimeUnit.MILLISECONDS);
        logger.debug("{} is ready to serve", PostgresQueueDAO.class.getName());
    }

    /** Stops the background unack job, forcing shutdown after a 30s grace period. */
    @PreDestroy
    public void destroy() {
        try {
            this.scheduledExecutorService.shutdown();
            if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
                logger.debug("tasks completed, shutting down");
            } else {
                logger.warn("Forcing shutdown after waiting for 30 seconds");
                scheduledExecutorService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            logger.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for processAllUnacks",
                    ie);
            scheduledExecutorService.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }

    @Override
    public void push(String queueName, String messageId, long offsetTimeInSecond) {
        push(queueName, messageId, 0, offsetTimeInSecond);
    }

    @Override
    public void push(String queueName, String messageId, int priority, long offsetTimeInSecond) {
        withTransaction(
                tx -> pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond));
    }

    @Override
    public void push(String queueName, List<Message> messages) {
        // All messages go in a single transaction with no delivery offset.
        withTransaction(
                tx ->
                        messages.forEach(
                                message ->
                                        pushMessage(
                                                tx,
                                                queueName,
                                                message.getId(),
                                                message.getPayload(),
                                                message.getPriority(),
                                                0)));
    }

    @Override
    public boolean pushIfNotExists(String queueName, String messageId, long offsetTimeInSecond) {
        return pushIfNotExists(queueName, messageId, 0, offsetTimeInSecond);
    }

    @Override
    public boolean pushIfNotExists(
            String queueName, String messageId, int priority, long offsetTimeInSecond) {
        return getWithRetriedTransactions(
                tx -> {
                    if (!existsMessage(tx, queueName, messageId)) {
                        pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond);
                        return true;
                    }
                    return false;
                });
    }

    @Override
    public List<String> pop(String queueName, int count, int timeout) {
        return pollMessages(queueName, count, timeout).stream()
                .map(Message::getId)
                .collect(Collectors.toList());
    }

    @Override
    public List<Message> pollMessages(String queueName, int count, int timeout) {
        // Non-positive timeout: single attempt, no waiting.
        if (timeout < 1) {
            List<Message> messages =
                    getWithTransactionWithOutErrorPropagation(
                            tx -> popMessages(tx, queueName, count, timeout));
            if (messages == null) {
                return new ArrayList<>();
            }
            return messages;
        }

        // Positive timeout: keep polling until we have `count` messages or time runs out.
        long start = System.currentTimeMillis();
        final List<Message> messages = new ArrayList<>();

        while (true) {
            List<Message> messagesSlice =
                    getWithTransactionWithOutErrorPropagation(
                            tx -> popMessages(tx, queueName, count - messages.size(), timeout));
            if (messagesSlice == null) {
                logger.warn(
                        "Unable to poll {} messages from {} due to tx conflict, only {} popped",
                        count,
                        queueName,
                        messages.size());
                // conflict could have happened, returned messages popped so far
                return messages;
            }

            messages.addAll(messagesSlice);
            if (messages.size() >= count || ((System.currentTimeMillis() - start) > timeout)) {
                return messages;
            }
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        }
    }

    @Override
    public void remove(String queueName, String messageId) {
        withTransaction(tx -> removeMessage(tx, queueName, messageId));
    }

    @Override
    public int getSize(String queueName) {
        final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?";
        return queryWithTransaction(
                GET_QUEUE_SIZE, q -> ((Long) q.addParameter(queueName).executeCount()).intValue());
    }

    @Override
    public boolean ack(String queueName, String messageId) {
        return getWithRetriedTransactions(tx -> removeMessage(tx, queueName, messageId));
    }

    @Override
    public boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) {
        long updatedOffsetTimeInSecond = unackTimeout / 1000;

        final String UPDATE_UNACK_TIMEOUT =
                "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = (current_timestamp + (? ||' seconds')::interval) WHERE queue_name = ? AND message_id = ?";

        return queryWithTransaction(
                        UPDATE_UNACK_TIMEOUT,
                        q ->
                                q.addParameter(updatedOffsetTimeInSecond)
                                        .addParameter(updatedOffsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate())
                == 1;
    }

    @Override
    public void flush(String queueName) {
        final String FLUSH_QUEUE = "DELETE FROM queue_message WHERE queue_name = ?";
        executeWithTransaction(FLUSH_QUEUE, q -> q.addParameter(queueName).executeDelete());
    }

    @Override
    public Map<String, Long> queuesDetail() {
        final String GET_QUEUES_DETAIL =
                "SELECT queue_name, (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size FROM queue q FOR SHARE SKIP LOCKED";
        return queryWithTransaction(
                GET_QUEUES_DETAIL,
                q ->
                        q.executeAndFetch(
                                rs -> {
                                    Map<String, Long> detail = Maps.newHashMap();
                                    while (rs.next()) {
                                        String queueName = rs.getString("queue_name");
                                        Long size = rs.getLong("size");
                                        detail.put(queueName, size);
                                    }
                                    return detail;
                                }));
    }

    @Override
    public Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
        // @formatter:off
        final String GET_QUEUES_DETAIL_VERBOSE =
                "SELECT queue_name, \n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size,\n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = true AND queue_name = q.queue_name) AS uacked \n"
                        + "  FROM queue q FOR SHARE SKIP LOCKED";
        // @formatter:on

        return queryWithTransaction(
                GET_QUEUES_DETAIL_VERBOSE,
                q ->
                        q.executeAndFetch(
                                rs -> {
                                    Map<String, Map<String, Map<String, Long>>> result =
                                            Maps.newHashMap();
                                    while (rs.next()) {
                                        String queueName = rs.getString("queue_name");
                                        Long size = rs.getLong("size");
                                        Long queueUnacked = rs.getLong("uacked");
                                        // Sharding not implemented: everything is reported
                                        // under a single shard named "a".
                                        result.put(
                                                queueName,
                                                ImmutableMap.of(
                                                        "a",
                                                        ImmutableMap.of(
                                                                "size",
                                                                size,
                                                                "uacked",
                                                                queueUnacked)));
                                    }
                                    return result;
                                }));
    }

    /**
     * Un-pop all un-acknowledged messages for all queues.
     *
     * <p>Messages popped more than 60 seconds past their deliver_on time are made visible
     * again, at most 1000 per run; rows locked by other transactions are skipped.
     *
     * @since 1.11.6
     */
    public void processAllUnacks() {
        logger.trace("processAllUnacks started");

        getWithRetriedTransactions(
                tx -> {
                    String LOCK_TASKS =
                            "SELECT queue_name, message_id FROM queue_message WHERE popped = true AND (deliver_on + (60 ||' seconds')::interval) < current_timestamp limit 1000 FOR UPDATE SKIP LOCKED";

                    List<QueueMessage> messages =
                            query(
                                    tx,
                                    LOCK_TASKS,
                                    p ->
                                            p.executeAndFetch(
                                                    rs -> {
                                                        List<QueueMessage> results =
                                                                new ArrayList<>();
                                                        while (rs.next()) {
                                                            QueueMessage qm = new QueueMessage();
                                                            qm.queueName =
                                                                    rs.getString("queue_name");
                                                            qm.messageId =
                                                                    rs.getString("message_id");
                                                            results.add(qm);
                                                        }
                                                        return results;
                                                    }));

                    if (messages.isEmpty()) {
                        return 0;
                    }

                    // Group the stale message ids per queue for one batch UPDATE per queue.
                    Map<String, List<String>> queueMessageMap = new HashMap<>();
                    for (QueueMessage qm : messages) {
                        queueMessageMap
                                .computeIfAbsent(qm.queueName, k -> new ArrayList<>())
                                .add(qm.messageId);
                    }

                    int totalUnacked = 0;
                    for (Map.Entry<String, List<String>> entry : queueMessageMap.entrySet()) {
                        final String queueName = entry.getKey();
                        final List<String> msgIds = entry.getValue();
                        int unacked = 0;
                        try {
                            final String UPDATE_POPPED =
                                    String.format(
                                            "UPDATE queue_message SET popped = false WHERE queue_name = ? and message_id IN (%s)",
                                            Query.generateInBindings(msgIds.size()));

                            unacked =
                                    query(
                                            tx,
                                            UPDATE_POPPED,
                                            q ->
                                                    q.addParameter(queueName)
                                                            .addParameters(msgIds)
                                                            .executeUpdate());
                        } catch (Exception e) {
                            // Best-effort per queue; log (not printStackTrace) and continue.
                            logger.error(
                                    "processAllUnacks failed to unack {} messages from queue {}",
                                    msgIds.size(),
                                    queueName,
                                    e);
                        }
                        totalUnacked += unacked;
                        logger.debug("Unacked {} messages from queue {}", unacked, queueName);
                    }

                    if (totalUnacked > 0) {
                        logger.debug("Unacked {} messages from all queues", totalUnacked);
                    }
                    return totalUnacked;
                });
    }

    @Override
    public void processUnacks(String queueName) {
        final String PROCESS_UNACKS =
                "UPDATE queue_message SET popped = false WHERE queue_name = ? AND popped = true AND (current_timestamp - (60 ||' seconds')::interval)  > deliver_on";
        executeWithTransaction(PROCESS_UNACKS, q -> q.addParameter(queueName).executeUpdate());
    }

    @Override
    public boolean resetOffsetTime(String queueName, String messageId) {
        long offsetTimeInSecond = 0; // Reset to 0
        final String SET_OFFSET_TIME =
                "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = (current_timestamp + (? ||' seconds')::interval) \n"
                        + "WHERE queue_name = ? AND message_id = ?";

        return queryWithTransaction(
                SET_OFFSET_TIME,
                q ->
                        q.addParameter(offsetTimeInSecond)
                                        .addParameter(offsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate()
                                == 1);
    }

    private boolean existsMessage(Connection connection, String queueName, String messageId) {
        final String EXISTS_MESSAGE =
                "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ?) FOR SHARE";
        return query(
                connection,
                EXISTS_MESSAGE,
                q -> q.addParameter(queueName).addParameter(messageId).exists());
    }

    // Upsert: refresh payload/deliver_on when the message exists, otherwise insert it.
    private void pushMessage(
            Connection connection,
            String queueName,
            String messageId,
            String payload,
            Integer priority,
            long offsetTimeInSecond) {

        createQueueIfNotExists(connection, queueName);

        String UPDATE_MESSAGE =
                "UPDATE queue_message SET payload=?, deliver_on=(current_timestamp + (? ||' seconds')::interval) WHERE queue_name = ? AND message_id = ?";
        int rowsUpdated =
                query(
                        connection,
                        UPDATE_MESSAGE,
                        q ->
                                q.addParameter(payload)
                                        .addParameter(offsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate());

        if (rowsUpdated == 0) {
            String PUSH_MESSAGE =
                    "INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES ((current_timestamp + (? ||' seconds')::interval), ?,?,?,?,?) ON CONFLICT (queue_name,message_id) DO UPDATE SET payload=excluded.payload, deliver_on=excluded.deliver_on";
            execute(
                    connection,
                    PUSH_MESSAGE,
                    q ->
                            q.addParameter(offsetTimeInSecond)
                                    .addParameter(queueName)
                                    .addParameter(messageId)
                                    .addParameter(priority)
                                    .addParameter(offsetTimeInSecond)
                                    .addParameter(payload)
                                    .executeUpdate());
        }
    }

    private boolean removeMessage(Connection connection, String queueName, String messageId) {
        final String REMOVE_MESSAGE =
                "DELETE FROM queue_message WHERE queue_name = ? AND message_id = ?";
        return query(
                connection,
                REMOVE_MESSAGE,
                q -> q.addParameter(queueName).addParameter(messageId).executeDelete());
    }

    // Locks and returns up to `count` deliverable messages without marking them popped.
    private List<Message> peekMessages(Connection connection, String queueName, int count) {
        if (count < 1) {
            return Collections.emptyList();
        }

        final String PEEK_MESSAGES =
                "SELECT message_id, priority, payload FROM queue_message WHERE queue_name = ? AND popped = false AND deliver_on <= (current_timestamp + (1000 ||' microseconds')::interval) ORDER BY priority DESC, deliver_on, created_on LIMIT ? FOR UPDATE SKIP LOCKED";

        return query(
                connection,
                PEEK_MESSAGES,
                p ->
                        p.addParameter(queueName)
                                .addParameter(count)
                                .executeAndFetch(
                                        rs -> {
                                            List<Message> results = new ArrayList<>();
                                            while (rs.next()) {
                                                Message m = new Message();
                                                m.setId(rs.getString("message_id"));
                                                m.setPriority(rs.getInt("priority"));
                                                m.setPayload(rs.getString("payload"));
                                                results.add(m);
                                            }
                                            return results;
                                        }));
    }

    // Marks peeked messages as popped; only messages we actually flipped are returned.
    private List<Message> popMessages(
            Connection connection, String queueName, int count, int timeout) {
        List<Message> messages = peekMessages(connection, queueName, count);

        if (messages.isEmpty()) {
            return messages;
        }

        List<Message> poppedMessages = new ArrayList<>();
        for (Message message : messages) {
            final String POP_MESSAGE =
                    "UPDATE queue_message SET popped = true WHERE queue_name = ? AND message_id = ? AND popped = false";
            int result =
                    query(
                            connection,
                            POP_MESSAGE,
                            q ->
                                    q.addParameter(queueName)
                                            .addParameter(message.getId())
                                            .executeUpdate());

            if (result == 1) {
                poppedMessages.add(message);
            }
        }
        return poppedMessages;
    }

    @Override
    public boolean containsMessage(String queueName, String messageId) {
        return getWithRetriedTransactions(tx -> existsMessage(tx, queueName, messageId));
    }

    private void createQueueIfNotExists(Connection connection, String queueName) {
        logger.trace("Creating new queue '{}'", queueName);
        final String EXISTS_QUEUE =
                "SELECT EXISTS(SELECT 1 FROM queue WHERE queue_name = ?) FOR SHARE";
        boolean exists = query(connection, EXISTS_QUEUE, q -> q.addParameter(queueName).exists());
        if (!exists) {
            final String CREATE_QUEUE =
                    "INSERT INTO queue (queue_name) VALUES (?) ON CONFLICT (queue_name) DO NOTHING";
            execute(connection, CREATE_QUEUE, q -> q.addParameter(queueName).executeUpdate());
        }
    }

    /** Simple (queue_name, message_id) tuple read back from the lock query. */
    private static class QueueMessage {
        public String queueName;
        public String messageId;
    }
}
| 8,181 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresIndexDAO.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.postgres.util.PostgresIndexQueryBuilder;
import com.fasterxml.jackson.databind.ObjectMapper;
public class PostgresIndexDAO extends PostgresBaseDAO implements IndexDAO {
    /**
     * @param retryTemplate retry policy applied to DB operations by the base DAO
     * @param objectMapper used to serialize summaries into the JSONB columns
     * @param dataSource the Postgres datasource backing the index tables
     */
    public PostgresIndexDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }
@Override
public void indexWorkflow(WorkflowSummary workflow) {
String INSERT_WORKFLOW_INDEX_SQL =
"INSERT INTO workflow_index (workflow_id, correlation_id, workflow_type, start_time, status, json_data)"
+ "VALUES (?, ?, ?, ?, ?, ?::JSONB) ON CONFLICT (workflow_id) \n"
+ "DO UPDATE SET correlation_id = EXCLUDED.correlation_id, workflow_type = EXCLUDED.workflow_type, "
+ "start_time = EXCLUDED.start_time, status = EXCLUDED.status, json_data = EXCLUDED.json_data";
TemporalAccessor ta = DateTimeFormatter.ISO_INSTANT.parse(workflow.getStartTime());
Timestamp startTime = Timestamp.from(Instant.from(ta));
queryWithTransaction(
INSERT_WORKFLOW_INDEX_SQL,
q ->
q.addParameter(workflow.getWorkflowId())
.addParameter(workflow.getCorrelationId())
.addParameter(workflow.getWorkflowType())
.addParameter(startTime)
.addParameter(workflow.getStatus().toString())
.addJsonParameter(workflow)
.executeUpdate());
}
@Override
public SearchResult<WorkflowSummary> searchWorkflowSummary(
String query, String freeText, int start, int count, List<String> sort) {
PostgresIndexQueryBuilder queryBuilder =
new PostgresIndexQueryBuilder(
"workflow_index", query, freeText, start, count, sort);
List<WorkflowSummary> results =
queryWithTransaction(
queryBuilder.getQuery(),
q -> {
queryBuilder.addParameters(q);
return q.executeAndFetch(WorkflowSummary.class);
});
// To avoid making a second potentially expensive query to postgres say we've
// got enough results for another page so the pagination works
int totalHits = results.size() == count ? start + count + 1 : start + results.size();
return new SearchResult<>(totalHits, results);
}
@Override
public void indexTask(TaskSummary task) {
String INSERT_TASK_INDEX_SQL =
"INSERT INTO task_index (task_id, task_type, task_def_name, status, start_time, update_time, workflow_type, json_data)"
+ "VALUES (?, ?, ?, ?, ?, ?, ?, ?::JSONB) ON CONFLICT (task_id) "
+ "DO UPDATE SET task_type = EXCLUDED.task_type, task_def_name = EXCLUDED.task_def_name, "
+ "status = EXCLUDED.status, update_time = EXCLUDED.update_time, json_data = EXCLUDED.json_data";
TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(task.getUpdateTime());
Timestamp updateTime = Timestamp.from(Instant.from(updateTa));
TemporalAccessor startTa = DateTimeFormatter.ISO_INSTANT.parse(task.getStartTime());
Timestamp startTime = Timestamp.from(Instant.from(startTa));
queryWithTransaction(
INSERT_TASK_INDEX_SQL,
q ->
q.addParameter(task.getTaskId())
.addParameter(task.getTaskType())
.addParameter(task.getTaskDefName())
.addParameter(task.getStatus().toString())
.addParameter(startTime)
.addParameter(updateTime)
.addParameter(task.getWorkflowType())
.addJsonParameter(task)
.executeUpdate());
}
@Override
public SearchResult<TaskSummary> searchTaskSummary(
String query, String freeText, int start, int count, List<String> sort) {
PostgresIndexQueryBuilder queryBuilder =
new PostgresIndexQueryBuilder("task_index", query, freeText, start, count, sort);
List<TaskSummary> results =
queryWithTransaction(
queryBuilder.getQuery(),
q -> {
queryBuilder.addParameters(q);
return q.executeAndFetch(TaskSummary.class);
});
// To avoid making a second potentially expensive query to postgres say we've
// got enough results for another page so the pagination works
int totalHits = results.size() == count ? start + count + 1 : start + results.size();
return new SearchResult<>(totalHits, results);
}
@Override
public void addTaskExecutionLogs(List<TaskExecLog> logs) {
String INSERT_LOG =
"INSERT INTO task_execution_logs (task_id, created_time, log) VALUES (?, ?, ?)";
for (TaskExecLog log : logs) {
queryWithTransaction(
INSERT_LOG,
q ->
q.addParameter(log.getTaskId())
.addParameter(new Timestamp(log.getCreatedTime()))
.addParameter(log.getLog())
.executeUpdate());
}
}
@Override
public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
return queryWithTransaction(
"SELECT log, task_id, created_time FROM task_execution_logs WHERE task_id = ? ORDER BY created_time ASC",
q ->
q.addParameter(taskId)
.executeAndFetch(
rs -> {
List<TaskExecLog> result = new ArrayList<>();
while (rs.next()) {
TaskExecLog log = new TaskExecLog();
log.setLog(rs.getString("log"));
log.setTaskId(rs.getString("task_id"));
log.setCreatedTime(
rs.getDate("created_time").getTime());
result.add(log);
}
return result;
}));
}
@Override
public void setup() {}
@Override
public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
logger.info("asyncIndexWorkflow is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
logger.info("asyncIndexTask is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public SearchResult<String> searchWorkflows(
String query, String freeText, int start, int count, List<String> sort) {
logger.info("searchWorkflows is not supported for postgres indexing");
return null;
}
@Override
public SearchResult<String> searchTasks(
String query, String freeText, int start, int count, List<String> sort) {
logger.info("searchTasks is not supported for postgres indexing");
return null;
}
@Override
public void removeWorkflow(String workflowId) {
logger.info("removeWorkflow is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) {
logger.info("asyncRemoveWorkflow is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) {
logger.info("updateWorkflow is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncUpdateWorkflow(
String workflowInstanceId, String[] keys, Object[] values) {
logger.info("asyncUpdateWorkflow is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void removeTask(String workflowId, String taskId) {
logger.info("removeTask is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncRemoveTask(String workflowId, String taskId) {
logger.info("asyncRemoveTask is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void updateTask(String workflowId, String taskId, String[] keys, Object[] values) {
logger.info("updateTask is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncUpdateTask(
String workflowId, String taskId, String[] keys, Object[] values) {
logger.info("asyncUpdateTask is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public String get(String workflowInstanceId, String key) {
logger.info("get is not supported for postgres indexing");
return null;
}
@Override
public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
logger.info("asyncAddTaskExecutionLogs is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void addEventExecution(EventExecution eventExecution) {
logger.info("addEventExecution is not supported for postgres indexing");
}
@Override
public List<EventExecution> getEventExecutions(String event) {
logger.info("getEventExecutions is not supported for postgres indexing");
return null;
}
@Override
public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
logger.info("asyncAddEventExecution is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void addMessage(String queue, Message msg) {
logger.info("addMessage is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
logger.info("asyncAddMessage is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public List<Message> getMessages(String queue) {
logger.info("getMessages is not supported for postgres indexing");
return null;
}
@Override
public List<String> searchArchivableWorkflows(String indexName, long archiveTtlDays) {
logger.info("searchArchivableWorkflows is not supported for postgres indexing");
return null;
}
public long getWorkflowCount(String query, String freeText) {
logger.info("getWorkflowCount is not supported for postgres indexing");
return 0;
}
}
| 8,182 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresBaseDAO.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.postgres.util.*;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
/**
 * Base class for the Postgres DAOs: owns the shared {@link DataSource}, the Jackson
 * {@link ObjectMapper} used to (de)serialise JSON payload columns, and the Spring
 * {@link RetryTemplate} that retries failed transactions.
 *
 * <p>All SQL executed by subclasses funnels through the transaction helpers defined
 * here ({@link #queryWithTransaction}, {@link #executeWithTransaction}, ...), which
 * manage commit/rollback and restore the connection's previous auto-commit mode.
 */
public abstract class PostgresBaseDAO {

    // Stack-trace entries to skip when resolving the calling method name for trace logs.
    private static final List<String> EXCLUDED_STACKTRACE_CLASS =
            ImmutableList.of(PostgresBaseDAO.class.getName(), Thread.class.getName());

    protected final Logger logger = LoggerFactory.getLogger(getClass());
    protected final ObjectMapper objectMapper;
    protected final DataSource dataSource;
    private final RetryTemplate retryTemplate;

    protected PostgresBaseDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        this.retryTemplate = retryTemplate;
        this.objectMapper = objectMapper;
        this.dataSource = dataSource;
    }

    /**
     * Lazily resolves the name of the first stack frame not belonging to this class or
     * {@link Thread}. Wrapped in {@link LazyToString} so the (expensive) stack walk only
     * happens if the trace-level log line is actually emitted.
     */
    protected final LazyToString getCallingMethod() {
        return new LazyToString(
                () ->
                        Arrays.stream(Thread.currentThread().getStackTrace())
                                .filter(
                                        ste ->
                                                !EXCLUDED_STACKTRACE_CLASS.contains(
                                                        ste.getClassName()))
                                .findFirst()
                                .map(StackTraceElement::getMethodName)
                                .orElseThrow(() -> new NullPointerException("Cannot find Caller")));
    }

    /**
     * Serialises {@code value} to a JSON string.
     *
     * @throws NonTransientException if Jackson fails to serialise the value.
     */
    protected String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Deserialises {@code json} into an instance of {@code tClass}.
     *
     * @throws NonTransientException if the JSON cannot be read as the requested type.
     */
    protected <T> T readValue(String json, Class<T> tClass) {
        try {
            return objectMapper.readValue(json, tClass);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Deserialises {@code json} into the generic type described by {@code typeReference}.
     *
     * @throws NonTransientException if the JSON cannot be read as the requested type.
     */
    protected <T> T readValue(String json, TypeReference<T> typeReference) {
        try {
            return objectMapper.readValue(json, typeReference);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Initialize a new transactional {@link Connection} from {@link #dataSource} and pass it to
     * {@literal function}.
     *
     * <p>Successful executions of {@literal function} will result in a commit and return of {@link
     * TransactionalFunction#apply(Connection)}.
     *
     * <p>Any {@link Throwable} thrown from {@code TransactionalFunction#apply(Connection)} will
     * result in a rollback of the transaction and will be wrapped in an {@link
     * NonTransientException} if it is not already one.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce some expected return value.
     *
     * @param function The function to apply with a new transactional {@link Connection}
     * @param <R> The return type.
     * @return The result of {@code TransactionalFunction#apply(Connection)}
     * @throws NonTransientException If any errors occur.
     */
    private <R> R getWithTransaction(final TransactionalFunction<R> function) {
        final Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            // Remember the pool's auto-commit setting so it can be restored afterwards.
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                // Avoid double-wrapping when the callback already raised the right type.
                if (th instanceof NonTransientException) {
                    throw th;
                }
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Runs {@code function} in a transaction under the configured {@link RetryTemplate};
     * any failure surviving the retries is wrapped in a {@link NonTransientException}.
     */
    <R> R getWithRetriedTransactions(final TransactionalFunction<R> function) {
        try {
            return retryTemplate.execute(context -> getWithTransaction(function));
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        }
    }

    /**
     * Like {@code getWithTransaction} but swallows callback failures: the transaction is
     * rolled back, the error message is logged at INFO, and {@code null} is returned
     * instead of propagating the error. Connection-level {@link SQLException}s still
     * propagate as {@link NonTransientException}.
     */
    protected <R> R getWithTransactionWithOutErrorPropagation(TransactionalFunction<R> function) {
        Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                logger.info(th.getMessage());
                return null;
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Wraps {@link #getWithRetriedTransactions(TransactionalFunction)} with no return value.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce no expected return value.
     *
     * @param consumer The {@link Consumer} callback to pass a transactional {@link Connection} to.
     * @throws NonTransientException If any errors occur.
     * @see #getWithRetriedTransactions(TransactionalFunction)
     */
    protected void withTransaction(Consumer<Connection> consumer) {
        getWithRetriedTransactions(
                connection -> {
                    consumer.accept(connection);
                    return null;
                });
    }

    /**
     * Initiate a new transaction and execute a {@link Query} within that context, then return the
     * results of {@literal function}.
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R queryWithTransaction(String query, QueryFunction<R> function) {
        return getWithRetriedTransactions(tx -> query(tx, query, function));
    }

    /**
     * Execute a {@link Query} within the context of a given transaction and return the results of
     * {@literal function}.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R query(Connection tx, String query, QueryFunction<R> function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            return function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute a statement with no expected return value within a given transaction.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void execute(Connection tx, String query, ExecuteFunction function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Instantiates a new transactional connection and invokes {@link #execute(Connection, String,
     * ExecuteFunction)}
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void executeWithTransaction(String query, ExecuteFunction function) {
        withTransaction(tx -> execute(tx, query, function));
    }
}
| 8,183 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.Date;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.annotation.PreDestroy;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.dao.RateLimitingDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.postgres.util.ExecutorsUtil;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
public class PostgresExecutionDAO extends PostgresBaseDAO
implements ExecutionDAO, RateLimitingDAO, PollDataDAO, ConcurrentExecutionLimitDAO {
private final ScheduledExecutorService scheduledExecutorService;
    public PostgresExecutionDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
        // Single-threaded scheduler used solely for delayed workflow removal
        // (see removeWorkflowWithExpiry); shut down in destroy().
        this.scheduledExecutorService =
                Executors.newSingleThreadScheduledExecutor(
                        ExecutorsUtil.newNamedThreadFactory("postgres-execution-"));
    }

    // Formats epoch millis as yyyyMMdd; used as the date_str key in
    // workflow_def_to_workflow range queries.
    private static String dateStr(Long timeInMs) {
        Date date = new Date(timeInMs);
        return dateStr(date);
    }

    // Formats a date as yyyyMMdd. A fresh SimpleDateFormat per call because
    // SimpleDateFormat is not thread-safe.
    private static String dateStr(Date date) {
        SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
        return format.format(date);
    }

    /**
     * Shuts down the removal scheduler, waiting up to 30s for queued removals before
     * forcing shutdown; re-interrupts the thread if the wait itself is interrupted.
     */
    @PreDestroy
    public void destroy() {
        try {
            this.scheduledExecutorService.shutdown();
            if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
                logger.debug("tasks completed, shutting down");
            } else {
                logger.warn("Forcing shutdown after waiting for 30 seconds");
                scheduledExecutorService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            logger.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for removeWorkflowWithExpiry",
                    ie);
            scheduledExecutorService.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }

    /** Returns in-progress tasks of the given type that belong to {@code workflowId}. */
    @Override
    public List<TaskModel> getPendingTasksByWorkflow(String taskDefName, String workflowId) {
        // @formatter:off
        String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW =
                "SELECT json_data FROM task_in_progress tip "
                        + "INNER JOIN task t ON t.task_id = tip.task_id "
                        + "WHERE task_def_name = ? AND workflow_id = ? FOR SHARE";
        // @formatter:on
        return queryWithTransaction(
                GET_IN_PROGRESS_TASKS_FOR_WORKFLOW,
                q ->
                        q.addParameter(taskDefName)
                                .addParameter(workflowId)
                                .executeAndFetch(TaskModel.class));
    }
@Override
public List<TaskModel> getTasks(String taskDefName, String startKey, int count) {
List<TaskModel> tasks = new ArrayList<>(count);
List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskDefName);
boolean startKeyFound = startKey == null;
int found = 0;
for (TaskModel pendingTask : pendingTasks) {
if (!startKeyFound) {
if (pendingTask.getTaskId().equals(startKey)) {
startKeyFound = true;
// noinspection ConstantConditions
if (startKey != null) {
continue;
}
}
}
if (startKeyFound && found < count) {
tasks.add(pendingTask);
found++;
}
}
return tasks;
}
private static String taskKey(TaskModel task) {
return task.getReferenceTaskName() + "_" + task.getRetryCount();
}
    /**
     * Persists each task in its own transaction: schedules it (de-duplicated on the
     * reference-name/retry key), records its data, workflow mapping and in-progress
     * entry. Tasks whose key is already scheduled are skipped and excluded from the
     * returned list.
     *
     * @return the subset of {@code tasks} that were actually created
     */
    @Override
    public List<TaskModel> createTasks(List<TaskModel> tasks) {
        List<TaskModel> created = Lists.newArrayListWithCapacity(tasks.size());

        for (TaskModel task : tasks) {
            withTransaction(
                    connection -> {
                        validate(task);

                        task.setScheduledTime(System.currentTimeMillis());

                        final String taskKey = taskKey(task);

                        // addScheduledTask returns false when this ref/retry key was
                        // already scheduled — the de-duplication guard.
                        boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey);

                        if (!scheduledTaskAdded) {
                            logger.trace(
                                    "Task already scheduled, skipping the run "
                                            + task.getTaskId()
                                            + ", ref="
                                            + task.getReferenceTaskName()
                                            + ", key="
                                            + taskKey);
                            return;
                        }

                        insertOrUpdateTaskData(connection, task);
                        addWorkflowToTaskMapping(connection, task);
                        addTaskInProgress(connection, task);
                        updateTask(connection, task);

                        created.add(task);
                    });
        }

        return created;
    }

    /** Updates the task row (and related bookkeeping) inside a retried transaction. */
    @Override
    public void updateTask(TaskModel task) {
        withTransaction(connection -> updateTask(connection, task));
    }

    /**
     * This is a dummy implementation and this feature is not for Postgres backed Conductor
     *
     * @param task: which needs to be evaluated whether it is rateLimited or not
     */
    @Override
    public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) {
        return false;
    }
@Override
public boolean exceedsLimit(TaskModel task) {
Optional<TaskDef> taskDefinition = task.getTaskDefinition();
if (taskDefinition.isEmpty()) {
return false;
}
TaskDef taskDef = taskDefinition.get();
int limit = taskDef.concurrencyLimit();
if (limit <= 0) {
return false;
}
long current = getInProgressTaskCount(task.getTaskDefName());
if (current >= limit) {
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
return true;
}
logger.info(
"Task execution count for {}: limit={}, current={}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
String taskId = task.getTaskId();
List<String> tasksInProgressInOrderOfArrival =
findAllTasksInProgressInOrderOfArrival(task, limit);
boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId);
if (rateLimited) {
logger.info(
"Task execution count limited. {}, limit {}, current {}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
}
return rateLimited;
}
    /**
     * Removes a task and all its bookkeeping rows (schedule, workflow mapping,
     * in-progress entry, data) in one transaction.
     *
     * @return {@code false} when no task exists for {@code taskId}, {@code true} otherwise
     */
    @Override
    public boolean removeTask(String taskId) {
        TaskModel task = getTask(taskId);

        if (task == null) {
            logger.warn("No such task found by id {}", taskId);
            return false;
        }

        final String taskKey = taskKey(task);

        withTransaction(
                connection -> {
                    removeScheduledTask(connection, task, taskKey);
                    removeWorkflowToTaskMapping(connection, task);
                    removeTaskInProgress(connection, task);
                    removeTaskData(connection, task);
                });
        return true;
    }

    /** Loads a single task by id, or {@code null} when no row exists. */
    @Override
    public TaskModel getTask(String taskId) {
        String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?";
        return queryWithTransaction(
                GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(TaskModel.class));
    }

    /** Loads the given task ids in one query; empty input short-circuits to an empty list. */
    @Override
    public List<TaskModel> getTasks(List<String> taskIds) {
        if (taskIds.isEmpty()) {
            return Lists.newArrayList();
        }
        return getWithRetriedTransactions(c -> getTasks(c, taskIds));
    }

    /**
     * Returns all in-progress tasks of the given type. Uses FOR UPDATE SKIP LOCKED so
     * concurrent pollers do not block on each other's rows.
     */
    @Override
    public List<TaskModel> getPendingTasksForTaskType(String taskName) {
        Preconditions.checkNotNull(taskName, "task name cannot be null");
        // @formatter:off
        String GET_IN_PROGRESS_TASKS_FOR_TYPE =
                "SELECT json_data FROM task_in_progress tip "
                        + "INNER JOIN task t ON t.task_id = tip.task_id "
                        + "WHERE task_def_name = ? FOR UPDATE SKIP LOCKED";
        // @formatter:on
        return queryWithTransaction(
                GET_IN_PROGRESS_TASKS_FOR_TYPE,
                q -> q.addParameter(taskName).executeAndFetch(TaskModel.class));
    }

    /**
     * Returns all tasks of a workflow by resolving ids from workflow_to_task and then
     * bulk-loading them within the same transaction.
     */
    @Override
    public List<TaskModel> getTasksForWorkflow(String workflowId) {
        String GET_TASKS_FOR_WORKFLOW =
                "SELECT task_id FROM workflow_to_task WHERE workflow_id = ? FOR SHARE";
        return getWithRetriedTransactions(
                tx ->
                        query(
                                tx,
                                GET_TASKS_FOR_WORKFLOW,
                                q -> {
                                    List<String> taskIds =
                                            q.addParameter(workflowId)
                                                    .executeScalarList(String.class);
                                    return getTasks(tx, taskIds);
                                }));
    }

    /** Inserts a new workflow (insert path of {@code insertOrUpdateWorkflow}). */
    @Override
    public String createWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, false);
    }

    /** Updates an existing workflow (update path of {@code insertOrUpdateWorkflow}). */
    @Override
    public String updateWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, true);
    }

    /**
     * Removes a workflow, its def mapping and pending entry, then each of its tasks.
     *
     * <p>NOTE(review): the workflow rows are deleted first; a later task-removal failure
     * returns {@code false} but the workflow itself is already gone.
     *
     * @return {@code true} only when the workflow and all of its tasks were removed
     */
    @Override
    public boolean removeWorkflow(String workflowId) {
        boolean removed = false;
        WorkflowModel workflow = getWorkflow(workflowId, true);
        if (workflow != null) {
            withTransaction(
                    connection -> {
                        removeWorkflowDefToWorkflowMapping(connection, workflow);
                        removeWorkflow(connection, workflowId);
                        removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId);
                    });
            removed = true;

            for (TaskModel task : workflow.getTasks()) {
                if (!removeTask(task.getTaskId())) {
                    removed = false;
                }
            }
        }
        return removed;
    }

    /** Scheduled executor based implementation. */
    @Override
    public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
        scheduledExecutorService.schedule(
                () -> {
                    try {
                        removeWorkflow(workflowId);
                    } catch (Throwable e) {
                        logger.warn("Unable to remove workflow: {} with expiry", workflowId, e);
                    }
                },
                ttlSeconds,
                TimeUnit.SECONDS);
        return true;
    }

    /** Deletes the workflow's row from the pending table. */
    @Override
    public void removeFromPendingWorkflow(String workflowType, String workflowId) {
        withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId));
    }

    /** Loads a workflow including its tasks. */
    @Override
    public WorkflowModel getWorkflow(String workflowId) {
        return getWorkflow(workflowId, true);
    }

    /**
     * Loads a workflow, optionally populating its tasks sorted by sequence number.
     * Returns {@code null} when the workflow does not exist.
     */
    @Override
    public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
        WorkflowModel workflow = getWithRetriedTransactions(tx -> readWorkflow(tx, workflowId));

        if (workflow != null) {
            if (includeTasks) {
                List<TaskModel> tasks = getTasksForWorkflow(workflowId);
                tasks.sort(Comparator.comparingInt(TaskModel::getSeq));
                workflow.setTasks(tasks);
            }
        }
        return workflow;
    }

    /**
     * @param workflowName name of the workflow
     * @param version the workflow version (ignored by this query)
     * @return list of workflow ids that are in RUNNING state <em>returns workflows of all versions
     *     for the given workflow name</em>
     */
    @Override
    public List<String> getRunningWorkflowIds(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        String GET_PENDING_WORKFLOW_IDS =
                "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ? FOR SHARE SKIP LOCKED";

        return queryWithTransaction(
                GET_PENDING_WORKFLOW_IDS,
                q -> q.addParameter(workflowName).executeScalarList(String.class));
    }

    /**
     * @param workflowName Name of the workflow
     * @param version the workflow version; filtering by version happens in memory after
     *     loading each running workflow
     * @return list of workflows that are in RUNNING state
     */
    @Override
    public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        return getRunningWorkflowIds(workflowName, version).stream()
                .map(this::getWorkflow)
                .filter(workflow -> workflow.getWorkflowVersion() == version)
                .collect(Collectors.toList());
    }

    /** Counts pending workflows of the given type. */
    @Override
    public long getPendingWorkflowCount(String workflowName) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        String GET_PENDING_WORKFLOW_COUNT =
                "SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?";

        return queryWithTransaction(
                GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount());
    }

    /** Counts tasks of the given definition currently flagged in progress. */
    @Override
    public long getInProgressTaskCount(String taskDefName) {
        String GET_IN_PROGRESS_TASK_COUNT =
                "SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true";

        return queryWithTransaction(
                GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount());
    }
@Override
public List<WorkflowModel> getWorkflowsByType(
String workflowName, Long startTime, Long endTime) {
Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
Preconditions.checkNotNull(startTime, "startTime cannot be null");
Preconditions.checkNotNull(endTime, "endTime cannot be null");
List<WorkflowModel> workflows = new LinkedList<>();
withTransaction(
tx -> {
// @formatter:off
String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF =
"SELECT workflow_id FROM workflow_def_to_workflow "
+ "WHERE workflow_def = ? AND date_str BETWEEN ? AND ? FOR SHARE SKIP LOCKED";
// @formatter:on
List<String> workflowIds =
query(
tx,
GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF,
q ->
q.addParameter(workflowName)
.addParameter(dateStr(startTime))
.addParameter(dateStr(endTime))
.executeScalarList(String.class));
workflowIds.forEach(
workflowId -> {
try {
WorkflowModel wf = getWorkflow(workflowId);
if (wf.getCreateTime() >= startTime
&& wf.getCreateTime() <= endTime) {
workflows.add(wf);
}
} catch (Exception e) {
logger.error(
"Unable to load workflow id {} with name {}",
workflowId,
workflowName,
e);
}
});
});
return workflows;
}
@Override
public List<WorkflowModel> getWorkflowsByCorrelationId(
String workflowName, String correlationId, boolean includeTasks) {
Preconditions.checkNotNull(correlationId, "correlationId cannot be null");
String GET_WORKFLOWS_BY_CORRELATION_ID =
"SELECT w.json_data FROM workflow w left join workflow_def_to_workflow wd on w.workflow_id = wd.workflow_id WHERE w.correlation_id = ? and wd.workflow_def = ? FOR SHARE SKIP LOCKED";
return queryWithTransaction(
GET_WORKFLOWS_BY_CORRELATION_ID,
q ->
q.addParameter(correlationId)
.addParameter(workflowName)
.executeAndFetch(WorkflowModel.class));
}
@Override
public boolean canSearchAcrossWorkflows() {
return true;
}
@Override
public boolean addEventExecution(EventExecution eventExecution) {
try {
return getWithRetriedTransactions(tx -> insertEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to add event execution " + eventExecution.getId(), e);
}
}
@Override
public void removeEventExecution(EventExecution eventExecution) {
try {
withTransaction(tx -> removeEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to remove event execution " + eventExecution.getId(), e);
}
}
@Override
public void updateEventExecution(EventExecution eventExecution) {
try {
withTransaction(tx -> updateEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to update event execution " + eventExecution.getId(), e);
}
}
public List<EventExecution> getEventExecutions(
String eventHandlerName, String eventName, String messageId, int max) {
try {
List<EventExecution> executions = Lists.newLinkedList();
withTransaction(
tx -> {
for (int i = 0; i < max; i++) {
String executionId =
messageId + "_"
+ i; // see SimpleEventProcessor.handle to understand
// how the
// execution id is set
EventExecution ee =
readEventExecution(
tx,
eventHandlerName,
eventName,
messageId,
executionId);
if (ee == null) {
break;
}
executions.add(ee);
}
});
return executions;
} catch (Exception e) {
String message =
String.format(
"Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s",
eventHandlerName, eventName, messageId);
throw new NonTransientException(message, e);
}
}
@Override
public void updateLastPollData(String taskDefName, String domain, String workerId) {
Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
String effectiveDomain = (domain == null) ? "DEFAULT" : domain;
withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain));
}
/**
 * Fetches the last poll data for a queue/domain pair; a {@code null} domain resolves to
 * the "DEFAULT" key.
 */
@Override
public PollData getPollData(String taskDefName, String domain) {
    Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
    String effectiveDomain;
    if (domain == null) {
        effectiveDomain = "DEFAULT";
    } else {
        effectiveDomain = domain;
    }
    return getWithRetriedTransactions(
            connection -> readPollData(connection, taskDefName, effectiveDomain));
}
/** Fetches the poll data rows for every domain of the given queue. */
@Override
public List<PollData> getPollData(String taskDefName) {
    Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
    List<PollData> result = readAllPollData(taskDefName);
    return result;
}
/**
 * Fetches every PollData row, ordered by queue name.
 *
 * <p>Runs on its own connection with auto-commit forced on (the read needs no explicit
 * transaction); the connection's previous auto-commit mode is restored before it is
 * returned to the pool.
 *
 * @throws NonTransientException on any failure
 */
@Override
public List<PollData> getAllPollData() {
    try (Connection tx = dataSource.getConnection()) {
        boolean previousAutoCommitMode = tx.getAutoCommit();
        tx.setAutoCommit(true);
        try {
            String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name";
            return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class));
        } catch (Throwable th) {
            // Wrap everything (including Errors) so callers see a consistent DAO exception.
            throw new NonTransientException(th.getMessage(), th);
        } finally {
            tx.setAutoCommit(previousAutoCommitMode);
        }
    } catch (SQLException ex) {
        throw new NonTransientException(ex.getMessage(), ex);
    }
}
/**
 * Bulk-loads tasks by id; ids with no row (or NULL json_data) are silently omitted.
 *
 * @return the matching tasks, possibly fewer than {@code taskIds.size()}
 */
private List<TaskModel> getTasks(Connection connection, List<String> taskIds) {
    if (taskIds.isEmpty()) {
        return Lists.newArrayList();
    }
    // Expand one '?' placeholder per id for the IN clause.
    String bindings = Query.generateInBindings(taskIds.size());
    final String sql =
            String.format(
                    "SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL",
                    bindings);
    return query(
            connection, sql, q -> q.addParameters(taskIds).executeAndFetch(TaskModel.class));
}
/**
 * Inserts or updates a workflow row and keeps the pending-workflow index in sync.
 *
 * <p>Tasks are persisted separately, so the task list is detached before serializing the
 * workflow into json_data and re-attached afterwards.
 *
 * @param workflow the workflow to persist; must not be null
 * @param update false to insert (also records the def->workflow mapping), true to update
 * @return the workflow's id
 */
private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) {
    Preconditions.checkNotNull(workflow, "workflow object cannot be null");
    boolean terminal = workflow.getStatus().isTerminal();
    List<TaskModel> tasks = workflow.getTasks();
    workflow.setTasks(Lists.newLinkedList());
    try {
        withTransaction(
                tx -> {
                    if (!update) {
                        addWorkflow(tx, workflow);
                        addWorkflowDefToWorkflowMapping(tx, workflow);
                    } else {
                        updateWorkflow(tx, workflow);
                    }
                    if (terminal) {
                        removePendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    } else {
                        addPendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    }
                });
    } finally {
        // Restore even when the transaction throws; previously a failure left the caller's
        // in-memory model with an empty task list.
        workflow.setTasks(tasks);
    }
    return workflow.getWorkflowId();
}
/**
 * Persists a task and maintains its bookkeeping rows.
 *
 * <p>Order matters here: the in-progress flag is refreshed first (only for task types with
 * a concurrency limit), then the task payload is upserted, terminal tasks are dropped from
 * task_in_progress, and finally the workflow-to-task mapping is ensured.
 */
private void updateTask(Connection connection, TaskModel task) {
    Optional<TaskDef> taskDefinition = task.getTaskDefinition();
    if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) {
        boolean inProgress =
                task.getStatus() != null
                        && task.getStatus().equals(TaskModel.Status.IN_PROGRESS);
        updateInProgressStatus(connection, task, inProgress);
    }
    insertOrUpdateTaskData(connection, task);
    if (task.getStatus() != null && task.getStatus().isTerminal()) {
        removeTaskInProgress(connection, task);
    }
    addWorkflowToTaskMapping(connection, task);
}
/** Loads a workflow's json_data by id; first match or null when absent. */
private WorkflowModel readWorkflow(Connection connection, String workflowId) {
    final String sql = "SELECT json_data FROM workflow WHERE workflow_id = ?";
    return query(
            connection,
            sql,
            q -> q.addParameter(workflowId).executeAndFetchFirst(WorkflowModel.class));
}
/** Inserts the workflow row (id, correlation id and serialized payload). */
private void addWorkflow(Connection connection, WorkflowModel workflow) {
    final String sql =
            "INSERT INTO workflow (workflow_id, correlation_id, json_data) VALUES (?, ?, ?)";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(workflow.getWorkflowId())
                            .addParameter(workflow.getCorrelationId())
                            .addJsonParameter(workflow)
                            .executeUpdate());
}
/** Overwrites the stored json_data of an existing workflow row and bumps modified_on. */
private void updateWorkflow(Connection connection, WorkflowModel workflow) {
    final String sql =
            "UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?";
    execute(
            connection,
            sql,
            q -> q.addJsonParameter(workflow).addParameter(workflow.getWorkflowId()).executeUpdate());
}
/** Deletes the workflow row with the given id. */
private void removeWorkflow(Connection connection, String workflowId) {
    final String sql = "DELETE FROM workflow WHERE workflow_id = ?";
    execute(connection, sql, q -> q.addParameter(workflowId).executeDelete());
}
/**
 * Ensures a workflow_pending row exists for this (type, id) pair.
 *
 * <p>The SELECT-first check avoids issuing the INSERT in the common already-present case;
 * the ON CONFLICT DO NOTHING clause still guards against a concurrent insert slipping in
 * between the two statements.
 */
private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) {
    String EXISTS_PENDING_WORKFLOW =
            "SELECT EXISTS(SELECT 1 FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?)";
    boolean exists =
            query(
                    connection,
                    EXISTS_PENDING_WORKFLOW,
                    q -> q.addParameter(workflowType).addParameter(workflowId).exists());
    if (!exists) {
        String INSERT_PENDING_WORKFLOW =
                "INSERT INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?) ON CONFLICT (workflow_type,workflow_id) DO NOTHING";
        execute(
                connection,
                INSERT_PENDING_WORKFLOW,
                q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate());
    }
}
/** Drops the pending-workflow index row for this (type, id) pair. */
private void removePendingWorkflow(
        Connection connection, String workflowType, String workflowId) {
    final String sql = "DELETE FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?";
    execute(
            connection,
            sql,
            q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete());
}
/**
 * Upserts a task's serialized payload, trying UPDATE before INSERT.
 */
private void insertOrUpdateTaskData(Connection connection, TaskModel task) {
    /*
     * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that
     * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens.
     */
    String UPDATE_TASK =
            "UPDATE task SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE task_id=?";
    int rowsUpdated =
            query(
                    connection,
                    UPDATE_TASK,
                    q ->
                            q.addJsonParameter(task)
                                    .addParameter(task.getTaskId())
                                    .executeUpdate());
    if (rowsUpdated == 0) {
        // No existing row: insert, still upserting on conflict in case of a concurrent insert.
        String INSERT_TASK =
                "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON CONFLICT (task_id) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
        execute(
                connection,
                INSERT_TASK,
                q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate());
    }
}
/** Deletes the task row with the given task's id. */
private void removeTaskData(Connection connection, TaskModel task) {
    final String sql = "DELETE FROM task WHERE task_id = ?";
    execute(connection, sql, q -> q.addParameter(task.getTaskId()).executeDelete());
}
/**
 * Ensures a workflow_to_task index row exists for this task.
 *
 * <p>SELECT-first avoids the INSERT in the common already-mapped case; ON CONFLICT DO
 * NOTHING covers the race with a concurrent insert.
 */
private void addWorkflowToTaskMapping(Connection connection, TaskModel task) {
    String EXISTS_WORKFLOW_TO_TASK =
            "SELECT EXISTS(SELECT 1 FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?)";
    boolean exists =
            query(
                    connection,
                    EXISTS_WORKFLOW_TO_TASK,
                    q ->
                            q.addParameter(task.getWorkflowInstanceId())
                                    .addParameter(task.getTaskId())
                                    .exists());
    if (!exists) {
        String INSERT_WORKFLOW_TO_TASK =
                "INSERT INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?) ON CONFLICT (workflow_id,task_id) DO NOTHING";
        execute(
                connection,
                INSERT_WORKFLOW_TO_TASK,
                q ->
                        q.addParameter(task.getWorkflowInstanceId())
                                .addParameter(task.getTaskId())
                                .executeUpdate());
    }
}
/** Drops the workflow_to_task index row for this task. */
private void removeWorkflowToTaskMapping(Connection connection, TaskModel task) {
    final String sql = "DELETE FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(task.getWorkflowInstanceId())
                            .addParameter(task.getTaskId())
                            .executeDelete());
}
/** Records the (workflow def, creation date, workflow id) index row for lookups by def. */
private void addWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
    final String sql =
            "INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(workflow.getWorkflowName())
                            .addParameter(dateStr(workflow.getCreateTime()))
                            .addParameter(workflow.getWorkflowId())
                            .executeUpdate());
}
/** Drops the (workflow def, creation date, workflow id) index row. */
private void removeWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
    final String sql =
            "DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(workflow.getWorkflowName())
                            .addParameter(dateStr(workflow.getCreateTime()))
                            .addParameter(workflow.getWorkflowId())
                            .executeUpdate());
}
/**
 * Records that a task has been scheduled for a workflow, at most once per task_key.
 *
 * @return true only when this call actually inserted the row; false when the row already
 *     existed or a concurrent insert won the ON CONFLICT race (count == 0).
 */
@VisibleForTesting
boolean addScheduledTask(Connection connection, TaskModel task, String taskKey) {
    final String EXISTS_SCHEDULED_TASK =
            "SELECT EXISTS(SELECT 1 FROM task_scheduled where workflow_id = ? AND task_key = ?)";
    boolean exists =
            query(
                    connection,
                    EXISTS_SCHEDULED_TASK,
                    q ->
                            q.addParameter(task.getWorkflowInstanceId())
                                    .addParameter(taskKey)
                                    .exists());
    if (!exists) {
        final String INSERT_IGNORE_SCHEDULED_TASK =
                "INSERT INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?) ON CONFLICT (workflow_id,task_key) DO NOTHING";
        int count =
                query(
                        connection,
                        INSERT_IGNORE_SCHEDULED_TASK,
                        q ->
                                q.addParameter(task.getWorkflowInstanceId())
                                        .addParameter(taskKey)
                                        .addParameter(task.getTaskId())
                                        .executeUpdate());
        return count > 0;
    } else {
        return false;
    }
}
/** Drops the task_scheduled row for this workflow/task-key pair. */
private void removeScheduledTask(Connection connection, TaskModel task, String taskKey) {
    final String sql = "DELETE FROM task_scheduled WHERE workflow_id = ? AND task_key = ?";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(task.getWorkflowInstanceId())
                            .addParameter(taskKey)
                            .executeDelete());
}
/**
 * Ensures a task_in_progress row exists for this task (used for concurrency limiting).
 *
 * <p>NOTE(review): unlike the other exists-then-insert helpers here, this INSERT has no
 * ON CONFLICT guard, so a concurrent insert between the SELECT and the INSERT could raise
 * a unique violation — confirm this is absorbed by the caller's retry logic.
 */
private void addTaskInProgress(Connection connection, TaskModel task) {
    String EXISTS_IN_PROGRESS_TASK =
            "SELECT EXISTS(SELECT 1 FROM task_in_progress WHERE task_def_name = ? AND task_id = ?)";
    boolean exists =
            query(
                    connection,
                    EXISTS_IN_PROGRESS_TASK,
                    q ->
                            q.addParameter(task.getTaskDefName())
                                    .addParameter(task.getTaskId())
                                    .exists());
    if (!exists) {
        String INSERT_IN_PROGRESS_TASK =
                "INSERT INTO task_in_progress (task_def_name, task_id, workflow_id) VALUES (?, ?, ?)";
        execute(
                connection,
                INSERT_IN_PROGRESS_TASK,
                q ->
                        q.addParameter(task.getTaskDefName())
                                .addParameter(task.getTaskId())
                                .addParameter(task.getWorkflowInstanceId())
                                .executeUpdate());
    }
}
/** Drops the task_in_progress row for this task. */
private void removeTaskInProgress(Connection connection, TaskModel task) {
    final String sql = "DELETE FROM task_in_progress WHERE task_def_name = ? AND task_id = ?";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(task.getTaskDefName())
                            .addParameter(task.getTaskId())
                            .executeUpdate());
}
/** Sets the in_progress_status flag on this task's task_in_progress row. */
private void updateInProgressStatus(Connection connection, TaskModel task, boolean inProgress) {
    final String sql =
            "UPDATE task_in_progress SET in_progress_status = ?, modified_on = CURRENT_TIMESTAMP "
                    + "WHERE task_def_name = ? AND task_id = ?";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(inProgress)
                            .addParameter(task.getTaskDefName())
                            .addParameter(task.getTaskId())
                            .executeUpdate());
}
/**
 * Inserts a new event_execution row.
 *
 * @return true if exactly one (or more) row was inserted
 */
private boolean insertEventExecution(Connection connection, EventExecution eventExecution) {
    final String sql =
            "INSERT INTO event_execution (event_handler_name, event_name, message_id, execution_id, json_data) "
                    + "VALUES (?, ?, ?, ?, ?)";
    int inserted =
            query(
                    connection,
                    sql,
                    q ->
                            q.addParameter(eventExecution.getName())
                                    .addParameter(eventExecution.getEvent())
                                    .addParameter(eventExecution.getMessageId())
                                    .addParameter(eventExecution.getId())
                                    .addJsonParameter(eventExecution)
                                    .executeUpdate());
    return inserted > 0;
}
/**
 * Rewrites json_data (and bumps modified_on) for the event execution identified by the
 * (handler, event, message, execution) key.
 */
private void updateEventExecution(Connection connection, EventExecution eventExecution) {
    // @formatter:off
    String UPDATE_EVENT_EXECUTION =
            "UPDATE event_execution SET "
                    + "json_data = ?, "
                    + "modified_on = CURRENT_TIMESTAMP "
                    + "WHERE event_handler_name = ? "
                    + "AND event_name = ? "
                    + "AND message_id = ? "
                    + "AND execution_id = ?";
    // @formatter:on
    execute(
            connection,
            UPDATE_EVENT_EXECUTION,
            q ->
                    q.addJsonParameter(eventExecution)
                            .addParameter(eventExecution.getName())
                            .addParameter(eventExecution.getEvent())
                            .addParameter(eventExecution.getMessageId())
                            .addParameter(eventExecution.getId())
                            .executeUpdate());
}
/** Deletes the event_execution row identified by its full composite key. */
private void removeEventExecution(Connection connection, EventExecution eventExecution) {
    final String sql =
            "DELETE FROM event_execution WHERE event_handler_name = ? AND event_name = ? AND message_id = ? AND execution_id = ?";
    execute(
            connection,
            sql,
            q ->
                    q.addParameter(eventExecution.getName())
                            .addParameter(eventExecution.getEvent())
                            .addParameter(eventExecution.getMessageId())
                            .addParameter(eventExecution.getId())
                            .executeUpdate());
}
/**
 * Loads a single event execution by its composite key.
 *
 * @return the first matching execution, or null when no row matches
 *     (executeAndFetchFirst semantics — presumed, consistent with other read helpers here)
 */
private EventExecution readEventExecution(
        Connection connection,
        String eventHandlerName,
        String eventName,
        String messageId,
        String executionId) {
    // @formatter:off
    String GET_EVENT_EXECUTION =
            "SELECT json_data FROM event_execution "
                    + "WHERE event_handler_name = ? "
                    + "AND event_name = ? "
                    + "AND message_id = ? "
                    + "AND execution_id = ?";
    // @formatter:on
    return query(
            connection,
            GET_EVENT_EXECUTION,
            q ->
                    q.addParameter(eventHandlerName)
                            .addParameter(eventName)
                            .addParameter(messageId)
                            .addParameter(executionId)
                            .executeAndFetchFirst(EventExecution.class));
}
/**
 * Upserts a poll_data row keyed by (queue_name, domain), trying UPDATE before INSERT.
 */
private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) {
    /*
     * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that
     * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens. Since polling happens *a lot*, the sequence can increase
     * dramatically even though it won't be used.
     */
    String UPDATE_POLL_DATA =
            "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? AND domain=?";
    int rowsUpdated =
            query(
                    connection,
                    UPDATE_POLL_DATA,
                    q ->
                            q.addJsonParameter(pollData)
                                    .addParameter(pollData.getQueueName())
                                    .addParameter(domain)
                                    .executeUpdate());
    if (rowsUpdated == 0) {
        // First poll for this queue/domain: insert, upserting on conflict to absorb races.
        String INSERT_POLL_DATA =
                "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON CONFLICT (queue_name,domain) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
        execute(
                connection,
                INSERT_POLL_DATA,
                q ->
                        q.addParameter(pollData.getQueueName())
                                .addParameter(domain)
                                .addJsonParameter(pollData)
                                .executeUpdate());
    }
}
/** Loads the poll_data row for a queue/domain pair; first match or null when absent. */
private PollData readPollData(Connection connection, String queueName, String domain) {
    final String sql = "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?";
    return query(
            connection,
            sql,
            q ->
                    q.addParameter(queueName)
                            .addParameter(domain)
                            .executeAndFetchFirst(PollData.class));
}
/** Loads every poll_data row for the given queue, across all domains. */
private List<PollData> readAllPollData(String queueName) {
    final String sql = "SELECT json_data FROM poll_data WHERE queue_name = ?";
    return queryWithTransaction(
            sql, q -> q.addParameter(queueName).executeAndFetch(PollData.class));
}
/**
 * Returns up to {@code limit} in-progress task ids for this task's definition, oldest
 * (by created_on) first.
 */
private List<String> findAllTasksInProgressInOrderOfArrival(TaskModel task, int limit) {
    final String sql =
            "SELECT task_id FROM task_in_progress WHERE task_def_name = ? ORDER BY created_on LIMIT ?";
    return queryWithTransaction(
            sql,
            q -> q.addParameter(task.getTaskDefName()).addParameter(limit).executeScalarList(String.class));
}
/**
 * Asserts that the mandatory identifiers of a task are present.
 *
 * @throws NullPointerException if the task or any required field is null
 */
private void validate(TaskModel task) {
    Preconditions.checkNotNull(task, "task object cannot be null");
    Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null");
    Preconditions.checkNotNull(task.getWorkflowInstanceId(), "Workflow instance id cannot be null");
    Preconditions.checkNotNull(task.getReferenceTaskName(), "Task reference name cannot be null");
}
}
| 8,184 |
0 | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres | Create_ds/conductor-community/persistence/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAO.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.annotation.PreDestroy;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.postgres.config.PostgresProperties;
import com.netflix.conductor.postgres.util.ExecutorsUtil;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
/**
 * Postgres-backed implementation of {@link MetadataDAO} and {@link EventHandlerDAO}.
 *
 * <p>Task definitions are served from an in-memory cache that is refreshed from the
 * database on a fixed schedule; workflow definitions and event handlers are read through
 * directly. All SQL runs through the transactional helpers inherited from
 * {@link PostgresBaseDAO}.
 */
public class PostgresMetadataDAO extends PostgresBaseDAO implements MetadataDAO, EventHandlerDAO {
    // Cache of TaskDefs keyed by name, refreshed periodically by refreshTaskDefs().
    private final ConcurrentHashMap<String, TaskDef> taskDefCache = new ConcurrentHashMap<>();
    private static final String CLASS_NAME = PostgresMetadataDAO.class.getSimpleName();
    // Single-threaded scheduler that drives the periodic cache refresh.
    private final ScheduledExecutorService scheduledExecutorService;
    /**
     * Starts the periodic task-def cache refresh at the interval configured in
     * {@code properties}.
     */
    public PostgresMetadataDAO(
            RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            PostgresProperties properties) {
        super(retryTemplate, objectMapper, dataSource);
        long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
        this.scheduledExecutorService =
                Executors.newSingleThreadScheduledExecutor(
                        ExecutorsUtil.newNamedThreadFactory("postgres-metadata-"));
        this.scheduledExecutorService.scheduleWithFixedDelay(
                this::refreshTaskDefs, cacheRefreshTime, cacheRefreshTime, TimeUnit.SECONDS);
    }
    /** Stops the refresh scheduler, forcing shutdown after a 30 second grace period. */
    @PreDestroy
    public void destroy() {
        try {
            this.scheduledExecutorService.shutdown();
            if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
                logger.debug("tasks completed, shutting down");
            } else {
                logger.warn("Forcing shutdown after waiting for 30 seconds");
                scheduledExecutorService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            logger.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for refreshTaskDefs",
                    ie);
            scheduledExecutorService.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }
    /** Creates (upserts) a task definition. */
    @Override
    public TaskDef createTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }
    /** Updates (upserts) a task definition. Same underlying operation as create. */
    @Override
    public TaskDef updateTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }
    /** Returns a task definition, serving from the cache and falling back to the DB on a miss. */
    @Override
    public TaskDef getTaskDef(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        TaskDef taskDef = taskDefCache.get(name);
        if (taskDef == null) {
            if (logger.isTraceEnabled()) {
                logger.trace("Cache miss: {}", name);
            }
            taskDef = getTaskDefFromDB(name);
        }
        return taskDef;
    }
    /** Reads all task definitions directly from the database (bypasses the cache). */
    @Override
    public List<TaskDef> getAllTaskDefs() {
        return getWithRetriedTransactions(this::findAllTaskDefs);
    }
    /**
     * Deletes a task definition and evicts it from the cache.
     *
     * @throws NotFoundException if no definition with that name exists
     */
    @Override
    public void removeTaskDef(String name) {
        final String DELETE_TASKDEF_QUERY = "DELETE FROM meta_task_def WHERE name = ?";
        executeWithTransaction(
                DELETE_TASKDEF_QUERY,
                q -> {
                    if (!q.addParameter(name).executeDelete()) {
                        throw new NotFoundException("No such task definition");
                    }
                    taskDefCache.remove(name);
                });
    }
    /**
     * Creates a workflow definition.
     *
     * @throws ConflictException if a definition with the same name/version already exists
     */
    @Override
    public void createWorkflowDef(WorkflowDef def) {
        validate(def);
        withTransaction(
                tx -> {
                    if (workflowExists(tx, def)) {
                        throw new ConflictException(
                                "Workflow with " + def.key() + " already exists!");
                    }
                    insertOrUpdateWorkflowDef(tx, def);
                });
    }
    /** Updates (upserts) a workflow definition. */
    @Override
    public void updateWorkflowDef(WorkflowDef def) {
        validate(def);
        withTransaction(tx -> insertOrUpdateWorkflowDef(tx, def));
    }
    /** Returns the definition whose version equals the stored latest_version marker. */
    @Override
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        final String GET_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND "
                        + "version = latest_version";
        return Optional.ofNullable(
                queryWithTransaction(
                        GET_LATEST_WORKFLOW_DEF_QUERY,
                        q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class)));
    }
    /** Returns a specific name/version of a workflow definition, if present. */
    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        final String GET_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?";
        return Optional.ofNullable(
                queryWithTransaction(
                        GET_WORKFLOW_DEF_QUERY,
                        q ->
                                q.addParameter(name)
                                        .addParameter(version)
                                        .executeAndFetchFirst(WorkflowDef.class)));
    }
    /**
     * Deletes one version of a workflow definition and recomputes the latest_version
     * marker from the remaining rows.
     *
     * @throws NotFoundException if that name/version does not exist
     */
    @Override
    public void removeWorkflowDef(String name, Integer version) {
        final String DELETE_WORKFLOW_QUERY =
                "DELETE from meta_workflow_def WHERE name = ? AND version = ?";
        withTransaction(
                tx -> {
                    // remove specified workflow
                    execute(
                            tx,
                            DELETE_WORKFLOW_QUERY,
                            q -> {
                                if (!q.addParameter(name).addParameter(version).executeDelete()) {
                                    throw new NotFoundException(
                                            String.format(
                                                    "No such workflow definition: %s version: %d",
                                                    name, version));
                                }
                            });
                    // reset latest version based on remaining rows for this workflow
                    Optional<Integer> maxVersion = getLatestVersion(tx, name);
                    maxVersion.ifPresent(newVersion -> updateLatestVersion(tx, name, newVersion));
                });
    }
    /** Returns the distinct names of all stored workflow definitions. */
    public List<String> findAll() {
        final String FIND_ALL_WORKFLOW_DEF_QUERY = "SELECT DISTINCT name FROM meta_workflow_def";
        return queryWithTransaction(
                FIND_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(String.class));
    }
    /** Returns every stored workflow definition, ordered by name then version. */
    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        final String GET_ALL_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def ORDER BY name, version";
        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }
    /** Returns, for each workflow name, the row with the maximum version number. */
    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        final String GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY =
                "SELECT json_data FROM meta_workflow_def wd WHERE wd.version = (SELECT MAX(version) FROM meta_workflow_def wd2 WHERE wd2.name = wd.name)";
        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY,
                q -> q.executeAndFetch(WorkflowDef.class));
    }
    /** Returns the rows whose version equals their stored latest_version marker. */
    public List<WorkflowDef> getAllLatest() {
        final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE version = " + "latest_version";
        return queryWithTransaction(
                GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }
    /** Returns every version of the named workflow definition, in ascending version order. */
    public List<WorkflowDef> getAllVersions(String name) {
        final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE name = ? " + "ORDER BY version";
        return queryWithTransaction(
                GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY,
                q -> q.addParameter(name).executeAndFetch(WorkflowDef.class));
    }
    /**
     * Creates an event handler.
     *
     * @throws ConflictException if a handler with the same name already exists
     */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");
        final String INSERT_EVENT_HANDLER_QUERY =
                "INSERT INTO meta_event_handler (name, event, active, json_data) "
                        + "VALUES (?, ?, ?, ?)";
        withTransaction(
                tx -> {
                    if (getEventHandler(tx, eventHandler.getName()) != null) {
                        throw new ConflictException(
                                "EventHandler with name "
                                        + eventHandler.getName()
                                        + " already exists!");
                    }
                    execute(
                            tx,
                            INSERT_EVENT_HANDLER_QUERY,
                            q ->
                                    q.addParameter(eventHandler.getName())
                                            .addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .executeUpdate());
                });
    }
    /**
     * Updates an existing event handler.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");
        // @formatter:off
        final String UPDATE_EVENT_HANDLER_QUERY =
                "UPDATE meta_event_handler SET "
                        + "event = ?, active = ?, json_data = ?, "
                        + "modified_on = CURRENT_TIMESTAMP WHERE name = ?";
        // @formatter:on
        withTransaction(
                tx -> {
                    EventHandler existing = getEventHandler(tx, eventHandler.getName());
                    if (existing == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + eventHandler.getName() + " not found!");
                    }
                    execute(
                            tx,
                            UPDATE_EVENT_HANDLER_QUERY,
                            q ->
                                    q.addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .addParameter(eventHandler.getName())
                                            .executeUpdate());
                });
    }
    /**
     * Deletes an event handler by name.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void removeEventHandler(String name) {
        final String DELETE_EVENT_HANDLER_QUERY = "DELETE FROM meta_event_handler WHERE name = ?";
        withTransaction(
                tx -> {
                    EventHandler existing = getEventHandler(tx, name);
                    if (existing == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + name + " not found!");
                    }
                    execute(
                            tx,
                            DELETE_EVENT_HANDLER_QUERY,
                            q -> q.addParameter(name).executeDelete());
                });
    }
    /** Returns every stored event handler. */
    @Override
    public List<EventHandler> getAllEventHandlers() {
        final String READ_ALL_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_QUERY, q -> q.executeAndFetch(EventHandler.class));
    }
    /**
     * Returns the handlers registered for an event; the active-only filter is applied in
     * memory after fetching all rows for the event.
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        final String READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE event = ?";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY,
                q -> {
                    q.addParameter(event);
                    return q.executeAndFetch(
                            rs -> {
                                List<EventHandler> handlers = new ArrayList<>();
                                while (rs.next()) {
                                    EventHandler h = readValue(rs.getString(1), EventHandler.class);
                                    if (!activeOnly || h.isActive()) {
                                        handlers.add(h);
                                    }
                                }
                                return handlers;
                            });
                });
    }
    /**
     * Use {@link Preconditions} to check for required {@link TaskDef} fields, throwing a Runtime
     * exception if validations fail.
     *
     * @param taskDef The {@code TaskDef} to check.
     */
    private void validate(TaskDef taskDef) {
        Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null");
        Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null");
    }
    /**
     * Use {@link Preconditions} to check for required {@link WorkflowDef} fields, throwing a
     * Runtime exception if validations fail.
     *
     * @param def The {@code WorkflowDef} to check.
     */
    private void validate(WorkflowDef def) {
        Preconditions.checkNotNull(def, "WorkflowDef object cannot be null");
        Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null");
    }
    /**
     * Retrieve a {@link EventHandler} by {@literal name}.
     *
     * @param connection The {@link Connection} to use for queries.
     * @param name The {@code EventHandler} name to look for.
     * @return {@literal null} if nothing is found, otherwise the {@code EventHandler}.
     */
    private EventHandler getEventHandler(Connection connection, String name) {
        final String READ_ONE_EVENT_HANDLER_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE name = ?";
        return query(
                connection,
                READ_ONE_EVENT_HANDLER_QUERY,
                q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class));
    }
    /**
     * Check if a {@link WorkflowDef} with the same {@literal name} and {@literal version} already
     * exist.
     *
     * @param connection The {@link Connection} to use for queries.
     * @param def The {@code WorkflowDef} to check for.
     * @return {@literal true} if a {@code WorkflowDef} already exists with the same values.
     */
    private Boolean workflowExists(Connection connection, WorkflowDef def) {
        final String CHECK_WORKFLOW_DEF_EXISTS_QUERY =
                "SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + "version = ?";
        return query(
                connection,
                CHECK_WORKFLOW_DEF_EXISTS_QUERY,
                q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists());
    }
    /**
     * Return the latest version that exists for the provided {@code name}.
     *
     * @param tx The {@link Connection} to use for queries.
     * @param name The {@code name} to check for.
     * @return {@code Optional.empty()} if no versions exist, otherwise the max {@link
     *     WorkflowDef#getVersion} found.
     */
    private Optional<Integer> getLatestVersion(Connection tx, String name) {
        final String GET_LATEST_WORKFLOW_DEF_VERSION =
                "SELECT max(version) AS version FROM meta_workflow_def WHERE " + "name = ?";
        Integer val =
                query(
                        tx,
                        GET_LATEST_WORKFLOW_DEF_VERSION,
                        q -> {
                            q.addParameter(name);
                            return q.executeAndFetch(
                                    rs -> {
                                        if (!rs.next()) {
                                            return null;
                                        }
                                        return rs.getInt(1);
                                    });
                        });
        return Optional.ofNullable(val);
    }
    /**
     * Update the latest version for the workflow with name {@code WorkflowDef} to the version
     * provided in {@literal version}.
     *
     * @param tx The {@link Connection} to use for queries.
     * @param name Workflow def name to update
     * @param version The new latest {@code version} value.
     */
    private void updateLatestVersion(Connection tx, String name, int version) {
        final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY =
                "UPDATE meta_workflow_def SET latest_version = ? " + "WHERE name = ?";
        execute(
                tx,
                UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY,
                q -> q.addParameter(version).addParameter(name).executeUpdate());
    }
    /**
     * Inserts or updates one name/version row, then bumps latest_version to the maximum of
     * the existing latest and this definition's version.
     */
    private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) {
        final String INSERT_WORKFLOW_DEF_QUERY =
                "INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + " ?, ?)";
        Optional<Integer> version = getLatestVersion(tx, def.getName());
        if (!workflowExists(tx, def)) {
            execute(
                    tx,
                    INSERT_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .addJsonParameter(def)
                                    .executeUpdate());
        } else {
            // @formatter:off
            final String UPDATE_WORKFLOW_DEF_QUERY =
                    "UPDATE meta_workflow_def "
                            + "SET json_data = ?, modified_on = CURRENT_TIMESTAMP "
                            + "WHERE name = ? AND version = ?";
            // @formatter:on
            execute(
                    tx,
                    UPDATE_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addJsonParameter(def)
                                    .addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .executeUpdate());
        }
        int maxVersion = def.getVersion();
        if (version.isPresent() && version.get() > def.getVersion()) {
            maxVersion = version.get();
        }
        updateLatestVersion(tx, def.getName(), maxVersion);
    }
    /**
     * Query persistence for all defined {@link TaskDef} data, and cache it in {@link
     * #taskDefCache}.
     */
    private void refreshTaskDefs() {
        try {
            withTransaction(
                    tx -> {
                        Map<String, TaskDef> map = new HashMap<>();
                        findAllTaskDefs(tx).forEach(taskDef -> map.put(taskDef.getName(), taskDef));
                        // NOTE(review): the synchronized block serializes refreshes, but
                        // concurrent readers are not blocked and may briefly see an empty
                        // cache between clear() and putAll(); getTaskDef falls back to the
                        // DB in that case, so this is a performance (not correctness) gap.
                        synchronized (taskDefCache) {
                            taskDefCache.clear();
                            taskDefCache.putAll(map);
                        }
                        if (logger.isTraceEnabled()) {
                            logger.trace("Refreshed {} TaskDefs", taskDefCache.size());
                        }
                    });
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "refreshTaskDefs");
            logger.error("refresh TaskDefs failed ", e);
        }
    }
    /**
     * Query persistence for all defined {@link TaskDef} data.
     *
     * @param tx The {@link Connection} to use for queries.
     * @return A new {@code List<TaskDef>} with all the {@code TaskDef} data that was retrieved.
     */
    private List<TaskDef> findAllTaskDefs(Connection tx) {
        final String READ_ALL_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def";
        return query(tx, READ_ALL_TASKDEF_QUERY, q -> q.executeAndFetch(TaskDef.class));
    }
    /**
     * Explicitly retrieves a {@link TaskDef} from persistence, avoiding {@link #taskDefCache}.
     *
     * @param name The name of the {@code TaskDef} to query for.
     * @return {@literal null} if nothing is found, otherwise the {@code TaskDef}.
     */
    private TaskDef getTaskDefFromDB(String name) {
        final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?";
        return queryWithTransaction(
                READ_ONE_TASKDEF_QUERY,
                q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class));
    }
    /**
     * Upserts a task definition (UPDATE first, INSERT on zero rows) inside a retried
     * transaction, then refreshes the cache entry.
     *
     * @return the task definition's name
     */
    private String insertOrUpdateTaskDef(TaskDef taskDef) {
        final String UPDATE_TASKDEF_QUERY =
                "UPDATE meta_task_def SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE name = ?";
        final String INSERT_TASKDEF_QUERY =
                "INSERT INTO meta_task_def (name, json_data) VALUES (?, ?)";
        return getWithRetriedTransactions(
                tx -> {
                    execute(
                            tx,
                            UPDATE_TASKDEF_QUERY,
                            update -> {
                                int result =
                                        update.addJsonParameter(taskDef)
                                                .addParameter(taskDef.getName())
                                                .executeUpdate();
                                if (result == 0) {
                                    execute(
                                            tx,
                                            INSERT_TASKDEF_QUERY,
                                            insert ->
                                                    insert.addParameter(taskDef.getName())
                                                            .addJsonParameter(taskDef)
                                                            .executeUpdate());
                                }
                            });
                    taskDefCache.put(taskDef.getName(), taskDef);
                    return taskDef.getName();
                });
    }
}
| 8,185 |
0 | Create_ds/conductor-community/persistence/common-persistence/src/test/java/com/netflix/conductor | Create_ds/conductor-community/persistence/common-persistence/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.dao;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static org.junit.Assert.*;
public abstract class ExecutionDAOTest {
protected abstract ExecutionDAO getExecutionDAO();
protected ConcurrentExecutionLimitDAO getConcurrentExecutionLimitDAO() {
return (ConcurrentExecutionLimitDAO) getExecutionDAO();
}
@Rule public ExpectedException expectedException = ExpectedException.none();
@Test
public void testTaskExceedsLimit() {
TaskDef taskDefinition = new TaskDef();
taskDefinition.setName("task1");
taskDefinition.setConcurrentExecLimit(1);
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setName("task1");
workflowTask.setTaskDefinition(taskDefinition);
workflowTask.setTaskDefinition(taskDefinition);
List<TaskModel> tasks = new LinkedList<>();
for (int i = 0; i < 15; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(i + 1);
task.setTaskId("t_" + i);
task.setWorkflowInstanceId("workflow_" + i);
task.setReferenceTaskName("task1");
task.setTaskDefName("task1");
tasks.add(task);
task.setStatus(TaskModel.Status.SCHEDULED);
task.setWorkflowTask(workflowTask);
}
getExecutionDAO().createTasks(tasks);
assertFalse(getConcurrentExecutionLimitDAO().exceedsLimit(tasks.get(0)));
tasks.get(0).setStatus(TaskModel.Status.IN_PROGRESS);
getExecutionDAO().updateTask(tasks.get(0));
for (TaskModel task : tasks) {
assertTrue(getConcurrentExecutionLimitDAO().exceedsLimit(task));
}
}
@Test
public void testCreateTaskException() {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setTaskDefName("task1");
expectedException.expect(NonTransientException.class);
expectedException.expectMessage("Workflow instance id cannot be null");
getExecutionDAO().createTasks(Collections.singletonList(task));
task.setWorkflowInstanceId(UUID.randomUUID().toString());
expectedException.expect(NonTransientException.class);
expectedException.expectMessage("Task reference name cannot be null");
getExecutionDAO().createTasks(Collections.singletonList(task));
}
@Test
public void testCreateTaskException2() {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setTaskDefName("task1");
task.setWorkflowInstanceId(UUID.randomUUID().toString());
expectedException.expect(NonTransientException.class);
expectedException.expectMessage("Task reference name cannot be null");
getExecutionDAO().createTasks(Collections.singletonList(task));
}
@Test
public void testTaskCreateDups() {
List<TaskModel> tasks = new LinkedList<>();
String workflowId = UUID.randomUUID().toString();
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(i + 1);
task.setTaskId(workflowId + "_t" + i);
task.setReferenceTaskName("t" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
}
// Let's insert a retried task
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + 2);
task.setReferenceTaskName("t" + 2);
task.setRetryCount(1);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + 2);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
// Duplicate task!
task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + 1);
task.setReferenceTaskName("t" + 1);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("task" + 1);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
List<TaskModel> created = getExecutionDAO().createTasks(tasks);
assertEquals(tasks.size() - 1, created.size()); // 1 less
Set<String> srcIds =
tasks.stream()
.map(t -> t.getReferenceTaskName() + "." + t.getRetryCount())
.collect(Collectors.toSet());
Set<String> createdIds =
created.stream()
.map(t -> t.getReferenceTaskName() + "." + t.getRetryCount())
.collect(Collectors.toSet());
assertEquals(srcIds, createdIds);
List<TaskModel> pending = getExecutionDAO().getPendingTasksByWorkflow("task0", workflowId);
assertNotNull(pending);
assertEquals(1, pending.size());
assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), pending.get(0)));
List<TaskModel> found = getExecutionDAO().getTasks(tasks.get(0).getTaskDefName(), null, 1);
assertNotNull(found);
assertEquals(1, found.size());
assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), found.get(0)));
}
@Test
public void testTaskOps() {
List<TaskModel> tasks = new LinkedList<>();
String workflowId = UUID.randomUUID().toString();
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(workflowId + "_t" + i);
task.setReferenceTaskName("testTaskOps" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId(workflowId);
task.setTaskDefName("testTaskOps" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
tasks.add(task);
}
for (int i = 0; i < 3; i++) {
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId("x" + workflowId + "_t" + i);
task.setReferenceTaskName("testTaskOps" + i);
task.setRetryCount(0);
task.setWorkflowInstanceId("x" + workflowId);
task.setTaskDefName("testTaskOps" + i);
task.setStatus(TaskModel.Status.IN_PROGRESS);
getExecutionDAO().createTasks(Collections.singletonList(task));
}
List<TaskModel> created = getExecutionDAO().createTasks(tasks);
assertEquals(tasks.size(), created.size());
List<TaskModel> pending =
getExecutionDAO().getPendingTasksForTaskType(tasks.get(0).getTaskDefName());
assertNotNull(pending);
assertEquals(2, pending.size());
// Pending list can come in any order. finding the one we are looking for and then
// comparing
TaskModel matching =
pending.stream()
.filter(task -> task.getTaskId().equals(tasks.get(0).getTaskId()))
.findAny()
.get();
assertTrue(EqualsBuilder.reflectionEquals(matching, tasks.get(0)));
for (int i = 0; i < 3; i++) {
TaskModel found = getExecutionDAO().getTask(workflowId + "_t" + i);
assertNotNull(found);
found.getOutputData().put("updated", true);
found.setStatus(TaskModel.Status.COMPLETED);
getExecutionDAO().updateTask(found);
}
List<String> taskIds =
tasks.stream().map(TaskModel::getTaskId).collect(Collectors.toList());
List<TaskModel> found = getExecutionDAO().getTasks(taskIds);
assertEquals(taskIds.size(), found.size());
found.forEach(
task -> {
assertTrue(task.getOutputData().containsKey("updated"));
assertEquals(true, task.getOutputData().get("updated"));
boolean removed = getExecutionDAO().removeTask(task.getTaskId());
assertTrue(removed);
});
found = getExecutionDAO().getTasks(taskIds);
assertTrue(found.isEmpty());
}
@Test
public void testPending() {
WorkflowDef def = new WorkflowDef();
def.setName("pending_count_test");
WorkflowModel workflow = createTestWorkflow();
workflow.setWorkflowDefinition(def);
List<String> workflowIds = generateWorkflows(workflow, 10);
long count = getExecutionDAO().getPendingWorkflowCount(def.getName());
assertEquals(10, count);
for (int i = 0; i < 10; i++) {
getExecutionDAO().removeFromPendingWorkflow(def.getName(), workflowIds.get(i));
}
count = getExecutionDAO().getPendingWorkflowCount(def.getName());
assertEquals(0, count);
}
@Test
public void complexExecutionTest() {
WorkflowModel workflow = createTestWorkflow();
int numTasks = workflow.getTasks().size();
String workflowId = getExecutionDAO().createWorkflow(workflow);
assertEquals(workflow.getWorkflowId(), workflowId);
List<TaskModel> created = getExecutionDAO().createTasks(workflow.getTasks());
assertEquals(workflow.getTasks().size(), created.size());
WorkflowModel workflowWithTasks =
getExecutionDAO().getWorkflow(workflow.getWorkflowId(), true);
assertEquals(workflowId, workflowWithTasks.getWorkflowId());
assertEquals(numTasks, workflowWithTasks.getTasks().size());
WorkflowModel found = getExecutionDAO().getWorkflow(workflowId, false);
assertTrue(found.getTasks().isEmpty());
workflow.getTasks().clear();
assertEquals(workflow, found);
workflow.getInput().put("updated", true);
getExecutionDAO().updateWorkflow(workflow);
found = getExecutionDAO().getWorkflow(workflowId);
assertNotNull(found);
assertTrue(found.getInput().containsKey("updated"));
assertEquals(true, found.getInput().get("updated"));
List<String> running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertTrue(running.isEmpty());
workflow.setStatus(WorkflowModel.Status.RUNNING);
getExecutionDAO().updateWorkflow(workflow);
running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertEquals(1, running.size());
assertEquals(workflow.getWorkflowId(), running.get(0));
List<WorkflowModel> pending =
getExecutionDAO()
.getPendingWorkflowsByType(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(pending);
assertEquals(1, pending.size());
assertEquals(3, pending.get(0).getTasks().size());
pending.get(0).getTasks().clear();
assertEquals(workflow, pending.get(0));
workflow.setStatus(WorkflowModel.Status.COMPLETED);
getExecutionDAO().updateWorkflow(workflow);
running =
getExecutionDAO()
.getRunningWorkflowIds(
workflow.getWorkflowName(), workflow.getWorkflowVersion());
assertNotNull(running);
assertTrue(running.isEmpty());
List<WorkflowModel> bytime =
getExecutionDAO()
.getWorkflowsByType(
workflow.getWorkflowName(),
System.currentTimeMillis(),
System.currentTimeMillis() + 100);
assertNotNull(bytime);
assertTrue(bytime.isEmpty());
bytime =
getExecutionDAO()
.getWorkflowsByType(
workflow.getWorkflowName(),
workflow.getCreateTime() - 10,
workflow.getCreateTime() + 10);
assertNotNull(bytime);
assertEquals(1, bytime.size());
}
protected WorkflowModel createTestWorkflow() {
WorkflowDef def = new WorkflowDef();
def.setName("Junit Workflow");
def.setVersion(3);
def.setSchemaVersion(2);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.setCorrelationId("correlationX");
workflow.setCreatedBy("junit_tester");
workflow.setEndTime(200L);
Map<String, Object> input = new HashMap<>();
input.put("param1", "param1 value");
input.put("param2", 100);
workflow.setInput(input);
Map<String, Object> output = new HashMap<>();
output.put("ouput1", "output 1 value");
output.put("op2", 300);
workflow.setOutput(output);
workflow.setOwnerApp("workflow");
workflow.setParentWorkflowId("parentWorkflowId");
workflow.setParentWorkflowTaskId("parentWFTaskId");
workflow.setReasonForIncompletion("missing recipe");
workflow.setReRunFromWorkflowId("re-run from id1");
workflow.setCreateTime(90L);
workflow.setStatus(WorkflowModel.Status.FAILED);
workflow.setWorkflowId(UUID.randomUUID().toString());
List<TaskModel> tasks = new LinkedList<>();
TaskModel task = new TaskModel();
task.setScheduledTime(1L);
task.setSeq(1);
task.setTaskId(UUID.randomUUID().toString());
task.setReferenceTaskName("t1");
task.setWorkflowInstanceId(workflow.getWorkflowId());
task.setTaskDefName("task1");
TaskModel task2 = new TaskModel();
task2.setScheduledTime(2L);
task2.setSeq(2);
task2.setTaskId(UUID.randomUUID().toString());
task2.setReferenceTaskName("t2");
task2.setWorkflowInstanceId(workflow.getWorkflowId());
task2.setTaskDefName("task2");
TaskModel task3 = new TaskModel();
task3.setScheduledTime(2L);
task3.setSeq(3);
task3.setTaskId(UUID.randomUUID().toString());
task3.setReferenceTaskName("t3");
task3.setWorkflowInstanceId(workflow.getWorkflowId());
task3.setTaskDefName("task3");
tasks.add(task);
tasks.add(task2);
tasks.add(task3);
workflow.setTasks(tasks);
workflow.setUpdatedBy("junit_tester");
workflow.setUpdatedTime(800L);
return workflow;
}
protected List<String> generateWorkflows(WorkflowModel base, int count) {
List<String> workflowIds = new ArrayList<>();
for (int i = 0; i < count; i++) {
String workflowId = UUID.randomUUID().toString();
base.setWorkflowId(workflowId);
base.setCorrelationId("corr001");
base.setStatus(WorkflowModel.Status.RUNNING);
getExecutionDAO().createWorkflow(base);
workflowIds.add(workflowId);
}
return workflowIds;
}
}
| 8,186 |
0 | Create_ds/conductor-community/persistence/common-persistence/src/test/java/com/netflix/conductor | Create_ds/conductor-community/persistence/common-persistence/src/test/java/com/netflix/conductor/dao/TestBase.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.dao;
/** Shared marker base class for persistence test suites; intentionally empty. */
public class TestBase {}
| 8,187 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/test/integration/grpc | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/test/integration/grpc/mysql/MySQLGrpcEndToEndTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.grpc.mysql;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
import com.netflix.conductor.test.integration.grpc.AbstractGrpcEndToEndTest;
/**
 * gRPC end-to-end tests run against a MySQL-backed Conductor server; the Testcontainers
 * "tc" JDBC URL prefix boots a throwaway MySQL 8.0.27 container for the test run.
 */
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {
            "conductor.db.type=mysql",
            "conductor.grpc-server.port=8094",
            "spring.datasource.url=jdbc:tc:mysql:8.0.27:///conductor", // "tc" prefix starts the
            // MySql
            // container
            "spring.datasource.username=root",
            "spring.datasource.password=root",
            "spring.datasource.hikari.maximum-pool-size=8",
            // NOTE(review): Hikari's minimum-idle is a connection COUNT; 300000 looks like a
            // value intended for idle-timeout (ms) — confirm and move to the right property.
            "spring.datasource.hikari.minimum-idle=300000"
        })
public class MySQLGrpcEndToEndTest extends AbstractGrpcEndToEndTest {
    /** Point every gRPC client at the embedded server's port (8094) before each test. */
    @Before
    public void init() {
        taskClient = new TaskClient("localhost", 8094);
        workflowClient = new WorkflowClient("localhost", 8094);
        metadataClient = new MetadataClient("localhost", 8094);
        eventClient = new EventClient("localhost", 8094);
    }
}
| 8,188 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLQueueDAOTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.mysql.config.MySQLConfiguration;
import com.netflix.conductor.mysql.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Integration tests for {@link MySQLQueueDAO}, executed against a real MySQL schema
 * migrated by Flyway. Several tests are timing-sensitive (delivery offsets, unack
 * windows), so the exact statement order matters.
 */
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            MySQLConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest
public class MySQLQueueDAOTest {
    private static final Logger LOGGER = LoggerFactory.getLogger(MySQLQueueDAOTest.class);
    @Autowired private MySQLQueueDAO queueDAO;
    @Autowired private ObjectMapper objectMapper;
    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;
    @Autowired Flyway flyway;
    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }
    /**
     * Full queue lifecycle: push, size/detail accounting, idempotent pushIfNotExists,
     * pop + ack (including the "uacked" shard counters), remove, and flush.
     */
    @Test
    public void complexQueueTest() {
        String queueName = "TestQueue";
        long offsetTimeInSecond = 0;
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.push(queueName, messageId, offsetTimeInSecond);
        }
        int size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        Map<String, Long> details = queueDAO.queuesDetail();
        assertEquals(1, details.size());
        assertEquals(10L, details.get(queueName).longValue());
        // pushIfNotExists on existing ids must be a no-op (size stays 10).
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        List<String> popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(10, popped.size());
        // Popped-but-unacked messages move from "size" to "uacked" in the verbose detail.
        Map<String, Map<String, Map<String, Long>>> verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        long shardSize = verbose.get(queueName).get("a").get("size");
        long unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(10, unackedSize);
        popped.forEach(messageId -> queueDAO.ack(queueName, messageId));
        verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        shardSize = verbose.get(queueName).get("a").get("size");
        unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(0, unackedSize);
        popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(0, popped.size());
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertTrue(queueDAO.containsMessage(queueName, messageId));
            queueDAO.remove(queueName, messageId);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        queueDAO.flush(queueName);
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);
    }
    /** Test fix for https://github.com/Netflix/conductor/issues/1892 */
    @Test
    public void containsMessageTest() {
        String queueName = "TestQueue";
        long offsetTimeInSecond = 0;
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.push(queueName, messageId, offsetTimeInSecond);
        }
        int size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertTrue(queueDAO.containsMessage(queueName, messageId));
            queueDAO.remove(queueName, messageId);
        }
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertFalse(queueDAO.containsMessage(queueName, messageId));
        }
    }
    /**
     * Test fix for https://github.com/Netflix/conductor/issues/399
     *
     * @since 1.8.2-rc5
     */
    @Test
    public void pollMessagesTest() {
        final List<Message> messages = new ArrayList<>();
        final String queueName = "issue399_testQueue";
        final int totalSize = 10;
        for (int i = 0; i < totalSize; i++) {
            String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}";
            Message m = new Message("testmsg-" + i, payload, "");
            if (i % 2 == 0) {
                // Set priority on message with pair id
                m.setPriority(99 - i);
            }
            messages.add(m);
        }
        // Populate the queue with our test message batch
        queueDAO.push(queueName, ImmutableList.copyOf(messages));
        // Assert that all messages were persisted and no extras are in there
        assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
        final int firstPollSize = 3;
        List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 10_000);
        assertNotNull("First poll was null", firstPoll);
        assertFalse("First poll was empty", firstPoll.isEmpty());
        assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
        final int secondPollSize = 4;
        List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize, 10_000);
        assertNotNull("Second poll was null", secondPoll);
        assertFalse("Second poll was empty", secondPoll.isEmpty());
        assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
        // Assert that the total queue size hasn't changed
        assertEquals(
                "Total queue size should have remained the same",
                totalSize,
                queueDAO.getSize(queueName));
        // Assert that our un-popped messages match our expected size
        final long expectedSize = totalSize - firstPollSize - secondPollSize;
        try (Connection c = dataSource.getConnection()) {
            String UNPOPPED =
                    "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
            try (Query q = new Query(objectMapper, c, UNPOPPED)) {
                long count = q.addParameter(queueName).executeCount();
                assertEquals("Remaining queue size mismatch", expectedSize, count);
            }
        } catch (Exception ex) {
            fail(ex.getMessage());
        }
    }
    /**
     * Test fix for https://github.com/Netflix/conductor/issues/448
     *
     * @since 1.8.2-rc5
     */
    @Test
    public void pollDeferredMessagesTest() throws InterruptedException {
        final List<Message> messages = new ArrayList<>();
        final String queueName = "issue448_testQueue";
        final int totalSize = 10;
        for (int i = 0; i < totalSize; i++) {
            int offset = 0;
            if (i < 5) {
                offset = 0;
            } else if (i == 6 || i == 7) {
                // Purposefully skipping id:5 to test out of order deliveries
                // Set id:6 and id:7 for a 2s delay to be picked up in the second polling batch
                offset = 5;
            } else {
                // Set all other queue messages to have enough of a delay that they won't
                // accidentally
                // be picked up.
                offset = 10_000 + i;
            }
            String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}";
            Message m = new Message("testmsg-" + i, payload, "");
            messages.add(m);
            queueDAO.push(queueName, "testmsg-" + i, offset);
        }
        // Assert that all messages were persisted and no extras are in there
        assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
        final int firstPollSize = 4;
        List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 100);
        assertNotNull("First poll was null", firstPoll);
        assertFalse("First poll was empty", firstPoll.isEmpty());
        assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
        // Candidate ids include one extra (5 for 4 polled) to tolerate DB ordering.
        List<String> firstPollMessageIds =
                messages.stream()
                        .map(Message::getId)
                        .collect(Collectors.toList())
                        .subList(0, firstPollSize + 1);
        for (int i = 0; i < firstPollSize; i++) {
            String actual = firstPoll.get(i).getId();
            assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual));
        }
        final int secondPollSize = 3;
        // Sleep a bit to get the next batch of messages
        LOGGER.debug("Sleeping for second poll...");
        Thread.sleep(5_000);
        // Poll for many more messages than expected
        List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize + 10, 100);
        assertNotNull("Second poll was null", secondPoll);
        assertFalse("Second poll was empty", secondPoll.isEmpty());
        assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
        List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7");
        for (int i = 0; i < secondPollSize; i++) {
            String actual = secondPoll.get(i).getId();
            assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual));
        }
        // Assert that the total queue size hasn't changed
        assertEquals(
                "Total queue size should have remained the same",
                totalSize,
                queueDAO.getSize(queueName));
        // Assert that our un-popped messages match our expected size
        final long expectedSize = totalSize - firstPollSize - secondPollSize;
        try (Connection c = dataSource.getConnection()) {
            String UNPOPPED =
                    "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
            try (Query q = new Query(objectMapper, c, UNPOPPED)) {
                long count = q.addParameter(queueName).executeCount();
                assertEquals("Remaining queue size mismatch", expectedSize, count);
            }
        } catch (Exception ex) {
            fail(ex.getMessage());
        }
    }
    /**
     * processUnacks must only requeue timed-out unacked messages of the named queue,
     * leaving acked messages and other queues untouched.
     */
    @Test
    public void processUnacksTest() {
        final String queueName = "process_unacks_test";
        // Count of messages in the queue(s)
        final int count = 10;
        // Number of messages to process acks for
        final int unackedCount = 4;
        // A secondary queue to make sure we don't accidentally process other queues
        final String otherQueueName = "process_unacks_test_other_queue";
        // Create testing queue with some messages (but not all) that will be popped/acked.
        for (int i = 0; i < count; i++) {
            int offset = 0;
            if (i >= unackedCount) {
                offset = 1_000_000;
            }
            queueDAO.push(queueName, "unack-" + i, offset);
        }
        // Create a second queue to make sure that unacks don't occur for it
        for (int i = 0; i < count; i++) {
            queueDAO.push(otherQueueName, "other-" + i, 0);
        }
        // Poll for first batch of messages (should be equal to unackedCount)
        List<Message> polled = queueDAO.pollMessages(queueName, 100, 10_000);
        assertNotNull(polled);
        assertFalse(polled.isEmpty());
        assertEquals(unackedCount, polled.size());
        // Poll messages from the other queue so we know they don't get unacked later
        queueDAO.pollMessages(otherQueueName, 100, 10_000);
        // Ack one of the polled messages
        assertTrue(queueDAO.ack(queueName, "unack-1"));
        // Should have one less un-acked popped message in the queue
        Long uacked = queueDAO.queuesDetailVerbose().get(queueName).get("a").get("uacked");
        assertNotNull(uacked);
        assertEquals(uacked.longValue(), unackedCount - 1);
        // Process unacks
        queueDAO.processUnacks(queueName);
        // Check uacks for both queues after processing
        Map<String, Map<String, Map<String, Long>>> details = queueDAO.queuesDetailVerbose();
        uacked = details.get(queueName).get("a").get("uacked");
        assertNotNull(uacked);
        assertEquals(
                "The messages that were polled should be unacked still",
                uacked.longValue(),
                unackedCount - 1);
        Long otherUacked = details.get(otherQueueName).get("a").get("uacked");
        assertNotNull(otherUacked);
        assertEquals(
                "Other queue should have all unacked messages", otherUacked.longValue(), count);
        Long size = queueDAO.queuesDetail().get(queueName);
        assertNotNull(size);
        assertEquals(size.longValue(), count - unackedCount);
    }
}
| 8,189 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAOTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.util.List;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.ExecutionDAOTest;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.mysql.config.MySQLConfiguration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            MySQLConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest
public class MySQLExecutionDAOTest extends ExecutionDAOTest {

    @Autowired private MySQLExecutionDAO executionDAO;

    @Autowired Flyway flyway;

    /** Reset the schema so each test starts from a pristine database. */
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** Correlation-id lookups must return every matching workflow. */
    @Test
    public void testPendingByCorrelationId() {
        WorkflowDef def = new WorkflowDef();
        def.setName("pending_count_correlation_jtest");

        WorkflowModel workflow = createTestWorkflow();
        workflow.setWorkflowDefinition(def);
        generateWorkflows(workflow, 10);

        List<WorkflowModel> byCorrelationId =
                getExecutionDAO()
                        .getWorkflowsByCorrelationId(
                                "pending_count_correlation_jtest", "corr001", true);
        assertNotNull(byCorrelationId);
        assertEquals(10, byCorrelationId.size());
    }

    @Override
    public ExecutionDAO getExecutionDAO() {
        return executionDAO;
    }
}
| 8,190 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAOTest.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.mysql.config.MySQLConfiguration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            MySQLConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest
public class MySQLMetadataDAOTest {

    @Autowired private MySQLMetadataDAO metadataDAO;

    @Autowired Flyway flyway;

    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** Creating the same (name, version) workflow definition twice must fail. */
    @Test
    public void testDuplicateWorkflowDef() {
        WorkflowDef def = new WorkflowDef();
        def.setName("testDuplicate");
        def.setVersion(1);

        metadataDAO.createWorkflowDef(def);

        NonTransientException applicationException =
                assertThrows(NonTransientException.class, () -> metadataDAO.createWorkflowDef(def));
        assertEquals(
                "Workflow with testDuplicate.1 already exists!", applicationException.getMessage());
    }

    /** Removing a workflow definition that was never created must fail with a clear message. */
    @Test
    public void testRemoveNotExistingWorkflowDef() {
        NonTransientException applicationException =
                assertThrows(
                        NonTransientException.class,
                        () -> metadataDAO.removeWorkflowDef("test", 1));
        assertEquals(
                "No such workflow definition: test version: 1", applicationException.getMessage());
    }

    /**
     * End-to-end CRUD walk-through for workflow definitions: create, list, fetch-by-version,
     * latest-version resolution, update, and delete. The assertions are order-dependent — each
     * step verifies the state produced by the previous mutations.
     */
    @Test
    public void testWorkflowDefOperations() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        def.setVersion(1);
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setOwnerApp("ownerApp");
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);

        metadataDAO.createWorkflowDef(def);

        // Exactly one definition exists and round-trips all fields.
        List<WorkflowDef> all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());

        WorkflowDef found = metadataDAO.getWorkflowDef("test", 1).get();
        assertTrue(EqualsBuilder.reflectionEquals(def, found));

        // Add version 3 (skipping 2 on purpose) and verify listing / latest lookups.
        def.setVersion(3);
        metadataDAO.createWorkflowDef(def);

        all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(def.getVersion(), found.getVersion());
        assertEquals(3, found.getVersion());

        // getAllLatest collapses versions down to one entry per name.
        all = metadataDAO.getAllLatest();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(3, all.get(0).getVersion());

        // getAllVersions returns every version of the name, ordered ascending here.
        all = metadataDAO.getAllVersions(def.getName());
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals("test", all.get(1).getName());
        assertEquals(1, all.get(0).getVersion());
        assertEquals(3, all.get(1).getVersion());

        // Update mutates the stored row for the current (name, version).
        def.setDescription("updated");
        metadataDAO.updateWorkflowDef(def);

        found = metadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get();
        assertEquals(def.getDescription(), found.getDescription());

        List<String> allnames = metadataDAO.findAll();
        assertNotNull(allnames);
        assertEquals(1, allnames.size());
        assertEquals(def.getName(), allnames.get(0));

        // Inserting version 2 must NOT displace version 3 as the latest.
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(3, found.getVersion());

        // Deleting the latest version demotes "latest" to the next-highest (2).
        metadataDAO.removeWorkflowDef("test", 3);
        Optional<WorkflowDef> deleted = metadataDAO.getWorkflowDef("test", 3);
        assertFalse(deleted.isPresent());

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());

        // Deleting a non-latest version (1) leaves the latest (2) unchanged.
        metadataDAO.removeWorkflowDef("test", 1);
        deleted = metadataDAO.getWorkflowDef("test", 1);
        assertFalse(deleted.isPresent());

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());
    }

    /**
     * CRUD walk-through for task definitions: create one fully-populated definition, update it,
     * bulk-create nine more, verify listing/ordering, and delete back down to one.
     */
    @Test
    public void testTaskDefOperations() {
        TaskDef def = new TaskDef("taskA");
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setInputKeys(Arrays.asList("a", "b", "c"));
        def.setOutputKeys(Arrays.asList("01", "o2"));
        def.setOwnerApp("ownerApp");
        def.setRetryCount(3);
        def.setRetryDelaySeconds(100);
        def.setRetryLogic(TaskDef.RetryLogic.FIXED);
        def.setTimeoutPolicy(TaskDef.TimeoutPolicy.ALERT_ONLY);
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);

        metadataDAO.createTaskDef(def);

        TaskDef found = metadataDAO.getTaskDef(def.getName());
        assertTrue(EqualsBuilder.reflectionEquals(def, found));

        def.setDescription("updated description");
        metadataDAO.updateTaskDef(def);

        found = metadataDAO.getTaskDef(def.getName());
        assertTrue(EqualsBuilder.reflectionEquals(def, found));
        assertEquals("updated description", found.getDescription());

        // Nine more definitions: taskA0 .. taskA8.
        for (int i = 0; i < 9; i++) {
            TaskDef tdf = new TaskDef("taskA" + i);
            metadataDAO.createTaskDef(tdf);
        }

        List<TaskDef> all = metadataDAO.getAllTaskDefs();
        assertNotNull(all);
        assertEquals(10, all.size());

        // Names must be unique; sorted order puts "taskA" first, then taskA0..taskA8.
        Set<String> allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet());
        assertEquals(10, allnames.size());
        List<String> sorted = allnames.stream().sorted().collect(Collectors.toList());
        assertEquals(def.getName(), sorted.get(0));

        for (int i = 0; i < 9; i++) {
            assertEquals(def.getName() + i, sorted.get(i + 1));
        }

        for (int i = 0; i < 9; i++) {
            metadataDAO.removeTaskDef(def.getName() + i);
        }
        all = metadataDAO.getAllTaskDefs();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals(def.getName(), all.get(0).getName());
    }

    /** Removing a task definition that was never created must fail. */
    @Test
    public void testRemoveNotExistingTaskDef() {
        NonTransientException applicationException =
                assertThrows(
                        NonTransientException.class,
                        () -> metadataDAO.removeTaskDef("test" + UUID.randomUUID().toString()));
        assertEquals("No such task definition", applicationException.getMessage());
    }

    /**
     * Event handler CRUD: an inactive handler is stored but excluded from active-only event
     * lookups; after activation and re-pointing to a new event, lookups follow the new event.
     */
    @Test
    public void testEventHandlers() {
        String event1 = "SQS::arn:account090:sqstest1";
        String event2 = "SQS::arn:account090:sqstest2";

        EventHandler eventHandler = new EventHandler();
        eventHandler.setName(UUID.randomUUID().toString());
        eventHandler.setActive(false);
        EventHandler.Action action = new EventHandler.Action();
        action.setAction(EventHandler.Action.Type.start_workflow);
        action.setStart_workflow(new EventHandler.StartWorkflow());
        action.getStart_workflow().setName("workflow_x");
        eventHandler.getActions().add(action);
        eventHandler.setEvent(event1);

        metadataDAO.addEventHandler(eventHandler);
        List<EventHandler> all = metadataDAO.getAllEventHandlers();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals(eventHandler.getName(), all.get(0).getName());
        assertEquals(eventHandler.getEvent(), all.get(0).getEvent());

        List<EventHandler> byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
        assertNotNull(byEvents);
        assertEquals(0, byEvents.size()); // event is marked as in-active

        // Activate the handler and move it from event1 to event2.
        eventHandler.setActive(true);
        eventHandler.setEvent(event2);
        metadataDAO.updateEventHandler(eventHandler);

        all = metadataDAO.getAllEventHandlers();
        assertNotNull(all);
        assertEquals(1, all.size());

        byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
        assertNotNull(byEvents);
        assertEquals(0, byEvents.size());

        byEvents = metadataDAO.getEventHandlersForEvent(event2, true);
        assertNotNull(byEvents);
        assertEquals(1, byEvents.size());
    }

    /**
     * getAllWorkflowDefsLatestVersions must return exactly one entry per definition name,
     * carrying the highest version created for that name.
     */
    @Test
    public void testGetAllWorkflowDefsLatestVersions() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test1");
        def.setVersion(1);
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setOwnerApp("ownerApp");
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);
        metadataDAO.createWorkflowDef(def);

        // test2 gets versions 1 and 2; test3 gets versions 1, 2 and 3.
        def.setName("test2");
        metadataDAO.createWorkflowDef(def);
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);

        def.setName("test3");
        def.setVersion(1);
        metadataDAO.createWorkflowDef(def);
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);
        def.setVersion(3);
        metadataDAO.createWorkflowDef(def);

        // Placed the values in a map because they might not be stored in order of defName.
        // To test, needed to confirm that the versions are correct for the definitions.
        Map<String, WorkflowDef> allMap =
                metadataDAO.getAllWorkflowDefsLatestVersions().stream()
                        .collect(Collectors.toMap(WorkflowDef::getName, Function.identity()));

        assertNotNull(allMap);
        assertEquals(3, allMap.size());
        assertEquals(1, allMap.get("test1").getVersion());
        assertEquals(2, allMap.get("test2").getVersion());
        assertEquals(3, allMap.get("test3").getVersion());
    }
}
| 8,191 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ExecuteFunction.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions with no expected result.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface ExecuteFunction {

    /**
     * Execute the given query for its side effects only.
     *
     * @param query the {@link Query} to execute
     * @throws SQLException if the underlying JDBC execution fails
     */
    void apply(Query query) throws SQLException;
}
| 8,192 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/TransactionalFunction.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.Connection;
import java.sql.SQLException;
/**
 * Functional interface for operations within a transactional context.
 *
 * @param <R> the result type produced by the operation
 * @author mustafa
 */
@FunctionalInterface
public interface TransactionalFunction<R> {

    /**
     * Run the operation against the supplied transactional {@link Connection}.
     *
     * @param tx the connection representing the enclosing transaction
     * @return the result of the operation
     * @throws SQLException if the underlying JDBC work fails
     */
    R apply(Connection tx) throws SQLException;
}
| 8,193 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/QueryFunction.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions that return results.
 *
 * @param <R> the result type produced by the query execution
 * @author mustafa
 */
@FunctionalInterface
public interface QueryFunction<R> {

    /**
     * Execute the given query and produce a result.
     *
     * @param query the {@link Query} to execute
     * @return the result of the execution
     * @throws SQLException if the underlying JDBC execution fails
     */
    R apply(Query query) throws SQLException;
}
| 8,194 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ResultSetHandler.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query#executeAndFetch(ResultSetHandler)}.
 *
 * @param <R> the value type extracted from the {@link ResultSet}
 * @author mustafa
 */
@FunctionalInterface
public interface ResultSetHandler<R> {

    /**
     * Map the given {@link ResultSet} to a value. The caller owns the result set's lifecycle;
     * implementations should not close it.
     *
     * @param resultSet the result set to read from
     * @return the mapped value
     * @throws SQLException if reading the result set fails
     */
    R apply(ResultSet resultSet) throws SQLException;
}
| 8,195 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/LazyToString.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.util.function.Supplier;
/**
 * Wraps a {@link Supplier} so that the String value is produced only when {@link #toString()} is
 * actually invoked; the supplier is re-evaluated on every call (no caching).
 */
public class LazyToString {

    private final Supplier<String> valueSupplier;

    /**
     * @param supplier invoked each time {@link #toString()} is called to produce the value
     */
    public LazyToString(Supplier<String> supplier) {
        this.valueSupplier = supplier;
    }

    @Override
    public String toString() {
        return valueSupplier.get();
    }
}
| 8,196 |
0 | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql | Create_ds/conductor-community/persistence/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/Query.java | /*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.io.IOException;
import java.sql.Connection;
import java.sql.Date;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.exception.NonTransientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities.
*
* <p>This class simulates a parameter building pattern and all {@literal addParameter(*)} methods
* must be called in the proper order of their expected binding sequence.
*
* @author mustafa
*/
public class Query implements AutoCloseable {
private final Logger logger = LoggerFactory.getLogger(getClass());
/** The {@link ObjectMapper} instance to use for serializing/deserializing JSON. */
protected final ObjectMapper objectMapper;
/** The initial supplied query String that was used to prepare {@link #statement}. */
private final String rawQuery;
/**
* Parameter index for the {@code ResultSet#set*(*)} methods, gets incremented every time a
* parameter is added to the {@code PreparedStatement} {@link #statement}.
*/
private final AtomicInteger index = new AtomicInteger(1);
/** The {@link PreparedStatement} that will be managed and executed by this class. */
private final PreparedStatement statement;
public Query(ObjectMapper objectMapper, Connection connection, String query) {
this.rawQuery = query;
this.objectMapper = objectMapper;
try {
this.statement = connection.prepareStatement(query);
} catch (SQLException ex) {
throw new NonTransientException(
"Cannot prepare statement for query: " + ex.getMessage(), ex);
}
}
/**
* Generate a String with {@literal count} number of '?' placeholders for {@link
* PreparedStatement} queries.
*
* @param count The number of '?' chars to generate.
* @return a comma delimited string of {@literal count} '?' binding placeholders.
*/
public static String generateInBindings(int count) {
String[] questions = new String[count];
for (int i = 0; i < count; i++) {
questions[i] = "?";
}
return String.join(", ", questions);
}
public Query addParameter(final String value) {
return addParameterInternal((ps, idx) -> ps.setString(idx, value));
}
public Query addParameter(final int value) {
return addParameterInternal((ps, idx) -> ps.setInt(idx, value));
}
public Query addParameter(final boolean value) {
return addParameterInternal(((ps, idx) -> ps.setBoolean(idx, value)));
}
public Query addParameter(final long value) {
return addParameterInternal((ps, idx) -> ps.setLong(idx, value));
}
public Query addParameter(final double value) {
return addParameterInternal((ps, idx) -> ps.setDouble(idx, value));
}
public Query addParameter(Date date) {
return addParameterInternal((ps, idx) -> ps.setDate(idx, date));
}
public Query addParameter(Timestamp timestamp) {
return addParameterInternal((ps, idx) -> ps.setTimestamp(idx, timestamp));
}
/**
* Serializes {@literal value} to a JSON string for persistence.
*
* @param value The value to serialize.
* @return {@literal this}
*/
public Query addJsonParameter(Object value) {
return addParameter(toJson(value));
}
/**
* Bind the given {@link java.util.Date} to the PreparedStatement as a {@link java.sql.Date}.
*
* @param date The {@literal java.util.Date} to bind.
* @return {@literal this}
*/
public Query addDateParameter(java.util.Date date) {
return addParameter(new Date(date.getTime()));
}
/**
* Bind the given {@link java.util.Date} to the PreparedStatement as a {@link
* java.sql.Timestamp}.
*
* @param date The {@literal java.util.Date} to bind.
* @return {@literal this}
*/
public Query addTimestampParameter(java.util.Date date) {
return addParameter(new Timestamp(date.getTime()));
}
/**
* Bind the given epoch millis to the PreparedStatement as a {@link java.sql.Timestamp}.
*
* @param epochMillis The epoch ms to create a new {@literal Timestamp} from.
* @return {@literal this}
*/
public Query addTimestampParameter(long epochMillis) {
return addParameter(new Timestamp(epochMillis));
}
/**
* Add a collection of primitive values at once, in the order of the collection.
*
* @param values The values to bind to the prepared statement.
* @return {@literal this}
* @throws IllegalArgumentException If a non-primitive/unsupported type is encountered in the
* collection.
* @see #addParameters(Object...)
*/
public Query addParameters(Collection values) {
return addParameters(values.toArray());
}
/**
* Add many primitive values at once.
*
* @param values The values to bind to the prepared statement.
* @return {@literal this}
* @throws IllegalArgumentException If a non-primitive/unsupported type is encountered.
*/
public Query addParameters(Object... values) {
for (Object v : values) {
if (v instanceof String) {
addParameter((String) v);
} else if (v instanceof Integer) {
addParameter((Integer) v);
} else if (v instanceof Long) {
addParameter((Long) v);
} else if (v instanceof Double) {
addParameter((Double) v);
} else if (v instanceof Boolean) {
addParameter((Boolean) v);
} else if (v instanceof Date) {
addParameter((Date) v);
} else if (v instanceof Timestamp) {
addParameter((Timestamp) v);
} else {
throw new IllegalArgumentException(
"Type "
+ v.getClass().getName()
+ " is not supported by automatic property assignment");
}
}
return this;
}
/**
* Utility method for evaluating the prepared statement as a query to check the existence of a
* record using a numeric count or boolean return value.
*
* <p>The {@link #rawQuery} provided must result in a {@link Number} or {@link Boolean} result.
*
* @return {@literal true} If a count query returned more than 0 or an exists query returns
* {@literal true}.
* @throws NonTransientException If an unexpected return type cannot be evaluated to a {@code
* Boolean} result.
*/
public boolean exists() {
Object val = executeScalar();
if (null == val) {
return false;
}
if (val instanceof Number) {
return convertLong(val) > 0;
}
if (val instanceof Boolean) {
return (Boolean) val;
}
if (val instanceof String) {
return convertBoolean(val);
}
throw new NonTransientException(
"Expected a Numeric or Boolean scalar return value from the query, received "
+ val.getClass().getName());
}
/**
* Convenience method for executing delete statements.
*
* @return {@literal true} if the statement affected 1 or more rows.
* @see #executeUpdate()
*/
public boolean executeDelete() {
int count = executeUpdate();
if (count > 1) {
logger.trace("Removed {} row(s) for query {}", count, rawQuery);
}
return count > 0;
}
/**
* Convenience method for executing statements that return a single numeric value, typically
* {@literal SELECT COUNT...} style queries.
*
* @return The result of the query as a {@literal long}.
*/
public long executeCount() {
return executeScalar(Long.class);
}
/**
* @return The result of {@link PreparedStatement#executeUpdate()}
*/
public int executeUpdate() {
try {
Long start = null;
if (logger.isTraceEnabled()) {
start = System.currentTimeMillis();
}
final int val = this.statement.executeUpdate();
if (null != start && logger.isTraceEnabled()) {
long end = System.currentTimeMillis();
logger.trace("[{}ms] {}: {}", (end - start), val, rawQuery);
}
return val;
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
/**
* Execute a query from the PreparedStatement and return the ResultSet.
*
* <p><em>NOTE:</em> The returned ResultSet must be closed/managed by the calling methods.
*
* @return {@link PreparedStatement#executeQuery()}
* @throws NonTransientException If any SQL errors occur.
*/
public ResultSet executeQuery() {
Long start = null;
if (logger.isTraceEnabled()) {
start = System.currentTimeMillis();
}
try {
return this.statement.executeQuery();
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
} finally {
if (null != start && logger.isTraceEnabled()) {
long end = System.currentTimeMillis();
logger.trace("[{}ms] {}", (end - start), rawQuery);
}
}
}
/**
* @return The single result of the query as an Object.
*/
public Object executeScalar() {
try (ResultSet rs = executeQuery()) {
if (!rs.next()) {
return null;
}
return rs.getObject(1);
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
/**
* Execute the PreparedStatement and return a single 'primitive' value from the ResultSet.
*
* @param returnType The type to return.
* @param <V> The type parameter to return a List of.
* @return A single result from the execution of the statement, as a type of {@literal
* returnType}.
* @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
* the result, or any SQL errors occur.
*/
public <V> V executeScalar(Class<V> returnType) {
try (ResultSet rs = executeQuery()) {
if (!rs.next()) {
Object value = null;
if (Integer.class == returnType) {
value = 0;
} else if (Long.class == returnType) {
value = 0L;
} else if (Boolean.class == returnType) {
value = false;
}
return returnType.cast(value);
} else {
return getScalarFromResultSet(rs, returnType);
}
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
/**
* Execute the PreparedStatement and return a List of 'primitive' values from the ResultSet.
*
* @param returnType The type Class return a List of.
* @param <V> The type parameter to return a List of.
* @return A {@code List<returnType>}.
* @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
* the result, or any SQL errors occur.
*/
public <V> List<V> executeScalarList(Class<V> returnType) {
try (ResultSet rs = executeQuery()) {
List<V> values = new ArrayList<>();
while (rs.next()) {
values.add(getScalarFromResultSet(rs, returnType));
}
return values;
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
/**
* Execute the statement and return only the first record from the result set.
*
* @param returnType The Class to return.
* @param <V> The type parameter.
* @return An instance of {@literal <V>} from the result set.
*/
public <V> V executeAndFetchFirst(Class<V> returnType) {
Object o = executeScalar();
if (null == o) {
return null;
}
return convert(o, returnType);
}
/**
* Execute the PreparedStatement and return a List of {@literal returnType} values from the
* ResultSet.
*
* @param returnType The type Class return a List of.
* @param <V> The type parameter to return a List of.
* @return A {@code List<returnType>}.
* @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
* the result, or any SQL errors occur.
*/
public <V> List<V> executeAndFetch(Class<V> returnType) {
try (ResultSet rs = executeQuery()) {
List<V> list = new ArrayList<>();
while (rs.next()) {
list.add(convert(rs.getObject(1), returnType));
}
return list;
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
/**
* Execute the query and pass the {@link ResultSet} to the given handler.
*
* @param handler The {@link ResultSetHandler} to execute.
* @param <V> The return type of this method.
* @return The results of {@link ResultSetHandler#apply(ResultSet)}.
*/
public <V> V executeAndFetch(ResultSetHandler<V> handler) {
try (ResultSet rs = executeQuery()) {
return handler.apply(rs);
} catch (SQLException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
@Override
public void close() {
try {
if (null != statement && !statement.isClosed()) {
statement.close();
}
} catch (SQLException ex) {
logger.warn("Error closing prepared statement: {}", ex.getMessage());
}
}
protected final Query addParameterInternal(InternalParameterSetter setter) {
int index = getAndIncrementIndex();
try {
setter.apply(this.statement, index);
return this;
} catch (SQLException ex) {
throw new NonTransientException("Could not apply bind parameter at index " + index, ex);
}
}
protected <V> V getScalarFromResultSet(ResultSet rs, Class<V> returnType) throws SQLException {
Object value = null;
if (Integer.class == returnType) {
value = rs.getInt(1);
} else if (Long.class == returnType) {
value = rs.getLong(1);
} else if (String.class == returnType) {
value = rs.getString(1);
} else if (Boolean.class == returnType) {
value = rs.getBoolean(1);
} else if (Double.class == returnType) {
value = rs.getDouble(1);
} else if (Date.class == returnType) {
value = rs.getDate(1);
} else if (Timestamp.class == returnType) {
value = rs.getTimestamp(1);
} else {
value = rs.getObject(1);
}
if (null == value) {
throw new NullPointerException(
"Cannot get value from ResultSet of type " + returnType.getName());
}
return returnType.cast(value);
}
protected <V> V convert(Object value, Class<V> returnType) {
if (Boolean.class == returnType) {
return returnType.cast(convertBoolean(value));
} else if (Integer.class == returnType) {
return returnType.cast(convertInt(value));
} else if (Long.class == returnType) {
return returnType.cast(convertLong(value));
} else if (Double.class == returnType) {
return returnType.cast(convertDouble(value));
} else if (String.class == returnType) {
return returnType.cast(convertString(value));
} else if (value instanceof String) {
return fromJson((String) value, returnType);
}
final String vName = value.getClass().getName();
final String rName = returnType.getName();
throw new NonTransientException("Cannot convert type " + vName + " to " + rName);
}
protected Integer convertInt(Object value) {
if (null == value) {
return null;
}
if (value instanceof Integer) {
return (Integer) value;
}
if (value instanceof Number) {
return ((Number) value).intValue();
}
return NumberUtils.toInt(value.toString());
}
protected Double convertDouble(Object value) {
if (null == value) {
return null;
}
if (value instanceof Double) {
return (Double) value;
}
if (value instanceof Number) {
return ((Number) value).doubleValue();
}
return NumberUtils.toDouble(value.toString());
}
protected Long convertLong(Object value) {
if (null == value) {
return null;
}
if (value instanceof Long) {
return (Long) value;
}
if (value instanceof Number) {
return ((Number) value).longValue();
}
return NumberUtils.toLong(value.toString());
}
protected String convertString(Object value) {
if (null == value) {
return null;
}
if (value instanceof String) {
return (String) value;
}
return value.toString().trim();
}
protected Boolean convertBoolean(Object value) {
if (null == value) {
return null;
}
if (value instanceof Boolean) {
return (Boolean) value;
}
if (value instanceof Number) {
return ((Number) value).intValue() != 0;
}
String text = value.toString().trim();
return "Y".equalsIgnoreCase(text)
|| "YES".equalsIgnoreCase(text)
|| "TRUE".equalsIgnoreCase(text)
|| "T".equalsIgnoreCase(text)
|| "1".equalsIgnoreCase(text);
}
protected String toJson(Object value) {
if (null == value) {
return null;
}
try {
return objectMapper.writeValueAsString(value);
} catch (JsonProcessingException ex) {
throw new NonTransientException(ex.getMessage(), ex);
}
}
protected <V> V fromJson(String value, Class<V> returnType) {
if (null == value) {
return null;
}
try {
return objectMapper.readValue(value, returnType);
} catch (IOException ex) {
throw new NonTransientException(
"Could not convert JSON '" + value + "' to " + returnType.getName(), ex);
}
}
    /**
     * Returns the current value of the running index counter without advancing it.
     * NOTE(review): presumably tracks the next bind-parameter position for the
     * {@code InternalParameterSetter} callbacks — confirm against callers; the
     * {@code index} field is declared outside this view.
     */
    protected final int getIndex() {
        return index.get();
    }
    /**
     * Returns the current value of the running index counter, then advances it by
     * one (the counter's {@code get}/{@code getAndIncrement} API suggests an
     * {@code AtomicInteger}, but the field is declared outside this view — confirm).
     */
    protected final int getAndIncrementIndex() {
        return index.getAndIncrement();
    }
    /**
     * Internal callback that binds a single value onto a {@link PreparedStatement}
     * at parameter position {@code idx}. (JDBC parameter indices are 1-based —
     * confirm how callers derive {@code idx} from the running index counter.)
     */
    @FunctionalInterface
    private interface InternalParameterSetter {
        // Implementations may throw if the value cannot be bound to the statement.
        void apply(PreparedStatement ps, int idx) throws SQLException;
    }
}
| 8,197 |
/*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.config;
import java.sql.SQLException;
import java.util.Optional;
import javax.sql.DataSource;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Import;
import org.springframework.retry.RetryContext;
import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.mysql.dao.MySQLExecutionDAO;
import com.netflix.conductor.mysql.dao.MySQLMetadataDAO;
import com.netflix.conductor.mysql.dao.MySQLQueueDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.mysql.cj.exceptions.MysqlErrorNumbers.ER_LOCK_DEADLOCK;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(MySQLProperties.class)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "mysql")
// Import the DataSourceAutoConfiguration when mysql database is selected.
// By default the datasource configuration is excluded in the main module.
@Import(DataSourceAutoConfiguration.class)
public class MySQLConfiguration {

    /** Metadata store backed by MySQL; created only after Flyway migrations have run. */
    @Bean
    @DependsOn({"flyway", "flywayInitializer"})
    public MySQLMetadataDAO mySqlMetadataDAO(
            @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            MySQLProperties properties) {
        return new MySQLMetadataDAO(retryTemplate, objectMapper, dataSource, properties);
    }

    /** Execution store backed by MySQL; created only after Flyway migrations have run. */
    @Bean
    @DependsOn({"flyway", "flywayInitializer"})
    public MySQLExecutionDAO mySqlExecutionDAO(
            @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource) {
        return new MySQLExecutionDAO(retryTemplate, objectMapper, dataSource);
    }

    /** Queue store backed by MySQL; created only after Flyway migrations have run. */
    @Bean
    @DependsOn({"flyway", "flywayInitializer"})
    public MySQLQueueDAO mySqlQueueDAO(
            @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource) {
        return new MySQLQueueDAO(retryTemplate, objectMapper, dataSource);
    }

    /**
     * Retry template shared by the DAOs above: retries up to
     * {@code conductor.mysql.deadlockRetryMax} attempts, immediately (no back-off),
     * and only when the failure is a MySQL deadlock.
     */
    @Bean
    public RetryTemplate mysqlRetryTemplate(MySQLProperties properties) {
        final SimpleRetryPolicy deadlockPolicy = new CustomRetryPolicy();
        deadlockPolicy.setMaxAttempts(properties.getDeadlockRetryMax());

        final RetryTemplate template = new RetryTemplate();
        template.setRetryPolicy(deadlockPolicy);
        template.setBackOffPolicy(new NoBackOffPolicy());
        return template;
    }

    /**
     * A {@link SimpleRetryPolicy} that additionally requires the last failure to be
     * a MySQL deadlock ({@code ER_LOCK_DEADLOCK}) before permitting another attempt.
     */
    public static class CustomRetryPolicy extends SimpleRetryPolicy {

        @Override
        public boolean canRetry(final RetryContext context) {
            final Throwable lastThrowable = context.getLastThrowable();
            if (lastThrowable == null) {
                // Nothing has failed yet; defer entirely to the attempt-count check.
                return super.canRetry(context);
            }
            return super.canRetry(context) && isDeadLockError(lastThrowable);
        }

        /** True when the cause chain contains a {@link SQLException} whose error code is a MySQL deadlock. */
        private boolean isDeadLockError(Throwable throwable) {
            final SQLException sqlException = findCauseSQLException(throwable);
            return sqlException != null && ER_LOCK_DEADLOCK == sqlException.getErrorCode();
        }

        /** Walks the cause chain and returns the first {@link SQLException}, or {@code null} if none. */
        private SQLException findCauseSQLException(Throwable throwable) {
            Throwable cause = throwable;
            while (cause != null && !(cause instanceof SQLException)) {
                cause = cause.getCause();
            }
            return (SQLException) cause;
        }
    }
}
| 8,198 |
/*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.config;
import java.time.Duration;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * Type-safe holder for settings bound from the {@code conductor.mysql.*}
 * configuration namespace.
 */
@ConfigurationProperties("conductor.mysql")
public class MySQLProperties {
    /** The time (in seconds) after which the in-memory task definitions cache will be refreshed */
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);
    // Maximum attempts when retrying a transaction aborted by a MySQL deadlock
    // (consumed by MySQLConfiguration#mysqlRetryTemplate); defaults to 3.
    private Integer deadlockRetryMax = 3;
    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }
    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }
    public Integer getDeadlockRetryMax() {
        return deadlockRetryMax;
    }
    public void setDeadlockRetryMax(Integer deadlockRetryMax) {
        this.deadlockRetryMax = deadlockRetryMax;
    }
}
| 8,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.