index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/RowSliceQuery.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.query;
import java.nio.ByteBuffer;
import java.util.Collection;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.Rows;
/**
* Interface to narrow down the path and column slices within a query after the
* keys were seleted using the ColumnFamilyQuery.
*/
public interface RowSliceQuery<K, C> extends Execution<Rows<K, C>> {
/**
* Specify a non-contiguous set of columns to retrieve.
*
* @param columns
* @return
*/
RowSliceQuery<K, C> withColumnSlice(C... columns);
/**
* Specify a non-contiguous set of columns to retrieve.
*
* @param columns
* @return
*/
RowSliceQuery<K, C> withColumnSlice(Collection<C> columns);
/**
* Use this when your application caches the column slice.
*
* @param slice
* @return
*/
RowSliceQuery<K, C> withColumnSlice(ColumnSlice<C> columns);
/**
* Specify a range of columns to return.
*
* @param startColumn
* First column in the range
* @param endColumn
* Last column in the range
* @param reversed
* True if the order should be reversed. Note that for reversed,
* startColumn should be greater than endColumn.
* @param count
* Maximum number of columns to return (similar to SQL LIMIT)
* @return
*/
RowSliceQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count);
/**
* Specify a range and provide pre-constructed start and end columns. Use
* this with Composite columns
*
* @param startColumn
* @param endColumn
* @param reversed
* @param count
* @return
*/
RowSliceQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count);
/**
* Specify a range of composite columns. Use this in conjunction with the
* AnnotatedCompositeSerializer.buildRange().
*
* @param range
* @return
*/
RowSliceQuery<K, C> withColumnRange(ByteBufferRange range);
/**
* Get column counts for the slice or range
* @return
*/
RowSliceColumnCountQuery<K> getColumnCounts();
}
| 7,900 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/ColumnFamilyQuery.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.query;
import java.util.Collection;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
/**
* Top level column family query lets you choose the type of query being
* performed at the key level. Single key, key range or a key slice.
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public interface ColumnFamilyQuery<K, C> {
/**
* Set the consistency level for this operations.
*
* @param consistencyLevel
*/
ColumnFamilyQuery<K, C> setConsistencyLevel(ConsistencyLevel consistencyLevel);
/**
* Set the retry policy to use instead of the default
*
* @param consistencyLevel
*/
ColumnFamilyQuery<K, C> withRetryPolicy(RetryPolicy retry);
/**
* Run the query on the specified host
*
* @param host
*/
ColumnFamilyQuery<K, C> pinToHost(Host host);
/**
* Query a single key
*
* @param rowKey
*/
RowQuery<K, C> getKey(K rowKey);
/**
* Query a single row
*
* @param rowKey
*/
RowQuery<K, C> getRow(K rowKey);
/**
* Query a range of keys. startKey and endKey cannot not be used with the
* RandomPartitioner.
*
* @param startKey
* @param endKey
* @param startToken
* @param endToken
* @param count
* Max number of keys to return
*/
RowSliceQuery<K, C> getKeyRange(K startKey, K endKey, String startToken, String endToken, int count);
/**
* Query a range of rows. startKey and endKey cannot not be used with the
* RandomPartitioner.
*
* @param startKey
* @param endKey
* @param startToken
* @param endToken
* @param count
* Max number of keys to return
*/
RowSliceQuery<K, C> getRowRange(K startKey, K endKey, String startToken, String endToken, int count);
/**
* Query a non-contiguous set of keys.
*
* @param keys
*/
RowSliceQuery<K, C> getKeySlice(K... keys);
/**
* Query a non-contiguous set of rows.
*
* @param keys
*/
RowSliceQuery<K, C> getRowSlice(K... keys);
/**
* Query a non-contiguous set of keys.
*
* @param keys
*/
RowSliceQuery<K, C> getKeySlice(Collection<K> keys);
/**
* Query a non-contiguous set of rows.
*
* @param keys
*/
RowSliceQuery<K, C> getRowSlice(Collection<K> keys);
/**
* Query a non-contiguous set of keys.
*
* @param keys
*/
RowSliceQuery<K, C> getKeySlice(Iterable<K> keys);
/**
* Query a non-contiguous set of rows.
*
* @param keys
*/
RowSliceQuery<K, C> getRowSlice(Iterable<K> keys);
/**
* Query to get an iterator to all rows in the column family
*/
AllRowsQuery<K, C> getAllRows();
/**
* Execute a CQL statement. Call this for creating a prepared CQL statements.
*
* withCql("...").prepareStatement().execute()
*
* @param cql
*/
CqlQuery<K, C> withCql(String cql);
/**
* Search for keys matching the provided index clause
*
* @param indexClause
*/
IndexQuery<K, C> searchWithIndex();
/**
* Enable/disable prepared statement caching for the query. Note that this interface is not applicable to the
* thrift implementation. It is mainly meant for driver impls that make use of PreparedStatements
* @param condition
* @return
*/
ColumnFamilyQuery<K, C> withCaching(boolean condition);
}
| 7,901 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/AllRowsQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.query;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Collection;
import com.netflix.astyanax.ExceptionCallback;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.RowCallback;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.Rows;
/**
* Specialized query to iterate the contents of a column family.
*
* ColumnFamily<String, String> CF_STANDARD1 = new ColumnFamily<String,
* String>("Standard1", StringSerializer.get(), StringSerializer.get());
*
* Iterator<Row<String,String>> iter =
* keyspace.prepareQuery(MockConstants.CF_STANDARD1).iterator(); while
* (iter.hasNext()) { Row<String,String> row = iter.next(); LOG.info("ROW: " +
* row.getKey()); }
*
* The iterator is implemented by making 'paginated' queries to Cassandra with
* each query returning up to a the block size set by setBlockSize (default is
* 10). The incremental query is hidden from the caller thereby providing a
* virtual view into the column family.
*
* There are a few important implementation details that need to be considered.
* This implementation assumes the random partitioner is used. Consequently the
* KeyRange query is done using tokens and not row keys. This is done because
* when using the random partitioner tokens are sorted while keys are not.
* However, because multiple keys could potentially map to the same token each
* incremental query to Cassandra will repeat the last token from the previous
* response. This will ensure that no keys are skipped. This does however have
* to very important implications. First, the last and potentially more (if they
* have the same token) row keys from the previous response will repeat. Second,
* if a range of repeating tokens is larger than the block size then the code
* will enter an infinite loop. This can be mitigated by selecting a block size
* that is large enough so that the likelyhood of this happening is very low.
* Also, if your application can tolerate the potential for skipped row keys
* then call setRepeatLastToken(false) to turn off this features.
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public interface AllRowsQuery<K, C> extends Execution<Rows<K, C>> {
/**
* @deprecated Use setRowLimit instead
*/
AllRowsQuery<K, C> setBlockSize(int blockSize);
/**
* Maximum number of rows to return for each incremental query to Cassandra.
* This limit also represents the page size when paginating.
*
* @param blockSize
*/
AllRowsQuery<K, C> setRowLimit(int rowLimit);
/**
* Sets the exception handler to use when handling exceptions inside
* Iterator.next(). This gives the caller a chance to implement a backoff
* strategy or stop the iteration.
*
* @param cb
*/
AllRowsQuery<K, C> setExceptionCallback(ExceptionCallback cb);
/**
* Use this checkpoint manager to keep track of progress as all rows are being iterated
* @param manager
*/
AllRowsQuery<K, C> setCheckpointManager(CheckpointManager manager);
/**
* If true will repeat the last token in the previous block.
*
* @param repeatLastToken
*/
AllRowsQuery<K, C> setRepeatLastToken(boolean repeatLastToken);
/**
* If set to false all empty rows will be filtered out internally.
* Default is false
*
* @param flag
*/
AllRowsQuery<K, C> setIncludeEmptyRows(boolean flag);
/**
* Specify a non-contiguous set of columns to retrieve.
*
* @param columns
*/
AllRowsQuery<K, C> withColumnSlice(C... columns);
/**
* Specify a non-contiguous set of columns to retrieve.
*
* @param columns
*/
AllRowsQuery<K, C> withColumnSlice(Collection<C> columns);
/**
* Use this when your application caches the column slice.
*
* @param slice
*/
AllRowsQuery<K, C> withColumnSlice(ColumnSlice<C> columns);
/**
* Specify a range of columns to return.
*
* @param startColumn
* First column in the range
* @param endColumn
* Last column in the range
* @param reversed
* True if the order should be reversed. Note that for reversed,
* startColumn should be greater than endColumn.
* @param count
* Maximum number of columns to return (similar to SQL LIMIT)
*/
AllRowsQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count);
/**
* Specify a range and provide pre-constructed start and end columns. Use
* this with Composite columns
*
* @param startColumn
* @param endColumn
* @param reversed
* @param count
*/
AllRowsQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count);
/**
* Specify a range of composite columns. Use this in conjunction with the
* AnnotatedCompositeSerializer.buildRange().
*
* @param range
*/
AllRowsQuery<K, C> withColumnRange(ByteBufferRange range);
/**
* Split the query into N threads with each thread processing an equal size chunk from the token range.
*
* Note that the actual number of threads is still limited by the available threads in the thread
* pool that was set with the AstyanaxConfiguration.
*
* @param numberOfThreads
*/
AllRowsQuery<K, C> setConcurrencyLevel(int numberOfThreads);
@Deprecated
AllRowsQuery<K, C> setThreadCount(int numberOfThreads);
/**
* Execute the operation in a separate thread for each token range and
* provide the results in a callback.
*
* @param predicate
* @throws ConnectionException
*/
void executeWithCallback(RowCallback<K, C> callback) throws ConnectionException;
/**
* Execute the operation on a specific token range, instead of the entire range.
* Use this only is combination with setConcurrencyLevel being called otherwise
* it currently will not have any effect on the query. When using forTokenRange
* the specified token range will still be split into the number of threads
* specified by setConcurrencyLevel
*
* @param startToken
* @param endToken
*/
AllRowsQuery<K, C> forTokenRange(BigInteger startToken, BigInteger endToken);
AllRowsQuery<K, C> forTokenRange(String startToken, String endToken);
}
| 7,902 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/CqlQuery.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.query;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.model.CqlResult;
/**
* Interface for executing a CQL query.
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public interface CqlQuery<K, C> extends Execution<CqlResult<K, C>> {
/**
* Turns on compression for the response
*
* @return
*/
CqlQuery<K, C> useCompression();
/**
* Prepares the provided CQL statement. The statement is not executed
* here. Call, withPreparedStatement to execute the prepared statement
* with variables.
* @return
*/
PreparedCqlQuery<K,C> asPreparedStatement();
}
| 7,903 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/RowQuery.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.query;
import java.nio.ByteBuffer;
import java.util.Collection;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.RowCopier;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ColumnSlice;
/**
* Interface to narrow down the path and column slices within a query after the
* keys were seleted using the ColumnFamilyQuery.
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public interface RowQuery<K, C> extends Execution<ColumnList<C>> {
/**
* Specify the path to a single column (either Standard or Super). Notice
* that the sub column type and serializer will be used now.
*
* @param <C2>
* @param path
*/
ColumnQuery<C> getColumn(C column);
/**
* Specify a non-contiguous set of columns to retrieve.
*
* @param columns
*/
RowQuery<K, C> withColumnSlice(Collection<C> columns);
/**
* Specify a non-contiguous set of columns to retrieve.
*
* @param columns
*/
RowQuery<K, C> withColumnSlice(C... columns);
/**
* Use this when your application caches the column slice.
*
* @param slice
*/
RowQuery<K, C> withColumnSlice(ColumnSlice<C> columns);
/**
* Specify a range of columns to return. Use this for simple ranges for
* non-composite column names. For Composite column names use
* withColumnRange(ByteBufferRange range) and the
* AnnotatedCompositeSerializer.buildRange()
*
* @param startColumn
* First column in the range
* @param endColumn
* Last column in the range
* @param reversed
* True if the order should be reversed. Note that for reversed,
* startColumn should be greater than endColumn.
* @param count
* Maximum number of columns to return (similar to SQL LIMIT)
*/
RowQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count);
/**
* Specify a range and provide pre-constructed start and end columns. Use
* this with Composite columns
*
* @param startColumn
* @param endColumn
* @param reversed
* @param count
*/
RowQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count);
/**
* Specify a range of composite columns. Use this in conjunction with the
* AnnotatedCompositeSerializer.buildRange().
*
* @param range
*/
RowQuery<K, C> withColumnRange(ByteBufferRange range);
@Deprecated
/**
* Use autoPaginate instead
*/
RowQuery<K, C> setIsPaginating();
/**
* When used in conjunction with a column range this will call subsequent
* calls to execute() to get the next block of columns.
*/
RowQuery<K, C> autoPaginate(boolean enabled);
/**
* Copy the results of the query to another column family
*
* @param columnFamily
* @param otherRowKey
*/
RowCopier<K, C> copyTo(ColumnFamily<K, C> columnFamily, K rowKey);
/**
* Returns the number of columns in the response without returning any data
* @throws ConnectionException
*/
ColumnCountQuery getCount();
}
| 7,904 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/PreparedCqlQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.query;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.UUID;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.CqlResult;
/**
* Interface for specifying parameters on a prepared CQL query.
*
* Values must be specified in the order that they were defined in the query.
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public interface PreparedCqlQuery<K, C> extends Execution<CqlResult<K, C>> {
/**
* Specify a value of custom type for which a convenience method does not exist
* @param value
* @param serializer
*/
<V> PreparedCqlQuery<K, C> withByteBufferValue(V value, Serializer<V> serializer);
/**
* Set the next parameter value to this ByteBuffer
* @param value
*/
PreparedCqlQuery<K, C> withValue(ByteBuffer value);
/**
* Add a list of ByteBuffer values
* @param value
*/
PreparedCqlQuery<K, C> withValues(List<ByteBuffer> value);
/**
* Set the next parameter value to this String
* @param value
*/
PreparedCqlQuery<K, C> withStringValue(String value);
/**
* Set the next parameter value to this Integer
* @param value
*/
PreparedCqlQuery<K, C> withIntegerValue(Integer value);
/**
* Set the next parameter value to this Boolean
* @param value
*/
PreparedCqlQuery<K, C> withBooleanValue(Boolean value);
/**
* Set the next parameter value to this Double
* @param value
*/
PreparedCqlQuery<K, C> withDoubleValue(Double value);
/**
* Set the next parameter value to this Long
* @param value
*/
PreparedCqlQuery<K, C> withLongValue(Long value);
/**
* Set the next parameter value to this Float
* @param value
*/
PreparedCqlQuery<K, C> withFloatValue(Float value);
/**
* Set the next parameter value to this Short
* @param value
*/
PreparedCqlQuery<K, C> withShortValue(Short value);
/**
* Set the next parameter value to this Short
* @param value
*/
PreparedCqlQuery<K, C> withUUIDValue(UUID value);
}
| 7,905 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/IndexOperator.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.query;
public enum IndexOperator {
GT, LT, GTE, LTE, EQ
}
| 7,906 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/AbstractPreparedCqlQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.query;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.UUID;
import com.google.common.collect.Lists;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.FloatSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.ShortSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
public abstract class AbstractPreparedCqlQuery<K, C> implements PreparedCqlQuery<K, C> {
private List<ByteBuffer> values = Lists.newArrayList();
protected List<ByteBuffer> getValues() {
return values;
}
@Override
public <V> PreparedCqlQuery<K, C> withByteBufferValue(V value, Serializer<V> serializer) {
return withValue(serializer.toByteBuffer(value));
}
@Override
public PreparedCqlQuery<K, C> withValue(ByteBuffer value) {
values.add(value);
return this;
}
@Override
public PreparedCqlQuery<K, C> withValues(List<ByteBuffer> values) {
this.values.addAll(values);
return this;
}
@Override
public PreparedCqlQuery<K, C> withStringValue(String value) {
return withByteBufferValue(value, StringSerializer.get());
}
@Override
public PreparedCqlQuery<K, C> withIntegerValue(Integer value) {
return withByteBufferValue(value, IntegerSerializer.get());
}
@Override
public PreparedCqlQuery<K, C> withBooleanValue(Boolean value) {
return withByteBufferValue(value, BooleanSerializer.get());
}
@Override
public PreparedCqlQuery<K, C> withDoubleValue(Double value) {
return withByteBufferValue(value, DoubleSerializer.get());
}
@Override
public PreparedCqlQuery<K, C> withLongValue(Long value) {
return withByteBufferValue(value, LongSerializer.get());
}
@Override
public PreparedCqlQuery<K, C> withFloatValue(Float value) {
return withByteBufferValue(value, FloatSerializer.get());
}
@Override
public PreparedCqlQuery<K, C> withShortValue(Short value) {
return withByteBufferValue(value, ShortSerializer.get());
}
@Override
public PreparedCqlQuery<K, C> withUUIDValue(UUID value) {
return withByteBufferValue(value, UUIDSerializer.get());
}
}
| 7,907 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/RowSliceColumnCountQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.query;
import java.util.Map;
import com.netflix.astyanax.Execution;
/**
* Interface for an operation to get the column count for a row slice or range
* @author elandau
*
* @param <K>
*/
public interface RowSliceColumnCountQuery<K> extends Execution<Map<K, Integer>> {
}
| 7,908 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/ColumnPredicate.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.query;
import com.netflix.astyanax.model.Equality;
public class ColumnPredicate {
private String name;
private Equality op;
private Object value;
public String getName() {
return name;
}
public Equality getOp() {
return op;
}
public Object getValue() {
return value;
}
public ColumnPredicate setName(String name) {
this.name = name;
return this;
}
public ColumnPredicate setOp(Equality op) {
this.op = op;
return this;
}
public ColumnPredicate setValue(Object value) {
this.value = value;
return this;
}
@Override
public String toString() {
return "ColumnPredicate [name=" + name + ", op=" + op + ", value="
+ value + "]";
}
}
| 7,909 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/CheckpointManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.query;
import java.util.SortedMap;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
/**
* Interface for tracking checkpoints for a getAllRows query.
* The entire token range is split into a sorted set of start tokens. Each start token is
* mapped to a checkpoint with the following possible values
* 1. startToken - start of the token range
* 2. nextToken - the checkpoint equals the next token in the sorted set of start tokens. This means the range is done
* 3. > startToken AND < nextToken - a valid checkpoint
*
* @author elandau
*
*/
public interface CheckpointManager {
/**
* Track the checkpoint for a specific range
*
* @param startToken
* @param checkpointToken
* @throws Exception
*/
void trackCheckpoint(String startToken, String checkpointToken) throws Exception;
/**
* Get the next checkpoint after the specified token. Will return null if no checkpoint was set.
*/
String getCheckpoint(String startToken) throws Exception ;
/**
* Return a sorted map of start tokens to their checkpoint
* @throws ConnectionException
*/
SortedMap<String, String> getCheckpoints() throws Exception;
}
| 7,910 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/IndexColumnExpression.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.query;
public interface IndexColumnExpression<K, C> {
/**
* Set the column part of the expression
*
* @param columnName
* @return
*/
IndexOperationExpression<K, C> whereColumn(C columnName);
}
| 7,911 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/ColumnCountQuery.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.query;
import com.netflix.astyanax.Execution;
/**
 * Query that, when executed, returns the number of columns matched rather than
 * the column data itself.  Marker interface: all behavior comes from
 * {@link Execution}, whose {@code execute()} yields the {@code Integer} count.
 */
public interface ColumnCountQuery extends Execution<Integer> {
}
| 7,912 |
0 | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cassandra/src/main/java/com/netflix/astyanax/query/IndexValueExpression.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.query;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.UUID;
import com.netflix.astyanax.Serializer;
/**
 * Terminal step of a fluent secondary-index expression: binds the value to
 * compare the previously selected column against and yields the completed
 * {@link IndexQuery}.  Each overload accepts a common Java type; presumably
 * each is encoded with the standard serializer for that type — confirm in the
 * implementation.  Use the generic overload to supply an explicit serializer
 * for custom types.
 *
 * @param <K> row key type
 * @param <C> column name type
 */
public interface IndexValueExpression<K, C> {
    /** Bind a String comparison value. */
    IndexQuery<K, C> value(String value);

    /** Bind a long comparison value. */
    IndexQuery<K, C> value(long value);

    /** Bind an int comparison value. */
    IndexQuery<K, C> value(int value);

    /** Bind a boolean comparison value. */
    IndexQuery<K, C> value(boolean value);

    /** Bind a Date comparison value. */
    IndexQuery<K, C> value(Date value);

    /** Bind a raw byte[] comparison value. */
    IndexQuery<K, C> value(byte[] value);

    /** Bind a raw ByteBuffer comparison value. */
    IndexQuery<K, C> value(ByteBuffer value);

    /** Bind a double comparison value. */
    IndexQuery<K, C> value(double value);

    /** Bind a UUID comparison value. */
    IndexQuery<K, C> value(UUID value);

    /**
     * Bind a comparison value of an arbitrary type using an explicit serializer.
     *
     * @param value           value to compare against
     * @param valueSerializer serializer used to encode {@code value}
     */
    <V> IndexQuery<K, C> value(V value, Serializer<V> valueSerializer);
}
| 7,913 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/CountingQueueStats.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
/**
 * In-memory {@link MessageQueueStats} implementation that tracks each metric
 * with a lock-free {@link AtomicLong}.  All counters are monotonically
 * increasing; the class is thread safe.
 */
public class CountingQueueStats implements MessageQueueStats {
    // Counters are final: the AtomicLong instances are created once and only
    // their contained values change.
    private final AtomicLong emptyPartitionCount = new AtomicLong();
    private final AtomicLong lockContentionCount = new AtomicLong();
    private final AtomicLong eventProcessCount   = new AtomicLong();
    private final AtomicLong eventReprocessCount = new AtomicLong();
    private final AtomicLong expiredLockCount    = new AtomicLong();
    private final AtomicLong ackMessageCount     = new AtomicLong();
    private final AtomicLong sendMessageCount    = new AtomicLong();
    private final AtomicLong invalidTaskCount    = new AtomicLong();
    private final AtomicLong persistErrorCount   = new AtomicLong();

    /** Count a poll that found an empty shard/partition. */
    @Override
    public void incEmptyPartitionCount() {
        emptyPartitionCount.incrementAndGet();
    }

    /** Count a failure to acquire a shard lock due to contention. */
    @Override
    public void incLockContentionCount() {
        lockContentionCount.incrementAndGet();
    }

    /** Count a message processed for the first time. */
    @Override
    public void incProcessCount() {
        eventProcessCount.incrementAndGet();
    }

    /** Count a message that had to be processed again. */
    @Override
    public void incReprocessCount() {
        eventReprocessCount.incrementAndGet();
    }

    /** Count an expired lock that was encountered and cleaned up. */
    @Override
    public void incExpiredLockCount() {
        expiredLockCount.incrementAndGet();
    }

    /** Count a message submitted to the queue. */
    @Override
    public void incSendMessageCount() {
        sendMessageCount.incrementAndGet();
    }

    /** Count a message acknowledged (released) by a consumer. */
    @Override
    public void incAckMessageCount() {
        ackMessageCount.incrementAndGet();
    }

    /** Count a message that could not be handled (invalid payload/handler). */
    @Override
    public void incInvalidMessageCount() {
        invalidTaskCount.incrementAndGet();
    }

    /** Count a failure to persist queue state. */
    @Override
    public void incPersistError() {
        persistErrorCount.incrementAndGet();
    }

    @Override
    public long getEmptyPartitionCount() {
        return this.emptyPartitionCount.get();
    }

    // Method name typo ("Countention") is inherited from the
    // MessageQueueStats interface and cannot be fixed here without breaking
    // callers.
    @Override
    public long getLockCountentionCount() {
        return this.lockContentionCount.get();
    }

    @Override
    public long getProcessCount() {
        return this.eventProcessCount.get();
    }

    @Override
    public long getReprocessCount() {
        return this.eventReprocessCount.get();
    }

    @Override
    public long getExpiredLockCount() {
        return this.expiredLockCount.get();
    }

    @Override
    public long getAckMessageCount() {
        return this.ackMessageCount.get();
    }

    @Override
    public long getSendMessageCount() {
        return this.sendMessageCount.get();
    }

    @Override
    public long getInvalidMessageCount() {
        return this.invalidTaskCount.get();
    }

    @Override
    public long getPersistErrorCount() {
        return this.persistErrorCount.get();
    }

    @Override
    public String toString() {
        // Fix: invalidTaskCount was previously appended as the AtomicLong
        // object itself; use .get() consistently (AtomicLong.toString prints
        // the same digits, so the rendered output is unchanged).
        // NOTE: persistErrorCount is intentionally not included, preserving
        // the historical string format.
        return "CountingQueueStats [empty=" + emptyPartitionCount.get()
                + ", cont=" + lockContentionCount.get()
                + ", ok=" + eventProcessCount.get()
                + ", redo=" + eventReprocessCount.get()
                + ", exp=" + expiredLockCount.get()
                + ", released=" + ackMessageCount.get()
                + ", new=" + sendMessageCount.get()
                + ", invalid=" + invalidTaskCount.get() + "]";
    }
}
| 7,914 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/Message.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.codehaus.jackson.annotate.JsonIgnore;
import com.google.common.collect.Maps;
import com.netflix.astyanax.recipes.queue.triggers.Trigger;
import com.netflix.astyanax.util.TimeUUIDUtils;
/**
 * A single queue entry.  A Message carries an optional payload (the parameter
 * map), scheduling information (trigger, priority, timeout) and identity
 * information (token, random, key).  Instances are mutable; all setters
 * return {@code this} for fluent chaining.  Not thread safe.
 */
public class Message {
    /** Default visibility timeout, in seconds, applied to new messages. */
    private static final int DEFAULT_TIMEOUT_SECONDS = 120;

    /**
     * Last execution time; this value changes as the task state is transitioned.
     * The token is a timeUUID and represents the next execution/expiration time
     * within the queue.
     */
    private UUID token;

    /** Random number associated with this message. */
    private UUID random;

    /** Trigger describing when the message should execute. */
    private Trigger trigger;

    /** Message parameters that are stored with the queued item. */
    private Map<String, Object> parameters;

    /** Lower value priority tasks get executed first. */
    private byte priority = 0;

    /** Visibility timeout in seconds; 0 means no timeout. */
    private int timeout = DEFAULT_TIMEOUT_SECONDS;

    /** Unique key for this message (optional). */
    private String key;

    /** Class name of the handler for this message (optional). */
    private String taskClass;

    /** True if history should be maintained for each handling of the message. */
    private boolean isKeepHistory = false;

    /** True if the key is expected to be unique. */
    private boolean hasUniqueKey = false;

    /**
     * True if the next trigger should be committed when the message is popped,
     * as opposed to being sent when the message is acked.
     */
    private boolean isAutoCommitTrigger = false;

    public Message() {
    }

    public Message(UUID token, UUID random) {
        this.token  = token;
        this.random = random;
    }

    public UUID getToken() {
        return token;
    }

    public Message setToken(UUID token) {
        this.token = token;
        return this;
    }

    /**
     * Get the micros time encoded in the token.
     *
     * @return microsecond timestamp extracted from the time-UUID token
     */
    @JsonIgnore
    public long getTokenTime() {
        return TimeUUIDUtils.getMicrosTimeFromUUID(token);
    }

    public UUID getRandom() {
        return random;
    }

    public Message setRandom(UUID random) {
        this.random = random;
        return this;
    }

    public Trigger getTrigger() {
        return trigger;
    }

    public Message setTrigger(Trigger trigger) {
        this.trigger = trigger;
        return this;
    }

    public boolean hasTrigger() {
        return trigger != null;
    }

    public Map<String, Object> getParameters() {
        return parameters;
    }

    public Message setParameters(Map<String, Object> parameters) {
        this.parameters = parameters;
        return this;
    }

    /**
     * Add a single parameter, lazily creating the parameter map on first use.
     */
    public Message addParameter(String key, Object value) {
        if (this.parameters == null)
            this.parameters = Maps.newHashMap();
        this.parameters.put(key, value);
        return this;
    }

    public byte getPriority() {
        return priority;
    }

    public int getTimeout() {
        return timeout;
    }

    /** @return true unless the timeout was explicitly disabled (set to 0) */
    public boolean hasTimeout() {
        return timeout != 0;
    }

    /**
     * Setting priority will NOT work correctly with a future trigger time due to
     * internal data model implementations.
     *
     * @param priority lower values are executed first
     */
    public Message setPriority(byte priority) {
        this.priority = priority;
        return this;
    }

    public Message setTimeout(int timeout) {
        this.timeout = timeout;
        return this;
    }

    /** Set the timeout in arbitrary units; stored internally as whole seconds. */
    public Message setTimeout(long timeout, TimeUnit units) {
        this.timeout = (int) TimeUnit.SECONDS.convert(timeout, units);
        return this;
    }

    public String getKey() {
        return key;
    }

    public Message setKey(String key) {
        this.key = key;
        return this;
    }

    /** Set the key and mark it as expected to be unique in the queue. */
    public Message setUniqueKey(String key) {
        this.key = key;
        this.hasUniqueKey = true;
        return this;
    }

    public boolean hasKey() {
        return this.key != null;
    }

    public boolean hasUniqueKey() {
        return this.key != null && this.hasUniqueKey;
    }

    public String getTaskClass() {
        return taskClass;
    }

    public Message setTaskClass(String taskClass) {
        this.taskClass = taskClass;
        return this;
    }

    public boolean hasTaskClass() {
        return this.taskClass != null;
    }

    public boolean isKeepHistory() {
        return isKeepHistory;
    }

    /**
     * @param isKeepHistory true to record a history entry each time the message
     *        is handled.  Fix: a null value is now treated as false instead of
     *        throwing a NullPointerException on unboxing.
     */
    public Message setKeepHistory(Boolean isKeepHistory) {
        this.isKeepHistory = Boolean.TRUE.equals(isKeepHistory);
        return this;
    }

    /**
     * Create a field-by-field copy of this message.
     *
     * Fix: previously {@code random}, {@code hasUniqueKey} and
     * {@code isAutoCommitTrigger} were silently dropped from the copy.
     * Note: the parameters map reference is shared, not deep-copied, matching
     * the original behavior.
     */
    public Message clone() {
        Message message             = new Message();
        message.token               = token;
        message.random              = random;
        message.trigger             = trigger;
        message.parameters          = parameters;
        message.priority            = priority;
        message.timeout             = timeout;
        message.key                 = key;
        message.hasUniqueKey        = hasUniqueKey;
        message.taskClass           = taskClass;
        message.isKeepHistory       = isKeepHistory;
        message.isAutoCommitTrigger = isAutoCommitTrigger;
        return message;
    }

    public boolean isAutoCommitTrigger() {
        return isAutoCommitTrigger;
    }

    public Message setAutoCommitTrigger(boolean isAutoCommitTrigger) {
        this.isAutoCommitTrigger = isAutoCommitTrigger;
        return this;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("Message[");
        if (token != null) {
            sb.append("token=" + token + " (" + TimeUUIDUtils.getMicrosTimeFromUUID(token) + ")");
        }
        if (random != null)
            sb.append(", random=" + random);
        if (trigger != null)
            sb.append(", trigger=" + trigger);
        if (parameters != null)
            sb.append(", parameters=" + parameters);
        sb.append(", priority=" + priority);
        sb.append(", timeout=" + timeout);
        if (key != null)
            sb.append(", key=" + key);
        if (hasUniqueKey)
            sb.append(", hasUniqueKey=" + hasUniqueKey);
        if (taskClass != null)
            sb.append(", taskClass=" + taskClass);
        if (isKeepHistory)
            sb.append(", isKeepHistory=" + isKeepHistory);
        sb.append("]");
        return sb.toString();
    }
}
| 7,915 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageHandlerFactory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import com.google.common.base.Function;
/**
* Abstraction for creating message handlers. Implementations of this class can
* be used to tie into dependency injection.
* @author elandau
*
*/
/**
 * Abstraction for creating message handlers. Implementations of this class can
 * be used to tie into dependency injection.
 *
 * @author elandau
 */
public interface MessageHandlerFactory {
    /**
     * Create a handler instance for the given class name.  The handler is
     * invoked with the message context and returns whether the message should
     * be acked.
     *
     * @param className fully qualified name of the handler class
     * @throws Exception if the handler cannot be instantiated
     */
    Function<MessageContext, Boolean> createInstance(String className) throws Exception;
}
| 7,916 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageConsumer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.netflix.astyanax.recipes.locks.BusyLockException;
/**
 * Consumer side of the message queue: reads batches of messages and
 * acknowledges them once processed.  Messages that are read but never acked
 * presumably become visible again after their timeout — confirm against the
 * queue implementation.
 */
public interface MessageConsumer {
    /**
     * Acquire up to N items from the queue. Each item must be released by calling ackMessage.
     *
     * @param itemsToRead maximum number of messages to read
     * @return contexts for the messages read; may be empty
     * @throws MessageQueueException on a queue-level failure
     * @throws BusyLockException    if the shard lock could not be acquired
     * @throws InterruptedException if the calling thread is interrupted
     */
    List<MessageContext> readMessages(int itemsToRead) throws MessageQueueException, BusyLockException, InterruptedException;

    /**
     * Acquire up to N items from the queue, waiting up to the given timeout.
     * Each item must be released by calling ackMessage.
     *
     * @param itemsToRead maximum number of messages to read
     * @param timeout     maximum time to wait
     * @param units       units of {@code timeout}
     * @return contexts for the messages read; may be empty
     */
    List<MessageContext> readMessages(int itemsToRead, long timeout, TimeUnit units) throws MessageQueueException, BusyLockException, InterruptedException;

    /**
     * Read messages from a known shard.
     *
     * @param shard       name of the shard to read from
     * @param itemsToRead maximum number of messages to read
     * @return contexts for the messages read; may be empty
     * @throws BusyLockException     if the shard lock could not be acquired
     * @throws MessageQueueException on a queue-level failure
     */
    List<MessageContext> readMessagesFromShard(String shard, int itemsToRead) throws MessageQueueException, BusyLockException;

    /**
     * Peek into messages from the queue. The queue state is not altered by this operation.
     *
     * @param itemsToPop maximum number of messages to peek at
     * @throws MessageQueueException on a queue-level failure
     */
    Collection<Message> peekMessages(int itemsToPop) throws MessageQueueException;

    /**
     * Release a message after completion.
     *
     * @param message context of the message to acknowledge
     * @throws MessageQueueException on a queue-level failure
     */
    void ackMessage(MessageContext message) throws MessageQueueException;

    /**
     * Release a set of messages in one batch.
     *
     * @param messages contexts of the messages to acknowledge
     */
    void ackMessages(Collection<MessageContext> messages) throws MessageQueueException;

    /**
     * Acknowledge the message as a poison message. This will put the message into
     * a poison queue so it is persisted but does not interfere with the active queue.
     *
     * @param message context of the poison message
     */
    void ackPoisonMessage(MessageContext message) throws MessageQueueException;
}
| 7,917 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueException.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * Checked exception raised by the message queue recipe for queue-level
 * failures.  Thin wrapper over {@link Exception}; all three standard
 * constructor shapes are provided.
 */
public class MessageQueueException extends Exception {

    private static final long serialVersionUID = 3917437309288808628L;

    public MessageQueueException(String message, Throwable cause) {
        super(message, cause);
    }

    public MessageQueueException(String message) {
        super(message);
    }

    public MessageQueueException(Throwable cause) {
        super(cause);
    }
}
| 7,918 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/SimpleMessageHandlerFactory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import com.google.common.base.Function;
/**
 * Default {@link MessageHandlerFactory} that instantiates the handler by
 * reflection using the class's no-arg constructor.
 */
public class SimpleMessageHandlerFactory implements MessageHandlerFactory {
    @Override
    @SuppressWarnings("unchecked") // cast is unavoidable with reflective creation
    public Function<MessageContext, Boolean> createInstance(String className) throws Exception {
        // Use getDeclaredConstructor().newInstance() instead of the deprecated
        // Class.newInstance(); constructor failures surface as
        // InvocationTargetException rather than being thrown unchecked.
        return (Function<MessageContext, Boolean>) Class.forName(className)
                .getDeclaredConstructor()
                .newInstance();
    }
}
| 7,919 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageMetadataEntryType.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * Discriminator for entries stored in a message's metadata row.
 * NOTE(review): constant order defines the persisted ordinal — do not reorder.
 * Semantics below are inferred from the names; confirm against the queue
 * implementation.
 */
public enum MessageMetadataEntryType {
    // Presumably a lock entry used for coordinating access to the message key.
    Lock,
    // Presumably marks the key as unique within the queue.
    Unique,
    // Presumably maps the metadata row to the queued message's id.
    MessageId,
    // Presumably an arbitrary user field attached to the message.
    Field
}
| 7,920 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueHooks.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Collection;
import com.netflix.astyanax.MutationBatch;
/**
* This interface provides a hook to piggyback on top of the executed mutation
* for each stage of processing
*
* @author elandau
*
*/
/**
 * This interface provides a hook to piggyback on top of the executed mutation
 * for each stage of processing
 *
 * @author elandau
 *
 */
public interface MessageQueueHooks {
    /**
     * Called after tasks are read from the queue and before the mutation
     * for updating their state is committed.
     *
     * @param messages messages that were just read
     * @param mb       mutation batch about to be committed; additional
     *                 mutations may be added to it
     */
    void beforeAckMessages(Collection<Message> messages, MutationBatch mb);

    /**
     * Called before a task is released from the queue.
     *
     * @param message message being acknowledged
     * @param mb      mutation batch about to be committed
     */
    void beforeAckMessage(Message message, MutationBatch mb);

    /**
     * Called before a task is inserted in the queue.
     *
     * @param message message being enqueued
     * @param mb      mutation batch about to be committed
     */
    void beforeSendMessage(Message message, MutationBatch mb);
}
| 7,921 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageStatus.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * Lifecycle state of a message as recorded in its history.
 * NOTE(review): constant order defines the persisted ordinal — do not reorder.
 */
public enum MessageStatus {
    // Message is queued and waiting to be picked up.
    WAITING,
    // Message has been popped and is being processed.
    RUNNING,
    // Message was processed successfully.
    DONE,
    // Message processing failed.
    FAILED,
    // Message was skipped without being processed.
    SKIPPED
}
| 7,922 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageProducer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Collection;
/**
 * Producer side of the message queue: submits single messages or batches.
 */
public interface MessageProducer {
    /**
     * Schedule a job for execution.
     *
     * @param message message to enqueue
     * @return id assigned to this message (the javadoc previously said UUID,
     *         but the declared type is String — presumably a string-encoded
     *         message id; confirm against the implementation)
     * @throws MessageQueueException on a queue-level failure
     */
    String sendMessage(Message message) throws MessageQueueException;

    /**
     * Schedule a batch of jobs.
     *
     * @param messages messages to enqueue
     * @return response mapping the submitted messages to their assigned ids
     * @throws MessageQueueException on a queue-level failure
     */
    SendMessageResponse sendMessages(Collection<Message> messages) throws MessageQueueException;
}
| 7,923 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.List;
/**
 * Administrative operations over the set of message queues: create, look up,
 * delete and enumerate queues.
 */
public interface MessageQueueManager {
    /**
     * Create a new message queue.
     *
     * @param name metadata describing the queue to create (note: parameter is
     *        the full metadata object, not just a name)
     * @return the newly created queue
     */
    MessageQueue createMessageQueue(MessageQueueMetadata name);

    /**
     * Get an existing message queue.
     *
     * @param name name of the queue
     * @return the queue with the given name
     */
    MessageQueue getMessageQueue(String name);

    /**
     * Delete a message queue.
     *
     * @param name name of the queue to delete
     */
    void deleteMessageQueue(String name);

    /**
     * List all message queues.
     *
     * @return metadata for every known queue
     */
    List<MessageQueueMetadata> listMessageQueues();
}
| 7,924 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageContext.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import org.apache.commons.lang.exception.ExceptionUtils;
/**
* Context of a message being handled by a dispatcher.
*
* @author elandau
*
*/
/**
 * Context of a message being handled by a dispatcher.  Bundles the message
 * itself, the next scheduled message (if any), the id used for acking, and
 * the history record for this handling attempt.
 *
 * @author elandau
 */
public class MessageContext {
    /** Message being handled. */
    private Message message;

    /** Next message that was queued up for processing. */
    private Message nextMessage;

    /** Message id used when acking. */
    private String ackMessageId;

    /**
     * History item associated with this message.  Only valid when
     * message.hasKey() is true.
     */
    private MessageHistory history = new MessageHistory();

    public Message getMessage() {
        return message;
    }

    public MessageContext setMessage(Message message) {
        this.message = message;
        return this;
    }

    public Message getNextMessage() {
        return nextMessage;
    }

    public MessageContext setNextMessage(Message nextMessage) {
        this.nextMessage = nextMessage;
        return this;
    }

    public MessageHistory getHistory() {
        return history;
    }

    /**
     * Mark this handling attempt as failed and record the error message and
     * stack trace in the history entry.
     */
    public MessageContext setException(Throwable t) {
        history.setStatus(MessageStatus.FAILED);
        history.setError(t.getMessage());
        history.setStackTrace(ExceptionUtils.getStackTrace(t));
        return this;
    }

    public MessageContext setStatus(MessageStatus status) {
        history.setStatus(status);
        return this;
    }

    public String getAckMessageId() {
        return ackMessageId;
    }

    public MessageContext setAckMessageId(String ackMessageId) {
        this.ackMessageId = ackMessageId;
        return this;
    }

    @Override
    public String toString() {
        return "MessageContext ["
                + "ackMessageId=" + ackMessageId
                + ", message=" + message
                + ", nextMessage=" + nextMessage
                + "]";
    }
}
| 7,925 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueDispatcher.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Queues;
import com.netflix.astyanax.recipes.locks.BusyLockException;
/**
* The message queue dispatcher reads message from the message queue
* and dispatches to worker threads.
*
* @author elandau
*
*/
public class MessageQueueDispatcher {
private static final Logger LOG = LoggerFactory.getLogger(MessageQueueDispatcher.class);
public final static int DEFAULT_BATCH_SIZE = 5;
public final static int DEFAULT_POLLING_INTERVAL = 1000;
public final static int DEFAULT_THREAD_COUNT = 1;
public final static int DEFAULT_CONSUMER_COUNT = 1;
public final static int DEFAULT_ACK_SIZE = 100;
public final static int DEFAULT_ACK_INTERVAL = 100;
public final static int DEFAULT_BACKLOG_SIZE = 1000;
public static class Builder {
private final MessageQueueDispatcher dispatcher = new MessageQueueDispatcher();
/**
* Specify the message queue to use for this dispatcher
* @param messageQueue
* @return
*/
public Builder withMessageQueue(MessageQueue messageQueue) {
dispatcher.messageQueue = messageQueue;
return this;
}
/**
* Change the number of threads reading from the queue
*
* @param threadCount
* @deprecated Use withProcessorThreadCount
*/
public Builder withThreadCount(int threadCount) {
return withProcessorThreadCount(threadCount);
}
/**
* Specify number of threads that are processing events popped from the queue
* @param threadCount
* @return
*/
public Builder withProcessorThreadCount(int threadCount) {
dispatcher.processorThreadCount = threadCount;
return this;
}
/**
* Number of pending events to process in the backlog
* @param size
* @return
*/
public Builder withBacklogSize(int size) {
dispatcher.backlogSize = size;
return this;
}
/**
* Set the number of consumers that will be removing items from the
* queue. This value must be less than or equal to the thread count.
* @param consumerCount
* @return
*/
public Builder withConsumerCount(int consumerCount) {
dispatcher.consumerCount = consumerCount;
return this;
}
/**
* Number of 'triggers' to read from the queue in each call.
* Default is 1
* @param batchSize
*/
public Builder withBatchSize(int batchSize) {
dispatcher.batchSize = batchSize;
return this;
}
/**
* Flush the ack queue on this interval.
* @param interval
* @param units
*/
public Builder withAckInterval(long interval, TimeUnit units) {
dispatcher.ackInterval = TimeUnit.MILLISECONDS.convert(interval, units);
return this;
}
/**
* Interval for polling from the queue.
* @param interval
* @param units
*/
public Builder withPollingInterval(long interval, TimeUnit units) {
dispatcher.pollingInterval = TimeUnit.MILLISECONDS.convert(interval, units);
return this;
}
/**
* Callback to process messages. The callback is called from any of the internal processing
* threads and is therefore not thread safe.
* @param callback
* @return true to ack the message, false to not ack and cause the message to timeout
* Throw an exception to force the message to be added to the poison queue
*/
public Builder withCallback(Function<MessageContext, Boolean> callback) {
dispatcher.callback = callback;
return this;
}
/**
* Provide a message handler factory to use when creating tasks.
* @param factory
* @return
*/
public Builder withMessageHandlerFactory(MessageHandlerFactory factory) {
dispatcher.handlerFactory = factory;
return this;
}
public MessageQueueDispatcher build() {
Preconditions.checkArgument(dispatcher.consumerCount <= dispatcher.processorThreadCount, "consumerCounter must be <= threadCount");
dispatcher.initialize();
return dispatcher;
}
}
private int processorThreadCount = DEFAULT_THREAD_COUNT;
private int batchSize = DEFAULT_BATCH_SIZE;
private int consumerCount = DEFAULT_CONSUMER_COUNT;
private int ackSize = DEFAULT_ACK_SIZE;
private long ackInterval = DEFAULT_ACK_INTERVAL;
private int backlogSize = DEFAULT_BACKLOG_SIZE;
private long pollingInterval = DEFAULT_POLLING_INTERVAL;
private boolean terminate = false;
private MessageQueue messageQueue;
private ExecutorService executor;
private MessageConsumer ackConsumer;
private Function<MessageContext, Boolean> callback;
private MessageHandlerFactory handlerFactory;
private LinkedBlockingQueue<MessageContext> toAck = Queues.newLinkedBlockingQueue();
private LinkedBlockingQueue<MessageContext> toProcess = Queues.newLinkedBlockingQueue(500);
private MessageQueueDispatcher() {
}
private void initialize() {
Preconditions.checkNotNull(messageQueue, "Must specify message queue");
if (this.handlerFactory == null)
this.handlerFactory = new SimpleMessageHandlerFactory();
toProcess = Queues.newLinkedBlockingQueue(backlogSize);
}
public void start() {
executor = Executors.newScheduledThreadPool(processorThreadCount + consumerCount + 1);
startAckThread();
for (int i = 0; i < consumerCount; i++) {
startConsumer(i);
}
for (int i = 0; i < processorThreadCount; i++) {
startProcessor(i);
}
}
public void stop() {
terminate = true;
executor.shutdownNow();
}
private void startAckThread() {
ackConsumer = messageQueue.createConsumer();
executor.submit(new Runnable() {
@Override
public void run() {
String name = StringUtils.join(Lists.newArrayList(messageQueue.getName(), "Ack"), ":");
Thread.currentThread().setName(name);
while (!terminate) {
try {
List<MessageContext> messages = Lists.newArrayList();
toAck.drainTo(messages);
if (!messages.isEmpty()) {
try {
ackConsumer.ackMessages(messages);
} catch (MessageQueueException e) {
toAck.addAll(messages);
LOG.warn("Failed to ack consumer", e);
}
}
}
catch (Throwable t) {
LOG.info("Error acking messages", t);
}
try {
Thread.sleep(ackInterval);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
}
}
});
}
private void startConsumer(final int id) {
executor.submit(new Runnable() {
@Override
public void run() {
String name = StringUtils.join(Lists.newArrayList(messageQueue.getName(), "Consumer", Integer.toString(id)), ":");
Thread.currentThread().setName(name);
// Create the consumer context
final MessageConsumer consumer = messageQueue.createConsumer();
while (!terminate) {
// Process events in a tight loop, until asked to terminate
Collection<MessageContext> messages = null;
try {
messages = consumer.readMessages(batchSize);
if (messages.isEmpty()) {
Thread.sleep(pollingInterval);
}
else {
for (MessageContext context : messages) {
toProcess.put(context);
}
}
}
catch (BusyLockException e) {
try {
Thread.sleep(pollingInterval);
} catch (InterruptedException e1) {
Thread.interrupted();
return;
}
}
catch (Throwable t) {
LOG.warn("Error consuming messages ", t);
}
}
}
});
}
private void startProcessor(final int id) {
executor.submit(new Runnable() {
@Override
public void run() {
String name = StringUtils.join(Lists.newArrayList(messageQueue.getName(), "Processor", Integer.toString(id)), ":");
Thread.currentThread().setName(name);
LOG.info("Starting message processor : " + name);
try {
while (!terminate) {
// Pop a message off the queue, blocking if empty
final MessageContext context;
try {
context = toProcess.take();
if (context == null)
continue;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
// Process the message
Message message = context.getMessage();
try {
// Message has a specific handler class
if (message.getTaskClass() != null) {
@SuppressWarnings("unchecked")
Function<MessageContext, Boolean> task = handlerFactory.createInstance(message.getTaskClass());
if (task.apply(context)) {
toAck.add(context);
}
continue;
}
// Use default callback
if (callback.apply(context)) {
context.setStatus(MessageStatus.DONE);
toAck.add(context);
continue;
}
}
catch (Throwable t) {
context.setException(t);
toAck.add(context);
LOG.error("Error processing message " + message.getKey(), t);
// try {
// ackConsumer.ackPoisonMessage(context);
// } catch (MessageQueueException e) {
// LOG.warn("Failed to ack poison message " + message.getKey(), e);
// }
}
}
}
catch (Throwable t) {
LOG.error("Error running producer : " + name, t);
}
}
});
}
}
| 7,926 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/ShardLockManager.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import com.netflix.astyanax.recipes.locks.BusyLockException;
/**
* Interface for a queue shard lock manager.
*
* @author pbhattacharyya
*/
public interface ShardLockManager {
    /**
     * Acquire an exclusive lock on the named shard.
     * @param shardName row key of the shard to lock
     * @return an opaque handle to pass back to {@link #releaseLock(ShardLock)}
     * @throws BusyLockException if the shard is currently locked elsewhere
     */
    ShardLock acquireLock(String shardName) throws BusyLockException;
    /**
     * Release a lock previously obtained from {@link #acquireLock(String)}.
     * @param lock the handle returned by acquireLock
     */
    void releaseLock(ShardLock lock);
}
| 7,927 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueStats.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
public interface MessageQueueStats {
    /** Increment the count of shard polls that returned no messages. */
    void incEmptyPartitionCount();
    /** Increment the count of shard lock contention events. */
    void incLockContentionCount();
    /** Increment the count of messages consumed. */
    void incProcessCount();
    /** Increment the count of timed-out messages that were picked up again. */
    void incReprocessCount();
    /** Increment the count of expired shard locks encountered. */
    void incExpiredLockCount();
    /** Increment the count of messages sent by this producer. */
    void incSendMessageCount();
    /** Increment the count of message acks sent. */
    void incAckMessageCount();
    /** Increment the count of messages whose content could not be handled. */
    void incInvalidMessageCount();
    /** Increment the count of storage errors while committing queue changes. */
    void incPersistError();
    /**
     * Number of shards that were empty when read. This is normal and
     * a high number can indicate that the polling interval is too
     * low or that there are too many shards in the queue.
     * @return
     */
    long getEmptyPartitionCount();
    /**
     * Number of lock contention events.
     * (Method name carries a historical typo -- "Countention" -- kept as-is
     * for binary compatibility with existing implementations.)
     * @return
     */
    long getLockCountentionCount();
    /**
     * Number of messages consumed
     * @return
     */
    long getProcessCount();
    /**
     * Number of timed out messages.
     * @return
     */
    long getReprocessCount();
    /**
     * Number of expired locks found on a queue shard. An expired lock indicates
     * that a client crashed before it could unlock a shard when popping events.
     * @return
     */
    long getExpiredLockCount();
    /**
     * Number of message acks being sent
     * @return
     */
    long getAckMessageCount();
    /**
     * Number of messages send by this producer. This is not a global total
     * number of messages ever sent on the queue.
     * @return
     */
    long getSendMessageCount();
    /**
     * Number of messages that contain invalid data such unfound task class
     * @return
     */
    long getInvalidMessageCount();
    /**
     * Number of storage errors trying to commit a change to the queue.
     * This include popping and acking messages
     * @return
     */
    long getPersistErrorCount();
}
| 7,928 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueShardStats.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
public interface MessageQueueShardStats {
    /**
     * @return number of entries returned by the most recent read of this shard
     */
    public long getLastReadCount();
    /**
     * @return running total of entries read from this shard
     */
    public long getReadCount();
    /**
     * @return running total of entries written to this shard
     */
    public long getWriteCount();
}
| 7,929 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueShard.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.concurrent.atomic.AtomicLong;
/**
* Track the state of a partition
*
* @author elandau
*/
public class MessageQueueShard implements MessageQueueShardStats {
private volatile int lastCount = 0;
private final String name;
private final int partition;
private final int shard;
private final AtomicLong readCount = new AtomicLong();
private final AtomicLong writeCount = new AtomicLong();
public MessageQueueShard(String name, int partition, int shard) {
this.name = name;
this.partition = partition;
this.shard = shard;
}
public String getName() {
return name;
}
public void setLastCount(int count) {
this.lastCount = count;
this.readCount.addAndGet(count);
}
@Override
public long getReadCount() {
return this.readCount.get();
}
@Override
public long getWriteCount() {
return this.writeCount.get();
}
@Override
public long getLastReadCount() {
return this.lastCount;
}
public void incInsertCount(int count) {
this.writeCount.addAndGet(count);
}
public int getShard() {
return this.shard;
}
public int getPartition() {
return this.partition;
}
@Override
public String toString() {
return "Partition [lastCount=" + lastCount + ", name=" + name + ", partition=" + partition + ", shard=" + shard + "]";
}
} | 7,930 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueMetadata.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.concurrent.TimeUnit;
/**
* MessageQueueSettings settings that are persisted to cassandra
*/
public class MessageQueueMetadata {
    public static final String DEFAULT_QUEUE_NAME = "Queue";
    public static final Integer DEFAULT_RETENTION_TIMEOUT = null;
    public static final int DEFAULT_SHARD_COUNT = 1;
    public static final Long DEFAULT_BUCKET_DURATION = null;
    public static final int DEFAULT_BUCKET_COUNT = 1;
    public static final Integer DEFAULT_HISTORY_TTL = null;
    /** Default poll interval, in milliseconds.  (The original
     *  MILLISECONDS.convert(100, MILLISECONDS) was a no-op.) */
    @Deprecated
    public static final long DEFAULT_POLL_WAIT = 100;
    // Duration of one time partition; null disables time partitioning.
    // NOTE(review): callers in this codebase pass microseconds -- confirm units.
    private Long partitionDuration = DEFAULT_BUCKET_DURATION;
    private int partitionCount = DEFAULT_BUCKET_COUNT;
    // Retention in seconds; null means keep forever.
    private Integer retentionTimeout = DEFAULT_RETENTION_TIMEOUT;
    private int shardCount = DEFAULT_SHARD_COUNT;
    // TTL (seconds) applied to history entries; null means no expiry.
    private Integer historyTtl = DEFAULT_HISTORY_TTL;
    private String queueName = DEFAULT_QUEUE_NAME;
    @Deprecated
    private long pollInterval = DEFAULT_POLL_WAIT;
    public Long getPartitionDuration() {
        return partitionDuration;
    }
    public int getPartitionCount() {
        return partitionCount;
    }
    public Integer getRetentionTimeout() {
        return retentionTimeout;
    }
    public int getShardCount() {
        return shardCount;
    }
    public void setPartitionDuration(long partitionDuration) {
        this.partitionDuration = partitionDuration;
    }
    public void setPartitionCount(int partitionCount) {
        this.partitionCount = partitionCount;
    }
    public void setRetentionTimeout(Integer retentionTimeout) {
        this.retentionTimeout = retentionTimeout;
    }
    /**
     * Set the retention timeout in arbitrary units, stored internally as seconds.
     * Throws NullPointerException if retentionTimeout is null.
     */
    public void setRetentionTimeout(Long retentionTimeout, TimeUnit units) {
        this.retentionTimeout = (int)TimeUnit.SECONDS.convert(retentionTimeout, units);
    }
    public void setShardCount(int shardCount) {
        this.shardCount = shardCount;
    }
    public Integer getHistoryTtl() {
        return historyTtl;
    }
    public void setHistoryTtl(Integer historyTtl) {
        this.historyTtl = historyTtl;
    }
    public String getQueueName() {
        return queueName;
    }
    public void setQueueName(String queueName) {
        this.queueName = queueName;
    }
    /**
     * Define this on the ShardReaderPolicy
     * @return
     */
    @Deprecated
    public long getPollInterval() {
        return pollInterval;
    }
    /**
     * Define this on the ShardReaderPolicy
     * @return
     */
    @Deprecated
    public void setPollInterval(long pollInterval) {
        this.pollInterval = pollInterval;
    }
    @Override
    public String toString() {
        return "MessageQueueSettings [partitionDuration=" + partitionDuration + ", partitionCount=" + partitionCount
                + ", retentionTimeout=" + retentionTimeout + ", shardCount=" + shardCount + ", historyTtl=" + historyTtl
                + ", queueName=" + queueName + ", pollInterval=" + pollInterval + "]";
    }
}
| 7,931 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/ShardedDistributedMessageQueue.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang.StringUtils;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.annotate.JsonSerialize;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.Equality;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.recipes.queue.shard.ModShardPolicy;
import com.netflix.astyanax.recipes.queue.shard.ShardReaderPolicy;
import com.netflix.astyanax.recipes.queue.shard.TimeModShardPolicy;
import com.netflix.astyanax.recipes.queue.shard.TimePartitionedShardReaderPolicy;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.retry.RunOnce;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.TimeUUIDSerializer;
import com.netflix.astyanax.util.RangeBuilder;
import com.netflix.astyanax.util.TimeUUIDUtils;
/**
* ShardedDistributedMessageQueue is a Cassandra backed client driven message queue.
*
* Key features
* 1. Time partition circular row key set used to time bound how much a wide row can grow. This,
* along with an aggressive gc_grace_seconds will give cassandra a chance to clear out the row
* before the clients cycle back to the time partition. Only one partition is active at any
* given time.
* 2. Mod sharding per partition based on message time. This solves the problem of lock contention
* on the acitve time partition.
* 3. Smart processing of partitions and shards to read mostly from the current time shard but allowing
* some cycle for processing older shards
* 4. Read-ack model of removing elements from the queue. As part of removing an element from the queue
* the client inserts a timeout message. Once the message has been processed the timeout message is removed
* from the queue. Otherwise it will be processed if it's time arrived and it is still in the queue.
* 5. Batch read of events
* 6. Batch insert of events
*
* Algorithm:
*
* Messages are stored as columns in an index where the columns are stored in time order. The time can
* be the current time for immediate execution or future time for recurring or scheduled messages.
* Jobs will be processed in time order.
*
* To achieve higher scalability the job queue (implemented as a row) is sharded by a user provided shard.
* Rows also implement a rolling time window which is used to alleviate tombstone pressure
*
* Enque:
*
* Deque:
* 1. Lock + read top N columns
* 2. Select M jobs to process
* Select jobs in <state> = scheduled
* If any jobs are marked as processing then delete and update their state
* 3. Release the lock with a mutation that has a
* delete for the columns being processed and
* insert with the same data but <state> = processing
* 4. Process the jobs
* 5. If the processing thread is about to enter a section which is not repeatable then update the column
* by changing the state to NotRepeatable.
* 6. Issue a delete for processed job
*
* Schema:
* RowKey: TimeBucket + Shard
* Column: <type><priority><timeuuid><state>
* Value: Job Data
*
* <type>
* 0 - Lock meta
* 1 - Queue item
* <state>
* 0 - Lock columns - There are special columns that are used to lock the row
* 1 - Scheduled
* 2 - Processing - timeuuid = timeout
* 3 - NotRepeatable - special indicator that tells the queue that the job is not replayble since there could
* be a persistence
*
*
* Recurring Messages:
*
* Column families:
* Queue
* KeyLookup
* History
*
* @author elandau
*
*/
public class ShardedDistributedMessageQueue implements MessageQueue {
    private static final Logger LOG = LoggerFactory.getLogger(ShardedDistributedMessageQueue.class);
    // Delimiters used when composing message ids and composite row keys.
    public static final char COMPOSITE_ID_DELIMITER = ':';
    public static final char COMPOSITE_KEY_DELIMITER = '$';
    public static final String DEFAULT_COLUMN_FAMILY_NAME = "Queues";
    public static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.CL_LOCAL_QUORUM;
    public static final RetryPolicy DEFAULT_RETRY_POLICY = RunOnce.get();
    // Lock timeout is expressed in MICROseconds; the two TTLs below are seconds.
    public static final long DEFAULT_LOCK_TIMEOUT = TimeUnit.MICROSECONDS.convert(30, TimeUnit.SECONDS);
    public static final Integer DEFAULT_LOCK_TTL = (int)TimeUnit.SECONDS.convert(2, TimeUnit.MINUTES);
    public static final Integer DEFAULT_METADATA_DELETE_TTL = (int)TimeUnit.SECONDS.convert(2, TimeUnit.SECONDS);
    public static final Boolean DEFAULT_POISON_QUEUE_ENABLED = false;
    // Suffixes appended to the base column family name for the three CFs used.
    public static final String DEFAULT_QUEUE_SUFFIX = "_queue";
    public static final String DEFAULT_METADATA_SUFFIX = "_metadata";
    public static final String DEFAULT_HISTORY_SUFFIX = "_history";
    public static final long SCHEMA_CHANGE_DELAY = 3000;
    public static final ImmutableMap<String, Object> DEFAULT_COLUMN_FAMILY_SETTINGS = ImmutableMap.<String, Object>builder()
            .put("read_repair_chance", 1.0)
            .put("gc_grace_seconds", 5) // TODO: Calculate gc_grace_seconds
            .put("compaction_strategy", "SizeTieredCompactionStrategy")
            .build();
    // Composite serializers for queue entries and the per-key metadata index.
    final static AnnotatedCompositeSerializer<MessageQueueEntry> entrySerializer
        = new AnnotatedCompositeSerializer<MessageQueueEntry>(MessageQueueEntry.class);
    final static AnnotatedCompositeSerializer<MessageMetadataEntry> metadataSerializer
        = new AnnotatedCompositeSerializer<MessageMetadataEntry>(MessageMetadataEntry.class);
static final ObjectMapper mapper = new ObjectMapper();
{
mapper.getSerializationConfig().setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
mapper.enableDefaultTyping();
}
/**
*
* @author elandau
*/
public static class Builder {
private String columnFamilyName = DEFAULT_COLUMN_FAMILY_NAME;
private ShardLockManager lockManager;
private Keyspace keyspace;
private ConsistencyLevel consistencyLevel = DEFAULT_CONSISTENCY_LEVEL;
private long lockTimeout = DEFAULT_LOCK_TIMEOUT;
private int lockTtl = DEFAULT_LOCK_TTL;
private String queueName = MessageQueueMetadata.DEFAULT_QUEUE_NAME;
private int metadataDeleteTTL = DEFAULT_METADATA_DELETE_TTL;
private Collection<MessageQueueHooks> hooks = Lists.newArrayList();
private MessageQueueMetadata metadata = new MessageQueueMetadata();
private MessageQueueStats stats ;
private Boolean bPoisonQueueEnabled = DEFAULT_POISON_QUEUE_ENABLED;
private Map<String, Object> columnFamilySettings = DEFAULT_COLUMN_FAMILY_SETTINGS;
private ShardReaderPolicy.Factory shardReaderPolicyFactory;
private ModShardPolicy modShardPolicy;
public Builder() {
metadata.setQueueName(queueName);
}
public Builder withColumnFamily(String columnFamilyName) {
this.columnFamilyName = columnFamilyName;
return this;
}
public Builder withMetadata(MessageQueueMetadata metadata) {
this.metadata = metadata;
return this;
}
public Builder withShardCount(int count) {
this.metadata.setShardCount(count);
return this;
}
public Builder withTimeBuckets(int bucketCount, int bucketDuration, TimeUnit units) {
this.metadata.setPartitionDuration(TimeUnit.MICROSECONDS.convert(bucketDuration, units));
this.metadata.setPartitionCount(bucketCount);
return this;
}
/**
* @deprecated Use withTimeBuckets instead
*/
public Builder withBuckets(int bucketCount, int bucketDuration, TimeUnit units) {
return withTimeBuckets(bucketCount, bucketDuration, units);
}
public Builder withRetentionTimeout(Long timeout, TimeUnit units) {
this.metadata.setRetentionTimeout(timeout, units);
return this;
}
public Builder withLockTimeout(Long timeout, TimeUnit units) {
this.lockTimeout = TimeUnit.MICROSECONDS.convert(timeout, units);
return this;
}
public Builder withLockTtl(Long ttl, TimeUnit units) {
this.lockTtl = (int)TimeUnit.SECONDS.convert(ttl, units);
return this;
}
/**
* Define this on the ShardReaderPolicy instead
* @param internval
* @param units
* @return
*/
@Deprecated
public Builder withPollInterval(Long internval, TimeUnit units) {
this.metadata.setPollInterval(TimeUnit.MILLISECONDS.convert(internval, units));
return this;
}
public Builder withQueueName(String queueName) {
this.metadata.setQueueName(queueName);
return this;
}
public Builder withConsistencyLevel(ConsistencyLevel level) {
this.consistencyLevel = level;
return this;
}
public Builder withColumnFamilySettings(Map<String, Object> settings) {
this.columnFamilySettings = settings;
return this;
}
public Builder withKeyspace(Keyspace keyspace) {
this.keyspace = keyspace;
return this;
}
public Builder withStats(MessageQueueStats stats) {
this.stats = stats;
return this;
}
public Builder withHook(MessageQueueHooks hooks) {
this.hooks.add(hooks);
return this;
}
public Builder withHooks(Collection<MessageQueueHooks> hooks) {
this.hooks.addAll(hooks);
return this;
}
public Builder withPoisonQueue(Boolean enabled) {
this.bPoisonQueueEnabled = enabled;
return this;
}
public Builder withModShardPolicy(ModShardPolicy policy) {
this.modShardPolicy = policy;
return this;
}
public Builder withShardReaderPolicy(final ShardReaderPolicy shardReaderPolicy) {
this.shardReaderPolicyFactory = new ShardReaderPolicy.Factory() {
@Override
public ShardReaderPolicy create(MessageQueueMetadata metadata) {
return shardReaderPolicy;
}
};
return this;
}
public Builder withShardReaderPolicy(ShardReaderPolicy.Factory shardReaderPolicyFactory) {
this.shardReaderPolicyFactory = shardReaderPolicyFactory;
return this;
}
public Builder withShardLockManager(ShardLockManager mgr) {
this.lockManager = mgr;
return this;
}
public ShardedDistributedMessageQueue build() throws MessageQueueException {
Preconditions.checkArgument(
TimeUnit.SECONDS.convert(lockTimeout, TimeUnit.MICROSECONDS) < lockTtl,
"Timeout " + lockTtl + " seconds must be less than TTL " + TimeUnit.SECONDS.convert(lockTtl, TimeUnit.MICROSECONDS) + " seconds");
Preconditions.checkNotNull(keyspace, "Must specify keyspace");
if (shardReaderPolicyFactory == null)
shardReaderPolicyFactory = TimePartitionedShardReaderPolicy.Factory.builder().build();
if (modShardPolicy == null)
modShardPolicy = TimeModShardPolicy.getInstance();
if (stats == null)
stats = new CountingQueueStats();
return new ShardedDistributedMessageQueue(this);
}
}
    // Immutable after configuration
    final ShardLockManager lockManager;
    final ColumnFamily<String, MessageQueueEntry> queueColumnFamily;
    final ColumnFamily<String, MessageMetadataEntry> keyIndexColumnFamily;
    final ColumnFamily<String, UUID> historyColumnFamily;
    final Keyspace keyspace;
    final ConsistencyLevel consistencyLevel;
    // Lock timeout in microseconds; lock/metadata TTLs in seconds.
    final long lockTimeout;
    final int lockTtl;
    final int metadataDeleteTTL;
    final Collection<MessageQueueHooks> hooks;
    final MessageQueueMetadata metadata;
    final Boolean bPoisonQueueEnabled;
    final Map<String, Object> columnFamilySettings;
    final ShardReaderPolicy shardReaderPolicy;
    final ModShardPolicy modShardPolicy;
    // Invoked when a stored message cannot be parsed; logs and returns null,
    // which callers treat as "no message".
    final Function<String, Message> invalidMessageHandler = new Function<String, Message>() {
        @Override
        public Message apply(String input) {
            LOG.warn("Invalid message: " + input);
            return null;
        }
    };
    final MessageQueueStats stats;
    // Randomly-seeded counter; presumably used to spread writes across shards --
    // confirm against the send path (not visible in this chunk).
    final AtomicLong counter = new AtomicLong(new Random().nextInt(1000));
private ShardedDistributedMessageQueue(Builder builder) throws MessageQueueException {
this.queueColumnFamily = ColumnFamily.newColumnFamily(builder.columnFamilyName + DEFAULT_QUEUE_SUFFIX, StringSerializer.get(), entrySerializer);
this.keyIndexColumnFamily = ColumnFamily.newColumnFamily(builder.columnFamilyName + DEFAULT_METADATA_SUFFIX, StringSerializer.get(), metadataSerializer);
this.historyColumnFamily = ColumnFamily.newColumnFamily(builder.columnFamilyName + DEFAULT_HISTORY_SUFFIX, StringSerializer.get(), TimeUUIDSerializer.get());
this.consistencyLevel = builder.consistencyLevel;
this.keyspace = builder.keyspace;
this.hooks = builder.hooks;
this.modShardPolicy = builder.modShardPolicy;
this.lockManager = builder.lockManager;
this.lockTimeout = builder.lockTimeout;
this.lockTtl = builder.lockTtl;
this.bPoisonQueueEnabled = builder.bPoisonQueueEnabled;
this.metadata = builder.metadata;
this.columnFamilySettings = builder.columnFamilySettings;
this.metadataDeleteTTL = builder.metadataDeleteTTL;
this.stats = builder.stats;
this.shardReaderPolicy = builder.shardReaderPolicyFactory.create(metadata);
// try {
// Column<MessageQueueEntry> column = keyspace.prepareQuery(queueColumnFamily)
// .setConsistencyLevel(consistencyLevel)
// .getRow(getName())
// .getColumn(MessageQueueEntry.newMetadataEntry())
// .execute()
// .getResult();
//
// ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
// MessageQueueSettings existingSettings = mapper.readValue(bais, MessageQueueSettings.class);
//
// // TODO: Override some internal settings with those persisted in the queue metadata
// }
// catch (NotFoundException e) {
// LOG.info("Message queue metadata not found. Queue does not exist in CF and will be created now.");
// }
// catch (BadRequestException e) {
// if (e.isUnconfiguredColumnFamilyError()) {
// LOG.info("Column family does not exist. Call createStorage() to create column family.");
// }
// else {
// throw new MessageQueueException("Error getting message queue metadata", e);
// }
// }
// catch (Exception e) {
// throw new MessageQueueException("Error getting message queue metadata", e);
// }
}
/**
* Return the shard for this message
* @param message
* @return
*/
String getShardKey(Message message) {
return getShardKey(message.getTokenTime(), this.modShardPolicy.getMessageShard(message, metadata));
}
/**
* Return the shard for this timestamp
* @param messageTime
* @param modShard
* @return
*/
private String getShardKey(long messageTime, int modShard) {
long timePartition;
if (metadata.getPartitionDuration() != null)
timePartition = (messageTime / metadata.getPartitionDuration()) % metadata.getPartitionCount();
else
timePartition = 0;
return getName() + ":" + timePartition + ":" + modShard;
}
String getCompositeKey(String name, String key) {
return name + COMPOSITE_KEY_DELIMITER + key;
}
private static String[] splitCompositeKey(String key) throws MessageQueueException {
String[] parts = StringUtils.split(key, COMPOSITE_KEY_DELIMITER);
if (parts.length != 2) {
throw new MessageQueueException("Invalid key '" + key + "'. Expected format <queue|shard>$<name>. ");
}
return parts;
}
    /**
     * Serialize an object to a JSON string using the shared mapper.
     * NOTE(review): baos.toString() uses the platform default charset; it pairs
     * with String.getBytes() in deserializeString(), so both would have to
     * change together if UTF-8 were ever made explicit.
     */
    <T> String serializeToString(T trigger) throws JsonGenerationException, JsonMappingException, IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        mapper.writeValue(baos, trigger);
        baos.flush();
        return baos.toString();
    }
    /**
     * Deserialize a JSON string into an instance of the given class.
     * Uses data.getBytes() with the platform default charset -- the inverse of
     * serializeToString() above.
     */
    private <T> T deserializeString(String data, Class<T> clazz) throws JsonParseException, JsonMappingException, IOException {
        return (T) mapper.readValue(
                new ByteArrayInputStream(data.getBytes()),
                clazz);
    }
    /**
     * Deserialize a JSON string into an instance of the named class, resolving
     * the class by name at runtime.  Currently unused (kept for API symmetry).
     */
    @SuppressWarnings({ "unused", "unchecked" })
    private <T> T deserializeString(String data, String className) throws JsonParseException, JsonMappingException, IOException, ClassNotFoundException {
        return (T) mapper.readValue(
                new ByteArrayInputStream(data.getBytes()),
                Class.forName(className));
    }
    /**
     * @return the configured queue name shared by all clients of this queue
     */
    @Override
    public String getName() {
        return metadata.getQueueName();
    }
@Override
public long getMessageCount() throws MessageQueueException {
Map<String, Integer> counts = getShardCounts();
long count = 0;
for (Integer value : counts.values()) {
count += value;
}
return count;
}
@Override
public Map<String, Integer> getShardCounts() throws MessageQueueException {
try {
List<String> keys = Lists.newArrayList();
for (int i = 0; i < metadata.getPartitionCount(); i++) {
for (int j = 0; j < metadata.getShardCount(); j++) {
keys.add(getName() + ":" + i + ":" + j);
}
}
Map<String, Integer> result = Maps.newTreeMap();
result.putAll(keyspace.prepareQuery(queueColumnFamily)
.getKeySlice(keys)
.getColumnCounts()
.execute()
.getResult());
return result;
} catch (ConnectionException e) {
throw new MessageQueueException("Failed to get counts", e);
}
}
@Override
public void clearMessages() throws MessageQueueException {
LOG.info("Clearing messages from '" + getName() + "'");
MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
for (MessageQueueShard partition : shardReaderPolicy.listShards()) {
mb.withRow(queueColumnFamily, partition.getName()).delete();
}
try {
mb.execute();
} catch (ConnectionException e) {
throw new MessageQueueException("Failed to clear messages from queue " + getName(), e);
}
}
@Override
public void deleteQueue() throws MessageQueueException {
LOG.info("Deleting queue '" + getName() + "'");
MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
for (MessageQueueShard partition : shardReaderPolicy.listShards()) {
mb.withRow(queueColumnFamily, partition.getName()).delete();
}
mb.withRow(queueColumnFamily, getName());
try {
mb.execute();
} catch (ConnectionException e) {
throw new MessageQueueException("Failed to clear messages from queue " + getName(), e);
}
}
/**
 * Read a single message without modifying or removing it from the queue.
 *
 * @param messageId composite id (shard key + queue entry id) returned at send time
 * @return the message, or null if no column exists for that id
 * @throws MessageQueueException on connection failure or if the stored payload
 *         cannot be parsed and the invalid-message handler also fails
 */
@Override
public Message peekMessage(String messageId) throws MessageQueueException {
    String[] parts = splitCompositeKey(messageId);
    String shardKey = parts[0];
    MessageQueueEntry entry = new MessageQueueEntry(parts[1]);
    try {
        Column<MessageQueueEntry> column = keyspace
                .prepareQuery(queueColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getKey(shardKey)
                .getColumn(entry)
                .execute().getResult();
        try {
            ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
            return mapper.readValue(bais, Message.class);
        } catch (Exception e) {
            LOG.warn("Error parsing message", e);
            // Error parsing the message so we pass it on to the invalid message handler.
            try {
                return invalidMessageHandler.apply(column.getStringValue());
            }
            catch (Exception e2) {
                LOG.warn("Error handling invalid message message", e2);
                // BUGFIX: chain the handler failure as the cause instead of
                // dropping the stack trace entirely.
                throw new MessageQueueException("Error parsing message " + messageId, e2);
            }
        }
    } catch (NotFoundException e) {
        // Column absent: the message does not exist (or was already consumed).
        return null;
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error getting message " + messageId, e);
    }
}
/**
 * Return all pending messages associated with a unique key, without altering
 * queue state. Index columns with a non-zero TTL are skipped.
 *
 * @param key the user-supplied message key
 * @return messages found for the key (empty list if the index row is missing)
 * @throws MessageQueueException on connection failure
 */
@Override
public List<Message> peekMessagesByKey(String key) throws MessageQueueException {
    String groupRowKey = getCompositeKey(getName(), key);
    List<Message> messages = Lists.newArrayList();
    try {
        // Restrict the read to MessageId-type columns of this key's index row.
        ColumnList<MessageMetadataEntry> columns = keyspace.prepareQuery(keyIndexColumnFamily)
            .getRow(groupRowKey)
            .withColumnRange(metadataSerializer.buildRange()
                .greaterThanEquals((byte)MessageMetadataEntryType.MessageId.ordinal())
                .lessThanEquals((byte)MessageMetadataEntryType.MessageId.ordinal())
                .build()
            )
            .execute()
            .getResult();
        for (Column<MessageMetadataEntry> entry : columns) {
            if (entry.getTtl() != 0)
                continue;
            Message message = peekMessage(entry.getName().getName());
            if (message != null) {
                // BUGFIX: reuse the message we just fetched; the old code called
                // peekMessage() a second time, doubling the Cassandra round trips.
                messages.add(message);
            }
            else {
                LOG.warn("No queue item for " + entry.getName());
            }
        }
    } catch (NotFoundException e) {
        // No index row for this key; fall through and return the empty list.
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }
    return messages;
}
/**
 * Look up the first pending message for a unique key without altering queue
 * state, or null if none exists.
 */
@Override
public Message peekMessageByKey(String key) throws MessageQueueException {
    String groupRowKey = getCompositeKey(getName(), key);
    try {
        // Restrict the read to MessageId-type columns of this key's index row.
        byte messageIdOrdinal = (byte)MessageMetadataEntryType.MessageId.ordinal();
        ColumnList<MessageMetadataEntry> columns = keyspace.prepareQuery(keyIndexColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getRow(groupRowKey)
                .withColumnRange(metadataSerializer.buildRange()
                        .greaterThanEquals(messageIdOrdinal)
                        .lessThanEquals(messageIdOrdinal)
                        .build())
                .execute()
                .getResult();
        for (Column<MessageMetadataEntry> entry : columns) {
            if (entry.getTtl() == 0) {
                // Return the first one we get. Hmmm... maybe we want to do some validation checks here
                return peekMessage(entry.getName().getName());
            }
        }
        return null;
    } catch (NotFoundException e) {
        return null;
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }
}
/**
 * Delete every queue entry referenced by a unique key's index row, then drop
 * the index row itself.
 *
 * @return true if the index row existed and deletions were issued
 */
@Override
public boolean deleteMessageByKey(String key) throws MessageQueueException {
    MutationBatch batch = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    String groupRowKey = getCompositeKey(getName(), key);
    try {
        // Read all MessageId columns pointing at queue entries for this key.
        ColumnList<MessageMetadataEntry> columns = keyspace.prepareQuery(keyIndexColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getRow(groupRowKey)
                .withColumnRange(metadataSerializer.buildRange()
                        .greaterThanEquals((byte)MessageMetadataEntryType.MessageId.ordinal())
                        .lessThanEquals((byte)MessageMetadataEntryType.MessageId.ordinal())
                        .build())
                .execute()
                .getResult();
        for (Column<MessageMetadataEntry> column : columns) {
            // parts[0] is the shard row key, parts[1] the serialized queue entry.
            String[] parts = splitCompositeKey(column.getName().getName());
            batch.withRow(queueColumnFamily, parts[0])
                 .deleteColumn(new MessageQueueEntry(parts[1]));
        }
        // Drop the index row as well.
        batch.withRow(keyIndexColumnFamily, groupRowKey).delete();
    } catch (NotFoundException e) {
        return false;
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }
    try {
        batch.execute();
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error deleting queue item " + groupRowKey, e);
    }
    return true;
}
/**
 * Remove a single message column from its shard row.
 */
@Override
public void deleteMessage(String messageId) throws MessageQueueException {
    // The message id encodes both the shard row key and the column to remove.
    String[] parts = splitCompositeKey(messageId);
    MessageQueueEntry entry = new MessageQueueEntry(parts[1]);
    try {
        keyspace.prepareColumnMutation(queueColumnFamily, parts[0], entry)
                .setConsistencyLevel(consistencyLevel)
                .deleteColumn()
                .execute();
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error deleting message " + messageId, e);
    }
}
/**
 * Remove a set of messages in a single mutation batch.
 */
@Override
public void deleteMessages(Collection<String> messageIds) throws MessageQueueException {
    MutationBatch batch = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    for (String messageId : messageIds) {
        // Each id encodes the shard row key and the serialized queue entry.
        String[] parts = splitCompositeKey(messageId);
        batch.withRow(queueColumnFamily, parts[0])
             .deleteColumn(new MessageQueueEntry(parts[1]));
    }
    try {
        batch.execute();
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error deleting messages " + messageIds, e);
    }
}
/**
 * Apply a schema-changing operation, retrying up to 3 times on schema
 * disagreement and treating "already exist" failures as success. Sleeps
 * SCHEMA_CHANGE_DELAY after each attempt to let the cluster settle.
 *
 * @param callable the schema change to run
 * @throws MessageQueueException if the change fails, retries are exhausted,
 *         or the thread is interrupted while waiting
 */
private void changeSchema(Callable<Void> callable) throws MessageQueueException {
    for (int i = 0; i < 3; i++) {
        try {
            callable.call();
            // Give the cluster time to propagate the schema change.
            try {
                Thread.sleep(SCHEMA_CHANGE_DELAY);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new MessageQueueException("Interrupted while trying to create column family for queue " + getName(), ie);
            }
            return;
        } catch (Exception e) {
            if (e.getClass().getSimpleName().equals("SchemaDisagreementException")) {
                // Check by class name since SchemaDisagreementException is defined in cassandra-thrift,
                // but astayanx-cassandra should not have a Thrift-specific dependency.
                try {
                    Thread.sleep(SCHEMA_CHANGE_DELAY);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new MessageQueueException("Interrupted while trying to create column family for queue " + getName(), ie);
                }
                // BUGFIX: retry the schema change. Previously this branch fell
                // through to the throw below, so the 3-attempt loop never
                // actually retried on schema disagreement.
                continue;
            }
            // BUGFIX: e.getMessage() can be null (e.g. a bare NPE from the
            // callable); guard before calling contains() to avoid masking the
            // real failure with an NPE.
            if (e.getMessage() != null && e.getMessage().contains("already exist"))
                return;
            throw new MessageQueueException("Failed to create column family for " + queueColumnFamily.getName(), e);
        }
    }
    // All retries exhausted without success or a definitive error.
    throw new MessageQueueException("Failed to change schema after 3 attempts for queue " + getName());
}
/**
 * Create the three column families backing the queue. Each creation is run
 * through changeSchema(), which retries on schema disagreement and tolerates
 * "already exist" errors, so this method is safe to call repeatedly.
 */
@Override
public void createStorage() throws MessageQueueException {
// Queue CF: one row per shard. Composite comparator components are
// (type, priority reversed, trigger-time TimeUUID, random TimeUUID, state),
// matching the @Component ordinals of MessageQueueEntry.
changeSchema(new Callable<Void>() {
@Override
public Void call() throws Exception {
keyspace.createColumnFamily(queueColumnFamily, ImmutableMap.<String, Object>builder()
.put("key_validation_class", "UTF8Type")
.put("comparator_type", "CompositeType(BytesType, BytesType(reversed=true), TimeUUIDType, TimeUUIDType, BytesType)")
.putAll(columnFamilySettings)
.build());
return null;
}
});
// Key index CF: maps a user-supplied message key to message ids; comparator
// is (metadata entry type, name) per MessageMetadataEntry.
changeSchema(new Callable<Void>() {
@Override
public Void call() throws Exception {
keyspace.createColumnFamily(keyIndexColumnFamily, ImmutableMap.<String, Object>builder()
.put("key_validation_class", "UTF8Type")
.put("comparator_type", "CompositeType(BytesType, UTF8Type)")
.putAll(columnFamilySettings)
.build());
return null;
}
});
// History CF: per-key message history stored as UTF8 (JSON) values.
changeSchema(new Callable<Void>() {
@Override
public Void call() throws Exception {
keyspace.createColumnFamily(historyColumnFamily, ImmutableMap.<String, Object>builder()
.put("default_validation_class", "UTF8Type")
.putAll(columnFamilySettings)
.build());
return null;
}
});
}
/**
 * Drop both the queue and key-index column families, pausing after each drop
 * to let the cluster reach schema agreement.
 */
@Override
public void dropStorage() throws MessageQueueException {
    try {
        keyspace.dropColumnFamily(this.queueColumnFamily);
        try {
            Thread.sleep(SCHEMA_CHANGE_DELAY);
        } catch (InterruptedException e) {
            // BUGFIX: restore the interrupt status instead of silently swallowing it.
            Thread.currentThread().interrupt();
        }
    } catch (ConnectionException e) {
        if (e.getMessage() == null || !e.getMessage().contains("already exist"))
            // BUGFIX: this is a drop, not a create; report the operation accurately.
            throw new MessageQueueException("Failed to drop column family " + queueColumnFamily.getName(), e);
    }
    try {
        keyspace.dropColumnFamily(this.keyIndexColumnFamily);
        try {
            Thread.sleep(SCHEMA_CHANGE_DELAY);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    } catch (ConnectionException e) {
        if (e.getMessage() == null || !e.getMessage().contains("already exist"))
            // BUGFIX: report the key index column family (the old message named
            // the queue column family for both drops).
            throw new MessageQueueException("Failed to drop column family " + keyIndexColumnFamily.getName(), e);
    }
}
/**
 * Write the queue's settings into the metadata column of its own row.
 */
@Override
public void createQueue() throws MessageQueueException {
    try {
        // Convert the message object to JSON
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        mapper.writeValue(out, metadata);
        out.flush();
        byte[] settingsJson = out.toByteArray();
        keyspace.prepareColumnMutation(queueColumnFamily, getName(), MessageQueueEntry.newMetadataEntry())
                .putValue(settingsJson, null)
                .execute();
    } catch (ConnectionException e) {
        throw new MessageQueueException("Failed to create column family for " + queueColumnFamily.getName(), e);
    } catch (Exception e) {
        throw new MessageQueueException("Error serializing queue settings " + queueColumnFamily.getName(), e);
    }
}
/**
 * Build a new consumer over this queue; each consumer keeps its own context.
 */
@Override
public MessageConsumer createConsumer() {
    MessageConsumer consumer = new MessageConsumerImpl(this);
    return consumer;
}
/**
 * Build a producer for this queue. Messages with a unique key go through a
 * two-phase lock-column protocol against the key index column family to
 * reject duplicates.
 */
@Override
public MessageProducer createProducer() {
    return new MessageProducer() {
        @Override
        public String sendMessage(Message message) throws MessageQueueException {
            SendMessageResponse response = sendMessages(Lists.newArrayList(message));
            if (!response.getNotUnique().isEmpty())
                throw new KeyExistsException("Key already exists ." + message.getKey());
            return Iterables.getFirst(response.getMessages().entrySet(), null).getKey();
        }
        @Override
        public SendMessageResponse sendMessages(Collection<Message> messages) throws MessageQueueException {
            // uniqueKeys is keyed by the COMPOSITE row key (queue name + message key).
            Map<String, Message> uniqueKeys        = Maps.newHashMap();
            Set<String>          notUniqueKeys     = Sets.newHashSet();
            List<Message>        notUniqueMessages = Lists.newArrayList();
            MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
            MessageMetadataEntry lockColumn = MessageMetadataEntry.newUnique();
            // Get list of keys that must be unique and prepare the mutation for phase 1
            for (Message message : messages) {
                if (message.hasUniqueKey()) {
                    String groupKey = getCompositeKey(getName(), message.getKey());
                    uniqueKeys.put(groupKey, message);
                    mb.withRow(keyIndexColumnFamily, groupKey)
                      .putEmptyColumn(lockColumn, (Integer)lockTtl);
                }
            }
            // We have some keys that need to be unique
            if (!uniqueKeys.isEmpty()) {
                // Submit phase 1: Create a unique column for ALL of the unique keys
                try {
                    mb.execute();
                } catch (ConnectionException e) {
                    throw new MessageQueueException("Failed to check keys for uniqueness (1): " + uniqueKeys, e);
                }
                // Phase 2: Read back ALL the lock columms
                mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
                Rows<String, MessageMetadataEntry> result;
                try {
                    result = keyspace.prepareQuery(keyIndexColumnFamily)
                            .setConsistencyLevel(consistencyLevel)
                            .getRowSlice(uniqueKeys.keySet())
                            .withColumnRange(metadataSerializer.buildRange()
                                    .greaterThanEquals((byte)MessageMetadataEntryType.Unique.ordinal())
                                    .lessThanEquals((byte)MessageMetadataEntryType.Unique.ordinal())
                                    .build())
                            .execute()
                            .getResult();
                } catch (ConnectionException e) {
                    throw new MessageQueueException("Failed to check keys for uniqueness (2): " + uniqueKeys, e);
                }
                for (Row<String, MessageMetadataEntry> row : result) {
                    // This key is already taken, roll back the check
                    if (row.getColumns().size() != 1) {
                        String messageKey = splitCompositeKey(row.getKey())[1];
                        notUniqueKeys.add(messageKey);
                        // BUGFIX: uniqueKeys is keyed by the full composite row
                        // key, so look it up with row.getKey(); the old lookup by
                        // the bare message key always returned null, filling
                        // notUniqueMessages with nulls.
                        notUniqueMessages.add(uniqueKeys.get(row.getKey()));
                        mb.withRow(keyIndexColumnFamily, row.getKey())
                          .deleteColumn(lockColumn);
                    }
                    // This key is now unique
                    else {
                        mb.withRow(keyIndexColumnFamily, row.getKey())
                          .putEmptyColumn(lockColumn);
                    }
                }
            }
            // Commit the messages
            Map<String, Message> success = Maps.newLinkedHashMap();
            for (Message message : messages) {
                if (message.hasKey() && notUniqueKeys.contains(message.getKey()))
                    continue;
                String messageId = fillMessageMutation(mb, message);
                success.put(messageId, message);
            }
            try {
                mb.execute();
            } catch (ConnectionException e) {
                throw new MessageQueueException("Failed to insert messages into queue.", e);
            }
            return new SendMessageResponse(success, notUniqueMessages);
        }
    };
}
/**
 * Add all mutations needed to enqueue one message to the batch and return the
 * message's composite id (shard key combined with the queue entry id).
 *
 * NOTE: mutates {@code message} by assigning it a fresh time-based token.
 */
String fillMessageMutation(MutationBatch mb, Message message) throws MessageQueueException {
// Get the execution time from the message or set to current time so it runs immediately
long curTimeMicros;
if (!message.hasTrigger()) {
curTimeMicros = TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
}
else {
curTimeMicros = TimeUnit.MICROSECONDS.convert(message.getTrigger().getTriggerTime(), TimeUnit.MILLISECONDS);
}
// Offset by a rotating counter so concurrent writers within the same
// millisecond get distinct microsecond timestamps
curTimeMicros += (counter.incrementAndGet() % 1000);
// Update the message for the new token
message.setToken(TimeUUIDUtils.getMicrosTimeUUID(curTimeMicros));
// Set up the queue entry
MessageQueueEntry entry = MessageQueueEntry.newMessageEntry(
message.getPriority(),
message.getToken(),
MessageQueueEntryState.Waiting);
// Convert the message object to JSON
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
mapper.writeValue(baos, message);
baos.flush();
} catch (Exception e) {
throw new MessageQueueException("Failed to serialize message data: " + message, e);
}
// Write the queue entry; column expires after the queue's retention timeout
String shardKey = getShardKey(message);
mb.withRow(queueColumnFamily, shardKey)
.putColumn(entry, new String(baos.toByteArray()), metadata.getRetentionTimeout());
// Write the lookup from queue key to queue entry
if (message.hasKey()) {
mb.withRow(keyIndexColumnFamily, getCompositeKey(getName(), message.getKey()))
.putEmptyColumn(MessageMetadataEntry.newMessageId(getCompositeKey(shardKey, entry.getMessageId())),
metadata.getRetentionTimeout());
}
// Allow hook processing
for (MessageQueueHooks hook : hooks) {
hook.beforeSendMessage(message, mb);
}
// Update state and return the token
stats.incSendMessageCount();
return getCompositeKey(shardKey, entry.getMessageId());
}
/**
 * Return history for a single key for the specified time range
 *
 * TODO: honor the time range :)
 */
@Override
public List<MessageHistory> getKeyHistory(String key, Long startTime, Long endTime, int count) throws MessageQueueException {
    ColumnList<UUID> columns;
    try {
        columns = keyspace.prepareQuery(historyColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getRow(key)
                .execute()
                .getResult();
    } catch (ConnectionException e) {
        throw new MessageQueueException("Failed to load history for " + key, e);
    }
    // Deserialize each history column, skipping entries that fail to parse.
    List<MessageHistory> history = Lists.newArrayList();
    for (Column<UUID> column : columns) {
        try {
            history.add(deserializeString(column.getStringValue(), MessageHistory.class));
        } catch (Exception e) {
            LOG.info("Error deserializing history entry", e);
        }
    }
    return history;
}
/**
 * Iterate through shards attempting to extract itemsToPeek items. Will return
 * once itemToPeek items have been read or all shards have been checked.
 *
 * Note that this call does not take into account the message trigger time and
 * will likely return messages that aren't due to be executed yet.
 * @return List of items
 */
@Override
public List<Message> peekMessages(int itemsToPeek) throws MessageQueueException {
    List<Message> found = Lists.newArrayList();
    for (MessageQueueShard shard : shardReaderPolicy.listShards()) {
        int remaining = itemsToPeek - found.size();
        found.addAll(peekMessages(shard.getName(), remaining));
        if (found.size() == itemsToPeek)
            break;
    }
    return found;
}
/**
 * Peek into messages contained in the shard. This call does not take trigger time into account
 * and will return messages that are not yet due to be executed
 * @param shardName   row key of the shard to read
 * @param itemsToPeek maximum number of messages to return
 * @return messages found in the shard (possibly fewer than itemsToPeek)
 * @throws MessageQueueException on any Cassandra connection failure
 */
private Collection<Message> peekMessages(String shardName, int itemsToPeek) throws MessageQueueException {
try {
// Column range restricted to Message-type entries only (excludes lock and
// metadata columns, which use different type ordinals).
ColumnList<MessageQueueEntry> result = keyspace.prepareQuery(queueColumnFamily)
.setConsistencyLevel(consistencyLevel)
.getKey(shardName)
.withColumnRange(new RangeBuilder()
.setLimit(itemsToPeek)
.setStart(entrySerializer
.makeEndpoint((byte)MessageQueueEntryType.Message.ordinal(), Equality.GREATER_THAN_EQUALS)
.toBytes())
.setEnd(entrySerializer
.makeEndpoint((byte)MessageQueueEntryType.Message.ordinal(), Equality.LESS_THAN_EQUALS)
.toBytes())
.build())
.execute()
.getResult();
List<Message> messages = Lists.newArrayListWithCapacity(result.size());
for (Column<MessageQueueEntry> column : result) {
// Columns whose payload fails to deserialize yield null and are skipped.
Message message = extractMessageFromColumn(column);
if (message != null)
messages.add(message);
}
return messages;
} catch (ConnectionException e) {
throw new MessageQueueException("Error peeking for messages from shard " + shardName, e);
}
}
/**
 * Extract a message body from a column
 * @param column column whose value holds the JSON-serialized message
 * @return the parsed message, the invalid-message handler's substitute, or
 *         null if both parsing and the handler fail
 */
Message extractMessageFromColumn(Column<MessageQueueEntry> column) {
    try {
        ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
        return mapper.readValue(bais, Message.class);
    } catch (Exception e) {
        LOG.warn("Error processing message ", e);
    }
    // JSON parsing failed; give the invalid-message handler a chance to recover.
    try {
        return invalidMessageHandler.apply(column.getStringValue());
    } catch (Exception e2) {
        LOG.warn("Error processing invalid message", e2);
        return null;
    }
}
/**
 * Fast check to see if a shard has messages to process
 * @param shardName row key of the shard to check
 * @return true if at least one Message-type column is due at or before now
 * @throws MessageQueueException on any Cassandra connection failure
 */
private boolean hasMessages(String shardName) throws MessageQueueException {
UUID currentTime = TimeUUIDUtils.getUniqueTimeUUIDinMicros();
try {
ColumnList<MessageQueueEntry> result = keyspace.prepareQuery(queueColumnFamily)
.setConsistencyLevel(consistencyLevel)
.getKey(shardName)
.withColumnRange(new RangeBuilder()
.setLimit(1) // Read extra messages because of the lock column
.setStart(entrySerializer
.makeEndpoint((byte)MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
.toBytes()
)
// End of range: Message-type columns with priority 0 whose trigger
// time is at or before currentTime
.setEnd(entrySerializer
.makeEndpoint((byte)MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
.append((byte)0, Equality.EQUAL)
.append(currentTime, Equality.LESS_THAN_EQUALS)
.toBytes()
)
.build())
.execute()
.getResult();
return !result.isEmpty();
} catch (ConnectionException e) {
throw new MessageQueueException("Error checking shard for messages. " + shardName, e);
}
}
@Override
public Map<String, MessageQueueShardStats> getShardStats() {
// Per-instance counters maintained by the shard reader policy (not a
// cluster-wide count; see getShardCounts() for actual shard sizes).
return shardReaderPolicy.getShardStats();
}
/** @return the policy used to select and iterate this queue's shards */
public ShardReaderPolicy getShardReaderPolicy() {
return shardReaderPolicy;
}
/** @return column family holding the queue's shard rows */
public ColumnFamily<String, MessageQueueEntry> getQueueColumnFamily() {
return this.queueColumnFamily;
}
/** @return column family mapping unique message keys to message ids */
public ColumnFamily<String, MessageMetadataEntry> getKeyIndexColumnFamily() {
return this.keyIndexColumnFamily;
}
/** @return column family holding per-key message history */
public ColumnFamily<String, UUID> getHistoryColumnFamily() {
return this.historyColumnFamily;
}
}
| 7,932 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageMetadataEntry.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import com.netflix.astyanax.annotations.Component;
import com.netflix.astyanax.util.TimeUUIDUtils;
/**
 * Composite column name used in the key index column family: a type
 * discriminator followed by a string payload (message id, field name, or a
 * TimeUUID for unique/lock columns).
 */
public class MessageMetadataEntry {
    /**
     * Type of column. See MessageMetadataType
     */
    @Component(ordinal=0)
    private Byte type;

    @Component(ordinal=1)
    private String name;

    public MessageMetadataEntry() {
    }

    public MessageMetadataEntry(MessageMetadataEntryType type, String name) {
        this.type = (byte)type.ordinal();
        this.name = name;
    }

    public Byte getType() {
        return type;
    }

    public MessageMetadataEntryType getMetadataType() {
        return MessageMetadataEntryType.values()[type];
    }

    public void setType(Byte type) {
        this.type = type;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    /** Entry pointing at a queue message id. */
    public static MessageMetadataEntry newMessageId(String messageId) {
        return new MessageMetadataEntry(MessageMetadataEntryType.MessageId, messageId);
    }

    /** Named field entry. */
    public static MessageMetadataEntry newField(String name) {
        return new MessageMetadataEntry(MessageMetadataEntryType.Field, name);
    }

    /** Uniqueness-check entry named by a fresh TimeUUID. */
    public static MessageMetadataEntry newUnique() {
        return new MessageMetadataEntry(MessageMetadataEntryType.Unique, TimeUUIDUtils.getUniqueTimeUUIDinMicros().toString());
    }

    /** Lock entry named by a fresh TimeUUID. */
    public static MessageMetadataEntry newLock() {
        return new MessageMetadataEntry(MessageMetadataEntryType.Lock, TimeUUIDUtils.getUniqueTimeUUIDinMicros().toString());
    }

    @Override
    public int hashCode() {
        // Same 31-prime formula as the classic generated form (name, then type)
        // so hash values are unchanged.
        int result = 31 + (name == null ? 0 : name.hashCode());
        return 31 * result + (type == null ? 0 : type.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        MessageMetadataEntry other = (MessageMetadataEntry) obj;
        boolean sameName = (name == null) ? other.name == null : name.equals(other.name);
        boolean sameType = (type == null) ? other.type == null : type.equals(other.type);
        return sameName && sameType;
    }

    @Override
    public String toString() {
        return "MessageMetadata [type=" + MessageMetadataEntryType.values()[type] + ", name=" + name + "]";
    }
}
| 7,933 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueue.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
* Base interface for a distributed message queue.
*
* Common use pattern
*
* MessageQueue queue = ...;
* List<Message> messages = queue.readMessages(10);
* for (Message message : messages) {
* try {
* // Do something with this message
* }
* finally {
* queue.ackMessage(message);
* }
* }
*
* @author elandau
*
*/
public interface MessageQueue {
/**
 * Return the number of messages in the queue. This is an estimate.
 * This is an expensive operation and should be used sparingly.
 * @return Number of messages, including messages currently being processed
 */
long getMessageCount() throws MessageQueueException;
/**
 * Clear all messages in the queue
 * @throws MessageQueueException
 */
void clearMessages() throws MessageQueueException;
/**
 * Create the underlying storage
 * @throws MessageQueueException
 */
void createStorage() throws MessageQueueException;
/**
 * Destroy the storage associated with this column family
 * @throws MessageQueueException
 */
void dropStorage() throws MessageQueueException;
/**
 * Create any metadata in the storage necessary for the queue
 * @throws MessageQueueException
 */
void createQueue() throws MessageQueueException;
/**
 * Deletes all the rows for this queue. This will not
 * delete any 'key' columns'
 *
 * @throws MessageQueueException
 */
void deleteQueue() throws MessageQueueException;
/**
 * Read a specific message from the queue. The message isn't modified or removed from the queue.
 *
 * @param messageId Message id returned from MessageProducer.sendMessage
 * @return the message, or null if it does not exist
 * @throws MessageQueueException
 */
Message peekMessage(String messageId) throws MessageQueueException;
/**
 * Peek into messages from the queue. The queue state is not altered by this operation.
 * @param itemsToPeek maximum number of messages to return
 * @return up to itemsToPeek messages
 * @throws MessageQueueException
 */
List<Message> peekMessages(int itemsToPeek) throws MessageQueueException;
/**
 * Read a specific message from the queue. The message isn't modified or removed from the queue.
 * This operation will require a lookup of key to messageId
 *
 * @param key unique message key provided at send time
 * @return the first message found for the key, or null
 * @throws MessageQueueException
 */
Message peekMessageByKey(String key) throws MessageQueueException;
/**
 * Return list of pending messages associated with the key.
 *
 * @param key unique message key provided at send time
 * @return pending messages for the key
 * @throws MessageQueueException
 */
List<Message> peekMessagesByKey(String key) throws MessageQueueException;
/**
 * Read history for the specified key
 * @param key unique message key
 * @return history entries recorded for the key
 * @throws MessageQueueException
 */
List<MessageHistory> getKeyHistory(String key, Long startTime, Long endTime, int count) throws MessageQueueException;
/**
 * Delete a specific message from the queue.
 * @param messageId Message id returned from MessageProducer.sendMessage
 * @throws MessageQueueException
 */
void deleteMessage(String messageId) throws MessageQueueException;
/**
 * Delete a message using the specified key. This operation will require a lookup of key to messageId
 * prior to deleting the message
 * @param key unique message key
 * @return true if any items were deleted
 * @throws MessageQueueException
 */
boolean deleteMessageByKey(String key) throws MessageQueueException;
/**
 * Delete a set of messages
 * @param messageIds ids returned from MessageProducer.sendMessage
 * @throws MessageQueueException
 */
void deleteMessages(Collection<String> messageIds) throws MessageQueueException;
/**
 * Get the counts for each shard in the queue. This is an estimate.
 * This is an expensive operation and should be used sparingly.
 * @return map of shard name to column count
 * @throws MessageQueueException
 */
Map<String, Integer> getShardCounts() throws MessageQueueException;
/**
 * Return a map of shards and their stats for THIS instance of the queue.
 * These counts are only for the lifetime of this instance and are only incremented
 * by operations performed by this instance. For actual shard sizes
 * call getShardCounts();
 * @return per-instance shard statistics
 */
Map<String, MessageQueueShardStats> getShardStats();
/**
 * Create a consumer of the message queue. The consumer will have its own context
 *
 * @return a new consumer
 * @throws MessageQueueException
 */
MessageConsumer createConsumer();
/**
 * Create a producer of messages for this queue.
 * @return a new producer
 * @throws MessageQueueException
 */
MessageProducer createProducer();
/**
 * Return the queue's unique name
 * @return the queue name
 */
String getName();
}
| 7,934 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/DuplicateMessageException.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * Signals that a message with the same unique key already exists in the queue.
 */
public class DuplicateMessageException extends Exception {
    private static final long serialVersionUID = 3917437309288808628L;

    /** @param message description of the duplicate condition */
    public DuplicateMessageException(String message) {
        super(message);
    }

    /** @param t underlying cause */
    public DuplicateMessageException(Throwable t) {
        super(t);
    }

    /**
     * @param message description of the duplicate condition
     * @param cause   underlying cause
     */
    public DuplicateMessageException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 7,935 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueEntryType.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * Discriminator stored as the first component of a queue entry's composite
 * column name. Ordinal values are persisted in Cassandra, so the order of the
 * constants must never change.
 */
enum MessageQueueEntryType {
// InternalEvent, // Internal event
Metadata,
Lock, // Lock column
Message, // Event in the queue
}
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/MessageQueueEntry.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import com.netflix.astyanax.annotations.Component;
import com.netflix.astyanax.util.TimeUUIDUtils;
public class MessageQueueEntry {
private static final String ID_DELIMITER = ":";
/**
* Type of column.
* 0 - Lock
* 1 - Queue item
*/
@Component(ordinal=0)
private Byte type;
@Component(ordinal=1)
private Byte priority;
/**
* Time when item is to be processed
*/
@Component(ordinal=2)
private UUID timestamp;
/**
* Random number to help ensure each entry is unique
*/
@Component(ordinal=3)
private UUID random;
/**
*
*/
@Component(ordinal=4)
private Byte state;
public MessageQueueEntry() {
}
public MessageQueueEntry(String id) {
String[] parts = StringUtils.split(id, ID_DELIMITER);
if (parts.length != 5)
throw new RuntimeException("Invalid message ID. Expection <type>:<priority>:<timestamp>:<random>:<state> but got " + id);
type = Byte.parseByte(parts[0]);
priority = Byte.parseByte(parts[1]);
timestamp = UUID.fromString (parts[2]);
random = UUID.fromString (parts[3]);
state = Byte.parseByte(parts[4]);
}
private MessageQueueEntry(MessageQueueEntryType type, byte priority, UUID timestamp, UUID random, MessageQueueEntryState state) {
super();
this.type = (byte)type.ordinal();
this.priority = 0;
this.timestamp = timestamp;
this.state = (byte)state.ordinal();
if (random == null)
this.random = TimeUUIDUtils.getUniqueTimeUUIDinMicros();
else
this.random = random;
}
public static MessageQueueEntry newLockEntry(MessageQueueEntryState state) {
return new MessageQueueEntry(MessageQueueEntryType.Lock, (byte)0, TimeUUIDUtils.getUniqueTimeUUIDinMicros(), null, state);
}
public static MessageQueueEntry newLockEntry(UUID timestamp, MessageQueueEntryState state) {
return new MessageQueueEntry(MessageQueueEntryType.Lock, (byte)0, timestamp, null, state);
}
public static MessageQueueEntry newMetadataEntry() {
return new MessageQueueEntry(MessageQueueEntryType.Metadata, (byte)0, null, TimeUUIDUtils.getMicrosTimeUUID(0), MessageQueueEntryState.None);
}
public static MessageQueueEntry newMessageEntry(byte priority, UUID timestamp, MessageQueueEntryState state) {
return new MessageQueueEntry(MessageQueueEntryType.Message, priority, timestamp, null, state);
}
public static MessageQueueEntry newBusyEntry(Message message) {
return new MessageQueueEntry(MessageQueueEntryType.Message, (byte)message.getPriority(), message.getToken(), message.getRandom(), MessageQueueEntryState.Busy);
}
public static MessageQueueEntry fromMetadata(MessageMetadataEntry meta) {
String parts[] = StringUtils.split(meta.getName(), "$");
return new MessageQueueEntry(parts[1]);
}
public MessageQueueEntryType getType() {
return MessageQueueEntryType.values()[type];
}
public UUID getTimestamp() {
return timestamp;
}
public long getTimestamp(TimeUnit units) {
return units.convert(TimeUUIDUtils.getMicrosTimeFromUUID(timestamp), TimeUnit.MICROSECONDS);
}
public MessageQueueEntryState getState() {
return MessageQueueEntryState.values()[state];
}
public byte getPriority() {
return priority;
}
public void setType(Byte type) {
this.type = type;
}
public void setTimestamp(UUID timestamp) {
this.timestamp = timestamp;
}
public void setState(Byte state) {
this.state = state;
}
    /**
     * Bean-style setter for the priority byte.  NOTE(review): the method
     * name is misspelled ("Priorty") but is kept as-is because bean-property
     * (de)serializers may bind to this exact name — renaming would be a
     * breaking change.
     */
    public void setPriorty(Byte priority) {
        this.priority = priority;
    }
    /**
     * Serializes this entry to its canonical message-id string: type,
     * priority, timestamp, random and state joined by ID_DELIMITER (declared
     * elsewhere in this class; presumably "$", matching the separator used
     * by fromMetadata() — confirm against the field definition).
     *
     * NOTE: timestamp and random must be non-null here or this throws a
     * NullPointerException.
     */
    public String getMessageId() {
        return new StringBuilder()
            .append(type)                .append(ID_DELIMITER)
            .append(priority)            .append(ID_DELIMITER)
            .append(timestamp.toString()).append(ID_DELIMITER)
            .append(random.toString())   .append(ID_DELIMITER)
            .append(state)
            .toString();
    }
    /** @return the random UUID component used to disambiguate entries with equal timestamps */
    public UUID getRandom() {
        return random;
    }
    /** Bean-style setter for the random UUID component. */
    public void setRandom(UUID random) {
        this.random = random;
    }
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("MessageQueueEntry [");
sb.append( "type=" + MessageQueueEntryType.values()[type]);
sb.append(", priority=" + priority);
if (timestamp != null)
sb.append(", timestamp=" + timestamp + "(" + TimeUUIDUtils.getMicrosTimeFromUUID(timestamp) + ")");
sb.append(", random=" + random);
sb.append(", state=" + MessageQueueEntryState.values()[state]);
sb.append("]");
return sb.toString();
}
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Collection;
import com.netflix.astyanax.MutationBatch;
/**
 * No-op implementation of {@link MessageQueueHooks}.  Extend this class and
 * override only the callbacks you need instead of implementing the whole
 * interface.
 */
public class BaseQueueHook implements MessageQueueHooks {
    /** No-op; invoked before a batch of messages is acked. */
    @Override
    public void beforeAckMessages(Collection<Message> message, MutationBatch mb) {
    }

    /** No-op; invoked before a single message is acked. */
    @Override
    public void beforeAckMessage(Message message, MutationBatch mb) {
    }

    /** No-op; invoked before a message is sent. */
    @Override
    public void beforeSendMessage(Message message, MutationBatch mb) {
    }
}
| 7,938 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * Raised when inserting a message whose unique key is already present —
 * presumably detected against the queue's key index (verify at call sites).
 */
public class KeyExistsException extends MessageQueueException {
    private static final long serialVersionUID = 3917437309288808628L;

    public KeyExistsException(String message) {
        super(message);
    }

    public KeyExistsException(Throwable t) {
        super(t);
    }

    public KeyExistsException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 7,939 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * MessageQueueSettings settings that are persisted to cassandra.
 *
 * Empty legacy subclass kept only for backward compatibility with code that
 * still references the old type name.
 *
 * @deprecated Use MessageQueueMetadata directly instead
 */
@Deprecated
public class MessageQueueSettings extends MessageQueueMetadata {
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
 * Lifecycle states of a column entry in the sharded message queue.
 *
 * WARNING: the state is persisted as the enum ordinal (MessageQueueEntry
 * stores {@code (byte) state.ordinal()} and decodes with
 * {@code values()[state]}).  Never reorder or remove constants; append new
 * ones at the end only.
 */
enum MessageQueueEntryState {
    None,
    Waiting,
    Busy,
    Done,
    Acquired,
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.Collection;
import java.util.Map;
/**
 * Immutable result of a batch send operation: the messages that were
 * persisted (keyed by message id) and the ones rejected as not unique.
 */
public class SendMessageResponse {
    /**
     * Map of messageId to successfully sent Message.
     */
    private final Map<String, Message> messages;

    /**
     * Messages that were rejected because they are not unique.
     */
    private final Collection<Message> notUnique;

    public SendMessageResponse(Map<String, Message> success, Collection<Message> notUnique) {
        this.messages = success;
        this.notUnique = notUnique;
    }

    /** @return messageId-to-Message map of successfully sent messages */
    public Map<String, Message> getMessages() {
        return messages;
    }

    /** @return messages rejected as duplicates of an existing unique key */
    public Collection<Message> getNotUnique() {
        return notUnique;
    }
}
| 7,942 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.UUID;
/**
* Track history for a single execution of a task
*
* @author elandau
*
*/
public class MessageHistory {
    // Time-UUID identifying this specific execution; MessageConsumerImpl also
    // uses it as the column name when persisting history rows.
    private UUID token;

    /**
     * Time when the task was supposed to be triggered.
     * NOTE(review): MessageConsumerImpl sets this from
     * Trigger.getTriggerTime() (milliseconds) while startTime/endTime are set
     * in microseconds — units appear inconsistent; confirm before comparing.
     */
    private long triggerTime;

    /**
     * Time when the task was actually triggered (microseconds, as set by
     * MessageConsumerImpl).
     */
    private long startTime;

    /**
     * Time when task processing ended (microseconds, as set by
     * MessageConsumerImpl / fillAckMutation).
     */
    private long endTime;

    /**
     * Status of task execution
     */
    private MessageStatus status;

    /**
     * Stack trace in the event that the execution failed
     */
    private String stackTrace;

    /**
     * Error that occurred during execution
     */
    private String error;

    public long getTriggerTime() {
        return triggerTime;
    }

    public void setTriggerTime(long triggerTime) {
        this.triggerTime = triggerTime;
    }

    public long getStartTime() {
        return startTime;
    }

    public void setStartTime(long startTime) {
        this.startTime = startTime;
    }

    public long getEndTime() {
        return endTime;
    }

    public void setEndTime(long endTime) {
        this.endTime = endTime;
    }

    public MessageStatus getStatus() {
        return status;
    }

    public void setStatus(MessageStatus status) {
        this.status = status;
    }

    public String getStackTrace() {
        return stackTrace;
    }

    public void setStackTrace(String stackTrace) {
        this.stackTrace = stackTrace;
    }

    public String getError() {
        return error;
    }

    public void setError(String exception) {
        this.error = exception;
    }

    public UUID getToken() {
        return token;
    }

    public void setToken(UUID token) {
        this.token = token;
    }

    @Override
    public String toString() {
        return "MessageHistory [token=" + token + ", triggerTime=" + triggerTime + ", startTime=" + startTime + ", endTime="
                + endTime + ", status=" + status + ", stackTrace=" + stackTrace + ", error=" + error + "]";
    }
}
| 7,943 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import com.google.common.collect.Lists;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Equality;
import com.netflix.astyanax.model.RangeEndpoint;
import com.netflix.astyanax.recipes.locks.BusyLockException;
import com.netflix.astyanax.recipes.queue.triggers.Trigger;
import com.netflix.astyanax.util.RangeBuilder;
import com.netflix.astyanax.util.TimeUUIDUtils;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Message consumer implementation based on the sharded queue.
*
* @author pbhattacharyya
*/
class MessageConsumerImpl implements MessageConsumer {
    private static final Logger LOG = LoggerFactory.getLogger(MessageConsumerImpl.class);

    // Owning queue; all keyspace access, column families, stats and policies
    // are reached through this reference.
    private final ShardedDistributedMessageQueue queue;

    public MessageConsumerImpl(ShardedDistributedMessageQueue q) {
        this.queue = q;
    }

    /** Reads up to itemsToPop messages, blocking indefinitely (timeout == 0). */
    @Override
    public List<MessageContext> readMessages(int itemsToPop) throws MessageQueueException, BusyLockException, InterruptedException {
        return readMessages(itemsToPop, 0, null);
    }

    /**
     * Polls shards (per the shard reader policy) until at least one message
     * is found or the timeout elapses.  Returns an empty list on timeout.
     */
    @Override
    public List<MessageContext> readMessages(int itemsToPop, long timeout, TimeUnit units) throws MessageQueueException, BusyLockException, InterruptedException {
        long timeoutTime = (timeout == 0) ? 0 : System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(timeout, units);
        // Loop while trying to get messages.
        // TODO: Make it possible to cancel this loop
        // TODO: Read full itemsToPop instead of just stopping when we get the first successful set
        List<MessageContext> messages = null;
        while (true) {
            boolean success = false;
            MessageQueueShard partition = queue.shardReaderPolicy.nextShard();
            if (partition != null) {
                try {
                    messages = readAndReturnShard(partition, itemsToPop);
                    success = true;
                    if (messages != null && !messages.isEmpty()) {
                        return messages;
                    }
                } finally {
                    // releaseShard needs to know how many messages were proceed OR if there was an error fetching messages (-1)
                    queue.shardReaderPolicy.releaseShard(partition, success ? (messages == null ? 0 : messages.size()) : -1);
                }
            }
            if (timeoutTime != 0 && System.currentTimeMillis() > timeoutTime) {
                return Lists.newLinkedList();
            }
            Thread.sleep(queue.shardReaderPolicy.getPollInterval());
        }
    }

    /** Non-destructive read; delegates to the queue. */
    @Override
    public List<Message> peekMessages(int itemsToPeek) throws MessageQueueException {
        return queue.peekMessages(itemsToPeek);
    }

    /**
     * Reads one shard and records an empty-partition stat when nothing was
     * returned (including on error, since messages stays null).
     */
    private List<MessageContext> readAndReturnShard(MessageQueueShard shard, int itemsToPop) throws MessageQueueException, BusyLockException, InterruptedException {
        List<MessageContext> messages = null;
        try {
            messages = readMessagesFromShard(shard.getName(), itemsToPop);
        } finally {
            if (messages == null || messages.isEmpty()) {
                queue.stats.incEmptyPartitionCount();
            }
        }
        return messages;
    }

    /**
     * Dispatches to the external lock manager when one is configured,
     * otherwise to the column-based default locking scheme.
     */
    @Override
    public List<MessageContext> readMessagesFromShard(String shardName, int itemsToPop) throws MessageQueueException, BusyLockException {
        if(queue.lockManager != null) {
            return readMessagesFromShardUsingLockManager(shardName, itemsToPop);
        }
        return readMessagesFromShardUsingDefaultLock(shardName, itemsToPop);
    }

    /**
     * Reads a shard under an externally managed lock.  No lock column is
     * written, so readMessagesInternal is called with lockColumn == null.
     */
    List<MessageContext> readMessagesFromShardUsingLockManager(String shardName, int itemToPop) throws MessageQueueException, BusyLockException {
        ShardLock lock = null;
        try {
            lock = queue.lockManager.acquireLock(shardName);
            MutationBatch m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
            ColumnListMutation<MessageQueueEntry> rowMutation = m.withRow(queue.queueColumnFamily, shardName);
            long curTimeMicros = TimeUUIDUtils.getMicrosTimeFromUUID(TimeUUIDUtils.getUniqueTimeUUIDinMicros());
            return readMessagesInternal(shardName, itemToPop, 0, null, rowMutation, m, curTimeMicros);
        } catch (BusyLockException e) {
            queue.stats.incLockContentionCount();
            throw e;
        } catch (Exception e) {
            LOG.error("Error reading shard " + shardName, e);
            throw new MessageQueueException("Error", e);
        } finally {
            queue.lockManager.releaseLock(lock);
        }
    }

    /**
     * Reads a shard using the column-based locking scheme: write a lock
     * column with a fresh time-UUID, read back all lock columns, and only
     * proceed when our column is the first non-expired, non-acquired lock.
     * Contention raises BusyLockException (and bumps the contention stat).
     */
    List<MessageContext> readMessagesFromShardUsingDefaultLock(String shardName, int itemsToPop) throws MessageQueueException, BusyLockException {
        MutationBatch m = null;
        MessageQueueEntry lockColumn = null;
        ColumnListMutation<MessageQueueEntry> rowMutation = null;
        int lockColumnCount = 0;
        // Try locking first
        try {
            // 1. Write the lock column
            lockColumn = MessageQueueEntry.newLockEntry(MessageQueueEntryState.None);
            long curTimeMicros = TimeUUIDUtils.getTimeFromUUID(lockColumn.getTimestamp());
            m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
            m.withRow(queue.queueColumnFamily, shardName).putColumn(lockColumn, curTimeMicros + queue.lockTimeout, queue.lockTtl);
            m.execute();
            // 2. Read back lock columns and entries
            ColumnList<MessageQueueEntry> result = queue.keyspace.prepareQuery(queue.queueColumnFamily).setConsistencyLevel(queue.consistencyLevel).getKey(shardName)
                    .withColumnRange(ShardedDistributedMessageQueue.entrySerializer
                    .buildRange()
                    .greaterThanEquals((byte) MessageQueueEntryType.Lock.ordinal())
                    .lessThanEquals((byte) MessageQueueEntryType.Lock.ordinal())
                    .build()
                    )
                    .execute()
                    .getResult();
            m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
            rowMutation = m.withRow(queue.queueColumnFamily, shardName);
            rowMutation.deleteColumn(lockColumn);
            int lockCount = 0;
            boolean lockAcquired = false;
            // NOTE(review): lockColumnCount is initialized to result.size()
            // here AND incremented per lock column below, so it ends up
            // roughly double-counted; it is only used to pad the read limit
            // in readMessagesInternal, but confirm this is intentional.
            lockColumnCount = result.size();
            for (Column<MessageQueueEntry> column : result) {
                MessageQueueEntry lock = column.getName();
                if (lock.getType() == MessageQueueEntryType.Lock) {
                    lockColumnCount++;
                    // Stale lock so we can discard it
                    if (column.getLongValue() < curTimeMicros) {
                        queue.stats.incExpiredLockCount();
                        rowMutation.deleteColumn(lock);
                    } else if (lock.getState() == MessageQueueEntryState.Acquired) {
                        throw new BusyLockException("Not first lock");
                    } else {
                        lockCount++;
                        if (lockCount == 1 && lock.getTimestamp().equals(lockColumn.getTimestamp())) {
                            lockAcquired = true;
                        }
                    }
                    // NOTE(review): this check and the acquired-lock re-write
                    // below sit INSIDE the per-column loop, so the acquired
                    // lock column may be re-put once per remaining lock
                    // column; verify against the original algorithm intent.
                    if (!lockAcquired) {
                        throw new BusyLockException("Not first lock");
                    }
                    // Write the acquired lock column
                    lockColumn = MessageQueueEntry.newLockEntry(lockColumn.getTimestamp(), MessageQueueEntryState.Acquired);
                    rowMutation.putColumn(lockColumn, curTimeMicros + queue.lockTimeout, queue.lockTtl);
                }
            }
        } catch (BusyLockException e) {
            queue.stats.incLockContentionCount();
            throw e;
        } catch (ConnectionException e) {
            LOG.error("Error reading shard " + shardName, e);
            throw new MessageQueueException("Error", e);
        } finally {
            try {
                // Commits either the acquired-lock rewrite or the lock
                // cleanup batched above.
                m.execute();
            } catch (Exception e) {
                throw new MessageQueueException("Error committing lock", e);
            }
        }
        long curTimeMicros = TimeUUIDUtils.getMicrosTimeFromUUID(lockColumn.getTimestamp());
        m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
        // First, release the lock column
        rowMutation = m.withRow(queue.queueColumnFamily, shardName);
        rowMutation.deleteColumn(lockColumn);
        return readMessagesInternal(shardName, itemsToPop, lockColumnCount, lockColumn, rowMutation, m, curTimeMicros);
    }

    /** Acks a single message (removes its timeout entry and key index entry). */
    @Override
    public void ackMessage(MessageContext context) throws MessageQueueException {
        MutationBatch mb = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
        fillAckMutation(context, mb);
        try {
            mb.execute();
        } catch (ConnectionException e) {
            throw new MessageQueueException("Failed to ack message", e);
        }
    }

    /** Acks a batch of messages in a single mutation. */
    @Override
    public void ackMessages(Collection<MessageContext> messages) throws MessageQueueException {
        MutationBatch mb = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
        for (MessageContext context : messages) {
            fillAckMutation(context, mb);
        }
        try {
            mb.execute();
        } catch (ConnectionException e) {
            throw new MessageQueueException("Failed to ack messages", e);
        }
    }

    /**
     * Batches everything required to ack one message: delete the busy/timeout
     * entry, tombstone the key-index entry, persist history (if enabled), run
     * the beforeAck hooks, and enqueue the next recurrence if one was staged.
     */
    private void fillAckMutation(MessageContext context, MutationBatch mb) {
        queue.stats.incAckMessageCount();
        Message message = context.getMessage();
        // Token refers to the timeout event. If 0 (i.e. no) timeout was specified
        // then the token will not exist
        if (message.getToken() != null) {
            MessageQueueEntry entry = MessageQueueEntry.newBusyEntry(message);
            // Remove timeout entry from the queue
            mb.withRow(queue.queueColumnFamily, queue.getShardKey(message)).deleteColumn(entry);
            // Remove entry lookup from the key, if one exists
            if (message.hasKey()) {
                mb.withRow(queue.keyIndexColumnFamily, queue.getCompositeKey(queue.getName(), message.getKey()))
                        .putEmptyColumn(MessageMetadataEntry.newMessageId(queue.getCompositeKey(queue.getShardKey(message), entry.getMessageId())), queue.metadataDeleteTTL);
                if (message.isKeepHistory()) {
                    MessageHistory history = context.getHistory();
                    if (history.getStatus() == MessageStatus.RUNNING) {
                        history.setStatus(MessageStatus.DONE);
                    }
                    history.setEndTime(TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS));
                    try {
                        mb.withRow(queue.historyColumnFamily, message.getKey())
                                .putColumn(history.getToken(), queue.serializeToString(context.getHistory()), queue.metadata.getHistoryTtl()); // TTL
                    } catch (Exception e) {
                        // Best effort: history serialization failure must not
                        // block the ack itself.
                        LOG.warn("Error serializing message history for " + message.getKey(), e);
                    }
                }
            }
            // Run hooks
            for (MessageQueueHooks hook : queue.hooks) {
                hook.beforeAckMessage(message, mb);
            }
        }
        if (context.getNextMessage() != null) {
            try {
                queue.fillMessageMutation(mb, context.getNextMessage());
            } catch (MessageQueueException e) {
                LOG.warn("Error filling nextMessage for " + message.getKey(), e);
            }
        }
    }

    /** Currently identical to a normal ack; poison-queue routing is a TODO. */
    @Override
    public void ackPoisonMessage(MessageContext context) throws MessageQueueException {
        // TODO: Remove bad message and add to poison queue
        MutationBatch mb = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
        fillAckMutation(context, mb);
        try {
            mb.execute();
        } catch (ConnectionException e) {
            queue.stats.incPersistError();
            throw new MessageQueueException("Failed to ack messages", e);
        }
    }

    /**
     * Core read loop: fetches due Message columns (bounded by the lock
     * timestamp or current time), deletes each from the shard, handles
     * repeating triggers and duplicate detection via the key index, writes
     * timeout/busy entries and history, and commits everything in the
     * supplied mutation batch (which already releases the lock).
     */
    private List<MessageContext> readMessagesInternal(String shardName,
            int itemsToPop,
            int lockColumnCount,
            MessageQueueEntry lockColumn,
            ColumnListMutation<MessageQueueEntry> rowMutation,
            MutationBatch m,
            long curTimeMicros) throws BusyLockException, MessageQueueException {
        try {
            List<MessageContext> entries = Lists.newArrayList();
            // Range end: Message-type columns, priority 0, up to the lock
            // timestamp (or "now" when no lock column was used).
            RangeEndpoint re = ShardedDistributedMessageQueue.entrySerializer
                    .makeEndpoint((byte) MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
                    .append((byte) 0, Equality.EQUAL);
            if(lockColumn!=null) {
                re.append(lockColumn.getTimestamp(), Equality.LESS_THAN_EQUALS);
            } else {
                re.append(TimeUUIDUtils.getMicrosTimeUUID(curTimeMicros), Equality.LESS_THAN_EQUALS);
            }
            ColumnList<MessageQueueEntry> result = queue.keyspace.prepareQuery(queue.queueColumnFamily)
                    .setConsistencyLevel(queue.consistencyLevel).getKey(shardName).
                    withColumnRange(new RangeBuilder()
                    .setLimit(itemsToPop + (lockColumn == null? 0:(lockColumnCount + 1)))
                    .setEnd(re.toBytes())
                    .build()).execute().getResult();
            for (Column<MessageQueueEntry> column : result) {
                if (itemsToPop == 0) {
                    break;
                }
                MessageQueueEntry entry = column.getName();
                switch (entry.getType()) {
                case Lock:
                    // TODO: Track number of locks read and make sure we don't exceed itemsToPop
                    // We have the lock
                    if (lockColumn != null && entry.getState() == MessageQueueEntryState.Acquired) {
                        if (!entry.getTimestamp().equals(lockColumn.getTimestamp())) {
                            throw new BusyLockException("Someone else snuck in");
                        }
                    }
                    break;
                case Message:
                {
                    try {
                        itemsToPop--;
                        // First, we always want to remove the old item
                        String messageId = queue.getCompositeKey(shardName, entry.getMessageId());
                        rowMutation.deleteColumn(entry);
                        // Next, parse the message metadata and add a timeout entry
                        final Message message = queue.extractMessageFromColumn(column);
                        // Update the message state
                        if (message != null) {
                            MessageContext context = new MessageContext();
                            context.setMessage(message);
                            // Message has a trigger so we need to figure out if it is an
                            // unfinished repeating trigger and re-add it.
                            if (message.hasTrigger()) {
                                // Read back all messageIds associated with this key and check to see if we have duplicates.
                                String groupRowKey = queue.getCompositeKey(queue.getName(), message.getKey());
                                try {
                                    // Use consistency level
                                    ColumnList<MessageMetadataEntry> columns = queue.keyspace.prepareQuery(queue.keyIndexColumnFamily).getRow(groupRowKey).withColumnRange(ShardedDistributedMessageQueue.metadataSerializer.buildRange().greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal()).lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal()).build()).execute().getResult();
                                    MessageMetadataEntry mostRecentMessageMetadata = null;
                                    long mostRecentTriggerTime = 0;
                                    for (Column<MessageMetadataEntry> currMessageEntry : columns) {
                                        MessageQueueEntry pendingMessageEntry = MessageQueueEntry.fromMetadata(currMessageEntry.getName());
                                        // TTL == 0 means the index entry is live (not yet tombstoned).
                                        if (currMessageEntry.getTtl() == 0) {
                                            long currMessageTriggerTime = pendingMessageEntry.getTimestamp(TimeUnit.MICROSECONDS);
                                            // First message we found, so treat as the most recent
                                            if (mostRecentMessageMetadata == null) {
                                                mostRecentMessageMetadata = currMessageEntry.getName();
                                                mostRecentTriggerTime = currMessageTriggerTime;
                                            } else {
                                                // This message's trigger time is after what we thought was the most recent.
                                                // Discard the previous 'most' recent and accept this one instead
                                                if (currMessageTriggerTime > mostRecentTriggerTime) {
                                                    LOG.warn("Need to discard : " + entry.getMessageId() + " => " + mostRecentMessageMetadata.getName());
                                                    m.withRow(queue.keyIndexColumnFamily,
                                                            queue.getCompositeKey(queue.getName(), message.getKey())).putEmptyColumn(mostRecentMessageMetadata, queue.metadataDeleteTTL);
                                                    mostRecentTriggerTime = currMessageTriggerTime;
                                                    mostRecentMessageMetadata = currMessageEntry.getName();
                                                } else {
                                                    LOG.warn("Need to discard : " + entry.getMessageId() + " => " + currMessageEntry.getName());
                                                    m.withRow(queue.keyIndexColumnFamily,
                                                            queue.getCompositeKey(queue.getName(), message.getKey())).putEmptyColumn(currMessageEntry.getName(), queue.metadataDeleteTTL);
                                                }
                                            }
                                        }
                                    }
                                    if (mostRecentMessageMetadata != null) {
                                        if (!mostRecentMessageMetadata.getName().endsWith(entry.getMessageId())) {
                                            throw new DuplicateMessageException("Duplicate trigger for " + messageId);
                                        }
                                    }
                                } catch (NotFoundException e) {
                                    // No key index row: nothing to deduplicate.
                                } catch (ConnectionException e) {
                                    throw new MessageQueueException("Error fetching row " + groupRowKey, e);
                                }
                                // Update the trigger
                                final Message nextMessage;
                                Trigger trigger = message.getTrigger().nextTrigger();
                                if (trigger != null) {
                                    nextMessage = message.clone();
                                    nextMessage.setTrigger(trigger);
                                    context.setNextMessage(nextMessage);
                                    if (message.isAutoCommitTrigger()) {
                                        queue.fillMessageMutation(m, nextMessage);
                                    }
                                }
                            }
                            // Message has a key so we remove this item from the messages by key index.
                            // A timeout item will be added later
                            if (message.hasKey()) {
                                m.withRow(queue.keyIndexColumnFamily,
                                        queue.getCompositeKey(queue.getName(), message.getKey()))
                                        .putEmptyColumn(MessageMetadataEntry.newMessageId(messageId), queue.metadataDeleteTTL);
                                LOG.debug("Removing from key : " + queue.getCompositeKey(queue.getName(), message.getKey()) + " : " + messageId);
                                if (message.isKeepHistory()) {
                                    MessageHistory history = context.getHistory();
                                    history.setToken(entry.getTimestamp());
                                    history.setStartTime(curTimeMicros);
                                    history.setTriggerTime(message.getTrigger().getTriggerTime());
                                    history.setStatus(MessageStatus.RUNNING);
                                    try {
                                        m.withRow(queue.historyColumnFamily, message.getKey()).putColumn(entry.getTimestamp(), queue.serializeToString(history)
                                                , queue.metadata.getHistoryTtl());
                                    } catch (Exception e) {
                                        LOG.warn("Error serializing history for key '" + message.getKey() + "'", e);
                                    }
                                }
                            }
                            // Message has a timeout so we add a timeout event.
                            if (message.getTimeout() > 0) {
                                // Counter modulo keeps concurrent timeout UUIDs unique.
                                MessageQueueEntry timeoutEntry = MessageQueueEntry.newMessageEntry((byte) 0,
                                        TimeUUIDUtils.getMicrosTimeUUID(curTimeMicros
                                        + TimeUnit.MICROSECONDS.convert(message.getTimeout(), TimeUnit.SECONDS)
                                        + (queue.counter.incrementAndGet() % 1000)), MessageQueueEntryState.Busy);
                                message.setToken(timeoutEntry.getTimestamp());
                                message.setRandom(timeoutEntry.getRandom());
                                m.withRow(queue.queueColumnFamily, queue.getShardKey(message))
                                        .putColumn(timeoutEntry, column.getStringValue(), queue.metadata.getRetentionTimeout());
                                MessageMetadataEntry messageIdEntry = MessageMetadataEntry.newMessageId(queue.getCompositeKey(queue.getShardKey(message), timeoutEntry.getMessageId()));
                                // Add the timeout column to the key
                                if (message.hasKey()) {
                                    m.withRow(queue.keyIndexColumnFamily, queue.getCompositeKey(queue.getName(), message.getKey()))
                                            .putEmptyColumn(messageIdEntry, queue.metadata.getRetentionTimeout());
                                }
                                context.setAckMessageId(messageIdEntry.getName());
                            } else {
                                message.setToken(null);
                            }
                            // Update some stats
                            switch (entry.getState()) {
                            case Waiting:
                                queue.stats.incProcessCount();
                                break;
                            case Busy:
                                queue.stats.incReprocessCount();
                                break;
                            default:
                                LOG.warn("Unknown message state: " + entry.getState());
                                // TODO:
                                break;
                            }
                            entries.add(context);
                        } else {
                            queue.stats.incInvalidMessageCount();
                            // TODO: Add to poison queue
                        }
                    } catch (DuplicateMessageException e) {
                        // OK to ignore this error. All the proper columns will have been deleted in the batch.
                    }
                    break;
                }
                default:
                {
                    // TODO: Error: Unknown type
                    break;
                }
                }
            }
            return entries;
        } catch (BusyLockException e) {
            queue.stats.incLockContentionCount();
            throw e;
        } catch (Exception e) {
            throw new MessageQueueException("Error processing queue shard : " + shardName, e);
        } finally {
            try {
                m.execute();
            } catch (Exception e) {
                throw new MessageQueueException("Error processing queue shard : " + shardName, e);
            }
        }
    }
}
| 7,944 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
/**
* Interface for a queue shard lock.
*
* @author pbhattacharyya
*/
public interface ShardLock {
    /**
     * @return name of the queue shard guarded by this lock instance
     */
    String getShardName();
}
| 7,945 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.triggers;
import java.util.concurrent.TimeUnit;
/**
 * One-shot trigger: fires once at build time plus an optional delay, then
 * never reschedules.
 */
public class RunOnceTrigger extends AbstractTrigger {
    public static class Builder {
        private final RunOnceTrigger instance = new RunOnceTrigger();

        /** Optional delay before the single execution. */
        public Builder withDelay(long delay, TimeUnit units) {
            instance.delay = TimeUnit.MILLISECONDS.convert(delay, units);
            return this;
        }

        /** Stamps the trigger time (now, or now + delay) and returns the trigger. */
        public RunOnceTrigger build() {
            long now = System.currentTimeMillis();
            instance.setTriggerTime(instance.delay == null ? now : now + instance.delay);
            return instance;
        }
    }

    // Delay in milliseconds; null means fire immediately at build time.
    private Long delay;

    @Override
    public Trigger nextTrigger() {
        // One-shot semantics: there is never a next trigger.
        return null;
    }
}
| 7,946 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.triggers;
import java.util.concurrent.TimeUnit;
/**
 * Trigger that fires repeatedly at a fixed interval, optionally bounded by a
 * repeat count and/or an absolute end time.
 */
public class RepeatingTrigger extends AbstractTrigger {
    public static class Builder {
        private RepeatingTrigger trigger = new RepeatingTrigger();

        /** Interval between executions. */
        public Builder withInterval(long interval, TimeUnit units) {
            trigger.interval = TimeUnit.MILLISECONDS.convert(interval, units);
            return this;
        }

        /** Optional delay before the first execution. */
        public Builder withDelay(long delay, TimeUnit units) {
            trigger.delay = TimeUnit.MILLISECONDS.convert(delay, units);
            return this;
        }

        /** Maximum total number of executions. */
        public Builder withRepeatCount(long repeatCount) {
            trigger.repeatCount = repeatCount;
            return this;
        }

        /** Absolute time after which no further executions are scheduled. */
        public Builder withEndTime(long endTime, TimeUnit units) {
            trigger.endTime = TimeUnit.MILLISECONDS.convert(endTime, units);
            return this;
        }

        public RepeatingTrigger build() {
            if (trigger.delay != null)
                trigger.setTriggerTime(System.currentTimeMillis() + trigger.delay);
            else
                trigger.setTriggerTime(System.currentTimeMillis());
            return trigger;
        }
    }

    private Long delay;          // initial delay in milliseconds; null = none
    private long interval = 0;   // repeat interval in milliseconds
    private Long repeatCount;    // max executions; null = unlimited
    private long endTime = 0;    // absolute cutoff in milliseconds; 0 = none

    /**
     * @return the trigger for the next recurrence, or null when the repeat
     *         count is exhausted or the end time has passed
     */
    @Override
    public Trigger nextTrigger() {
        if (repeatCount != null && getExecutionCount() + 1 >= repeatCount) {
            return null;
        }
        long currentTime = System.currentTimeMillis();
        long nextTime = getTriggerTime() + interval;
        if (endTime != 0 && (nextTime > endTime || currentTime > endTime))
            return null;
        RepeatingTrigger next = new RepeatingTrigger();
        next.delay = delay;
        next.interval = interval;
        next.repeatCount = repeatCount;
        // BUGFIX: propagate endTime to the next recurrence.  Previously it
        // was dropped here, so after the first reschedule the new trigger's
        // endTime defaulted to 0 ("no end time") and the cutoff above never
        // applied again.
        next.endTime = endTime;
        next.setExecutionCount(getExecutionCount() + 1);
        // TODO: Handle missed or delayed execution
        next.setTriggerTime(getTriggerTime() + interval);
        return next;
    }

    public Long getDelay() {
        return delay;
    }

    public long getInterval() {
        return interval;
    }

    public Long getRepeatCount() {
        return repeatCount;
    }

    public long getEndTime() {
        return endTime;
    }

    public void setDelay(long delay) {
        this.delay = delay;
    }

    public void setInterval(long interval) {
        this.interval = interval;
    }

    public void setRepeatCount(long repeatCount) {
        this.repeatCount = repeatCount;
    }

    public void setEndTime(long endTime) {
        this.endTime = endTime;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("RepeatingTrigger[interval=" + interval);
        if (delay != null)
            sb.append(", delay=" + delay);
        if (repeatCount != null)
            sb.append(", repeatCount=" + repeatCount);
        if (endTime > 0)
            sb.append(", endTime=" + endTime);
        sb.append("]");
        return sb.toString();
    }
}
| 7,947 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/triggers/AbstractTrigger.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.triggers;
/**
 * Shared state for trigger implementations: the scheduled execution time and a
 * counter of how many times the trigger has fired so far.
 */
public abstract class AbstractTrigger implements Trigger {

    /** Scheduled execution time, in milliseconds. */
    private long triggerTime = 0;

    /** Number of executions already performed. */
    private long executeCount = 0;

    @Override
    public long getTriggerTime() {
        return triggerTime;
    }

    public void setTriggerTime(long triggerTime) {
        this.triggerTime = triggerTime;
    }

    public long getExecutionCount() {
        return executeCount;
    }

    public void setExecutionCount(long executeCount) {
        this.executeCount = executeCount;
    }

    @Override
    public String toString() {
        return new StringBuilder("AbstractTrigger [triggerTime=")
                .append(triggerTime)
                .append(", executeCount=")
                .append(executeCount)
                .append("]")
                .toString();
    }
}
| 7,948 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/triggers/Trigger.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.triggers;
/**
 * Base interface for all triggers.  Triggers specify the scheduling for a task in the scheduler.
 * Different implementations of trigger can specify different triggering semantics.
 *
 * @author elandau
 *
 */
public interface Trigger {
    /**
     * Process the current trigger and produce the next trigger to insert into the queue.
     *
     * @return New trigger to schedule, or null to stop executing the trigger
     */
    Trigger nextTrigger();

    /**
     * Get the current trigger time for this trigger.  This is the time
     * for the next execution of the task.
     *
     * @return Next execution time in milliseconds (implementations in this
     *         package derive it from System.currentTimeMillis())
     */
    long getTriggerTime();
}
| 7,949 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/shard/NoModShardingPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.shard;
import com.netflix.astyanax.recipes.queue.Message;
import com.netflix.astyanax.recipes.queue.MessageQueueMetadata;
/**
 * ModShardPolicy that disables mod sharding entirely by mapping every
 * message to shard 0.
 */
public class NoModShardingPolicy implements ModShardPolicy {
    // Stateless, so a single shared instance is safe; 'final' guards the
    // singleton reference against accidental reassignment.
    private static final NoModShardingPolicy instance = new NoModShardingPolicy();

    public static NoModShardingPolicy getInstance() {
        return instance;
    }

    /** All messages map to the same shard regardless of content. */
    @Override
    public int getMessageShard(Message message, MessageQueueMetadata settings) {
        return 0;
    }
}
| 7,950 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/shard/ShardReaderPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.shard;
import java.util.Collection;
import java.util.Map;
import com.netflix.astyanax.recipes.queue.MessageQueueMetadata;
import com.netflix.astyanax.recipes.queue.MessageQueueShard;
import com.netflix.astyanax.recipes.queue.MessageQueueShardStats;
/**
 * Policy for scheduling shards to be processed
 *
 * @author elandau
 *
 */
public interface ShardReaderPolicy {
    /** Factory for creating a policy instance once queue metadata is available. */
    public static interface Factory {
        public ShardReaderPolicy create(MessageQueueMetadata metadata);
    }

    /**
     * Acquire the next shard to be processed.  Must call releaseShard when done reading
     * from the shard.
     *
     * @return A reference to the acquired shard.
     * @throws InterruptedException if interrupted while waiting for a shard to become available
     */
    public MessageQueueShard nextShard() throws InterruptedException;

    /**
     * Release a shard after acquiring and reading messages.
     *
     * @param shard        the shard previously obtained from nextShard()
     * @param messagesRead number of messages read from the shard; implementations may
     *                     use this to decide whether the shard should be considered idle
     */
    public void releaseShard(MessageQueueShard shard, int messagesRead);

    /**
     * @return List all the shards
     */
    public Collection<MessageQueueShard> listShards();

    /**
     * @return Return map of all shard stats
     */
    public Map<String, MessageQueueShardStats> getShardStats();

    /**
     * @return number of shards in the work or active queue
     */
    public int getWorkQueueDepth();

    /**
     * @return number of shards in the idle queue
     */
    public int getIdleQueueDepth();

    /**
     * A ShardReaderPolicy is in catch up mode when more than two time buckets are in the work or active queue.
     *
     * @return is the shard reader catching up.
     */
    public boolean isCatchingUp();

    /**
     * @return the correct polling interval: if in catch up mode returns the catchUpPollInterval, otherwise
     *         returns the "normal" pollInterval
     */
    public long getPollInterval();
}
| 7,951 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/shard/ModShardPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.shard;
import com.netflix.astyanax.recipes.queue.Message;
import com.netflix.astyanax.recipes.queue.MessageQueueMetadata;
/**
 * Policy for mod sharding within a time partition
 *
 * @author elandau
 *
 */
public interface ModShardPolicy {
    /**
     * Return the mod shard for the specified message.  The shard can be based
     * on any message attribute such as the schedule time or the message key.
     *
     * @param message  message being placed into the queue
     * @param settings queue metadata (implementations in this package use it for the shard count)
     * @return shard index for the message
     */
    int getMessageShard(Message message, MessageQueueMetadata settings);
}
| 7,952 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/shard/TimePartitionedShardReaderPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.shard;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Queues;
import com.netflix.astyanax.recipes.queue.MessageQueueMetadata;
import com.netflix.astyanax.recipes.queue.MessageQueueShard;
import com.netflix.astyanax.recipes.queue.MessageQueueShardStats;
/**
 * ShardReaderPolicy that rotates shards between a 'work' queue (shards that may
 * have ready messages) and an 'idle' queue (shards whose time partition is not
 * current and which yielded no messages), so readers concentrate on shards that
 * can actually contain work.
 */
public class TimePartitionedShardReaderPolicy implements ShardReaderPolicy {
    public static final long DEFAULT_POLLING_INTERVAL = 1000;
    public static final long NO_CATCHUP_POLLING_INTERVAL = 0;

    /** Factory + builder for configuring polling intervals before queue metadata is known. */
    public static class Factory implements ShardReaderPolicy.Factory {
        public static class Builder {
            private long pollingInterval = DEFAULT_POLLING_INTERVAL;
            private long catchupPollingInterval = NO_CATCHUP_POLLING_INTERVAL;

            /** Normal poll interval, converted to milliseconds. */
            public Builder withPollingInterval(long pollingInterval, TimeUnit units) {
                this.pollingInterval = TimeUnit.MILLISECONDS.convert(pollingInterval, units);
                return this;
            }

            /** Poll interval used while in catch-up mode, converted to milliseconds. */
            public Builder withCatchupPollingInterval(long catchupPollingInterval, TimeUnit units) {
                this.catchupPollingInterval = TimeUnit.MILLISECONDS.convert(catchupPollingInterval, units);
                return this;
            }

            public Factory build() {
                return new Factory(this);
            }
        }

        public static Builder builder() {
            return new Builder();
        }

        public Factory(Builder builder) {
            this.builder = builder;
        }

        private final Builder builder;

        @Override
        public ShardReaderPolicy create(MessageQueueMetadata metadata) {
            return new TimePartitionedShardReaderPolicy(builder, metadata);
        }
    }

    // Separator used when composing shard names: "<queue>:<partition>:<shard>"
    private static final String SEPARATOR = ":";

    private final MessageQueueMetadata settings;
    // Full, fixed set of shards created at construction time.
    private final List<MessageQueueShard> shards;
    // Per-shard stats, keyed by shard name (the shard objects double as their own stats).
    private final Map<String, MessageQueueShardStats> shardStats;
    private final LinkedBlockingQueue<MessageQueueShard> workQueue = Queues.newLinkedBlockingQueue();
    private final LinkedBlockingQueue<MessageQueueShard> idleQueue = Queues.newLinkedBlockingQueue();
    private final long pollingInterval;
    private final long catchupPollingInterval;

    // Index of the time partition currently being processed; -1 forces the first
    // nextShard() call to run the partition-transition logic.
    private int currentTimePartition = -1;

    private TimePartitionedShardReaderPolicy(Factory.Builder builder, MessageQueueMetadata metadata) {
        this.settings = metadata;
        this.pollingInterval = builder.pollingInterval;
        this.catchupPollingInterval = builder.catchupPollingInterval;

        // Create one shard object per (time partition, mod shard) combination.
        shards = Lists.newArrayListWithCapacity(metadata.getPartitionCount() * metadata.getShardCount());
        for (int i = 0; i < metadata.getPartitionCount(); i++) {
            for (int j = 0; j < metadata.getShardCount(); j++) {
                shards.add(new MessageQueueShard(metadata.getQueueName() + SEPARATOR + i + SEPARATOR + j, i, j));
            }
        }

        List<MessageQueueShard> queues = Lists.newArrayList();
        shardStats = Maps.newHashMapWithExpectedSize(shards.size());
        for (MessageQueueShard shard : shards) {
            queues.add(shard);
            shardStats.put(shard.getName(), shard);
        }

        // Shuffle so concurrent readers don't all start hammering the same shard.
        Collections.shuffle(queues);
        workQueue.addAll(queues);
    }

    /**
     * Compute the index of the time partition that "now" falls into.
     * NOTE(review): the current time is converted to MICROSECONDS before dividing
     * by getPartitionDuration(), which implies the partition duration is expressed
     * in microseconds -- confirm against MessageQueueMetadata.
     */
    private int getCurrentPartitionIndex() {
        if (settings.getPartitionCount() <= 1)
            return 0;
        return (int) ((TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                / settings.getPartitionDuration()) % settings.getPartitionCount());
    }

    @Override
    public MessageQueueShard nextShard() throws InterruptedException {
        // We transitioned to a new time partition
        int timePartition = getCurrentPartitionIndex();
        if (timePartition != currentTimePartition) {
            synchronized (this) {
                // Double check (currentTimePartition may have been advanced by
                // another thread between the unsynchronized read and here)
                if (timePartition != currentTimePartition) {
                    currentTimePartition = timePartition;

                    // Drain the idle queue and transfer all shards from the
                    // current partition to the work queue
                    List<MessageQueueShard> temp = Lists.newArrayListWithCapacity(idleQueue.size());
                    idleQueue.drainTo(temp);
                    for (MessageQueueShard partition : temp) {
                        if (partition.getPartition() == currentTimePartition) {
                            workQueue.add(partition);
                        }
                        else {
                            idleQueue.add(partition);
                        }
                    }
                }
            }
        }

        // This should only block if we have more client threads than mod shards in the queue,
        // which we would expect to be the case
        return workQueue.take();
    }

    @Override
    public void releaseShard(MessageQueueShard shard, int messagesRead) {
        // Shard is not in the current partition and we didn't find any messages, so let's just put it in the
        // idle queue.  It'll be added back later when in this shard's time partition.
        // May want to randomly check an idle queue when there is nothing in the working queue.
        // A value of -1 in messagesRead means that the consumer had trouble reading messages from the shard
        // (note: such shards go back to the work queue so the read is retried).
        if (shard.getPartition() != currentTimePartition && messagesRead == 0) {
            idleQueue.add(shard);
        }
        else {
            workQueue.add(shard);
        }
    }

    @Override
    public Collection<MessageQueueShard> listShards() {
        return Collections.unmodifiableList(shards);
    }

    @Override
    public Map<String, MessageQueueShardStats> getShardStats() {
        return shardStats;
    }

    @Override
    public int getWorkQueueDepth() {
        return workQueue.size();
    }

    @Override
    public int getIdleQueueDepth() {
        return idleQueue.size();
    }

    @Override
    public boolean isCatchingUp() {
        // if the work queue is larger than two partitions worth of shards we are still playing catch up.
        return getWorkQueueDepth() > (settings.getShardCount() * 2);
    }

    @Override
    public long getPollInterval() {
        // Use the (typically faster) catch-up interval only when configured and behind.
        return (isCatchingUp() && catchupPollingInterval != NO_CATCHUP_POLLING_INTERVAL )
                ? catchupPollingInterval
                : pollingInterval;
    }
}
| 7,953 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/shard/TimeModShardPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.shard;
import com.netflix.astyanax.recipes.queue.Message;
import com.netflix.astyanax.recipes.queue.MessageQueueMetadata;
/**
 * Sharding based on time.  This policy assumes that the
 * next trigger time in the message has a 'unique' incrementing
 * lower bits.
 *
 * @author elandau
 *
 */
public class TimeModShardPolicy implements ModShardPolicy {
    // Stateless singleton; 'final' guards the shared reference against reassignment.
    private static final TimeModShardPolicy instance = new TimeModShardPolicy();

    public static ModShardPolicy getInstance() {
        return instance;
    }

    @Override
    public int getMessageShard(Message message, MessageQueueMetadata settings) {
        // Token time is assumed to be a non-negative timestamp-derived value, so the
        // plain modulo yields an index in [0, shardCount) -- TODO confirm upstream.
        return (int) (message.getTokenTime() % settings.getShardCount());
    }
}
| 7,954 |
0 | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue | Create_ds/astyanax/astyanax-queue/src/main/java/com/netflix/astyanax/recipes/queue/shard/KeyModShardPolicy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue.shard;
import com.netflix.astyanax.recipes.queue.Message;
import com.netflix.astyanax.recipes.queue.MessageQueueMetadata;
/**
 * Sharding based on the key with fallback to time mod sharding
 * when the message has no key.
 *
 * @author elandau
 *
 */
public class KeyModShardPolicy extends TimeModShardPolicy {
    // Stateless singleton; 'final' guards the shared reference against reassignment.
    private static final KeyModShardPolicy instance = new KeyModShardPolicy();

    public static KeyModShardPolicy getInstance() {
        return instance;
    }

    @Override
    public int getMessageShard(Message message, MessageQueueMetadata settings) {
        if (message.hasKey())
            // BUGFIX: String.hashCode() may be negative, and Java's % preserves the
            // sign, which previously produced a negative (invalid) shard index.
            // Masking off the sign bit keeps the result in [0, shardCount).
            return (message.getKey().hashCode() & 0x7fffffff) % settings.getShardCount();
        else
            return super.getMessageShard(message, settings);
    }
}
| 7,955 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/eureka/EurekaBasedHostSupplier.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.eureka;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.MyDataCenterInstanceConfig;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.discovery.DefaultEurekaClientConfig;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.discovery.shared.Application;
/**
 * Simple class that implements {@link Supplier}<{@link List}<{@link Host}>>. It provides a List<{@link Host}>
 * using the {@link DiscoveryManager} which is the eureka client.
 *
 * Note that the class needs the eureka application name to discover all instances for that application.
 *
 * @author poberai
 */
public class EurekaBasedHostSupplier implements Supplier<List<Host>> {

    private static final Logger LOG = LoggerFactory.getLogger(EurekaBasedHostSupplier.class);

    // The C* cluster name (eureka application name) for discovering nodes
    private final String applicationName;

    /**
     * @param applicationName eureka application name of the Cassandra cluster;
     *        upper-cased here, presumably to match eureka's registry naming -- confirm
     */
    public EurekaBasedHostSupplier(String applicationName) {
        this.applicationName = applicationName.toUpperCase();
        // initialize eureka client. make sure eureka properties are properly configured in config.properties
        DiscoveryManager.getInstance().initComponent(new MyDataCenterInstanceConfig(), new DefaultEurekaClientConfig());
    }

    /**
     * Look up all registered instances of the configured application that are
     * currently UP and convert them to {@link Host} entries.  Returns an empty
     * list when the application or its instances are unknown to eureka.
     */
    @Override
    public List<Host> get() {
        DiscoveryClient discoveryClient = DiscoveryManager.getInstance().getDiscoveryClient();
        if (discoveryClient == null) {
            LOG.error("Error getting discovery client");
            throw new RuntimeException("Failed to create discovery client");
        }
        Application app = discoveryClient.getApplication(applicationName);
        List<Host> hosts = Lists.newArrayList();
        if (app == null) {
            return hosts;
        }
        List<InstanceInfo> ins = app.getInstances();
        if (ins == null || ins.isEmpty()) {
            return hosts;
        }
        // Keep only instances reporting UP, then map each InstanceInfo to a Host.
        hosts = Lists.newArrayList(Collections2.transform(
                Collections2.filter(ins, new Predicate<InstanceInfo>() {
                    @Override
                    public boolean apply(InstanceInfo input) {
                        return input.getStatus() == InstanceInfo.InstanceStatus.UP;
                    }
                }), new Function<InstanceInfo, Host>() {
                    @Override
                    public Host apply(InstanceInfo info) {
                        // Assumes EC2-style hostnames of the form "xxx-1-2-3-4.domain..."
                        // where the dash-separated tokens after the first are the octets of
                        // the instance's IP -- TODO confirm; other formats would throw
                        // ArrayIndexOutOfBoundsException here.
                        String[] parts = StringUtils.split(
                                StringUtils.split(info.getHostName(), ".")[0], '-');
                        Host host = new Host(info.getHostName(), info.getPort())
                                .addAlternateIpAddress(
                                        StringUtils.join(new String[] { parts[1], parts[2], parts[3],
                                                parts[4] }, "."))
                                .addAlternateIpAddress(info.getIPAddr())
                                .setId(info.getId());
                        try {
                            // For AWS-hosted instances, record the availability zone as the rack.
                            if (info.getDataCenterInfo() instanceof AmazonInfo) {
                                AmazonInfo amazonInfo = (AmazonInfo)info.getDataCenterInfo();
                                host.setRack(amazonInfo.get(MetaDataKey.availabilityZone));
                            }
                        }
                        catch (Throwable t) {
                            // Best effort: a missing rack should not drop the host.
                            LOG.error("Error getting rack for host " + host.getName(), t);
                        }
                        return host;
                    }
                }));
        return hosts;
    }
}
| 7,956 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/valve/RollingTimeWindowValve.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.valve;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.astyanax.contrib.valve.TimeWindowValve.RequestStatus;
/**
 * Rate-limiting valve that divides each second into {@code numBuckets} fixed
 * windows (each a {@link TimeWindowValve}) and atomically flips to a fresh
 * window once the current one has expired.  Thread-safe; all mutable state is
 * held in atomics.
 */
public class RollingTimeWindowValve {

    // Current window state; swapped wholesale via CAS when a window expires.
    private final AtomicReference<InnerState> currentRef = new AtomicReference<InnerState>(null);

    private final AtomicLong ratePerSecond = new AtomicLong(0L);
    private final AtomicInteger numBuckets = new AtomicInteger(0);
    // When true, decrementAndCheckQuota() short-circuits to 'permitted'.
    private final AtomicBoolean valveCheckDisabled = new AtomicBoolean(false);

    /**
     * @param rp       total permitted requests per second
     * @param nBuckets number of sub-second windows the second is divided into; must be > 0
     */
    public RollingTimeWindowValve(long rps, int nBuckets) {
        ratePerSecond.set(rps);
        numBuckets.set(nBuckets);
        currentRef.set(new InnerState(System.currentTimeMillis()));
    }

    /** New rate takes effect the next time a window is created. */
    public void setRatePerSecond(Long newRps) {
        ratePerSecond.set(newRps);
    }

    /** New bucket count takes effect the next time a window is created. */
    public void setNumBuckets(int newBuckets) {
        numBuckets.set(newBuckets);
    }

    public void disableValveCheck() {
        valveCheckDisabled.set(true);
    }

    public void enableValveCheck() {
        valveCheckDisabled.set(false);
    }

    /**
     * Consume one unit of quota.
     *
     * @return true when the request is permitted, false when over quota
     */
    public boolean decrementAndCheckQuota() {
        if (valveCheckDisabled.get()) {
            return true;
        }

        InnerState currentState = currentRef.get();
        TimeWindowValve currentWindow = currentState.window;

        RequestStatus status = currentWindow.decrementAndCheckQuota();
        if (status == RequestStatus.Permitted) {
            return true;
        }
        if (status == RequestStatus.OverQuota) {
            return false;
        }
        if (status == RequestStatus.PastWindow) {
            // Attempt to install a fresh window; if another thread wins the CAS
            // we simply use whichever window is current.  (Removed the dead
            // 'success' flag and empty if-block that previously sat here.)
            InnerState nextState = new InnerState(System.currentTimeMillis());
            currentRef.compareAndSet(currentState, nextState);
            // Try one more time against the (possibly new) window before giving up
            return (currentRef.get().window.decrementAndCheckQuota() == RequestStatus.Permitted);
        }
        return false;
    }

    /**
     * Immutable pairing of a window with a unique id + start time so that CAS
     * can distinguish two windows created at the same millisecond.
     */
    private class InnerState {
        private final String id = UUID.randomUUID().toString();
        private final Long startTime;
        private final TimeWindowValve window;

        private InnerState(Long startWindowMillis) {
            startTime = startWindowMillis;
            int nBuckets = numBuckets.get();
            // Each sub-window gets an equal slice of the per-second budget.
            long rateForWindow = ratePerSecond.get() / nBuckets;
            long windowMillis = 1000 / nBuckets;
            window = new TimeWindowValve(rateForWindow, startWindowMillis, windowMillis);
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime * result + ((id == null) ? 0 : id.hashCode());
            result = prime * result + ((startTime == null) ? 0 : startTime.hashCode());
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null) return false;
            if (getClass() != obj.getClass()) return false;

            InnerState other = (InnerState) obj;
            boolean equals = true;
            equals &= (id == null) ? other.id == null : id.equals(other.id);
            equals &= (startTime == null) ? other.startTime == null : startTime.equals(other.startTime);
            return equals;
        }

        @Override
        public String toString() {
            // id is already a String; no conversion needed.
            return id;
        }
    }
}
| 7,957 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/valve/TimeWindowValve.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.valve;
import java.util.concurrent.atomic.AtomicLong;
/**
 * A single fixed time window holding a decrementing request budget.  Requests
 * are permitted until either the budget reaches zero (OverQuota) or the window
 * end time passes (PastWindow).  Thread-safe via an AtomicLong budget.
 */
public class TimeWindowValve {

    /** Outcome of a single quota request against this window. */
    public static enum RequestStatus {
        OverQuota, PastWindow, Permitted
    };

    final AtomicLong limitValve;
    final Long startWindow;
    final Long endWindow;

    /**
     * @param limit              maximum number of requests permitted within the window
     * @param startMilliseconds  window start, epoch milliseconds
     * @param windowMilliseconds window length in milliseconds
     */
    public TimeWindowValve(final Long limit, final Long startMilliseconds, final long windowMilliseconds) {
        this.limitValve = new AtomicLong(limit);
        this.startWindow = startMilliseconds;
        this.endWindow = startMilliseconds + windowMilliseconds;
    }

    /**
     * Consume one unit of quota from this window.
     *
     * @return PastWindow once the window has expired, OverQuota once the budget
     *         is exhausted, Permitted otherwise
     */
    public RequestStatus decrementAndCheckQuota() {
        if (System.currentTimeMillis() > endWindow) {
            return RequestStatus.PastWindow;
        }

        // Cheap read-first check: once exhausted, skip further decrements.
        if (limitValve.get() <= 0) {
            return RequestStatus.OverQuota;
        }

        return limitValve.decrementAndGet() < 0
                ? RequestStatus.OverQuota
                : RequestStatus.Permitted;
    }
}
| 7,958 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/CassBasedFailedWritesLogger.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.util.concurrent.atomic.AtomicInteger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
/**
 * Impl of {@link FailedWritesLogger} which records metadata of failed writes in a separate
 * Cassandra cluster.  The keyspace and cluster context for this backup cluster must be provided.
 * For ease of management, the original cluster and keyspace are encoded in the backup CF name,
 * and row keys are sharded (0 .. numShards-1) so that there is no hot row.
 *
 * NOTE: this class only backs up metadata about the failed write - i.e. not the actual payload.
 * It serves merely as an indicator of which rows were not sent to the destination
 * cluster / keyspace / CF.
 *
 * @author poberai
 *
 */
public class CassBasedFailedWritesLogger implements FailedWritesLogger {

    // Renamed from 'Logger' to avoid shadowing the org.slf4j.Logger type name.
    private static final Logger LOG = LoggerFactory.getLogger(CassBasedFailedWritesLogger.class);

    private final AstyanaxContext<Keyspace> ksContext;
    private Keyspace ks;   // assigned in init(); init() must be called before logFailedWrite()
    private final CircularCounter counter;

    public CassBasedFailedWritesLogger(AstyanaxContext<Keyspace> ctx) {
        this(ctx, 10);
    }

    /**
     * @param ctx       context for the fallback keyspace
     * @param numShards number of row-key shards used to spread records; must be > 0
     */
    public CassBasedFailedWritesLogger(AstyanaxContext<Keyspace> ctx, int numShards) {
        if (numShards <= 0) {
            throw new RuntimeException("numShards must be > 0");
        }
        this.ksContext = ctx;
        this.counter = new CircularCounter(numShards);
    }

    /** Best-effort: a failure to reach the fallback cluster is logged, not rethrown. */
    @Override
    public void logFailedWrite(WriteMetadata failedWrite) {
        MutationBatch mutationBatch = ks.prepareMutationBatch();
        addToBatch(mutationBatch, failedWrite);
        try {
            mutationBatch.execute();
        } catch (ConnectionException e) {
            LOG.error("Failed to log failed write to fallback cluster: " + failedWrite, e);
        }
    }

    /** Encode one failed write as (cf, rowKey, column, value) in the batch. */
    private void addToBatch(MutationBatch batch, WriteMetadata failedWrite) {
        // TODO: must deal with failed operations like createCF etc
        if (failedWrite.getCFName() == null || failedWrite.getRowKey() == null) {
            return;
        }

        // CF name encodes the source cluster/keyspace for ease of management.
        String cfName = failedWrite.getPrimaryCluster() + "-" + failedWrite.getPrimaryKeyspace();
        ColumnFamily<String, Long> CF_FAILED_WRITES =
                ColumnFamily.newColumnFamily(cfName, StringSerializer.get(), LongSerializer.get(), StringSerializer.get());

        // Row key carries the source CF name plus a rotating shard suffix to avoid hot rows.
        String rowKey = failedWrite.getCFName() + "_" + counter.getNext();
        Long column = failedWrite.getUuid();
        String value = failedWrite.getRowKey();

        batch.withRow(CF_FAILED_WRITES, rowKey).putColumn(column, value);
    }

    /** Round-robin counter yielding shard suffixes in [0, maxLimit). */
    private class CircularCounter {

        private final int maxLimit;
        private final AtomicInteger counter = new AtomicInteger(0);

        private CircularCounter(int limit) {
            maxLimit = limit;
        }

        private int getNext() {
            int count = counter.incrementAndGet();
            // BUGFIX: mask off the sign bit so the shard suffix stays non-negative
            // even after the int counter wraps past Integer.MAX_VALUE; for normal
            // positive counts the result is unchanged.
            return (count & 0x7fffffff) % maxLimit;
        }
    }

    @Override
    public void init() {
        ks = ksContext.getClient();
        ksContext.start();
    }

    @Override
    public void shutdown() {
        ksContext.shutdown();
    }
}
| 7,959 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesCqlPreparedStatement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlPreparedStatement;
import com.netflix.astyanax.cql.CqlStatementResult;
public class DualWritesCqlPreparedStatement implements CqlPreparedStatement {
private final CqlPreparedStatement primary;
private final CqlPreparedStatement secondary;
private final DualWritesStrategy execStrategy;
private final DualKeyspaceMetadata ksMd;
public DualWritesCqlPreparedStatement(CqlPreparedStatement primaryCql, CqlPreparedStatement secondarycql, DualWritesStrategy strategy, DualKeyspaceMetadata keyspaceMd) {
primary = primaryCql;
secondary = secondarycql;
execStrategy = strategy;
ksMd = keyspaceMd;
}
@Override
public <V> CqlPreparedStatement withByteBufferValue(V value, Serializer<V> serializer) {
primary.withByteBufferValue(value, serializer);
secondary.withByteBufferValue(value, serializer);
return this;
}
@Override
public CqlPreparedStatement withValue(ByteBuffer value) {
primary.withValue(value);
secondary.withValue(value);
return this;
}
@Override
public CqlPreparedStatement withValues(List<ByteBuffer> value) {
primary.withValues(value);
secondary.withValues(value);
return this;
}
@Override
public CqlPreparedStatement withStringValue(String value) {
primary.withStringValue(value);
secondary.withStringValue(value);
return this;
}
@Override
public CqlPreparedStatement withIntegerValue(Integer value) {
primary.withIntegerValue(value);
secondary.withIntegerValue(value);
return this;
}
@Override
public CqlPreparedStatement withBooleanValue(Boolean value) {
primary.withBooleanValue(value);
secondary.withBooleanValue(value);
return this;
}
@Override
public CqlPreparedStatement withDoubleValue(Double value) {
primary.withDoubleValue(value);
secondary.withDoubleValue(value);
return this;
}
@Override
public CqlPreparedStatement withLongValue(Long value) {
primary.withLongValue(value);
secondary.withLongValue(value);
return this;
}
@Override
public CqlPreparedStatement withFloatValue(Float value) {
primary.withFloatValue(value);
secondary.withFloatValue(value);
return this;
}
@Override
public CqlPreparedStatement withShortValue(Short value) {
primary.withShortValue(value);
secondary.withShortValue(value);
return this;
}
@Override
public CqlPreparedStatement withUUIDValue(UUID value) {
primary.withUUIDValue(value);
secondary.withUUIDValue(value);
return this;
}
@Override
public OperationResult<CqlStatementResult> execute() throws ConnectionException {
WriteMetadata writeMd = new WriteMetadata(ksMd, null, null);
return execStrategy.wrapExecutions(primary, secondary, Collections.singletonList(writeMd)).execute();
}
@Override
public ListenableFuture<OperationResult<CqlStatementResult>> executeAsync() throws ConnectionException {
WriteMetadata writeMd = new WriteMetadata(ksMd, null, null);
return execStrategy.wrapExecutions(primary, secondary, Collections.singletonList(writeMd)).executeAsync();
}
}
| 7,960 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesMutationBatch.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.WriteAheadLog;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
/**
* Class that implements the {@link MutationBatch} interface and acts as a dual router for capturing all the dual writes.
 * Note that it purely maintains state in 2 separate MutationBatch objects, each corresponding to the source or destination keyspace / mutation batches.
*
* It also tracks state of what row keys are being added to what column families. This is useful for reporting data when the dual writes fail partially,
* and hence that metadata can be communicated to some {@link FailedWritesLogger} to be dealt with accordingly.
*
* @author poberai
*
* @param <C>
*/
public class DualWritesMutationBatch implements MutationBatch {

    // Identifies the (primary, secondary) cluster/keyspace pair this batch writes to.
    private final DualKeyspaceMetadata dualKeyspaceMetadata;
    // Underlying batch for the primary keyspace; single-keyspace state queries delegate here.
    private final MutationBatch primary;
    // Underlying batch for the secondary keyspace.
    private final MutationBatch secondary;
    // Strategy deciding how the two batch executions are combined.
    private final DualWritesStrategy writeExecutionStrategy;
    // One WriteMetadata per (CF, row key) touched by this batch, for reporting
    // partially failed dual writes. Replaced wholesale by discardMutations().
    private final AtomicReference<List<WriteMetadata>> writeMetada = new AtomicReference<List<WriteMetadata>>(new ArrayList<WriteMetadata>());

    public DualWritesMutationBatch(DualKeyspaceMetadata dualKSMetadata,
                                   MutationBatch primaryMB, MutationBatch secondaryMB, DualWritesStrategy strategy) {
        this.dualKeyspaceMetadata = dualKSMetadata;
        this.primary = primaryMB;
        this.secondary = secondaryMB;
        this.writeExecutionStrategy = strategy;
    }

    /** @return the batch targeting the primary keyspace */
    public MutationBatch getPrimary() {
        return primary;
    }

    /** @return the batch targeting the secondary keyspace */
    public MutationBatch getSecondary() {
        return secondary;
    }

    /** Executes both batches as dictated by the strategy, tagged with the accumulated write metadata. */
    @Override
    public OperationResult<Void> execute() throws ConnectionException {
        return writeExecutionStrategy.wrapExecutions(primary, secondary, writeMetada.get()).execute();
    }

    /** Async variant of {@link #execute()}; same strategy-wrapped dual execution. */
    @Override
    public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
        return writeExecutionStrategy.wrapExecutions(primary, secondary, writeMetada.get()).executeAsync();
    }

    /**
     * Records (CF, row key) for failure reporting, then returns a dual mutation
     * that mirrors every column operation into both underlying batches.
     */
    @Override
    public <K, C> ColumnListMutation<C> withRow(ColumnFamily<K, C> columnFamily, K rowKey) {
        writeMetada.get().add(new WriteMetadata(dualKeyspaceMetadata, columnFamily.getName(), rowKey.toString()));
        ColumnListMutation<C> clmPrimary = primary.withRow(columnFamily, rowKey);
        ColumnListMutation<C> clmSecondary = secondary.withRow(columnFamily, rowKey);
        return new DualWritesColumnListMutation<C>(clmPrimary, clmSecondary);
    }

    /** Records one metadata entry per CF, then issues the row deletion against both batches. */
    @Override
    public <K> void deleteRow(Iterable<? extends ColumnFamily<K, ?>> columnFamilies, K rowKey) {
        for (ColumnFamily<K, ?> cf : columnFamilies) {
            writeMetada.get().add(new WriteMetadata(dualKeyspaceMetadata, cf.getName(), rowKey.toString()));
        }
        primary.deleteRow(columnFamilies, rowKey);
        secondary.deleteRow(columnFamilies, rowKey);
    }

    /** Clears both batches and resets the accumulated write metadata. */
    @Override
    public void discardMutations() {
        primary.discardMutations();
        secondary.discardMutations();
        writeMetada.set(new ArrayList<WriteMetadata>());
    }

    // NOTE(review): merges `other` into BOTH underlying batches, and the
    // accumulated write metadata is not updated — confirm this is intentional.
    @Override
    public void mergeShallow(MutationBatch other) {
        primary.mergeShallow(other);
        secondary.mergeShallow(other);
    }

    // Read-only batch state is answered from the primary batch only; the
    // secondary is assumed to mirror it since every mutation goes to both.
    @Override
    public boolean isEmpty() {
        return primary.isEmpty();
    }

    @Override
    public int getRowCount() {
        return primary.getRowCount();
    }

    @Override
    public Map<ByteBuffer, Set<String>> getRowKeys() {
        return primary.getRowKeys();
    }

    @Override
    public MutationBatch pinToHost(Host host) {
        primary.pinToHost(host);
        secondary.pinToHost(host);
        return this;
    }

    @Override
    public MutationBatch setConsistencyLevel(ConsistencyLevel consistencyLevel) {
        primary.setConsistencyLevel(consistencyLevel);
        secondary.setConsistencyLevel(consistencyLevel);
        return this;
    }

    @Override
    public MutationBatch withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        primary.withConsistencyLevel(consistencyLevel);
        secondary.withConsistencyLevel(consistencyLevel);
        return this;
    }

    @Override
    public MutationBatch withRetryPolicy(RetryPolicy retry) {
        primary.withRetryPolicy(retry);
        secondary.withRetryPolicy(retry);
        return this;
    }

    // NOTE(review): the write-ahead log is attached to the primary batch only —
    // TODO confirm this asymmetry is intentional.
    @Override
    public MutationBatch usingWriteAheadLog(WriteAheadLog manager) {
        primary.usingWriteAheadLog(manager);
        return this;
    }

    @Override
    public MutationBatch lockCurrentTimestamp() {
        primary.lockCurrentTimestamp();
        secondary.lockCurrentTimestamp();
        return this;
    }

    @SuppressWarnings("deprecation")
    @Override
    public MutationBatch setTimeout(long timeout) {
        primary.setTimeout(timeout);
        secondary.setTimeout(timeout);
        return this;
    }

    @Override
    public MutationBatch setTimestamp(long timestamp) {
        primary.setTimestamp(timestamp);
        secondary.setTimestamp(timestamp);
        return this;
    }

    @Override
    public MutationBatch withTimestamp(long timestamp) {
        primary.withTimestamp(timestamp);
        secondary.withTimestamp(timestamp);
        return this;
    }

    @Override
    public MutationBatch withAtomicBatch(boolean condition) {
        primary.withAtomicBatch(condition);
        secondary.withAtomicBatch(condition);
        return this;
    }

    // NOTE(review): secondary.serialize() is invoked and its result discarded —
    // presumably for its side effects; verify. Only the primary's bytes are returned.
    @Override
    public ByteBuffer serialize() throws Exception {
        secondary.serialize();
        return primary.serialize();
    }

    /**
     * Deserializes into both batches. The buffer is cloned first because each
     * underlying deserialize consumes the buffer's position.
     */
    @Override
    public void deserialize(ByteBuffer data) throws Exception {
        ByteBuffer clone = clone(data);
        primary.deserialize(data);
        secondary.deserialize(clone);
    }

    @Override
    public MutationBatch withCaching(boolean condition) {
        primary.withCaching(condition);
        secondary.withCaching(condition);
        return this;
    }

    /**
     * Returns an independent copy of {@code original}'s full capacity; the
     * original's position is rewound to 0 as a side effect.
     */
    private static ByteBuffer clone(ByteBuffer original) {
        ByteBuffer clone = ByteBuffer.allocate(original.capacity());
        original.rewind();//copy from the beginning
        clone.put(original);
        original.rewind();
        clone.flip();
        return clone;
    }
}
| 7,961 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesCqlStatement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.util.Collections;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlPreparedStatement;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.model.ConsistencyLevel;
/**
 * {@link CqlStatement} that forwards every builder call to two underlying
 * statements — one per keyspace of a dual-write setup — and executes both
 * through the configured {@link DualWritesStrategy}.
 */
public class DualWritesCqlStatement implements CqlStatement {

    /** Statement against the primary keyspace. */
    private final CqlStatement primary;
    /** Statement against the secondary keyspace. */
    private final CqlStatement secondary;
    /** Strategy deciding how the two executions are combined. */
    private final DualWritesStrategy execStrategy;
    /** Identifies the dual cluster/keyspace pair; used to tag write records. */
    private final DualKeyspaceMetadata ksMd;

    public DualWritesCqlStatement(CqlStatement primaryCql, CqlStatement secondarycql, DualWritesStrategy strategy, DualKeyspaceMetadata keyspaceMd) {
        primary = primaryCql;
        secondary = secondarycql;
        execStrategy = strategy;
        ksMd = keyspaceMd;
    }

    @Override
    public CqlStatement withConsistencyLevel(ConsistencyLevel cl) {
        primary.withConsistencyLevel(cl);
        secondary.withConsistencyLevel(cl);
        return this;
    }

    @Override
    public CqlStatement withCql(String cql) {
        primary.withCql(cql);
        secondary.withCql(cql);
        return this;
    }

    @Override
    public CqlPreparedStatement asPreparedStatement() {
        CqlPreparedStatement pstmtPrimary = primary.asPreparedStatement();
        // FIX: previously this prepared the secondary from the *primary*
        // CqlStatement, so prepared writes never reached the secondary keyspace.
        CqlPreparedStatement pstmtSecondary = secondary.asPreparedStatement();
        return new DualWritesCqlPreparedStatement(pstmtPrimary, pstmtSecondary, execStrategy, ksMd);
    }

    @Override
    public OperationResult<CqlStatementResult> execute() throws ConnectionException {
        // CQL text is opaque here, so the write record carries no CF name / row key.
        WriteMetadata writeMd = new WriteMetadata(ksMd, null, null);
        return execStrategy.wrapExecutions(primary, secondary, Collections.singletonList(writeMd)).execute();
    }

    @Override
    public ListenableFuture<OperationResult<CqlStatementResult>> executeAsync() throws ConnectionException {
        WriteMetadata writeMd = new WriteMetadata(ksMd, null, null);
        return execStrategy.wrapExecutions(primary, secondary, Collections.singletonList(writeMd)).executeAsync();
    }
}
| 7,962 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesKeyspace.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.ColumnMutation;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.SerializerPackage;
import com.netflix.astyanax.connectionpool.ConnectionPool;
import com.netflix.astyanax.connectionpool.Operation;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.TokenRange;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.OperationException;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.ddl.KeyspaceDefinition;
import com.netflix.astyanax.ddl.SchemaChangeResult;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.UnknownComparatorException;
/**
 * Main class that orchestrates all the dual writes. It wraps the 2 keyspaces (source and destination)
* and appropriately forwards reads and writes to it as dictated by updates using the {@link DualWritesUpdateListener}
*
* Note that if dual writes are enabled then the writes are sent to the {@link DualWritesMutationBatch} or {@link DualWritesColumnMutation}
* classes appropriately.
*
* The reads are always served from the primary data source.
*
* @author poberai
*
*/
public class DualWritesKeyspace implements Keyspace, DualWritesUpdateListener {

    private static final Logger Logger = LoggerFactory.getLogger(DualWritesKeyspace.class);

    // Atomically swappable (primary, secondary) keyspace pair; swapped by flipPrimaryAndSecondary().
    private final AtomicReference<KeyspacePair> ksPair = new AtomicReference<KeyspacePair>(null);
    // When false, all writes go to the primary keyspace only.
    private final AtomicBoolean dualWritesEnabled = new AtomicBoolean(false);
    // Strategy deciding how dual executions are combined (sequential, best-effort, ...).
    private final DualWritesStrategy executionStrategy;

    public DualWritesKeyspace(DualKeyspaceMetadata dualKeyspaceSetup,
                              Keyspace primaryKS, Keyspace secondaryKS,
                              DualWritesStrategy execStrategy) {
        ksPair.set(new KeyspacePair(dualKeyspaceSetup, primaryKS, secondaryKS));
        executionStrategy = execStrategy;
    }

    /** Current primary keyspace; all reads and metadata queries go here. */
    private Keyspace getPrimaryKS() {
        return ksPair.get().getPrimaryKS();
    }

    /** @return metadata describing the current (primary, secondary) setup */
    public DualKeyspaceMetadata getDualKeyspaceMetadata() {
        return ksPair.get().getDualKSMetadata();
    }

    // ---- Read / metadata operations: always served from the primary keyspace ----

    @Override
    public AstyanaxConfiguration getConfig() {
        return getPrimaryKS().getConfig();
    }

    @Override
    public String getKeyspaceName() {
        return getPrimaryKS().getKeyspaceName();
    }

    @Override
    public Partitioner getPartitioner() throws ConnectionException {
        return getPrimaryKS().getPartitioner();
    }

    @Override
    public String describePartitioner() throws ConnectionException {
        return getPrimaryKS().describePartitioner();
    }

    @Override
    public List<TokenRange> describeRing() throws ConnectionException {
        return getPrimaryKS().describeRing();
    }

    @Override
    public List<TokenRange> describeRing(String dc) throws ConnectionException {
        return getPrimaryKS().describeRing(dc);
    }

    @Override
    public List<TokenRange> describeRing(String dc, String rack) throws ConnectionException {
        return getPrimaryKS().describeRing(dc, rack);
    }

    @Override
    public List<TokenRange> describeRing(boolean cached) throws ConnectionException {
        return getPrimaryKS().describeRing(cached);
    }

    @Override
    public KeyspaceDefinition describeKeyspace() throws ConnectionException {
        return getPrimaryKS().describeKeyspace();
    }

    @Override
    public Properties getKeyspaceProperties() throws ConnectionException {
        return getPrimaryKS().getKeyspaceProperties();
    }

    @Override
    public Properties getColumnFamilyProperties(String columnFamily) throws ConnectionException {
        return getPrimaryKS().getColumnFamilyProperties(columnFamily);
    }

    @Override
    public SerializerPackage getSerializerPackage(String cfName, boolean ignoreErrors) throws ConnectionException, UnknownComparatorException {
        return getPrimaryKS().getSerializerPackage(cfName, ignoreErrors);
    }

    // ---- Write operations: routed to both keyspaces when dual writes are enabled ----

    /**
     * Returns a dual-routing batch when dual writes are enabled, otherwise a
     * plain batch against the primary keyspace.
     */
    @Override
    public MutationBatch prepareMutationBatch() {
        if (dualWritesEnabled.get()) {
            KeyspacePair pair = ksPair.get();
            return new DualWritesMutationBatch(
                    pair.getDualKSMetadata(),
                    pair.getPrimaryKS().prepareMutationBatch(),
                    pair.getSecondaryKS().prepareMutationBatch(),
                    executionStrategy);
        } else {
            return getPrimaryKS().prepareMutationBatch();
        }
    }

    /** Queries are always served from the primary keyspace. */
    @Override
    public <K, C> ColumnFamilyQuery<K, C> prepareQuery(ColumnFamily<K, C> cf) {
        return getPrimaryKS().prepareQuery(cf);
    }

    @Override
    public <K, C> ColumnMutation prepareColumnMutation(ColumnFamily<K, C> columnFamily, K rowKey, C column) {
        KeyspacePair pair = ksPair.get();
        if (dualWritesEnabled.get()) {
            WriteMetadata md = new WriteMetadata(pair.getDualKSMetadata(), columnFamily.getName(), rowKey.toString());
            return new DualWritesColumnMutation(md,
                    pair.getPrimaryKS() .prepareColumnMutation(columnFamily, rowKey, column),
                    pair.getSecondaryKS().prepareColumnMutation(columnFamily, rowKey, column),
                    executionStrategy);
        } else {
            return pair.getPrimaryKS().prepareColumnMutation(columnFamily, rowKey, column);
        }
    }

    // ---- Schema / admin operations: always applied to BOTH keyspaces via the
    //      execution strategy, regardless of the dualWritesEnabled flag ----

    @Override
    public <K, C> OperationResult<Void> truncateColumnFamily(final ColumnFamily<K, C> columnFamily) throws OperationException, ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<Void>() {
            @Override
            public OperationResult<Void> exec(Keyspace ks) throws ConnectionException {
                return ks.truncateColumnFamily(columnFamily);
            }
        });
    }

    @Override
    public OperationResult<Void> truncateColumnFamily(final String columnFamily) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<Void>() {
            @Override
            public OperationResult<Void> exec(Keyspace ks) throws ConnectionException {
                return ks.truncateColumnFamily(columnFamily);
            }
        });
    }

    @Override
    public OperationResult<Void> testOperation(final Operation<?, ?> operation) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<Void>() {
            @Override
            public OperationResult<Void> exec(Keyspace ks) throws ConnectionException {
                return ks.testOperation(operation);
            }
        });
    }

    @Override
    public OperationResult<Void> testOperation(final Operation<?, ?> operation, final RetryPolicy retry) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<Void>() {
            @Override
            public OperationResult<Void> exec(Keyspace ks) throws ConnectionException {
                // FIX: forward the caller-supplied retry policy; it was
                // previously ignored and the no-retry overload was invoked.
                return ks.testOperation(operation, retry);
            }
        });
    }

    @Override
    public <K, C> OperationResult<SchemaChangeResult> createColumnFamily(final ColumnFamily<K, C> columnFamily, final Map<String, Object> options) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createColumnFamily(columnFamily, options);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(final Properties props) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createColumnFamily(props);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(final Map<String, Object> options) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createColumnFamily(options);
            }
        });
    }

    @Override
    public <K, C> OperationResult<SchemaChangeResult> updateColumnFamily(final ColumnFamily<K, C> columnFamily, final Map<String, Object> options) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.updateColumnFamily(columnFamily, options);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(final Properties props) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.updateColumnFamily(props);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(final Map<String, Object> options) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.updateColumnFamily(options);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> dropColumnFamily(final String columnFamilyName) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.dropColumnFamily(columnFamilyName);
            }
        });
    }

    @Override
    public <K, C> OperationResult<SchemaChangeResult> dropColumnFamily(final ColumnFamily<K, C> columnFamily) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.dropColumnFamily(columnFamily);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(final Map<String, Object> options) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createKeyspace(options);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Map<String, Object> options) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createKeyspaceIfNotExists(options);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(final Properties properties) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createKeyspace(properties);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Properties properties) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createKeyspaceIfNotExists(properties);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(final Map<String, Object> options, final Map<ColumnFamily, Map<String, Object>> cfs) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createKeyspace(options, cfs);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Map<String, Object> options, final Map<ColumnFamily, Map<String, Object>> cfs) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.createKeyspaceIfNotExists(options, cfs);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(final Map<String, Object> options) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.updateKeyspace(options);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(final Properties props) throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.updateKeyspace(props);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> dropKeyspace() throws ConnectionException {
        return execDualKeyspaceOperation(new KeyspaceOperation<SchemaChangeResult>() {
            @Override
            public OperationResult<SchemaChangeResult> exec(Keyspace ks) throws ConnectionException {
                return ks.dropKeyspace();
            }
        });
    }

    @Override
    public Map<String, List<String>> describeSchemaVersions() throws ConnectionException {
        return getPrimaryKS().describeSchemaVersions();
    }

    /** CQL statements always fan out to both keyspaces via {@link DualWritesCqlStatement}. */
    @Override
    public CqlStatement prepareCqlStatement() {
        KeyspacePair pair = ksPair.get();
        CqlStatement primaryStmt = pair.getPrimaryKS().prepareCqlStatement();
        CqlStatement secondaryStmt = pair.getSecondaryKS().prepareCqlStatement();
        return new DualWritesCqlStatement(primaryStmt, secondaryStmt, executionStrategy, pair.getDualKSMetadata());
    }

    @Override
    public ConnectionPool<?> getConnectionPool() throws ConnectionException {
        return getPrimaryKS().getConnectionPool();
    }

    /** Immutable (metadata, primary, secondary) triple so role flips are atomic. */
    private class KeyspacePair {

        private final DualKeyspaceMetadata dualKeyspaceMetadata;
        private final Keyspace ksPrimary;
        private final Keyspace ksSecondary;

        private KeyspacePair(final DualKeyspaceMetadata dualKeyspaceSetup, final Keyspace pKS, final Keyspace sKS) {
            dualKeyspaceMetadata = dualKeyspaceSetup;
            ksPrimary = pKS;
            ksSecondary = sKS;
        }

        private Keyspace getPrimaryKS() {
            return ksPrimary;
        }

        private Keyspace getSecondaryKS() {
            return ksSecondary;
        }

        private DualKeyspaceMetadata getDualKSMetadata() {
            return dualKeyspaceMetadata;
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime * result + ((ksPrimary == null) ? 0 : ksPrimary.hashCode());
            result = prime * result + ((ksSecondary == null) ? 0 : ksSecondary.hashCode());
            result = prime * result + ((dualKeyspaceMetadata == null) ? 0 : dualKeyspaceMetadata.hashCode());
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null) return false;
            if (getClass() != obj.getClass()) return false;

            KeyspacePair other = (KeyspacePair) obj;
            boolean equals = true;
            equals &= (ksPrimary == null) ? (other.ksPrimary == null) : (ksPrimary.equals(other.ksPrimary));
            equals &= (ksSecondary == null) ? (other.ksSecondary == null) : (ksSecondary.equals(other.ksSecondary));
            equals &= (dualKeyspaceMetadata == null) ? (other.dualKeyspaceMetadata == null) : (dualKeyspaceMetadata.equals(other.dualKeyspaceMetadata));
            return equals;
        }
    }

    // ---- DualWritesUpdateListener callbacks ----

    @Override
    public void dualWritesEnabled() {
        Logger.info("ENABLING dual writes for dual keyspace setup: " + ksPair.get().getDualKSMetadata());
        dualWritesEnabled.set(true);
    }

    @Override
    public void dualWritesDisabled() {
        Logger.info("DISABLING dual writes for dual keyspace setup: " + ksPair.get().getDualKSMetadata());
        dualWritesEnabled.set(false);
    }

    /**
     * Atomically swaps the primary/secondary roles. A concurrent flip can make
     * the CAS fail, in which case this flip is abandoned (and logged).
     */
    @Override
    public void flipPrimaryAndSecondary() {
        // Check that the expected state is actually reverse of what the destination state should be
        KeyspacePair currentPair = ksPair.get();
        DualKeyspaceMetadata currentKeyspaceSetup = currentPair.getDualKSMetadata();

        DualKeyspaceMetadata newDualKeyspaceSetup =
                new DualKeyspaceMetadata(currentKeyspaceSetup.getSecondaryCluster(), currentKeyspaceSetup.getSecondaryKeyspaceName(),
                        currentKeyspaceSetup.getPrimaryCluster(), currentKeyspaceSetup.getPrimaryKeyspaceName());

        KeyspacePair newPair =
                new KeyspacePair(newDualKeyspaceSetup, currentPair.getSecondaryKS(), currentPair.getPrimaryKS());

        boolean success = ksPair.compareAndSet(currentPair, newPair);
        if (success) {
            Logger.info("Successfully flipped to new dual keyspace setup" + ksPair.get().getDualKSMetadata());
        } else {
            Logger.info("Could not flip keyspace pair: " + currentPair + " to new pair: " + newPair);
        }
    }

    /** Synchronous-only Execution adapter; executeAsync is unsupported. */
    private abstract class SimpleSyncExec<R> implements Execution<R> {
        @Override
        public ListenableFuture<OperationResult<R>> executeAsync() throws ConnectionException {
            throw new RuntimeException("executeAsync not implemented for SimpleSyncExec");
        }
    }

    /** A keyspace-parameterized operation to be run against both keyspaces. */
    private interface KeyspaceOperation<R> {
        OperationResult<R> exec(Keyspace ks) throws ConnectionException;
    }

    /**
     * Runs {@code ksOperation} against the primary and secondary keyspaces,
     * combined per the execution strategy and tagged with a CF-less write record.
     */
    private <R> OperationResult<R> execDualKeyspaceOperation(final KeyspaceOperation<R> ksOperation) throws ConnectionException {

        final KeyspacePair pair = ksPair.get();

        final Execution<R> exec1 = new SimpleSyncExec<R>() {
            @Override
            public OperationResult<R> execute() throws ConnectionException {
                return ksOperation.exec(pair.getPrimaryKS());
            }
        };

        final Execution<R> exec2 = new SimpleSyncExec<R>() {
            @Override
            public OperationResult<R> execute() throws ConnectionException {
                return ksOperation.exec(pair.getSecondaryKS());
            }
        };

        WriteMetadata writeMd = new WriteMetadata(pair.getDualKSMetadata(), null, null);
        return executionStrategy.wrapExecutions(exec1, exec2, Collections.singletonList(writeMd)).execute();
    }
}
| 7,963 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/BestEffortSecondaryWriteStrategy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.util.Collection;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
/**
* Impl of {@link DualWritesStrategy} that writes SEQUENTIALLY to the 2 keyspaces.
* first the primary and then the secondary.
*
* If the 1st keyspace write fails, then the failure is propagated immediately to the caller.
* If the 1st one succeeds then the 2nd keyspace is tried as best effort. If the write to the 2nd keyspace fails,
* then that failed write is given to the provided {@link FailedWritesLogger} which can then decide what to do with it.
*
* @author poberai
*
*/
public class BestEffortSecondaryWriteStrategy implements DualWritesStrategy {

    /** Sink for writes that succeeded on the primary but failed on the secondary; may be null. */
    private final FailedWritesLogger failedWritesLogger;

    public BestEffortSecondaryWriteStrategy(FailedWritesLogger logger) {
        this.failedWritesLogger = logger;
    }

    @Override
    public <R> Execution<R> wrapExecutions(final Execution<R> primary, final Execution<R> secondary, final Collection<WriteMetadata> writeMetadata) {
        return new Execution<R>() {

            @Override
            public OperationResult<R> execute() throws ConnectionException {
                // Primary write first: any failure here propagates to the caller untouched.
                OperationResult<R> result = primary.execute();
                try {
                    // Secondary write is best effort only.
                    secondary.execute();
                } catch (ConnectionException e) {
                    // A secondary failure must not fail the overall operation; record the
                    // failed writes (if a logger was supplied) so they can be replayed later.
                    if (failedWritesLogger != null) {
                        for (WriteMetadata writeMD : writeMetadata) {
                            failedWritesLogger.logFailedWrite(writeMD);
                        }
                    }
                }
                return result;
            }

            @Override
            public ListenableFuture<OperationResult<R>> executeAsync() throws ConnectionException {
                // UnsupportedOperationException (a RuntimeException subtype) is the idiomatic
                // signal for an intentionally unimplemented operation; existing callers
                // catching RuntimeException continue to work.
                throw new UnsupportedOperationException("Cannot chain async primary and secondary executions");
            }
        };
    }

    @Override
    public FailedWritesLogger getFailedWritesLogger() {
        return failedWritesLogger;
    }
}
| 7,964 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesColumnMutation.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.UUID;
import com.netflix.astyanax.ColumnMutation;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
/**
* Class that implements the {@link ColumnMutation} interface and acts as a dual router for capturing all the dual writes.
* Note that it purely maintains state in 2 separate ColumnMutation objects, each corresponding to the source of destination keyspace
*
* @author poberai
*
* @param <C>
*/
public class DualWritesColumnMutation implements ColumnMutation {

    /** Mutation bound to the current primary keyspace. */
    private final ColumnMutation primary;
    /** Mutation bound to the current secondary keyspace. */
    private final ColumnMutation secondary;
    /** Strategy deciding how the two executions are combined (ordering, failure handling). */
    private final DualWritesStrategy executionStrategy;
    /** Metadata describing this write; handed to the strategy for failure logging. */
    private final Collection<WriteMetadata> writeMetadata;

    public DualWritesColumnMutation(WriteMetadata writeMD, ColumnMutation primaryClm, ColumnMutation secondaryClm, DualWritesStrategy execStrategy) {
        writeMetadata = Collections.singletonList(writeMD);
        primary = primaryClm;
        secondary = secondaryClm;
        executionStrategy = execStrategy;
    }

    /**
     * Combines a primary/secondary execution pair via the configured strategy.
     * Extracted to remove the identical wrapExecutions(...) boilerplate that was
     * repeated in every putValue/delete overload below.
     */
    private Execution<Void> dual(Execution<Void> primaryExec, Execution<Void> secondaryExec) {
        return executionStrategy.wrapExecutions(primaryExec, secondaryExec, writeMetadata);
    }

    @Override
    public ColumnMutation setConsistencyLevel(ConsistencyLevel consistencyLevel) {
        primary.setConsistencyLevel(consistencyLevel);
        secondary.setConsistencyLevel(consistencyLevel);
        return this;
    }

    @Override
    public ColumnMutation withRetryPolicy(RetryPolicy retry) {
        primary.withRetryPolicy(retry);
        secondary.withRetryPolicy(retry);
        return this;
    }

    @Override
    public ColumnMutation withTimestamp(long timestamp) {
        primary.withTimestamp(timestamp);
        secondary.withTimestamp(timestamp);
        return this;
    }

    // Each overload prepares the write on both mutations (primary first), then
    // defers the actual combined execution to the strategy.

    @Override
    public Execution<Void> putValue(String value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(byte[] value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(byte value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(short value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(int value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(long value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(boolean value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(ByteBuffer value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(Date value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(float value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(double value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public Execution<Void> putValue(UUID value, Integer ttl) {
        return dual(primary.putValue(value, ttl), secondary.putValue(value, ttl));
    }

    @Override
    public <T> Execution<Void> putValue(T value, Serializer<T> serializer, Integer ttl) {
        return dual(primary.putValue(value, serializer, ttl), secondary.putValue(value, serializer, ttl));
    }

    @Override
    public Execution<Void> putEmptyColumn(Integer ttl) {
        return dual(primary.putEmptyColumn(ttl), secondary.putEmptyColumn(ttl));
    }

    @Override
    public Execution<Void> incrementCounterColumn(long amount) {
        return dual(primary.incrementCounterColumn(amount), secondary.incrementCounterColumn(amount));
    }

    @Override
    public Execution<Void> deleteColumn() {
        return dual(primary.deleteColumn(), secondary.deleteColumn());
    }

    @Override
    public Execution<Void> deleteCounterColumn() {
        return dual(primary.deleteCounterColumn(), secondary.deleteCounterColumn());
    }
}
| 7,965 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesStrategy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.util.Collection;
import com.netflix.astyanax.Execution;
/**
* Interface for dealing with 2 separate executions one is the primary, the other is the secondary.
* There are several possible strategies
*
* 1. BEST EFFORT - Do first, fail everything if it fails. The try second and just log it if it fails.
* 2. FAIL ON ALL - try both and fail if any of them FAIL.
* 3. BEST EFFORT ASYNC - similar to the 1st but try the 2nd write in a separate thread and do not block the caller.
* 4. PARALLEL WRITES - similar to 2. but try both in separate threads, so that we don't pay for the penalty of dual writes latency as in the SEQUENTIAL method.
*
* @author poberai
*
*/
public interface DualWritesStrategy {

    /**
     * Wraps the primary and secondary executions into a single composite execution
     * that applies this strategy's ordering and failure semantics.
     *
     * @param primary execution against the current primary keyspace
     * @param secondary execution against the current secondary keyspace
     * @param writeMetadata metadata describing the writes, used to record failures
     * @return a composite {@link Execution} honoring this strategy
     */
    public <R> Execution<R> wrapExecutions(Execution<R> primary, Execution<R> secondary, Collection<WriteMetadata> writeMetadata);

    /**
     * @return FailedWritesLogger this strategy uses to record failed writes (may be null)
     */
    public FailedWritesLogger getFailedWritesLogger();
}
| 7,966 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesUpdateListener.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
/**
* Notification based listener that gets update from some controller when changing the behavior of dual writes.
* This is what folks will need to implement to be able to react to the dual writes migration process.
* @see {@link DualWritesKeyspace}
*
* @author poberai
*
*/
public interface DualWritesUpdateListener {

    /**
     * Start dual writes: subsequent mutations are routed to both keyspaces.
     */
    public void dualWritesEnabled();

    /**
     * Stop dual writes: subsequent mutations are routed to the primary keyspace only.
     */
    public void dualWritesDisabled();

    /**
     * Flip roles of primary and secondary keyspaces.
     */
    public void flipPrimaryAndSecondary();
}
| 7,967 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesDemo.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolType;
import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.thrift.ThriftFamilyFactory;
/**
 * Manual end-to-end demo of the dual-writes machinery: writes rows through a
 * DualWritesKeyspace while toggling/flipping dual writes, and verifies which
 * of the two underlying keyspaces received each row.
 */
public class DualWritesDemo {

    // Source (initially primary) cluster/keyspace.
    final String cluster1 = "cass_dualwrites_source";
    final String ks1 = "dualwritessrc";
    final String seed1 = "";

    // Destination (initially secondary) cluster/keyspace.
    final String cluster2 = "cass_dualwrites_dest";
    final String ks2 = "dualwritessrc";
    final String seed2 = "";

    final ColumnFamily<Integer, Long> CF_DUAL_WRITES =
            ColumnFamily.newColumnFamily("foobar", IntegerSerializer.get(), LongSerializer.get(), StringSerializer.get());

    AstyanaxContext<Keyspace> ctx1;
    Keyspace keyspace1;
    AstyanaxContext<Keyspace> ctx2;
    Keyspace keyspace2;

    DualWritesKeyspace dualKeyspace = null;

    FailedWritesLogger logger = new LogBasedFailedWritesLogger();
    BestEffortSecondaryWriteStrategy execStrategy = new BestEffortSecondaryWriteStrategy(logger);

    public DualWritesDemo() {
        // Contexts are created and started in run(). Building them here as well
        // (as the original code did) leaked two AstyanaxContext instances that
        // were immediately replaced and never started or shut down.
    }

    public void run() throws Exception {
        try {
            ctx1 = getKeyspaceContext(ks1, seed1);
            keyspace1 = ctx1.getClient();
            ctx1.start();

            ctx2 = getKeyspaceContext(ks2, seed2);
            keyspace2 = ctx2.getClient();
            ctx2.start();

            // Give the connection pools a moment to come up.
            Thread.sleep(100);

            logger.init();

            DualKeyspaceMetadata dualKeyspaceSetup = new DualKeyspaceMetadata(cluster1, ks1, cluster2, ks2);
            dualKeyspace = new DualWritesKeyspace(dualKeyspaceSetup, keyspace1, keyspace2, execStrategy);

            // Dual writes OFF: only the primary (keyspace1) receives the row.
            addRowToKS(dualKeyspace, 1, 0, 10);
            verifyPresent(keyspace1, 1);
            verifyNotPresent(keyspace2, 1);

            // Dual writes ON: both keyspaces receive the row.
            dualKeyspace.dualWritesEnabled();
            addRowToKS(dualKeyspace, 2, 0, 10);
            verifyPresent(keyspace1, 2);
            verifyPresent(keyspace2, 2);

            // Dual writes OFF again: only the primary receives the row.
            dualKeyspace.dualWritesDisabled();
            addRowToKS(dualKeyspace, 3, 0, 10);
            verifyPresent(keyspace1, 3);
            verifyNotPresent(keyspace2, 3);

            // Flip: keyspace2 is now primary, so it alone receives the row.
            dualKeyspace.flipPrimaryAndSecondary();
            addRowToKS(dualKeyspace, 4, 0, 10);
            verifyNotPresent(keyspace1, 4);
            verifyPresent(keyspace2, 4);

            // Flipped + dual writes ON: both receive the row.
            dualKeyspace.dualWritesEnabled();
            addRowToKS(dualKeyspace, 5, 0, 10);
            verifyPresent(keyspace1, 5);
            verifyPresent(keyspace2, 5);

            // Flipped + dual writes OFF: only the new primary (keyspace2) receives it.
            dualKeyspace.dualWritesDisabled();
            addRowToKS(dualKeyspace, 6, 0, 10);
            verifyNotPresent(keyspace1, 6);
            verifyPresent(keyspace2, 6);

            // Flip back: keyspace1 is primary again.
            dualKeyspace.flipPrimaryAndSecondary();
            addRowToKS(dualKeyspace, 7, 0, 10);
            verifyPresent(keyspace1, 7);
            verifyNotPresent(keyspace2, 7);

            dualKeyspace.dualWritesEnabled();
            addRowToKS(dualKeyspace, 8, 0, 10);
            verifyPresent(keyspace1, 8);
            verifyPresent(keyspace2, 8);

            // Cleanup: dual writes are still on, so deletes hit both keyspaces.
            deleteRowFromKS(dualKeyspace, 1,2,3,4,5,6,7,8);
            verifyNotPresent(keyspace1, 1,2,3,4,5,6,7,8);
            verifyNotPresent(keyspace2, 1,2,3,4,5,6,7,8);

        } finally {
            if (ctx1 != null) {
                ctx1.shutdown();
            }
            if (ctx2 != null) {
                ctx2.shutdown();
            }
        }
    }

    /** Writes columns [start, end) with value "foo" to the given row. */
    private void addRowToKS(Keyspace ks, int rowKey, int start, int end) throws ConnectionException {
        MutationBatch mb = ks.prepareMutationBatch();
        for (long i=start; i<end; i++) {
            mb.withRow(CF_DUAL_WRITES, rowKey).putColumn(i, "foo");
        }
        mb.execute();
    }

    /** Deletes all the given rows in a single mutation batch. */
    private void deleteRowFromKS(Keyspace ks, int ... rowKeys) throws ConnectionException {
        MutationBatch mb = ks.prepareMutationBatch();
        for (int rowKey : rowKeys) {
            mb.withRow(CF_DUAL_WRITES, rowKey).delete();
        }
        mb.execute();
    }

    private AstyanaxContext<Keyspace> getKeyspaceContext(final String ks, final String seedHost) {
        AstyanaxContext<Keyspace> ctx =
                new AstyanaxContext.Builder()
                        .forKeyspace(ks)
                        .withConnectionPoolConfiguration(
                                new ConnectionPoolConfigurationImpl("myCPConfig-" + ks)
                                        .setSeeds(seedHost)
                                        .setPort(7102))
                        .withAstyanaxConfiguration(
                                new AstyanaxConfigurationImpl()
                                        .setDefaultReadConsistencyLevel(ConsistencyLevel.CL_LOCAL_QUORUM)
                                        .setConnectionPoolType(ConnectionPoolType.TOKEN_AWARE)
                                        .setDiscoveryType(NodeDiscoveryType.RING_DESCRIBE))
                        .buildKeyspace(ThriftFamilyFactory.getInstance());
        return ctx;
    }

    /** Fails (throws) unless the row exists in the given keyspace. */
    private void verifyPresent(Keyspace ks, int rowKey) throws ConnectionException {
        ColumnList<Long> result = ks.prepareQuery(CF_DUAL_WRITES).getRow(rowKey).execute().getResult();
        if (result.isEmpty()) {
            // typo fix: "keysapce" -> "keyspace"
            throw new RuntimeException("Row: " + rowKey + " missing from keyspace: " + ks.getKeyspaceName());
        } else {
            System.out.println("Verified Row: " + rowKey + " present in ks: " + ks.getKeyspaceName());
        }
    }

    /** Fails (throws) if the row exists in the given keyspace. */
    private void verifyNotPresent(Keyspace ks, int rowKey) throws ConnectionException {
        ColumnList<Long> result = ks.prepareQuery(CF_DUAL_WRITES).getRow(rowKey).execute().getResult();
        if (!result.isEmpty()) {
            // typo fix: "keysapce" -> "keyspace"
            throw new RuntimeException("Row: " + rowKey + " present in keyspace: " + ks.getKeyspaceName());
        } else {
            System.out.println("Verified Row: " + rowKey + " NOT present in ks: " + ks.getKeyspaceName());
        }
    }

    private void verifyNotPresent(Keyspace ks, int ... rowKeys) throws ConnectionException {
        for (int rowKey : rowKeys) {
            verifyNotPresent(ks, rowKey);
        }
    }

    public static void main(String[] args) {
        try {
            new DualWritesDemo().run();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
| 7,968 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/AsyncFailedWritesLogger.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class that acts as an async logger that helps 'unblock' the caller immediately.
* It hands off to a {@link LinkedBlockingQueue} and there is a thread on the other end
* that polls the queue for tasks which are then actually written using a real logger
* provided to this class.
*
* It leverages the decorator pattern to do this.
*
* @author poberai
*
*/
public class AsyncFailedWritesLogger implements FailedWritesLogger {

    private static final Logger Logger = LoggerFactory.getLogger(AsyncFailedWritesLogger.class);

    private static final int DEFAULT_QUEUE_SIZE = 1000;

    /** The real logger that does the actual (possibly slow) write. */
    private final FailedWritesLogger actualWriter;
    private ExecutorService threadPool;
    /** Bounded hand-off queue between callers and the drain thread. */
    private final LinkedBlockingQueue<WriteMetadata> taskQueue;
    private final AtomicBoolean stop = new AtomicBoolean(false);

    public AsyncFailedWritesLogger(FailedWritesLogger writer) {
        this(writer, DEFAULT_QUEUE_SIZE);
    }

    public AsyncFailedWritesLogger(FailedWritesLogger writer, int queueSize) {
        this.actualWriter = writer;
        this.taskQueue = new LinkedBlockingQueue<WriteMetadata>(queueSize);
    }

    @Override
    public void logFailedWrite(WriteMetadata failedWrite) {
        // offer() never blocks the caller; when the queue is full the write is
        // dropped and the drop itself is logged.
        boolean success = taskQueue.offer(failedWrite);
        if (!success) {
            Logger.error("Async failed writes logger is backed up and is dropping failed writes " + failedWrite);
        }
    }

    @Override
    public void init() {
        if (stop.get()) {
            Logger.error("Will not start async logger, already stopped");
            return;
        }
        if (threadPool == null) {
            // A plain single-thread executor is sufficient for the one long-running
            // drain task; the previous newScheduledThreadPool(1) brought scheduling
            // machinery that was never used.
            threadPool = Executors.newSingleThreadExecutor();
        }
        threadPool.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Logger.info("Async failed writes logger starting..");
                while (!stop.get() && !Thread.currentThread().isInterrupted()) {
                    try {
                        WriteMetadata writeMD = taskQueue.take(); // this is a blocking call
                        try {
                            actualWriter.logFailedWrite(writeMD);
                        } catch (Exception e) {
                            Logger.error("Failed to log failed write asynchronously", e);
                        }
                    } catch (InterruptedException e) {
                        // Restore the interrupt status (standard practice when swallowing
                        // InterruptedException), then stop draining and exit the loop.
                        Thread.currentThread().interrupt();
                        stop.set(true);
                    }
                }
                Logger.info("Async failed writes logger exiting..");
                return null;
            }
        });
    }

    @Override
    public void shutdown() {
        stop.set(true);
        if (threadPool != null) {
            threadPool.shutdownNow();
        }
    }
}
| 7,969 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualWritesColumnListMutation.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.UUID;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ColumnPath;
/**
* Class that implements the {@link ColumnListMutation} interface and acts as a dual router for capturing all the dual writes.
* Note that it purely maintains state in 2 separate ColumnListMutation objects, each corresponding to the source of destination keyspace / mutation batches
*
* @author poberai
*
* @param <C>
*/
@SuppressWarnings("deprecation")
public class DualWritesColumnListMutation<C> implements ColumnListMutation<C> {
private final ColumnListMutation<C> primary;
private final ColumnListMutation<C> secondary;
public DualWritesColumnListMutation(ColumnListMutation<C> primaryClm, ColumnListMutation<C> secondaryClm) {
    // Remember both underlying mutations; every call below fans out to the pair.
    this.primary = primaryClm;
    this.secondary = secondaryClm;
}
/**
 * Applies the column write (serialized with {@code valueSerializer}) to both
 * the primary and the secondary mutation, then returns this for chaining.
 */
@Override
public <V> ColumnListMutation<C> putColumn(C columnName, V value, Serializer<V> valueSerializer, Integer ttl) {
    primary.putColumn(columnName, value, valueSerializer, ttl);
    secondary.putColumn(columnName, value, valueSerializer, ttl);
    return this;
}

/** Delegates putColumnIfNotNull to both the primary and the secondary mutation. */
@Override
public <V> ColumnListMutation<C> putColumnIfNotNull(C columnName, V value, Serializer<V> valueSerializer, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, valueSerializer, ttl);
    secondary.putColumnIfNotNull(columnName, value, valueSerializer, ttl);
    return this;
}
@Override
public <SC> ColumnListMutation<SC> withSuperColumn(ColumnPath<SC> superColumnPath) {
    // Super columns are not supported by the dual-writes wrapper.
    // UnsupportedOperationException (a RuntimeException subtype) is the idiomatic
    // signal for an intentionally unimplemented operation; existing callers
    // catching RuntimeException continue to work.
    throw new UnsupportedOperationException("Not Implemented");
}
// ---- String values: every mutation is applied to the primary first and then
// ---- the secondary so the two batches stay in lock-step. ----

@Override
public ColumnListMutation<C> putColumn(C columnName, String value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, String value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, String value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, String value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putCompressedColumn(C columnName, String value, Integer ttl) {
    primary.putCompressedColumn(columnName, value, ttl);
    secondary.putCompressedColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putCompressedColumn(C columnName, String value) {
    primary.putCompressedColumn(columnName, value);
    secondary.putCompressedColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putCompressedColumnIfNotNull(C columnName, String value, Integer ttl) {
    primary.putCompressedColumnIfNotNull(columnName, value, ttl);
    secondary.putCompressedColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putCompressedColumnIfNotNull(C columnName, String value) {
    primary.putCompressedColumnIfNotNull(columnName, value);
    secondary.putCompressedColumnIfNotNull(columnName, value);
    return this;
}
// ---- byte[] and byte values: same dual fan-out pattern (primary, then secondary). ----

@Override
public ColumnListMutation<C> putColumn(C columnName, byte[] value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, byte[] value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, byte[] value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, byte[] value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, byte value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, byte value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Byte value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Byte value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}
// ---- short and int values: same dual fan-out pattern (primary, then secondary). ----

@Override
public ColumnListMutation<C> putColumn(C columnName, short value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, short value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Short value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Short value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, int value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, int value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Integer value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Integer value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}
// ---- long and boolean values: same dual fan-out pattern (primary, then secondary). ----

@Override
public ColumnListMutation<C> putColumn(C columnName, long value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, long value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Long value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Long value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, boolean value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumn(C columnName, boolean value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Boolean value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}

@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Boolean value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}
@Override
public ColumnListMutation<C> putColumn(C columnName, ByteBuffer value, Integer ttl) {
primary.putColumn(columnName, value, ttl);
secondary.putColumn(columnName, value, ttl);
return this;
}
@Override
public ColumnListMutation<C> putColumn(C columnName, ByteBuffer value) {
primary.putColumn(columnName, value);
secondary.putColumn(columnName, value);
return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, ByteBuffer value, Integer ttl) {
primary.putColumnIfNotNull(columnName, value, ttl);
secondary.putColumnIfNotNull(columnName, value, ttl);
return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, ByteBuffer value) {
primary.putColumnIfNotNull(columnName, value);
secondary.putColumnIfNotNull(columnName, value);
return this;
}
// Dual-write fan-out continued: Date, float/Float and double/Double variants.
// Each call is mirrored to both underlying mutations; 'this' is returned for
// chaining.

// Date values
@Override
public ColumnListMutation<C> putColumn(C columnName, Date value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumn(C columnName, Date value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Date value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Date value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}

// float / Float values
@Override
public ColumnListMutation<C> putColumn(C columnName, float value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumn(C columnName, float value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Float value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Float value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}

// double / Double values
@Override
public ColumnListMutation<C> putColumn(C columnName, double value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumn(C columnName, double value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Double value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, Double value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}
// Dual-write fan-out continued: UUID values and valueless ("empty") columns.

// UUID values
@Override
public ColumnListMutation<C> putColumn(C columnName, UUID value, Integer ttl) {
    primary.putColumn(columnName, value, ttl);
    secondary.putColumn(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumn(C columnName, UUID value) {
    primary.putColumn(columnName, value);
    secondary.putColumn(columnName, value);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, UUID value, Integer ttl) {
    primary.putColumnIfNotNull(columnName, value, ttl);
    secondary.putColumnIfNotNull(columnName, value, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putColumnIfNotNull(C columnName, UUID value) {
    primary.putColumnIfNotNull(columnName, value);
    secondary.putColumnIfNotNull(columnName, value);
    return this;
}

// Columns that carry no value (marker columns)
@Override
public ColumnListMutation<C> putEmptyColumn(C columnName, Integer ttl) {
    primary.putEmptyColumn(columnName, ttl);
    secondary.putEmptyColumn(columnName, ttl);
    return this;
}
@Override
public ColumnListMutation<C> putEmptyColumn(C columnName) {
    primary.putEmptyColumn(columnName);
    secondary.putEmptyColumn(columnName);
    return this;
}
// Dual-write fan-out continued: counters, deletes and row-level settings.
// As above, every operation is mirrored to both underlying mutations.

@Override
public ColumnListMutation<C> incrementCounterColumn(C columnName, long amount) {
    primary.incrementCounterColumn(columnName, amount);
    secondary.incrementCounterColumn(columnName, amount);
    return this;
}
@Override
public ColumnListMutation<C> deleteColumn(C columnName) {
    primary.deleteColumn(columnName);
    secondary.deleteColumn(columnName);
    return this;
}
// Applies the same explicit timestamp to both sides so the two writes agree.
@Override
public ColumnListMutation<C> setTimestamp(long timestamp) {
    primary.setTimestamp(timestamp);
    secondary.setTimestamp(timestamp);
    return this;
}
// Deletes the entire row on both sides.
@Override
public ColumnListMutation<C> delete() {
    primary.delete();
    secondary.delete();
    return this;
}
@Override
public ColumnListMutation<C> setDefaultTtl(Integer ttl) {
    primary.setDefaultTtl(ttl);
    secondary.setDefaultTtl(ttl);
    return this;
}
}
| 7,970 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/WriteMetadata.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
/**
* Simple class encapsulating metadata about a failed write.
* It represents the source and destination cluster / keyspace along with the CF and the row key.
*
* @author poberai
*
*/
public class WriteMetadata {
private final DualKeyspaceMetadata dualKeyspaceMetadata;
private final String cfName;
private final String rowKey;
private final Long uuid;
public WriteMetadata(DualKeyspaceMetadata keyspaceMetadata,
String cfName, String rowKey) {
this.dualKeyspaceMetadata = keyspaceMetadata;
this.rowKey = rowKey;
this.cfName = cfName;
this.uuid = System.currentTimeMillis();
}
public String getPrimaryCluster() {
return dualKeyspaceMetadata.getPrimaryCluster();
}
public String getSecondaryCluster() {
return dualKeyspaceMetadata.getSecondaryCluster();
}
public String getPrimaryKeyspace() {
return dualKeyspaceMetadata.getPrimaryKeyspaceName();
}
public String getSecondaryKeyspace() {
return dualKeyspaceMetadata.getSecondaryKeyspaceName();
}
public String getCFName() {
return cfName;
}
public String getRowKey() {
return rowKey;
}
public Long getUuid() {
return uuid;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + dualKeyspaceMetadata.hashCode();
result = prime * result + ((cfName == null) ? 0 : cfName.hashCode());
result = prime * result + ((rowKey == null) ? 0 : rowKey.hashCode());
result = prime * result + ((uuid == null) ? 0 : uuid.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
WriteMetadata other = (WriteMetadata) obj;
boolean equals = true;
equals &= dualKeyspaceMetadata != null ? dualKeyspaceMetadata.equals(other.dualKeyspaceMetadata) : other.dualKeyspaceMetadata == null;
equals &= cfName != null ? cfName.equals(other.cfName) : other.cfName == null;
equals &= rowKey != null ? rowKey.equals(other.rowKey) : other.rowKey == null;
equals &= uuid != null ? uuid.equals(other.uuid) : other.uuid == null;
return equals;
}
@Override
public String toString() {
return "FailedWriteMetadata [" + dualKeyspaceMetadata +
", cfName=" + cfName + ", rowKey=" + rowKey + ", uuid=" + uuid + "]";
}
} | 7,971 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/DualKeyspaceMetadata.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
/**
* Simple class representing the keyspace pair i.e Source Keyspace --> Destination Keyspace
*
* @author poberai
*
*/
public class DualKeyspaceMetadata {
private final String primaryCluster;
private final String primaryKeyspaceName;
private final String secondaryCluster;
private final String secondaryKeyspaceName;
public DualKeyspaceMetadata(String primaryCluster, String primaryKeyspaceName, String secondaryCluster, String secondaryKeyspaceName) {
if (primaryCluster == null || primaryKeyspaceName == null) {
throw new RuntimeException("primaryCluster and primaryKeyspaceName cannot be NULL");
}
if (secondaryCluster == null || secondaryKeyspaceName == null) {
throw new RuntimeException("secondaryCluster and secondaryKeyspaceName cannot be NULL");
}
this.primaryCluster = primaryCluster;
this.primaryKeyspaceName = primaryKeyspaceName;
this.secondaryCluster = secondaryCluster;
this.secondaryKeyspaceName = secondaryKeyspaceName;
}
public String getPrimaryCluster() {
return primaryCluster;
}
public String getPrimaryKeyspaceName() {
return primaryKeyspaceName;
}
public String getSecondaryCluster() {
return secondaryCluster;
}
public String getSecondaryKeyspaceName() {
return secondaryKeyspaceName;
}
public boolean isReverse(DualKeyspaceMetadata newDualKeyspaceSetup) {
return (!this.equals(newDualKeyspaceSetup) &&
this.primaryCluster.equals(newDualKeyspaceSetup.getSecondaryCluster()) &&
this.secondaryCluster.equals(newDualKeyspaceSetup.getPrimaryCluster()));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((primaryCluster == null) ? 0 : primaryCluster.hashCode());
result = prime * result + ((primaryKeyspaceName == null) ? 0 : primaryKeyspaceName.hashCode());
result = prime * result + ((secondaryCluster == null) ? 0 : secondaryCluster.hashCode());
result = prime * result + ((secondaryKeyspaceName == null) ? 0 : secondaryKeyspaceName.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
DualKeyspaceMetadata other = (DualKeyspaceMetadata) obj;
boolean equals = true;
equals &= primaryCluster != null ? primaryCluster.equals(other.primaryCluster) : other.primaryCluster == null;
equals &= primaryKeyspaceName != null ? primaryKeyspaceName.equals(other.primaryKeyspaceName) : other.primaryKeyspaceName == null;
equals &= secondaryCluster != null ? secondaryCluster.equals(other.secondaryCluster) : other.secondaryCluster == null;
equals &= secondaryKeyspaceName != null ? secondaryKeyspaceName.equals(other.secondaryKeyspaceName) : other.secondaryKeyspaceName == null;
return equals;
}
@Override
public String toString() {
return "DualKeyspaceMetadata [primaryCluster=" + primaryCluster + ", primaryKeyspaceName=" + primaryKeyspaceName
+ ", secondaryCluster=" + secondaryCluster + ", secondaryKeyspaceName=" + secondaryKeyspaceName + "]";
}
}
| 7,972 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/LogBasedFailedWritesLogger.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Simple impl for {@link FailedWritesLogger} that just logs to the file.
*
* @author poberai
*
*/
/**
 * Simple impl for {@link FailedWritesLogger} that just writes each failed
 * write to the application log.  Logging stops once {@link #shutdown()} has
 * been called and resumes after {@link #init()}.
 *
 * @author poberai
 */
public class LogBasedFailedWritesLogger implements FailedWritesLogger {

    // Renamed from "Logger" which shadowed the org.slf4j.Logger type name and
    // violated the UPPER_SNAKE_CASE convention for static final constants.
    private static final Logger LOGGER = LoggerFactory.getLogger(LogBasedFailedWritesLogger.class);

    // Gate flag: true after shutdown(), false after init().
    private final AtomicBoolean stop = new AtomicBoolean(false);

    @Override
    public void init() {
        LOGGER.info("-------LOGGER INIT------");
        stop.set(false);
    }

    @Override
    public void logFailedWrite(WriteMetadata failedWrite) {
        if (!stop.get()) {
            // Parameterized logging: no eager string concatenation, and a null
            // failedWrite is logged as "null" instead of throwing an NPE.
            LOGGER.info("FAILED WRITE: {}", failedWrite);
        }
    }

    @Override
    public void shutdown() {
        stop.set(true);
        LOGGER.info("-------LOGGER SHUTDOWN------");
    }
}
| 7,973 |
0 | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib | Create_ds/astyanax/astyanax-contrib/src/main/java/com/netflix/astyanax/contrib/dualwrites/FailedWritesLogger.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.contrib.dualwrites;
/**
* Interface for dealing with failed writes.
*
* @author poberai
*
*/
/**
 * Contract for components that record metadata about writes which failed
 * during dual-write processing.
 *
 * @author poberai
 */
public interface FailedWritesLogger {

    /**
     * Initialize any resources needed before failed writes can be recorded.
     */
    void init();

    /**
     * Record the metadata of a single failed write.
     *
     * @param failedWrite metadata describing the write that failed
     */
    void logFailedWrite(WriteMetadata failedWrite);

    /**
     * Release any resources held by this logger.
     */
    void shutdown();
}
| 7,974 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractOperationImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.Cassandra;
import com.netflix.astyanax.CassandraOperationTracer;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.Operation;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
/**
 * Base class for thrift operations.  Wraps the subclass-provided
 * {@link #internalExecute} with operation tracing (start/success/failure)
 * and translates any thrown exception into a {@link ConnectionException}.
 */
public abstract class AbstractOperationImpl<R> implements Operation<Cassandra.Client, R> {

    private final CassandraOperationTracer tracer;
    private Host pinnedHost;

    public AbstractOperationImpl(CassandraOperationTracer tracer, Host host) {
        this.tracer = tracer;
        this.pinnedHost = host;
    }

    public AbstractOperationImpl(CassandraOperationTracer tracer) {
        this(tracer, null);
    }

    public void setPinnedHost(Host host) {
        this.pinnedHost = host;
    }

    @Override
    public Host getPinnedHost() {
        return pinnedHost;
    }

    /** No specific row key for a generic operation. */
    @Override
    public ByteBuffer getRowKey() {
        return null;
    }

    /** No specific keyspace for a generic operation. */
    @Override
    public String getKeyspace() {
        return null;
    }

    @Override
    public R execute(Cassandra.Client client, ConnectionContext state) throws ConnectionException {
        try {
            tracer.start();
            final R result = internalExecute(client, state);
            tracer.success();
            return result;
        }
        catch (Exception e) {
            // Translate into the connection pool's exception hierarchy and
            // record the failure before rethrowing.
            final ConnectionException translated = ThriftConverter.ToConnectionPoolException(e);
            tracer.failure(translated);
            throw translated;
        }
    }

    /** Subclasses implement the actual thrift call here. */
    protected abstract R internalExecute(Cassandra.Client client, ConnectionContext state) throws Exception;
}
| 7,975 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractThriftMutationBatchImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.io.ByteArrayInputStream;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.cassandra.thrift.Cassandra.batch_mutate_args;
import org.apache.cassandra.thrift.Mutation;
import org.apache.commons.codec.binary.Hex;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TIOStreamTransport;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Maps.EntryTransformer;
import com.netflix.astyanax.Clock;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.WriteAheadLog;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.ByteBufferOutputStream;
/**
 * Basic implementation of a mutation batch using the thrift data structures.
 * The thrift mutation data structure is,
 *
 * Map of Keys -> Map of ColumnFamily -> MutationList
 *
 * @NotThreadSafe
 * Note that this class is intended to be used by just one thread. It maintains all the state of the mixed mutations in maps which
 * are meant to be used by a single thread only.
 *
 * @author elandau
 */
public abstract class AbstractThriftMutationBatchImpl implements MutationBatch {

    // Sentinel meaning "no timestamp chosen yet"; the clock is consulted
    // lazily when the first row is added (see withRow()).
    private static final long UNSET_TIMESTAMP = -1;

    // Timestamp applied to ColumnListMutations created by this batch.
    protected long timestamp;

    private ConsistencyLevel consistencyLevel;
    private Clock clock;
    private Host pinnedHost;
    private RetryPolicy retry;
    private WriteAheadLog wal;
    private boolean useAtomicBatch = false;

    // Row key -> (column family name -> mutation list); this mirrors the
    // argument structure of thrift's batch_mutate call.
    private Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = Maps.newLinkedHashMap();

    // Cache of ColumnListMutation per (column family, row key) so repeated
    // withRow() calls for the same row return the same mutation object.
    private Map<KeyAndColumnFamily, ColumnListMutation<?>> rowLookup = Maps.newHashMap();

    // Composite hash key pairing a column family name with a serialized row key.
    private static class KeyAndColumnFamily {
        private final String columnFamily;
        private final ByteBuffer key;

        public KeyAndColumnFamily(String columnFamily, ByteBuffer key) {
            this.columnFamily = columnFamily;
            this.key = key;
        }

        // NOTE(review): not an @Override of Comparable (the class does not
        // implement it) and returns -1 for any non-KeyAndColumnFamily argument;
        // appears unused by the hash-map lookups in this class.
        public int compareTo(Object obj) {
            if (obj instanceof KeyAndColumnFamily) {
                KeyAndColumnFamily other = (KeyAndColumnFamily)obj;
                int result = columnFamily.compareTo(other.columnFamily);
                if (result == 0) {
                    result = key.compareTo(other.key);
                }
                return result;
            }
            return -1;
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime * result + ((columnFamily == null) ? 0 : columnFamily.hashCode());
            result = prime * result + ((key == null) ? 0 : key.hashCode());
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj)
                return true;
            if (obj == null)
                return false;
            if (getClass() != obj.getClass())
                return false;
            KeyAndColumnFamily other = (KeyAndColumnFamily) obj;
            if (columnFamily == null) {
                if (other.columnFamily != null)
                    return false;
            } else if (!columnFamily.equals(other.columnFamily))
                return false;
            if (key == null) {
                if (other.key != null)
                    return false;
            } else if (!key.equals(other.key))
                return false;
            return true;
        }
    }

    public AbstractThriftMutationBatchImpl(Clock clock, ConsistencyLevel consistencyLevel, RetryPolicy retry) {
        this.clock            = clock;
        this.timestamp        = UNSET_TIMESTAMP;
        this.consistencyLevel = consistencyLevel;
        this.retry            = retry;
    }

    /**
     * Returns the (cached) ColumnListMutation for the given row, creating the
     * backing thrift mutation list on first access.  The batch timestamp is
     * captured from the clock when the first row is added.
     */
    @Override
    public <K, C> ColumnListMutation<C> withRow(ColumnFamily<K, C> columnFamily, K rowKey) {
        Preconditions.checkNotNull(columnFamily, "columnFamily cannot be null");
        Preconditions.checkNotNull(rowKey, "Row key cannot be null");

        // Upon adding the first row into the mutation get the latest time from the clock
        if (timestamp == UNSET_TIMESTAMP)
            timestamp = clock.getCurrentTime();

        ByteBuffer bbKey = columnFamily.getKeySerializer().toByteBuffer(rowKey);
        if (!bbKey.hasRemaining()) {
            throw new RuntimeException("Row key cannot be empty");
        }

        KeyAndColumnFamily kacf = new KeyAndColumnFamily(columnFamily.getName(), bbKey);
        // Unchecked cast is safe by construction: entries are only stored below
        // with the matching column serializer type.
        ColumnListMutation<C> clm = (ColumnListMutation<C>) rowLookup.get(kacf);
        if (clm == null) {
            Map<String, List<Mutation>> innerMutationMap = mutationMap.get(bbKey);
            if (innerMutationMap == null) {
                innerMutationMap = Maps.newHashMap();
                mutationMap.put(bbKey, innerMutationMap);
            }

            List<Mutation> innerMutationList = innerMutationMap.get(columnFamily.getName());
            if (innerMutationList == null) {
                innerMutationList = Lists.newArrayList();
                innerMutationMap.put(columnFamily.getName(), innerMutationList);
            }
            clm = new ThriftColumnFamilyMutationImpl<C>(timestamp, innerMutationList, columnFamily.getColumnSerializer());
            rowLookup.put(kacf, clm);
        }
        return clm;
    }

    // Clears all pending mutations and resets the lazily-captured timestamp.
    @Override
    public void discardMutations() {
        this.timestamp = UNSET_TIMESTAMP;
        this.mutationMap.clear();
        this.rowLookup.clear();
    }

    @Override
    public <K> void deleteRow(Iterable<? extends ColumnFamily<K, ?>> columnFamilies, K rowKey) {
        for (ColumnFamily<K, ?> cf : columnFamilies) {
            withRow(cf, rowKey).delete();
        }
    }

    /**
     * Checks whether the mutation object contains rows. While the map may
     * contain row keys the row keys may not contain any mutations.
     *
     * @return
     */
    @Override
    public boolean isEmpty() {
        return mutationMap.isEmpty();
    }

    /**
     * Generate a string representation of the mutation with the following
     * syntax Key1: cf1: Mutation count cf2: Mutation count Key2: cf1: Mutation
     * count cf2: Mutation count
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("ThriftMutationBatch[");
        boolean first = true;
        for (Entry<ByteBuffer, Map<String, List<Mutation>>> row : mutationMap.entrySet()) {
            if (!first)
                sb.append(",");
            // NOTE(review): getKey().array() requires an array-backed buffer and
            // ignores the buffer's offset/limit - acceptable for debug output.
            sb.append(Hex.encodeHex(row.getKey().array())).append("(");
            boolean first2 = true;
            for (Entry<String, List<Mutation>> cf : row.getValue().entrySet()) {
                if (!first2)
                    sb.append(",");
                sb.append(cf.getKey()).append(":").append(cf.getValue().size());
                first2 = false;
            }
            first = false;
            sb.append(")");
        }
        sb.append("]");
        return sb.toString();
    }

    /**
     * Serializes the pending mutation map to thrift binary form (the wire
     * layout of batch_mutate_args).
     *
     * @throws Exception if the batch is empty or thrift serialization fails
     */
    @Override
    public ByteBuffer serialize() throws Exception {
        if (mutationMap.isEmpty()) {
            throw new Exception("Mutation is empty");
        }

        ByteBufferOutputStream out = new ByteBufferOutputStream();
        TIOStreamTransport transport = new TIOStreamTransport(out);
        batch_mutate_args args = new batch_mutate_args();
        args.setMutation_map(mutationMap);

        try {
            args.write(new TBinaryProtocol(transport));
        }
        catch (TException e) {
            throw ThriftConverter.ToConnectionPoolException(e);
        }

        return out.getByteBuffer();
    }

    /**
     * Replaces the pending mutation map with one deserialized from thrift
     * binary form.  NOTE(review): data.array() assumes an array-backed buffer
     * and reads from the start of the backing array, ignoring the buffer's
     * position/offset.
     */
    @Override
    public void deserialize(ByteBuffer data) throws Exception {
        ByteArrayInputStream in = new ByteArrayInputStream(data.array());
        TIOStreamTransport transport = new TIOStreamTransport(in);
        batch_mutate_args args = new batch_mutate_args();

        try {
            TBinaryProtocol bp = new TBinaryProtocol(transport);
            //bp.setReadLength(data.remaining());
            args.read(bp);
            mutationMap = args.getMutation_map();
        }
        catch (TException e) {
            throw ThriftConverter.ToConnectionPoolException(e);
        }
    }

    // Returns a transformed view mapping each row key to the set of column
    // family names it has pending mutations for.
    @Override
    public Map<ByteBuffer, Set<String>> getRowKeys() {
        return Maps.transformEntries(mutationMap,
                new EntryTransformer<ByteBuffer, Map<String, List<Mutation>>, Set<String>>() {
                    @Override
                    public Set<String> transformEntry(ByteBuffer key, Map<String, List<Mutation>> value) {
                        return value.keySet();
                    }
                });
    }

    public Map<ByteBuffer, Map<String, List<Mutation>>> getMutationMap() {
        return mutationMap;
    }

    /**
     * Merges another batch's mutations into this one.  Shallow: when a row or
     * column family is absent here, the other batch's inner map/list objects
     * are referenced directly rather than copied.
     */
    public void mergeShallow(MutationBatch other) {
        if (!(other instanceof AbstractThriftMutationBatchImpl)) {
            throw new UnsupportedOperationException();
        }

        for (Map.Entry<ByteBuffer, Map<String, List<Mutation>>> otherRow : ((AbstractThriftMutationBatchImpl) other).mutationMap
                .entrySet()) {
            Map<String, List<Mutation>> thisRow = mutationMap.get(otherRow.getKey());
            // Key not in the map
            if (thisRow == null) {
                mutationMap.put(otherRow.getKey(), otherRow.getValue());
            }
            else {
                for (Map.Entry<String, List<Mutation>> otherCf : otherRow.getValue().entrySet()) {
                    List<Mutation> thisCf = thisRow.get(otherCf.getKey());
                    // Column family not in the map
                    if (thisCf == null) {
                        thisRow.put(otherCf.getKey(), otherCf.getValue());
                    }
                    else {
                        thisCf.addAll(otherCf.getValue());
                    }
                }
            }
        }
    }

    @Override
    public int getRowCount() {
        return mutationMap.size();
    }

    // NOTE(review): no-op - the timeout argument is discarded.
    @Override
    public MutationBatch setTimeout(long timeout) {
        return this;
    }

    @Override
    public MutationBatch setTimestamp(long timestamp) {
        return withTimestamp(timestamp);
    }

    @Override
    public MutationBatch withTimestamp(long timestamp) {
        this.timestamp = timestamp;
        return this;
    }

    // Pins the batch timestamp to "now" so it is not re-read from the clock.
    @Override
    public MutationBatch lockCurrentTimestamp() {
        this.timestamp = clock.getCurrentTime();
        return this;
    }

    @Override
    public MutationBatch setConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    @Override
    public MutationBatch withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    public ConsistencyLevel getConsistencyLevel() {
        return this.consistencyLevel;
    }

    @Override
    public MutationBatch pinToHost(Host host) {
        this.pinnedHost = host;
        return this;
    }

    @Override
    public MutationBatch withRetryPolicy(RetryPolicy retry) {
        this.retry = retry;
        return this;
    }

    @Override
    public MutationBatch usingWriteAheadLog(WriteAheadLog manager) {
        this.wal = manager;
        return this;
    }

    @Override
    public MutationBatch withAtomicBatch(boolean condition) {
        useAtomicBatch = condition;
        return this;
    }

    public boolean useAtomicBatch() {
        return useAtomicBatch;
    }

    public Host getPinnedHost() {
        return this.pinnedHost;
    }

    public RetryPolicy getRetryPolicy() {
        return this.retry;
    }

    public WriteAheadLog getWriteAheadLog() {
        return this.wal;
    }
}
| 7,976 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCql3Factory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.query.CqlQuery;
/**
 * {@link ThriftCqlFactory} implementation that produces CQL3-flavored
 * statement and query objects for the thrift transport.
 */
public class ThriftCql3Factory implements ThriftCqlFactory {

    /** Create a CQL3 statement bound to the given keyspace. */
    @Override
    public CqlStatement createCqlStatement(ThriftKeyspaceImpl keyspace) {
        return new ThriftCql3Statement(keyspace);
    }

    /** Wrap the given CQL string in a CQL3 query over the supplied column family query. */
    @Override
    public <K, C> CqlQuery<K, C> createCqlQuery(ThriftColumnFamilyQueryImpl<K, C> cfQuery, String cql) {
        return new ThriftCql3Query<>(cfQuery, cql);
    }
}
| 7,977 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftAllRowsImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.KeySlice;
import org.apache.commons.lang.NotImplementedException;
import com.google.common.collect.Iterables;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.thrift.model.ThriftColumnOrSuperColumnListImpl;
import com.netflix.astyanax.thrift.model.ThriftRowImpl;
public class ThriftAllRowsImpl<K, C> implements Rows<K, C> {
private ColumnFamily<K, C> columnFamily;
private ThriftAllRowsQueryImpl<K, C> query;
private final Partitioner partitioner;
public ThriftAllRowsImpl(Partitioner partitioner, ThriftAllRowsQueryImpl<K, C> query, ColumnFamily<K, C> columnFamily) {
this.columnFamily = columnFamily;
this.query = query;
this.partitioner = partitioner;
}
/**
 * Each call to .iterator() returns a new context starting at the beginning
 * of the column family.
 *
 * Rows are fetched from Cassandra in blocks of query.getBlockSize() and paged
 * by token.  Empty (tombstoned) rows are skipped unless the query explicitly
 * asked for them.  The returned iterator is read-only.
 */
@Override
public Iterator<Row<K, C>> iterator() {
    return new Iterator<Row<K, C>>() {
        private KeyRange range;
        private org.apache.cassandra.thrift.KeySlice lastRow;
        private List<org.apache.cassandra.thrift.KeySlice> list = null;
        private Iterator<org.apache.cassandra.thrift.KeySlice> iter = null;
        // True while the last fetched block was full, i.e. more blocks may exist.
        private boolean bContinueSearch = true;
        // True when rows with no columns (tombstones) should be filtered out.
        private boolean bIgnoreTombstones = true;

        {
            // Default to the partitioner's full token range when no explicit
            // start/end token was provided on the query.
            String startToken = query.getStartToken() == null ? partitioner.getMinToken() : query.getStartToken();
            String endToken = query.getEndToken() == null ? partitioner.getMaxToken() : query.getEndToken();

            range = new KeyRange()
                    .setCount(query.getBlockSize())
                    .setStart_token(startToken)
                    .setEnd_token(endToken);

            // If the caller did not say whether to include empty rows, include
            // them only for key-only queries (slice range with count == 0),
            // since those rows legitimately carry no columns.
            if (query.getIncludeEmptyRows() == null) {
                if (query.getPredicate().isSetSlice_range() && query.getPredicate().getSlice_range().getCount() == 0) {
                    bIgnoreTombstones = false;
                }
            }
            else {
                bIgnoreTombstones = !query.getIncludeEmptyRows();
            }
        }

        @Override
        public boolean hasNext() {
            // Keep fetching blocks until one yields a visible row or the
            // token range is exhausted.
            while (iter == null || (!iter.hasNext() && bContinueSearch)) {
                if (lastRow != null) {
                    // Determine the start token for the next page
                    String token = partitioner.getTokenForKey(ByteBuffer.wrap(lastRow.getKey()));
                    if (query.getRepeatLastToken()) {
                        // Start token is non-inclusive
                        range.setStart_token(partitioner.getTokenMinusOne(token));
                    }
                    else {
                        range.setStart_token(token);
                    }
                }

                // Get the next block of rows from cassandra, exit if none returned
                list = query.getNextBlock(range);
                if (list == null || list.isEmpty()) {
                    return false;
                }

                // Since we may trim tombstones set a flag indicating whether a complete
                // block was returned so we can know to try to fetch the next one
                bContinueSearch = (list.size() == query.getBlockSize());

                // Trim the list from tombstoned rows, i.e. rows with no columns
                iter = list.iterator();
                if (!iter.hasNext()) {
                    return false;
                }

                // Remember the last row of this block (before any trimming) so
                // the next page's start token can be computed from it.
                KeySlice previousLastRow = lastRow;
                lastRow = Iterables.getLast(list);

                // When repeating the last token, the first row of this block is
                // a duplicate of the previous block's last row; drop it.
                if (query.getRepeatLastToken() && previousLastRow != null) {
                    iter.next();
                    iter.remove();
                }

                if (iter.hasNext() && bIgnoreTombstones) {
                    // Discard any tombstones
                    while (iter.hasNext()) {
                        KeySlice row = iter.next();
                        if (row.getColumns().isEmpty()) {
                            iter.remove();
                        }
                    }
                    // Get the iterator again
                    iter = list.iterator();
                }
            }
            return iter.hasNext();
        }

        @Override
        public Row<K, C> next() {
            org.apache.cassandra.thrift.KeySlice row = iter.next();
            return new ThriftRowImpl<K, C>(columnFamily.getKeySerializer().fromBytes(row.getKey()),
                    ByteBuffer.wrap(row.getKey()), new ThriftColumnOrSuperColumnListImpl<C>(row.getColumns(),
                    columnFamily.getColumnSerializer()));
        }

        @Override
        public void remove() {
            // This iterator is read-only.  The Iterator contract specifies
            // UnsupportedOperationException for an unsupported remove();
            // the previous IllegalStateException signalled the wrong condition.
            throw new UnsupportedOperationException("remove() is not supported by this iterator");
        }
    };
}
/** Not supported: all-rows results are streaming; use iterator() instead. */
@Override
public Row<K, C> getRow(K key) {
    throw new NotImplementedException("Only iterator based access is implemented");
}

/** Not supported: the total row count is unknown without a full scan. */
@Override
public int size() {
    throw new NotImplementedException("Only iterator based access is implemented");
}

/** Not supported: emptiness cannot be determined without fetching a block. */
@Override
public boolean isEmpty() {
    throw new NotImplementedException("Only iterator based access is implemented");
}

/** Not supported: rows are not materialized, so index access is impossible. */
@Override
public Row<K, C> getRowByIndex(int i) {
    throw new NotImplementedException("Only iterator based access is implemented");
}

/** Not supported: keys are only available by iterating the rows. */
@Override
public Collection<K> getKeys() {
    throw new NotImplementedException("Only iterator based access is implemented");
}
}
| 7,978 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCql3Query.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.cassandra.thrift.Compression;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.SchemaDisagreementException;
import org.apache.cassandra.thrift.TimedOutException;
import org.apache.cassandra.thrift.UnavailableException;
import org.apache.thrift.TException;
import com.netflix.astyanax.serializers.StringSerializer;
/**
 * CQL3 flavor of the thrift CQL query.  Routes each operation to the cql3
 * variants of the thrift client API (prepare_cql3_query,
 * execute_prepared_cql3_query, execute_cql3_query).
 */
public class ThriftCql3Query<K,C> extends AbstractThriftCqlQuery<K, C> {
    ThriftCql3Query(ThriftColumnFamilyQueryImpl<K, C> cfQuery, String cql) {
        super(cfQuery, cql);
    }

    /** Prepare the statement on the server; the CQL text itself is never compressed here. */
    @Override
    protected org.apache.cassandra.thrift.CqlPreparedResult prepare_cql_query(Client client) throws InvalidRequestException, TException {
        ByteBuffer statement = StringSerializer.get().toByteBuffer(cql);
        return client.prepare_cql3_query(statement, Compression.NONE);
    }

    /** Execute a previously prepared statement with the given bind values. */
    @Override
    protected org.apache.cassandra.thrift.CqlResult execute_prepared_cql_query(Client client, int id, List<ByteBuffer> values) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException {
        org.apache.cassandra.thrift.ConsistencyLevel thriftCl = ThriftConverter.ToThriftConsistencyLevel(cl);
        return client.execute_prepared_cql3_query(id, values, thriftCl);
    }

    /** Execute the raw CQL text, optionally GZIP compressing the payload. */
    @Override
    protected org.apache.cassandra.thrift.CqlResult execute_cql_query(Client client) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException {
        ByteBuffer statement = StringSerializer.get().toByteBuffer(cql);
        Compression compression;
        if (useCompression) {
            compression = Compression.GZIP;
        }
        else {
            compression = Compression.NONE;
        }
        return client.execute_cql3_query(statement, compression, ThriftConverter.ToThriftConsistencyLevel(cl));
    }
}
| 7,979 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftFamilyFactory.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import org.apache.cassandra.thrift.Cassandra;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.Cluster;
import com.netflix.astyanax.AstyanaxTypeFactory;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.connectionpool.ConnectionFactory;
import com.netflix.astyanax.connectionpool.ConnectionPool;
import com.netflix.astyanax.connectionpool.ConnectionPoolConfiguration;
import com.netflix.astyanax.connectionpool.ConnectionPoolMonitor;
/**
 * Stateless singleton factory producing the thrift-flavored Keyspace, Cluster
 * and ConnectionFactory implementations used by AstyanaxContext.
 */
public class ThriftFamilyFactory implements AstyanaxTypeFactory<Cassandra.Client> {
    private final static ThriftFamilyFactory instance = new ThriftFamilyFactory();

    /** @return the shared factory instance (the class carries no state). */
    public static ThriftFamilyFactory getInstance() {
        return instance;
    }

    @Override
    public Keyspace createKeyspace(String ksName, ConnectionPool<Cassandra.Client> cp, AstyanaxConfiguration asConfig,
            KeyspaceTracerFactory tracerFactory) {
        return new ThriftKeyspaceImpl(ksName, cp, asConfig, tracerFactory);
    }

    @Override
    public Cluster createCluster(ConnectionPool<Cassandra.Client> cp, AstyanaxConfiguration asConfig,
            KeyspaceTracerFactory tracerFactory) {
        // cp is already typed ConnectionPool<Cassandra.Client>; the previous
        // cast to the same type was redundant.
        return new ThriftClusterImpl(asConfig, cp, tracerFactory);
    }

    @Override
    public ConnectionFactory<Cassandra.Client> createConnectionFactory(AstyanaxConfiguration asConfig, ConnectionPoolConfiguration cfConfig,
            KeyspaceTracerFactory tracerFactory, ConnectionPoolMonitor monitor) {
        // ThriftSyncConnectionFactoryImpl implements
        // ConnectionFactory<Cassandra.Client>; no cast is needed.
        return new ThriftSyncConnectionFactoryImpl(asConfig, cfConfig, tracerFactory, monitor);
    }
}
| 7,980 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCql3Statement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.Compression;
import org.apache.cassandra.thrift.Cassandra.Client;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlPreparedStatement;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.StringSerializer;
/**
 * CqlStatement implementation that executes CQL3 statements over the thrift
 * transport.  Synchronous execution only: executeAsync() and
 * asPreparedStatement() are not implemented and throw
 * UnsupportedOperationException.
 */
public class ThriftCql3Statement implements CqlStatement {
    // Owning keyspace; supplies the connection pool, tracer factory and
    // default retry policy.  Assigned once in the constructor.
    private final ThriftKeyspaceImpl keyspace;
    // Per-statement copy of the keyspace's configured retry policy.
    private final RetryPolicy retry;

    // CQL text, UTF-8 encoded; set via withCql().
    private ByteBuffer query;
    private Compression compression = Compression.NONE;
    private ConsistencyLevel cl = ConsistencyLevel.CL_ONE;

    public ThriftCql3Statement(ThriftKeyspaceImpl keyspace) {
        this.keyspace = keyspace;
        this.retry = keyspace.getConfig().getRetryPolicy().duplicate();
    }

    /** Execute the statement synchronously with failover and the retry policy. */
    @Override
    public OperationResult<CqlStatementResult> execute() throws ConnectionException {
        return keyspace.connectionPool.executeWithFailover(
                new AbstractKeyspaceOperationImpl<CqlStatementResult>(keyspace.tracerFactory.newTracer(
                        CassandraOperationType.CQL, null), null, keyspace.getKeyspaceName()) {
                    @Override
                    public CqlStatementResult internalExecute(Client client, ConnectionContext context) throws Exception {
                        return new ThriftCqlStatementResult(client.execute_cql3_query(query, compression, ThriftConverter.ToThriftConsistencyLevel(cl)));
                    }
                }, retry);
    }

    @Override
    public ListenableFuture<OperationResult<CqlStatementResult>> executeAsync() throws ConnectionException {
        // Async execution has not been implemented for the thrift transport.
        throw new UnsupportedOperationException("Not supported yet");
    }

    @Override
    public CqlStatement withCql(String cql) {
        query = StringSerializer.get().toByteBuffer(cql);
        return this;
    }

    /**
     * Enable/disable GZIP compression of the statement payload.
     * Note: a null flag throws NullPointerException on unboxing, preserving
     * the original behavior.
     */
    public CqlStatement withCompression(Boolean flag) {
        compression = flag ? Compression.GZIP : Compression.NONE;
        return this;
    }

    @Override
    public CqlStatement withConsistencyLevel(ConsistencyLevel cl) {
        this.cl = cl;
        return this;
    }

    @Override
    public CqlPreparedStatement asPreparedStatement() {
        // Prepared statements are not implemented for the thrift transport.
        throw new UnsupportedOperationException("Not supported yet");
    }
}
| 7,981 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftUtils.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Map.Entry;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.ColumnDef;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.StringUtils;
import org.apache.thrift.TEnum;
import org.apache.thrift.TFieldIdEnum;
import org.apache.thrift.meta_data.FieldMetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Serializer;
/**
 * Static helpers for the thrift layer: slice-range construction, payload size
 * constants, and a reflection-based bridge that flattens thrift DDL entities
 * into java.util.Properties and back.
 */
public class ThriftUtils {
    private final static Logger LOG = LoggerFactory.getLogger(ThriftUtils.class);

    public static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new byte[0]);
    // private static final SliceRange RANGE_ALL = new SliceRange(EMPTY_BYTE_BUFFER, EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE);

    // Fixed per-mutation byte overhead used when estimating thrift payload
    // sizes.  NOTE(review): value looks empirical — confirm against framing.
    public static final int MUTATION_OVERHEAD = 20;

    /** @return a new SliceRange selecting every column (unbounded, ascending). */
    public static SliceRange createAllInclusiveSliceRange() {
        return new SliceRange(EMPTY_BYTE_BUFFER, EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE);
    }

    /**
     * Build a SliceRange between two optional column bounds; a null bound is
     * encoded as the empty buffer, which thrift treats as unbounded.
     */
    public static <C> SliceRange createSliceRange(Serializer<C> serializer, C startColumn, C endColumn,
            boolean reversed, int limit) {
        return new SliceRange((startColumn == null) ? EMPTY_BYTE_BUFFER : serializer.toByteBuffer(startColumn),
                (endColumn == null) ? EMPTY_BYTE_BUFFER : serializer.toByteBuffer(endColumn), reversed, limit);
    }

    /** Flatten a thrift entity into Properties via setPropertiesFromThrift. */
    public static <T extends org.apache.thrift.TBase> Properties getPropertiesFromThrift(T entity) throws Exception {
        Properties props = new Properties();
        setPropertiesFromThrift("", props, entity);
        return props;
    }

    /**
     * Quick and dirty implementation that converts thrift DDL to a Properties
     * object by flattening the parameters.  Nested field names are joined with
     * '.' to form property keys; binary values are base64 encoded.
     * @param prefix     key prefix for this nesting level (empty at the root)
     * @param properties output accumulator
     * @param entity     thrift entity whose fields are flattened
     * @throws Exception on reflection failure
     */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    public static void setPropertiesFromThrift(String prefix, Properties properties, org.apache.thrift.TBase entity) throws Exception {
        // Thrift-generated classes expose their field metadata in a
        // 'metaDataMap' member; reflect on it to enumerate fields.
        Field field = entity.getClass().getDeclaredField("metaDataMap");
        Map<org.apache.thrift.TFieldIdEnum, org.apache.thrift.meta_data.FieldMetaData> fields = (Map<org.apache.thrift.TFieldIdEnum, FieldMetaData>) field.get(entity);

        for (Entry<org.apache.thrift.TFieldIdEnum, FieldMetaData> f : fields.entrySet()) {
            ThriftTypes type = ThriftTypes.values()[f.getValue().valueMetaData.type];
            Object value = entity.getFieldValue(f.getKey());
            if (value == null)
                continue;

            switch (type) {
            case VOID :
                break;
            case BOOL :
            case BYTE :
            case DOUBLE :
            case I16 :
            case I32 :
            case I64 :
            case STRING :
            case ENUM :
                // Scalars: binary payloads are base64 encoded, everything else
                // is stored via toString().
                if (value instanceof byte[]) {
                    properties.put(prefix + f.getKey().getFieldName(), Base64.encodeBase64String((byte[])value));
                }
                else if (value instanceof ByteBuffer) {
                    properties.put(prefix + f.getKey().getFieldName(), base64Encode((ByteBuffer)value));
                }
                else {
                    properties.put(prefix + f.getKey().getFieldName(), value.toString());
                }
                break;
            case MAP : {
                // Only maps with scalar keys are supported; each key extends
                // the property prefix.
                String newPrefix = prefix + f.getKey().getFieldName() + ".";
                org.apache.thrift.meta_data.MapMetaData meta = (org.apache.thrift.meta_data.MapMetaData)f.getValue().valueMetaData;
                if (!meta.keyMetaData.isStruct() && !meta.keyMetaData.isContainer()) {
                    Map<Object, Object> map = (Map<Object, Object>)value;
                    for (Entry<Object, Object> entry : map.entrySet()) {
                        properties.put(newPrefix + entry.getKey(), entry.getValue().toString());
                    }
                }
                else {
                    LOG.error(String.format("Unable to serializer field '%s' key type '%s' not supported", f.getKey().getFieldName(), meta.keyMetaData.getTypedefName()));
                }
                break;
            }
            case LIST : {
                // List elements are keyed by an element-specific id: CfDef
                // name or base64 of the ColumnDef name.  Other element types
                // are logged and skipped.
                String newPrefix = prefix + f.getKey().getFieldName() + ".";
                List<Object> list = (List<Object>)value;
                org.apache.thrift.meta_data.ListMetaData listMeta = (org.apache.thrift.meta_data.ListMetaData)f.getValue().valueMetaData;

                for (Object entry : list) {
                    String id;
                    if (entry instanceof CfDef) {
                        id = ((CfDef)entry).name;
                    }
                    else if (entry instanceof ColumnDef) {
                        ByteBuffer name = ((ColumnDef)entry).name;
                        id = base64Encode(name);
                    }
                    else {
                        LOG.error("Don't know how to convert to properties " + listMeta.elemMetaData.getTypedefName());
                        continue;
                    }

                    if (listMeta.elemMetaData.isStruct()) {
                        // Recurse into struct elements.
                        setPropertiesFromThrift(newPrefix + id + ".", properties, (org.apache.thrift.TBase)entry);
                    }
                    else {
                        properties.put(newPrefix + id, entry);
                    }
                }
                break;
            }
            case STRUCT : {
                // Recurse into nested structs with an extended prefix.
                setPropertiesFromThrift(prefix + f.getKey().getFieldName() + ".", properties, (org.apache.thrift.TBase)value);
                break;
            }
            case SET :
            default:
                LOG.error("Unhandled value : " + f.getKey().getFieldName() + " " + type);
                break;
            }
        }
    }

    /** Base64 encode a ByteBuffer without consuming it; null maps to "". */
    private static String base64Encode(ByteBuffer bb) {
        if (bb == null) {
            return "";
        }
        byte[] nbb = new byte[bb.remaining()];
        // Read through a duplicate so the caller's buffer position is untouched.
        bb.duplicate().get(nbb, 0, bb.remaining());
        return Base64.encodeBase64String(nbb);
    }

    /** Inverse of getPropertiesFromThrift: instantiate clazz and populate it. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    public static <T> T getThriftObjectFromProperties(Class<T> clazz, Properties props) throws Exception {
        org.apache.thrift.TBase entity = (org.apache.thrift.TBase)clazz.newInstance();
        return (T)populateObjectFromProperties(entity, props);
    }

    /** Populate a thrift entity from flattened Properties (see propertiesToMap). */
    public static Object populateObjectFromProperties(Object entity, Properties props) throws Exception {
        return populateObject(entity, propertiesToMap(props));
    }

    /**
     * Recursively copy values from a nested map (as produced by
     * propertiesToMap) into a thrift entity, using the entity's metadata map
     * to drive type conversion.  SET and nested top-level STRUCT fields are
     * not handled.
     */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    private static Object populateObject(Object obj, Map<String, Object> map) throws Exception {
        org.apache.thrift.TBase entity = (org.apache.thrift.TBase)obj;
        Field field = entity.getClass().getDeclaredField("metaDataMap");
        Map<org.apache.thrift.TFieldIdEnum, org.apache.thrift.meta_data.FieldMetaData> fields = (Map<org.apache.thrift.TFieldIdEnum, FieldMetaData>) field.get(entity);

        for (Entry<TFieldIdEnum, FieldMetaData> f : fields.entrySet()) {
            Object value = map.get(f.getKey().getFieldName());
            if (value != null) {
                ThriftTypes type = ThriftTypes.values()[f.getValue().valueMetaData.type];
                switch (type) {
                case VOID :
                    break;
                case BYTE :
                case BOOL :
                case DOUBLE :
                case I16 :
                case I32 :
                case I64 :
                case STRING :
                    try {
                        entity.setFieldValue(f.getKey(), valueForBasicType(value, f.getValue().valueMetaData.type));
                    }
                    catch (ClassCastException e) {
                        // Binary fields arrive base64 encoded as strings;
                        // decode them back into a ByteBuffer.
                        if (e.getMessage().contains(ByteBuffer.class.getCanonicalName())) {
                            entity.setFieldValue(f.getKey(), ByteBuffer.wrap(Base64.decodeBase64((String)value)));
                        }
                        else {
                            throw e;
                        }
                    }
                    break;
                case ENUM : {
                    org.apache.thrift.meta_data.EnumMetaData meta = (org.apache.thrift.meta_data.EnumMetaData)f.getValue().valueMetaData;
                    Object e = meta.enumClass;
                    entity.setFieldValue(f.getKey(), Enum.valueOf((Class<Enum>) e, (String)value));
                    break;
                }
                case MAP : {
                    org.apache.thrift.meta_data.MapMetaData meta = (org.apache.thrift.meta_data.MapMetaData)f.getValue().valueMetaData;
                    if (!meta.keyMetaData.isStruct() && !meta.keyMetaData.isContainer()) {
                        Map<Object, Object> childMap = (Map<Object, Object>)value;
                        Map<Object, Object> childEntityMap = Maps.newHashMap();
                        entity.setFieldValue(f.getKey(), childEntityMap);

                        // NOTE(review): this condition repeats the enclosing
                        // if and is therefore always true here.
                        if (!meta.keyMetaData.isStruct() && !meta.keyMetaData.isContainer()) {
                            for (Entry<Object, Object> entry : childMap.entrySet()) {
                                Object childKey = valueForBasicType(entry.getKey(), meta.keyMetaData.type);
                                Object childValue = valueForBasicType(entry.getValue(), meta.valueMetaData.type);
                                childEntityMap.put(childKey, childValue);
                            }
                        }
                    }
                    else {
                        LOG.error(String.format("Unable to serializer field '%s' key type '%s' not supported", f.getKey().getFieldName(), meta.keyMetaData.getTypedefName()));
                    }
                    break;
                }
                case LIST : {
                    Map<String, Object> childMap = (Map<String, Object>)value;
                    org.apache.thrift.meta_data.ListMetaData listMeta = (org.apache.thrift.meta_data.ListMetaData)f.getValue().valueMetaData;

                    // Create an empty list and attach to the parent entity
                    List<Object> childList = Lists.newArrayList();
                    entity.setFieldValue(f.getKey(), childList);

                    // Only lists of structs are reconstructed; each child map
                    // entry becomes one struct element.
                    if (listMeta.elemMetaData instanceof org.apache.thrift.meta_data.StructMetaData) {
                        org.apache.thrift.meta_data.StructMetaData structMeta = (org.apache.thrift.meta_data.StructMetaData)listMeta.elemMetaData;
                        for (Entry<String, Object> childElement : childMap.entrySet()) {
                            org.apache.thrift.TBase childEntity = structMeta.structClass.newInstance();
                            populateObject(childEntity, (Map<String, Object>)childElement.getValue());
                            childList.add(childEntity);
                        }
                    }
                    break;
                }
                case STRUCT : {
                    // Nested struct fields are intentionally skipped here.
                    break;
                }
                case SET :
                default:
                    LOG.error("Unhandled value : " + f.getKey().getFieldName() + " " + type);
                    break;
                }
            }
        }
        return entity;
    }

    /**
     * Parse a flattened string value back into the primitive matching the
     * thrift type code; returns null for unsupported types.
     */
    public static Object valueForBasicType(Object value, byte type) {
        switch (ThriftTypes.values()[type]) {
        case BYTE :
            return Byte.parseByte((String)value);
        case BOOL :
            return Boolean.parseBoolean((String)value);
        case DOUBLE :
            return Double.parseDouble((String)value);
        case I16 :
            return Short.parseShort((String)value);
        case I32 :
            return Integer.parseInt((String)value);
        case I64 :
            return Long.parseLong((String)value);
        case STRING :
            return value;
        default:
            return null;
        }
    }

    /**
     * Convert a Properties object into a tree of nested maps, splitting each
     * key on '.'.
     * @param props flattened properties
     * @return root of the reconstructed tree
     */
    public static Map<String, Object> propertiesToMap(Properties props) {
        Map<String, Object> root = Maps.newTreeMap();
        for (Entry<Object, Object> prop : props.entrySet()) {
            String[] parts = StringUtils.split((String)prop.getKey(), ".");
            Map<String, Object> node = root;
            // Walk/create intermediate nodes for all but the last key segment.
            for (int i = 0; i < parts.length - 1; i++) {
                if (!node.containsKey(parts[i])) {
                    node.put(parts[i], new LinkedHashMap<String, Object>());
                }
                node = (Map<String, Object>)node.get(parts[i]);
            }
            node.put(parts[parts.length-1], (String)prop.getValue());
        }
        return root;
    }
}
| 7,982 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftConverter.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.exceptions.BadRequestException;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionAbortedException;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.OperationTimeoutException;
import com.netflix.astyanax.connectionpool.exceptions.ThriftStateException;
import com.netflix.astyanax.connectionpool.exceptions.TimeoutException;
import com.netflix.astyanax.connectionpool.exceptions.TokenRangeOfflineException;
import com.netflix.astyanax.connectionpool.exceptions.TransportException;
import com.netflix.astyanax.connectionpool.exceptions.UnknownException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnPath;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.ColumnType;
import com.netflix.astyanax.model.ConsistencyLevel;
import org.apache.cassandra.thrift.AuthenticationException;
import org.apache.cassandra.thrift.AuthorizationException;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.NotFoundException;
import org.apache.cassandra.thrift.SchemaDisagreementException;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.thrift.TimedOutException;
import org.apache.cassandra.thrift.UnavailableException;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.util.Iterator;
/**
 * Static helpers translating between astyanax model/exception types and the
 * raw thrift structures used on the wire.
 */
public class ThriftConverter {
    private static final Logger LOGGER = LoggerFactory.getLogger(ThriftConverter.class);

    /**
     * Construct a thrift ColumnParent based on the information in the query
     * and the type of column family being queried.  For SUPER column families
     * the first path element becomes the super column.
     *
     * @throws BadRequestException
     */
    public static <K> ColumnParent getColumnParent(ColumnFamily<?, ?> columnFamily, ColumnPath<?> path)
            throws BadRequestException {
        ColumnParent cp = new ColumnParent();
        cp.setColumn_family(columnFamily.getName());
        if (path != null) {
            Iterator<ByteBuffer> columns = path.iterator();
            if (columnFamily.getType() == ColumnType.SUPER && columns.hasNext()) {
                cp.setSuper_column(columns.next());
            }
        }
        return cp;
    }

    /**
     * Construct a thrift ColumnPath based on the information in the query and
     * the type of column family being queried.
     *
     * @throws BadRequestException if the path is deeper than the column family supports
     */
    public static <K> org.apache.cassandra.thrift.ColumnPath getColumnPath(ColumnFamily<?, ?> columnFamily,
            ColumnPath<?> path) throws BadRequestException {
        org.apache.cassandra.thrift.ColumnPath cp = new org.apache.cassandra.thrift.ColumnPath();
        cp.setColumn_family(columnFamily.getName());
        if (path != null) {
            Iterator<ByteBuffer> columns = path.iterator();
            // For SUPER column families the first element is the super column.
            if (columnFamily.getType() == ColumnType.SUPER && columns.hasNext()) {
                cp.setSuper_column(columns.next());
            }
            if (columns.hasNext()) {
                cp.setColumn(columns.next());
            }
            if (columns.hasNext()) {
                throw new BadRequestException("Path depth of " + path.length() + " not supported for column family \'"
                        + columnFamily.getName() + "\'");
            }
        }
        return cp;
    }

    /**
     * Return a thrift SlicePredicate for the provided column slice.  A null
     * slice selects all columns; a slice with an explicit column list selects
     * exactly those columns; otherwise a start/end range is built.
     */
    public static <C> SlicePredicate getPredicate(ColumnSlice<C> columns, Serializer<C> colSer) {
        // Get all the columns
        if (columns == null) {
            SlicePredicate predicate = new SlicePredicate();
            predicate.setSlice_range(new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), false,
                    Integer.MAX_VALUE));
            return predicate;
        }

        // Get a specific list of columns
        if (columns.getColumns() != null) {
            SlicePredicate predicate = new SlicePredicate();
            // Use the generated setter, which assigns the field and its is-set
            // state together (the old code set the flag and the field
            // separately, which is fragile against thrift codegen changes).
            predicate.setColumn_names(colSer.toBytesList(columns.getColumns()));
            return predicate;
        }
        else {
            SlicePredicate predicate = new SlicePredicate();
            predicate.setSlice_range(new SliceRange((columns.getStartColumn() == null) ? ByteBuffer.wrap(new byte[0])
                    : ByteBuffer.wrap(colSer.toBytes(columns.getStartColumn())),
                    (columns.getEndColumn() == null) ? ByteBuffer.wrap(new byte[0]) : ByteBuffer.wrap(colSer
                            .toBytes(columns.getEndColumn())), columns.getReversed(), columns.getLimit()));
            return predicate;
        }
    }

    /**
     * Convert from Thrift exceptions to an internal ConnectionPoolException.
     * Unrecognized throwables map to UnknownException.
     */
    public static ConnectionException ToConnectionPoolException(Throwable e) {
        if (e instanceof ConnectionException) {
            return (ConnectionException) e;
        }

        LOGGER.debug(e.getMessage());
        if (e instanceof InvalidRequestException) {
            return new com.netflix.astyanax.connectionpool.exceptions.BadRequestException(e);
        }
        else if (e instanceof TProtocolException) {
            return new com.netflix.astyanax.connectionpool.exceptions.BadRequestException(e);
        }
        else if (e instanceof UnavailableException) {
            return new TokenRangeOfflineException(e);
        }
        else if (e instanceof SocketTimeoutException) {
            return new TimeoutException(e);
        }
        else if (e instanceof TimedOutException) {
            return new OperationTimeoutException(e);
        }
        else if (e instanceof NotFoundException) {
            return new com.netflix.astyanax.connectionpool.exceptions.NotFoundException(e);
        }
        else if (e instanceof TApplicationException) {
            return new ThriftStateException(e);
        }
        else if (e instanceof AuthenticationException || e instanceof AuthorizationException) {
            return new com.netflix.astyanax.connectionpool.exceptions.AuthenticationException(e);
        }
        else if (e instanceof SchemaDisagreementException) {
            return new com.netflix.astyanax.connectionpool.exceptions.SchemaDisagreementException(e);
        }
        else if (e instanceof TTransportException) {
            // Inspect the cause to distinguish timeouts and aborted
            // connections from generic transport failures.
            if (e.getCause() != null) {
                if (e.getCause() instanceof SocketTimeoutException) {
                    return new TimeoutException(e);
                }
                if (e.getCause().getMessage() != null) {
                    if (e.getCause().getMessage().toLowerCase().contains("connection abort")
                            || e.getCause().getMessage().toLowerCase().contains("connection reset")) {
                        return new ConnectionAbortedException(e);
                    }
                }
            }
            return new TransportException(e);
        }
        else {
            return new UnknownException(e);
        }
    }

    /** Map an astyanax ConsistencyLevel to its thrift equivalent (defaulting to QUORUM). */
    public static org.apache.cassandra.thrift.ConsistencyLevel ToThriftConsistencyLevel(ConsistencyLevel cl) {
        switch (cl) {
        case CL_ONE:
            return org.apache.cassandra.thrift.ConsistencyLevel.ONE;
        case CL_QUORUM:
            return org.apache.cassandra.thrift.ConsistencyLevel.QUORUM;
        case CL_EACH_QUORUM:
            return org.apache.cassandra.thrift.ConsistencyLevel.EACH_QUORUM;
        case CL_LOCAL_QUORUM:
            return org.apache.cassandra.thrift.ConsistencyLevel.LOCAL_QUORUM;
        case CL_TWO:
            return org.apache.cassandra.thrift.ConsistencyLevel.TWO;
        case CL_THREE:
            return org.apache.cassandra.thrift.ConsistencyLevel.THREE;
        case CL_ALL:
            return org.apache.cassandra.thrift.ConsistencyLevel.ALL;
        case CL_ANY:
            return org.apache.cassandra.thrift.ConsistencyLevel.ANY;
        case CL_LOCAL_ONE:
            return org.apache.cassandra.thrift.ConsistencyLevel.LOCAL_ONE;
        default:
            return org.apache.cassandra.thrift.ConsistencyLevel.QUORUM;
        }
    }
}
| 7,983 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftSyncConnectionFactoryImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.AuthenticationCredentials;
import com.netflix.astyanax.CassandraOperationTracer;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.connectionpool.Connection;
import com.netflix.astyanax.connectionpool.ConnectionFactory;
import com.netflix.astyanax.connectionpool.ConnectionPoolConfiguration;
import com.netflix.astyanax.connectionpool.ConnectionPoolMonitor;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.HostConnectionPool;
import com.netflix.astyanax.connectionpool.Operation;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.RateLimiter;
import com.netflix.astyanax.connectionpool.SSLConnectionContext;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.IsTimeoutException;
import com.netflix.astyanax.connectionpool.exceptions.ThrottledException;
import com.netflix.astyanax.connectionpool.impl.OperationResultImpl;
import com.netflix.astyanax.connectionpool.impl.SimpleRateLimiterImpl;
import org.apache.cassandra.thrift.AuthenticationRequest;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSSLTransportFactory;
import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
public class ThriftSyncConnectionFactoryImpl implements ConnectionFactory<Cassandra.Client> {
// Format used to name connections: host plus a unique id.
private static final String NAME_FORMAT = "ThriftConnection<%s-%d>";
private static final Logger LOG = LoggerFactory.getLogger(ThriftSyncConnectionFactoryImpl.class);

// Shared daemon-thread pool used by all factory instances in the JVM.
private final static ExecutorService executor = Executors.newCachedThreadPool(new ThreadFactoryBuilder().setDaemon(true)
        .build());

// Monotonic id source used to name/track connections from this factory.
private final AtomicLong idCounter = new AtomicLong(0);
// Guards against connection-attempt storms; checked in createConnection().
private final RateLimiter limiter;
private final ConnectionPoolConfiguration cpConfig;
private final KeyspaceTracerFactory tracerFactory;
private final ConnectionPoolMonitor monitor;
private final AstyanaxConfiguration asConfig;

/**
 * @param asConfig      astyanax configuration (supplies max thrift frame size)
 * @param cpConfig      connection pool configuration (timeouts, rate limits)
 * @param tracerFactory factory for per-operation tracers
 * @param monitor       monitor notified of connection lifecycle events
 */
public ThriftSyncConnectionFactoryImpl(AstyanaxConfiguration asConfig, ConnectionPoolConfiguration cpConfig, KeyspaceTracerFactory tracerFactory,
        ConnectionPoolMonitor monitor) {
    this.cpConfig = cpConfig;
    this.asConfig = asConfig;
    this.limiter = new SimpleRateLimiterImpl(cpConfig);
    this.tracerFactory = tracerFactory;
    this.monitor = monitor;
}
@Override
public Connection<Cassandra.Client> createConnection(final HostConnectionPool<Cassandra.Client> pool)
throws ThrottledException {
if (limiter.check() == false) {
throw new ThrottledException("Too many connection attempts");
}
return new ThriftConnection(pool, asConfig.getMaxThriftSize());
}
public class ThriftConnection implements Connection<Cassandra.Client> {
private final long id = idCounter.incrementAndGet();
private Cassandra.Client cassandraClient;
private TFramedTransport transport;
private TSocket socket;
private int timeout = 0;
private int maxThriftSize = 0;
private AtomicLong operationCounter = new AtomicLong();
private AtomicBoolean closed = new AtomicBoolean(false);
private volatile ConnectionException lastException = null;
private volatile String keyspaceName;
private final HostConnectionPool<Cassandra.Client> pool;
private Map<String, Object> metadata = Maps.newHashMap();
public ThriftConnection(HostConnectionPool<Cassandra.Client> pool, int maxThriftSizeVal) {
this.pool = pool;
this.maxThriftSize = maxThriftSizeVal;
}
@Override
public <R> OperationResult<R> execute(Operation<Cassandra.Client, R> op) throws ConnectionException {
long startTime = System.nanoTime();
long latency = 0;
setTimeout(cpConfig.getSocketTimeout()); // In case the configurationchanged
operationCounter.incrementAndGet();
// Set a new keyspace, if it changed
lastException = null;
if (op.getKeyspace() != null && (keyspaceName == null || !op.getKeyspace().equals(keyspaceName))) {
CassandraOperationTracer tracer = tracerFactory.newTracer(CassandraOperationType.SET_KEYSPACE)
.start();
try {
cassandraClient.set_keyspace(op.getKeyspace());
if (asConfig.getCqlVersion() != null)
cassandraClient.set_cql_version(asConfig.getCqlVersion());
keyspaceName = op.getKeyspace();
long now = System.nanoTime();
latency = now - startTime;
pool.addLatencySample(latency, now);
tracer.success();
}
catch (Exception e) {
long now = System.nanoTime();
latency = now - startTime;
lastException = ThriftConverter.ToConnectionPoolException(e).setLatency(latency);
if (e instanceof IsTimeoutException) {
pool.addLatencySample(TimeUnit.NANOSECONDS.convert(cpConfig.getSocketTimeout(), TimeUnit.MILLISECONDS), now);
}
tracer.failure(lastException);
throw lastException;
}
startTime = System.nanoTime(); // We don't want to include
// the set_keyspace in our
// latency calculation
}
// Execute the operation
try {
R result = op.execute(cassandraClient, this);
long now = System.nanoTime();
latency = now - startTime;
pool.addLatencySample(latency, now);
return new OperationResultImpl<R>(getHost(), result, latency);
}
catch (Exception e) {
long now = System.nanoTime();
latency = now - startTime;
lastException = ThriftConverter.ToConnectionPoolException(e).setLatency(latency);
if (e instanceof IsTimeoutException) {
pool.addLatencySample(TimeUnit.NANOSECONDS.convert(cpConfig.getSocketTimeout(), TimeUnit.MILLISECONDS), now);
}
throw lastException;
}
}
@Override
public void open() throws ConnectionException {
if (cassandraClient != null) {
throw new IllegalStateException("Open called on already open connection");
}
long startTime = System.currentTimeMillis();
try {
final SSLConnectionContext sslCxt = cpConfig.getSSLConnectionContext();
if(sslCxt != null) {
TSSLTransportParameters params = new TSSLTransportParameters(sslCxt.getSslProtocol(), sslCxt.getSslCipherSuites().toArray(new String[0]));
params.setTrustStore(sslCxt.getSslTruststore(), sslCxt.getSslTruststorePassword());
//thrift's SSL implementation does not allow you set the socket connect timeout, only read timeout
socket = TSSLTransportFactory.getClientSocket(getHost().getIpAddress(), getHost().getPort(), cpConfig.getSocketTimeout(), params);
} else {
socket = new TSocket(getHost().getIpAddress(), getHost().getPort(), cpConfig.getConnectTimeout());
}
socket.getSocket().setTcpNoDelay(true);
socket.getSocket().setKeepAlive(true);
socket.getSocket().setSoLinger(false, 0);
setTimeout(cpConfig.getSocketTimeout());
transport = new TFramedTransport(socket, maxThriftSize);
if(!transport.isOpen())
transport.open();
cassandraClient = new Cassandra.Client(new TBinaryProtocol.Factory().getProtocol(transport));
monitor.incConnectionCreated(getHost());
AuthenticationCredentials credentials = cpConfig.getAuthenticationCredentials();
if (credentials != null) {
Map<String, String> thriftCredentials = Maps.newHashMapWithExpectedSize(2);
thriftCredentials.put("username", credentials.getUsername());
thriftCredentials.put("password", credentials.getPassword());
cassandraClient.login(new AuthenticationRequest(thriftCredentials));
}
}
catch (Exception e) {
pool.addLatencySample(TimeUnit.NANOSECONDS.convert(cpConfig.getSocketTimeout(), TimeUnit.MILLISECONDS), System.nanoTime());
closeClient();
ConnectionException ce = ThriftConverter.ToConnectionPoolException(e).setHost(getHost())
.setLatency(System.currentTimeMillis() - startTime);
monitor.incConnectionCreateFailed(getHost(), ce);
throw ce;
}
catch (Throwable t) {
LOG.error("Error creating connection", t);
pool.addLatencySample(TimeUnit.NANOSECONDS.convert(cpConfig.getSocketTimeout(), TimeUnit.MILLISECONDS), System.nanoTime());
closeClient();
ConnectionException ce = ThriftConverter.ToConnectionPoolException(new RuntimeException("Error openning connection", t)).setHost(getHost())
.setLatency(System.currentTimeMillis() - startTime);
monitor.incConnectionCreateFailed(getHost(), ce);
throw ce;
}
}
@Override
public void openAsync(final AsyncOpenCallback<Cassandra.Client> callback) {
final Connection<Cassandra.Client> This = this;
executor.submit(new Runnable() {
@Override
public void run() {
try {
open();
callback.success(This);
}
catch (Exception e) {
pool.addLatencySample(TimeUnit.NANOSECONDS.convert(cpConfig.getSocketTimeout(), TimeUnit.MILLISECONDS), System.nanoTime());
callback.failure(This, ThriftConverter.ToConnectionPoolException(e));
}
}
});
}
@Override
public void close() {
if (closed.compareAndSet(false, true)) {
monitor.incConnectionClosed(getHost(), lastException);
executor.submit(new Runnable() {
@Override
public void run() {
try {
closeClient();
}
catch (Exception e) {
}
}
});
}
}
private void closeClient() {
if (transport != null) {
try {
transport.flush();
}
catch (TTransportException e) {
}
finally {
try {
transport.close();
}
catch (Exception e) {
}
finally {
transport = null;
}
}
}
if (socket != null) {
try {
socket.close();
}
catch (Exception e) {
}
finally {
socket = null;
}
}
}
@Override
public HostConnectionPool<Cassandra.Client> getHostConnectionPool() {
return pool;
}
@Override
public ConnectionException getLastException() {
return lastException;
}
@Override
public String toString() {
return String.format(NAME_FORMAT, getHost().getHostName(), id);
}
/**
* Compares the toString of these clients
*/
@Override
public boolean equals(Object obj) {
return toString().equals(obj.toString());
}
@Override
public long getOperationCount() {
return operationCounter.get();
}
@Override
public Host getHost() {
return pool.getHost();
}
public void setTimeout(int timeout) {
if (this.timeout != timeout) {
socket.setTimeout(timeout);
this.timeout = timeout;
}
}
@Override
public void setMetadata(String key, Object data) {
metadata.put(key, data);
}
@Override
public Object getMetadata(String key) {
return metadata.get(key);
}
@Override
public boolean hasMetadata(String key) {
return metadata.containsKey(key);
}
}
}
| 7,984 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractThriftCqlQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.concurrent.Callable;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.SchemaDisagreementException;
import org.apache.cassandra.thrift.TimedOutException;
import org.apache.cassandra.thrift.UnavailableException;
import org.apache.thrift.TException;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.query.AbstractPreparedCqlQuery;
import com.netflix.astyanax.query.CqlQuery;
import com.netflix.astyanax.query.PreparedCqlQuery;
import com.netflix.astyanax.thrift.model.ThriftCqlResultImpl;
import com.netflix.astyanax.thrift.model.ThriftCqlRowsImpl;
/**
 * Base implementation of {@link CqlQuery} over the thrift transport.  Concrete
 * subclasses supply the raw thrift calls (prepare / execute-prepared / execute)
 * while this class handles execution through the connection pool, conversion of
 * thrift results into {@link CqlResult}, and prepared-statement id caching.
 */
public abstract class AbstractThriftCqlQuery<K,C> implements CqlQuery<K,C> {
    // Set by useCompression(); presumably read by subclasses when building the
    // thrift request — TODO confirm against subclass implementations.
    boolean useCompression = false;
    ThriftColumnFamilyQueryImpl<K,C> cfQuery;
    String cql;
    // Defaults to the parent query's consistency level; not read directly in this
    // class — presumably consumed by subclasses. NOTE(review): confirm.
    ConsistencyLevel cl = ConsistencyLevel.CL_ONE;

    AbstractThriftCqlQuery(ThriftColumnFamilyQueryImpl<K,C> cfQuery, String cql) {
        this.cfQuery = cfQuery;
        this.cql = cql;
        this.cl = cfQuery.consistencyLevel;
    }

    /**
     * Execute the CQL query with failover, converting a ROWS result into typed
     * rows via the column family's serializers and an INT result into a count.
     * Other result types yield a null result.
     */
    @Override
    public OperationResult<CqlResult<K, C>> execute() throws ConnectionException {
        return cfQuery.keyspace.connectionPool.executeWithFailover(
                new AbstractKeyspaceOperationImpl<CqlResult<K, C>>(cfQuery.keyspace.tracerFactory.newTracer(
                        CassandraOperationType.CQL, cfQuery.columnFamily), cfQuery.pinnedHost, cfQuery.keyspace.getKeyspaceName()) {
                    @Override
                    public CqlResult<K, C> internalExecute(Client client, ConnectionContext context) throws Exception {
                        org.apache.cassandra.thrift.CqlResult res = execute_cql_query(client);
                        switch (res.getType()) {
                        case ROWS:
                            return new ThriftCqlResultImpl<K, C>(new ThriftCqlRowsImpl<K, C>(res.getRows(),
                                    cfQuery.columnFamily.getKeySerializer(), cfQuery.columnFamily.getColumnSerializer()));
                        case INT:
                            return new ThriftCqlResultImpl<K, C>(res.getNum());
                        default:
                            return null;
                        }
                    }
                }, cfQuery.retry);
    }

    /** Run {@link #execute()} on the keyspace's executor and return a future. */
    @Override
    public ListenableFuture<OperationResult<CqlResult<K, C>>> executeAsync() throws ConnectionException {
        return cfQuery.keyspace.executor.submit(new Callable<OperationResult<CqlResult<K, C>>>() {
            @Override
            public OperationResult<CqlResult<K, C>> call() throws Exception {
                return execute();
            }
        });
    }

    @Override
    public CqlQuery<K, C> useCompression() {
        useCompression = true;
        return this;
    }

    /**
     * Wrap this query as a prepared statement.  The prepared-statement id is
     * cached per connection in the {@link ConnectionContext} metadata, keyed by
     * the CQL string, so each connection prepares the statement at most once.
     */
    @Override
    public PreparedCqlQuery<K, C> asPreparedStatement() {
        return new AbstractPreparedCqlQuery<K, C>() {
            @Override
            public OperationResult<CqlResult<K, C>> execute() throws ConnectionException {
                return cfQuery.keyspace.connectionPool.executeWithFailover(
                        new AbstractKeyspaceOperationImpl<CqlResult<K, C>>(cfQuery.keyspace.tracerFactory.newTracer(
                                CassandraOperationType.CQL, cfQuery.columnFamily), cfQuery.pinnedHost, cfQuery.keyspace.getKeyspaceName()) {
                            @Override
                            public CqlResult<K, C> internalExecute(Client client, ConnectionContext state) throws Exception {
                                // Prepare once per connection; reuse the cached id thereafter.
                                Integer id = (Integer)state.getMetadata(cql);
                                if (id == null) {
                                    org.apache.cassandra.thrift.CqlPreparedResult res = prepare_cql_query(client);
                                    id = res.getItemId();
                                    state.setMetadata(cql, id);
                                }

                                org.apache.cassandra.thrift.CqlResult res = execute_prepared_cql_query(client, id, getValues());
                                switch (res.getType()) {
                                case ROWS:
                                    return new ThriftCqlResultImpl<K, C>(new ThriftCqlRowsImpl<K, C>(res.getRows(),
                                            cfQuery.columnFamily.getKeySerializer(), cfQuery.columnFamily.getColumnSerializer()));
                                case INT:
                                    return new ThriftCqlResultImpl<K, C>(res.getNum());
                                default:
                                    return null;
                                }
                            }
                        }, cfQuery.retry);
            }

            @Override
            public ListenableFuture<OperationResult<CqlResult<K, C>>> executeAsync() throws ConnectionException {
                return cfQuery.executor.submit(new Callable<OperationResult<CqlResult<K, C>>>() {
                    @Override
                    public OperationResult<CqlResult<K, C>> call() throws Exception {
                        return execute();
                    }
                });
            }
        };
    }

    public CqlQuery<K, C> withConsistencyLevel(ConsistencyLevel cl) {
        this.cl = cl;
        return this;
    }

    /** Subclass hook: issue the thrift prepare call for {@code cql}. */
    protected abstract org.apache.cassandra.thrift.CqlPreparedResult prepare_cql_query(Client client)
            throws InvalidRequestException, TException;

    /** Subclass hook: execute a previously prepared statement with bound values. */
    protected abstract org.apache.cassandra.thrift.CqlResult execute_prepared_cql_query(Client client, int id, List<ByteBuffer> values)
            throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException;

    /** Subclass hook: execute {@code cql} directly (non-prepared). */
    protected abstract org.apache.cassandra.thrift.CqlResult execute_cql_query(Client client)
            throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException;
}
| 7,985 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftColumnFamilyMutationImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.List;
import com.google.common.base.Preconditions;
import com.netflix.astyanax.AbstractColumnListMutation;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.CounterColumn;
import org.apache.cassandra.thrift.Deletion;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.SlicePredicate;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ColumnPath;
/**
* Implementation of a row mutation at the root of the column family.
*
* @author elandau
*
* @param <C>
*/
/**
 * Implementation of a row mutation at the root of the column family.  Each call
 * appends thrift {@link Mutation} entries to the shared mutation list supplied
 * by the enclosing batch.
 *
 * @author elandau
 *
 * @param <C> column name type
 */
public class ThriftColumnFamilyMutationImpl<C> extends AbstractColumnListMutation<C> {
    private final Serializer<C> columnSerializer;
    private final List<Mutation> mutationList;
    // Reused across consecutive deleteColumn() calls sharing the same timestamp
    // so multiple column deletes collapse into one Deletion predicate.
    private Deletion lastDeletion;

    public ThriftColumnFamilyMutationImpl(Long timestamp, List<Mutation> mutationList, Serializer<C> columnSerializer) {
        super(timestamp);
        this.mutationList = mutationList;
        this.columnSerializer = columnSerializer;
    }

    @Override
    public <SC> ColumnListMutation<SC> withSuperColumn(ColumnPath<SC> superColumnPath) {
        return new ThriftSuperColumnMutationImpl<SC>(timestamp, mutationList, superColumnPath);
    }

    /**
     * Append a column insert.  A null value is stored as an empty byte buffer.
     *
     * @param ttl TTL in seconds; null uses the default TTL, 0 or negative means no TTL
     */
    @Override
    public <V> ColumnListMutation<C> putColumn(C columnName, V value, Serializer<V> valueSerializer, Integer ttl) {
        ByteBuffer valueBytes = (value == null)
                ? ThriftUtils.EMPTY_BYTE_BUFFER
                : valueSerializer.toByteBuffer(value);
        appendColumnMutation(columnName, valueBytes, ttl);
        return this;
    }

    /**
     * Append a column insert with an empty value (marker column).
     *
     * @param ttl TTL in seconds; null uses the default TTL, 0 or negative means no TTL
     */
    @Override
    public ColumnListMutation<C> putEmptyColumn(C columnName, Integer ttl) {
        appendColumnMutation(columnName, ThriftUtils.EMPTY_BYTE_BUFFER, ttl);
        return this;
    }

    /**
     * Shared implementation for putColumn/putEmptyColumn: builds a thrift Column
     * with the given serialized value and TTL policy and appends it to the
     * mutation list.  Extracted to remove the previous copy-paste duplication.
     *
     * @throws NullPointerException if columnName is null
     * @throws RuntimeException     if the serialized column name is empty
     */
    private void appendColumnMutation(C columnName, ByteBuffer value, Integer ttl) {
        Preconditions.checkNotNull(columnName, "Column name cannot be null");

        // 1. Set up the column with all the data
        Column column = new Column();
        column.setName(columnSerializer.toByteBuffer(columnName));
        if (column.getName().length == 0) {
            throw new RuntimeException("Column name cannot be empty");
        }
        column.setValue(value);
        column.setTimestamp(timestamp);
        if (ttl != null) {
            // Treat TTL of 0 or -1 as no TTL; an explicit ttl also overrides the default.
            if (ttl > 0)
                column.setTtl(ttl);
        }
        else if (defaultTtl != null)
            column.setTtl(defaultTtl);

        // 2. Create a mutation and append to the mutation list.
        Mutation mutation = new Mutation();
        mutation.setColumn_or_supercolumn(new ColumnOrSuperColumn().setColumn(column));
        mutationList.add(mutation);
    }

    /**
     * Delete the entire row, then bump the timestamp by one so subsequent puts
     * in the same batch are not shadowed by the deletion.
     */
    @Override
    public ColumnListMutation<C> delete() {
        // Delete the entire row
        Deletion d = new Deletion().setTimestamp(timestamp);
        mutationList.add(new Mutation().setDeletion(d));

        // Increment the timestamp by 1 so subsequent puts on this column may be
        // written
        timestamp++;
        return this;
    }

    /** Append a counter-column increment (no timestamp/TTL applies to counters). */
    @Override
    public ColumnListMutation<C> incrementCounterColumn(C columnName, long amount) {
        Preconditions.checkNotNull(columnName, "Column name cannot be null");

        // 1. Set up the column with all the data
        CounterColumn column = new CounterColumn();
        column.setName(columnSerializer.toByteBuffer(columnName));
        if (column.getName().length == 0) {
            throw new RuntimeException("Column name cannot be empty");
        }
        column.setValue(amount);

        // 2. Create a mutation and append to the mutation list.
        Mutation mutation = new Mutation();
        mutation.setColumn_or_supercolumn(new ColumnOrSuperColumn().setCounter_column(column));
        mutationList.add(mutation);
        return this;
    }

    /**
     * Delete a single column.  Consecutive deletes at the same timestamp are
     * merged into a single Deletion's slice predicate.
     */
    @Override
    public ColumnListMutation<C> deleteColumn(C columnName) {
        Preconditions.checkNotNull(columnName, "Column name cannot be null");

        // Create a reusable predicate for deleting columns and insert only once
        if (null == lastDeletion || lastDeletion.getTimestamp() != timestamp) {
            lastDeletion = new Deletion().setPredicate(new SlicePredicate()).setTimestamp(timestamp);
            mutationList.add(new Mutation().setDeletion(lastDeletion));
        }
        ByteBuffer bb = this.columnSerializer.toByteBuffer(columnName);
        if (!bb.hasRemaining()) {
            throw new RuntimeException("Column name cannot be empty");
        }
        lastDeletion.getPredicate().addToColumn_names(bb);
        return this;
    }
}
| 7,986 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftClusterImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.ConcurrentMap;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.KsDef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.Cluster;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.connectionpool.ConnectionPool;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.exceptions.BadRequestException;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.connectionpool.exceptions.OperationException;
import com.netflix.astyanax.connectionpool.exceptions.SchemaDisagreementException;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.KeyspaceDefinition;
import com.netflix.astyanax.ddl.SchemaChangeResult;
import com.netflix.astyanax.ddl.impl.SchemaChangeResponseImpl;
import com.netflix.astyanax.retry.RunOnce;
import com.netflix.astyanax.thrift.ddl.*;
public class ThriftClusterImpl implements Cluster {
    private static final Logger LOG = LoggerFactory.getLogger(ThriftClusterImpl.class);

    // Schema changes are retried up to this many times while the cluster
    // reports schema disagreement.
    private static final int MAX_SCHEMA_CHANGE_ATTEMPTS = 6;
    // Backoff in milliseconds between schema-disagreement retries.
    private static final int SCHEMA_DISAGREEMENT_BACKOFF = 10000;

    private final ConnectionPool<Cassandra.Client> connectionPool;
    // Lazily-populated cache of Keyspace instances by name (see getKeyspace()).
    private final ConcurrentMap<String, Keyspace> keyspaces;
    private final AstyanaxConfiguration config;
    private final KeyspaceTracerFactory tracerFactory;

    public ThriftClusterImpl(
            AstyanaxConfiguration config,
            ConnectionPool<Cassandra.Client> connectionPool,
            KeyspaceTracerFactory tracerFactory) {
        this.config = config;
        this.connectionPool = connectionPool;
        this.tracerFactory = tracerFactory;
        this.keyspaces = Maps.newConcurrentMap();
    }
    /** Fetch the cluster name from any available host, with failover. */
    @Override
    public String describeClusterName() throws ConnectionException {
        return connectionPool.executeWithFailover(
                new AbstractOperationImpl<String>(tracerFactory.newTracer(CassandraOperationType.DESCRIBE_CLUSTER)) {
                    @Override
                    public String internalExecute(Client client, ConnectionContext context) throws Exception {
                        return client.describe_cluster_name();
                    }
                }, config.getRetryPolicy().duplicate()).getResult();
    }

    /** Fetch the configured endpoint snitch class name from the cluster. */
    @Override
    public String describeSnitch() throws ConnectionException {
        return connectionPool.executeWithFailover(
                new AbstractOperationImpl<String>(tracerFactory.newTracer(CassandraOperationType.DESCRIBE_SNITCH)) {
                    @Override
                    public String internalExecute(Client client, ConnectionContext context) throws Exception {
                        return client.describe_snitch();
                    }
                }, config.getRetryPolicy().duplicate()).getResult();
    }

    /** Fetch the partitioner class name from the cluster. */
    @Override
    public String describePartitioner() throws ConnectionException {
        return connectionPool
                .executeWithFailover(
                        new AbstractOperationImpl<String>(
                                tracerFactory.newTracer(CassandraOperationType.DESCRIBE_PARTITIONER)) {
                            @Override
                            public String internalExecute(Client client, ConnectionContext context) throws Exception {
                                return client.describe_partitioner();
                            }
                        }, config.getRetryPolicy().duplicate()).getResult();
    }

    /**
     * Map of schema version id to the list of hosts reporting that version;
     * more than one entry indicates schema disagreement in the cluster.
     */
    @Override
    public Map<String, List<String>> describeSchemaVersions() throws ConnectionException {
        return connectionPool.executeWithFailover(
                new AbstractOperationImpl<Map<String, List<String>>>(
                        tracerFactory.newTracer(CassandraOperationType.DESCRIBE_SCHEMA_VERSION)) {
                    @Override
                    public Map<String, List<String>> internalExecute(Client client, ConnectionContext context) throws Exception {
                        return client.describe_schema_versions();
                    }
                }, config.getRetryPolicy().duplicate()).getResult();
    }

    /**
     * Get the Thrift API version string from the cluster.
     *
     * @return version string as reported by describe_version
     * @throws OperationException
     */
    @Override
    public String getVersion() throws ConnectionException {
        return connectionPool.executeWithFailover(
                new AbstractOperationImpl<String>(tracerFactory.newTracer(CassandraOperationType.GET_VERSION)) {
                    @Override
                    public String internalExecute(Client client, ConnectionContext state) throws Exception {
                        return client.describe_version();
                    }
                }, config.getRetryPolicy().duplicate()).getResult();
    }
private <K> OperationResult<K> executeSchemaChangeOperation(AbstractOperationImpl<K> op) throws OperationException,
ConnectionException {
int attempt = 0;
do {
try {
return connectionPool.executeWithFailover(op, config.getRetryPolicy().duplicate());
}
catch (SchemaDisagreementException e) {
if (++attempt >= MAX_SCHEMA_CHANGE_ATTEMPTS) {
throw e;
}
try {
Thread.sleep(SCHEMA_DISAGREEMENT_BACKOFF);
}
catch (InterruptedException e1) {
Thread.interrupted();
throw new RuntimeException(e1);
}
}
} while (true);
}
    /** Fetch all keyspace definitions, lazily wrapped as thrift-backed DDL objects. */
    @Override
    public List<KeyspaceDefinition> describeKeyspaces() throws ConnectionException {
        return connectionPool.executeWithFailover(
                new AbstractOperationImpl<List<KeyspaceDefinition>>(
                        tracerFactory.newTracer(CassandraOperationType.DESCRIBE_KEYSPACES)) {
                    @Override
                    public List<KeyspaceDefinition> internalExecute(Client client, ConnectionContext context) throws Exception {
                        List<KsDef> ksDefs = client.describe_keyspaces();
                        return Lists.transform(ksDefs, new Function<KsDef, KeyspaceDefinition>() {
                            @Override
                            public KeyspaceDefinition apply(KsDef ksDef) {
                                return new ThriftKeyspaceDefinitionImpl(ksDef);
                            }
                        });
                    }
                }, config.getRetryPolicy().duplicate()).getResult();
    }

    /**
     * Find a single keyspace definition by exact name.
     *
     * @return the definition, or null if no keyspace with that name exists
     */
    @Override
    public KeyspaceDefinition describeKeyspace(String ksName) throws ConnectionException {
        List<KeyspaceDefinition> ksDefs = describeKeyspaces();
        for (KeyspaceDefinition ksDef : ksDefs) {
            if (ksDef.getName().equals(ksName)) {
                return ksDef;
            }
        }
        return null;
    }

    /**
     * Get (or lazily create and cache) the Keyspace client for the given name.
     * Creation is guarded by a lock plus putIfAbsent so concurrent callers
     * always receive the same instance.
     */
    @Override
    public Keyspace getKeyspace(String ksName) {
        Keyspace keyspace = keyspaces.get(ksName);
        if (keyspace == null) {
            synchronized (this) {
                Keyspace newKeyspace = new ThriftKeyspaceImpl(ksName, this.connectionPool, this.config, tracerFactory);
                keyspace = keyspaces.putIfAbsent(ksName, newKeyspace);
                if (keyspace == null) {
                    keyspace = newKeyspace;
                }
            }
        }
        return keyspace;
    }
    /** Create an empty, mutable column family definition for the caller to populate. */
    @Override
    public ColumnFamilyDefinition makeColumnFamilyDefinition() {
        return new ThriftColumnFamilyDefinitionImpl();
    }

    /** Add a column family from a fully-built definition object. */
    @Override
    public OperationResult<SchemaChangeResult> addColumnFamily(final ColumnFamilyDefinition def) throws ConnectionException {
        return internalCreateColumnFamily(((ThriftColumnFamilyDefinitionImpl) def)
                .getThriftColumnFamilyDefinition());
    }

    /** Add a column family from a map of field name to value. */
    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(final Map<String, Object> options) throws ConnectionException {
        final ThriftColumnFamilyDefinitionImpl def = new ThriftColumnFamilyDefinitionImpl();
        def.setFields(options);
        return internalCreateColumnFamily(def.getThriftColumnFamilyDefinition());
    }

    /**
     * Add a column family from Properties.
     *
     * @throws BadRequestException if the properties cannot be converted to a CfDef
     */
    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(final Properties props) throws ConnectionException {
        final CfDef def;
        try {
            def = ThriftUtils.getThriftObjectFromProperties(CfDef.class, props);
        } catch (Exception e) {
            throw new BadRequestException("Error converting properties to CfDef", e);
        }
        return internalCreateColumnFamily(def);
    }

    // Issues system_add_column_family with schema-disagreement retry; the schema
    // agreement precheck runs before the mutation is submitted.
    private OperationResult<SchemaChangeResult> internalCreateColumnFamily(final CfDef def) throws ConnectionException {
        return executeSchemaChangeOperation(new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                tracerFactory.newTracer(CassandraOperationType.ADD_COLUMN_FAMILY), def.getKeyspace()) {
            @Override
            public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                precheckSchemaAgreement(client);
                return new SchemaChangeResponseImpl()
                    .setSchemaId(client.system_add_column_family(def));
            }
        });
    }

    /** Update a column family from a fully-built definition object. */
    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(final ColumnFamilyDefinition def) throws ConnectionException {
        return internalColumnFamily(((ThriftColumnFamilyDefinitionImpl) def).getThriftColumnFamilyDefinition());
    }

    /** Update a column family from a map of field name to value. */
    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(final Map<String, Object> options) throws ConnectionException {
        final ThriftColumnFamilyDefinitionImpl def = new ThriftColumnFamilyDefinitionImpl();
        def.setFields(options);
        return internalColumnFamily(def.getThriftColumnFamilyDefinition());
    }

    /**
     * Update a column family from Properties.
     *
     * @throws BadRequestException if the properties cannot be converted to a CfDef
     */
    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(final Properties props) throws ConnectionException {
        final CfDef def;
        try {
            def = ThriftUtils.getThriftObjectFromProperties(CfDef.class, props);
        } catch (Exception e) {
            throw new BadRequestException("Error converting properties to CfDef", e);
        }
        return internalColumnFamily(def);
    }

    // Issues system_update_column_family with schema-disagreement retry.
    // NOTE(review): name was presumably meant to be internalUpdateColumnFamily,
    // mirroring internalCreateColumnFamily; left unchanged since the class
    // continues beyond this view and may have other callers.
    private OperationResult<SchemaChangeResult> internalColumnFamily(final CfDef def) throws ConnectionException {
        return executeSchemaChangeOperation(new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                tracerFactory.newTracer(CassandraOperationType.UPDATE_COLUMN_FAMILY), def.getKeyspace()) {
            @Override
            public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                precheckSchemaAgreement(client);
                return new SchemaChangeResponseImpl().setSchemaId(client.system_update_column_family(def));
            }
        });
    }
@Override
public KeyspaceDefinition makeKeyspaceDefinition() {
return new ThriftKeyspaceDefinitionImpl();
}
@Override
public OperationResult<SchemaChangeResult> addKeyspace(final KeyspaceDefinition def) throws ConnectionException {
return internalCreateKeyspace(((ThriftKeyspaceDefinitionImpl) def).getThriftKeyspaceDefinition());
}
@Override
public OperationResult<SchemaChangeResult> createKeyspace(final Map<String, Object> options) throws ConnectionException {
final ThriftKeyspaceDefinitionImpl def = new ThriftKeyspaceDefinitionImpl();
def.setFields(options);
return internalCreateKeyspace(def.getThriftKeyspaceDefinition());
}
@Override
public OperationResult<SchemaChangeResult> createKeyspace(final Properties props) throws ConnectionException {
final KsDef def;
try {
def = ThriftUtils.getThriftObjectFromProperties(KsDef.class, props);
if (def.getCf_defs() == null) {
def.setCf_defs(Lists.<CfDef>newArrayList());
}
} catch (Exception e) {
throw new BadRequestException("Error converting properties to KsDef", e);
}
return internalCreateKeyspace(def);
}
private OperationResult<SchemaChangeResult> internalCreateKeyspace(final KsDef def) throws ConnectionException {
return executeSchemaChangeOperation(new AbstractOperationImpl<SchemaChangeResult>(
tracerFactory.newTracer(CassandraOperationType.ADD_KEYSPACE)) {
@Override
public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
precheckSchemaAgreement(client);
return new SchemaChangeResponseImpl()
.setSchemaId(client.system_add_keyspace(def));
}
});
}
@Override
public OperationResult<SchemaChangeResult> updateKeyspace(final KeyspaceDefinition def) throws ConnectionException {
return internalUpdateKeyspace(((ThriftKeyspaceDefinitionImpl) def).getThriftKeyspaceDefinition());
}
@Override
public OperationResult<SchemaChangeResult> updateKeyspace(final Map<String, Object> options) throws ConnectionException {
final ThriftKeyspaceDefinitionImpl def = new ThriftKeyspaceDefinitionImpl();
try {
def.setFields(options);
} catch (Exception e) {
throw new BadRequestException("Error converting properties to KsDef", e);
}
return internalUpdateKeyspace(def.getThriftKeyspaceDefinition());
}
@Override
public OperationResult<SchemaChangeResult> updateKeyspace(final Properties props) throws ConnectionException {
    // Convert the flat properties into a thrift KsDef, then update it.
    final KsDef ksDef;
    try {
        ksDef = ThriftUtils.getThriftObjectFromProperties(KsDef.class, props);
        // Thrift requires a non-null cf_defs list even when no column families are given.
        if (ksDef.getCf_defs() == null) {
            ksDef.setCf_defs(Lists.<CfDef>newArrayList());
        }
    } catch (Exception e) {
        throw new BadRequestException("Error converting properties to KsDef", e);
    }
    return internalUpdateKeyspace(ksDef);
}
// Shared implementation behind the updateKeyspace overloads: issues a
// system_update_keyspace call guarded by a quick schema-agreement precheck.
private OperationResult<SchemaChangeResult> internalUpdateKeyspace(final KsDef def) throws ConnectionException {
    return executeSchemaChangeOperation(new AbstractOperationImpl<SchemaChangeResult>(
            tracerFactory.newTracer(CassandraOperationType.UPDATE_KEYSPACE)) {
        @Override
        public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
            // Fail fast if the cluster already disagrees on schema versions.
            precheckSchemaAgreement(client);
            return new SchemaChangeResponseImpl().setSchemaId(client.system_update_keyspace(def));
        }
    });
}
@Override
public ColumnDefinition makeColumnDefinition() {
    // Factory for a fresh, empty, thrift-backed column definition.
    return new ThriftColumnDefinitionImpl();
}
@Override
public AstyanaxConfiguration getConfig() {
    // Exposes the configuration this cluster instance was built with.
    return config;
}
@Override
public OperationResult<SchemaChangeResult> dropColumnFamily(final String keyspaceName, final String columnFamilyName) throws ConnectionException {
    // Drop the column family; executed with the RunOnce retry policy
    // (schema changes are not blindly replayed).
    final AbstractKeyspaceOperationImpl<SchemaChangeResult> operation =
            new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                    tracerFactory.newTracer(CassandraOperationType.DROP_COLUMN_FAMILY), keyspaceName) {
                @Override
                public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                    // Fail fast if the cluster already disagrees on schema versions.
                    precheckSchemaAgreement(client);
                    return new SchemaChangeResponseImpl()
                            .setSchemaId(client.system_drop_column_family(columnFamilyName));
                }
            };
    return connectionPool.executeWithFailover(operation, RunOnce.get());
}
@Override
public OperationResult<SchemaChangeResult> dropKeyspace(final String keyspaceName) throws ConnectionException {
    // Drop the keyspace; executed with the RunOnce retry policy
    // (schema changes are not blindly replayed).
    final AbstractKeyspaceOperationImpl<SchemaChangeResult> operation =
            new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                    tracerFactory.newTracer(CassandraOperationType.DROP_KEYSPACE), keyspaceName) {
                @Override
                public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                    // Fail fast if the cluster already disagrees on schema versions.
                    precheckSchemaAgreement(client);
                    return new SchemaChangeResponseImpl()
                            .setSchemaId(client.system_drop_keyspace(keyspaceName));
                }
            };
    return connectionPool.executeWithFailover(operation, RunOnce.get());
}
@Override
public Properties getAllKeyspaceProperties() throws ConnectionException {
    // Flattens every keyspace's properties into one Properties object,
    // prefixing each key with "<keyspaceName>.".
    List<KeyspaceDefinition> keyspaces = this.describeKeyspaces();
    Properties props = new Properties();
    for (KeyspaceDefinition ksDef : keyspaces) {
        ThriftKeyspaceDefinitionImpl thriftKsDef = (ThriftKeyspaceDefinitionImpl) ksDef;
        try {
            for (Entry<Object, Object> prop : thriftKsDef.getProperties().entrySet()) {
                props.setProperty(ksDef.getName() + "." + prop.getKey(), (String) prop.getValue());
            }
        } catch (Exception e) {
            // Previously swallowed silently; log with the cause so per-keyspace
            // failures are visible. Remaining keyspaces are still processed
            // (best-effort semantics preserved).
            LOG.error(String.format("Error fetching properties for keyspace '%s'", ksDef.getName()), e);
        }
    }
    return props;
}
@Override
public Properties getKeyspaceProperties(String keyspace) throws ConnectionException {
    // Returns the named keyspace's properties, or throws NotFoundException
    // if the keyspace does not exist.
    KeyspaceDefinition ksDef = this.describeKeyspace(keyspace);
    if (ksDef == null)
        throw new NotFoundException(String.format("Keyspace '%s' not found", keyspace));
    Properties props = new Properties();
    ThriftKeyspaceDefinitionImpl thriftKsDef = (ThriftKeyspaceDefinitionImpl) ksDef;
    try {
        for (Entry<Object, Object> prop : thriftKsDef.getProperties().entrySet()) {
            props.setProperty((String) prop.getKey(), (String) prop.getValue());
        }
    } catch (Exception e) {
        // Include the exception so the underlying cause is not lost
        // (previously only the message string was logged).
        LOG.error(String.format("Error fetching properties for keyspace '%s'", keyspace), e);
    }
    return props;
}
@Override
public Properties getColumnFamilyProperties(String keyspace, String columnFamily) throws ConnectionException {
    // Returns the properties of one column family, or throws NotFoundException
    // when either the keyspace or the column family does not exist.
    KeyspaceDefinition ksDef = this.describeKeyspace(keyspace);
    // Guard against a missing keyspace, consistent with getKeyspaceProperties;
    // previously a null ksDef caused an unexplained NullPointerException below.
    if (ksDef == null)
        throw new NotFoundException(String.format("Keyspace '%s' not found", keyspace));
    ColumnFamilyDefinition cfDef = ksDef.getColumnFamily(columnFamily);
    if (cfDef == null)
        throw new NotFoundException(String.format("Column family '%s' in keyspace '%s' not found", columnFamily, keyspace));
    Properties props = new Properties();
    ThriftColumnFamilyDefinitionImpl thriftCfDef = (ThriftColumnFamilyDefinitionImpl) cfDef;
    try {
        for (Entry<Object, Object> prop : thriftCfDef.getProperties().entrySet()) {
            props.setProperty((String) prop.getKey(), (String) prop.getValue());
        }
    } catch (Exception e) {
        // Include the exception so the underlying cause is not lost.
        LOG.error("Error processing column family properties", e);
    }
    return props;
}
/**
 * Do a quick check to see if there is a schema disagreement. This is done as an extra precaution
 * to reduce the chances of putting the cluster into a bad state. This will not guarantee, however, that
 * by the time a schema change is made the cluster will be in the same state.
 * @param client thrift client used to issue the describe_schema_versions call
 * @throws Exception SchemaDisagreementException if more than one schema version is reported
 */
private void precheckSchemaAgreement(Client client) throws Exception {
    // Maps schema-version id -> hosts at that version; more than one entry
    // means the cluster has not yet converged on a single schema.
    Map<String, List<String>> schemas = client.describe_schema_versions();
    if (schemas.size() > 1) {
        throw new SchemaDisagreementException("Can't change schema due to pending schema agreement");
    }
}
}
| 7,987 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftTypes.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
public enum ThriftTypes {
STOP ,
VOID ,
BOOL ,
BYTE ,
DOUBLE ,
IGNORE1,
I16 ,
IGNORE2,
I32 ,
IGNORE3,
I64 ,
STRING ,
STRUCT ,
MAP ,
SET ,
LIST ,
ENUM
}
| 7,988 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftSuperColumnMutationImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.util.List;
import com.netflix.astyanax.AbstractColumnListMutation;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.Deletion;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SuperColumn;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ColumnPath;
/**
 * Mutation builder for a single super column of a row. Columns added through
 * this object are accumulated into a shared thrift {@link Mutation} list that
 * is flushed by the owning mutation batch.
 *
 * Not thread-safe: instances lazily create and then mutate a single
 * {@link SuperColumn} / deletion predicate.
 *
 * @deprecated Use composite columns instead
 * @author elandau
 *
 * @param <C>
 */
public class ThriftSuperColumnMutationImpl<C> extends AbstractColumnListMutation<C> {
    // Shared output list owned by the enclosing mutation batch; every call
    // below appends thrift Mutation objects to it.
    private final List<Mutation> mutationList;
    // Path whose first element is the super column name and whose serializer
    // converts sub-column names to bytes.
    private final ColumnPath<C> path;
    // Lazily created on the first column write; all subsequent writes append
    // sub-columns to this same SuperColumn instance.
    private SuperColumn superColumn;
    // Lazily created on the first deleteColumn(); subsequent deletes append
    // column names to this same predicate.
    private SlicePredicate deletionPredicate;

    public ThriftSuperColumnMutationImpl(long timestamp, List<Mutation> mutationList, ColumnPath<C> path) {
        super(timestamp);
        this.path = path;
        this.mutationList = mutationList;
    }

    @Override
    public <V> ColumnListMutation<C> putColumn(C columnName, V value, Serializer<V> valueSerializer, Integer ttl) {
        // Serialize name/value, apply the explicit ttl (falling back to the
        // mutation's default ttl), and append to the super column.
        Column column = new Column();
        column.setName(path.getSerializer().toByteBuffer(columnName));
        column.setValue(valueSerializer.toByteBuffer(value));
        column.setTimestamp(timestamp);
        if (ttl != null)
            column.setTtl(ttl);
        else if (defaultTtl != null)
            column.setTtl(defaultTtl);
        addMutation(column);
        return this;
    }

    private void addMutation(Column column) {
        // Create the super column mutation if this is the first call;
        // later columns are appended to the same SuperColumn.
        if (superColumn == null) {
            superColumn = new SuperColumn().setName(path.get(0));
            Mutation mutation = new Mutation();
            mutation.setColumn_or_supercolumn(new ColumnOrSuperColumn().setSuper_column(superColumn));
            mutationList.add(mutation);
        }
        superColumn.addToColumns(column);
    }

    @Override
    public ColumnListMutation<C> putEmptyColumn(C columnName, Integer ttl) {
        // Same as putColumn but with an empty value (marker column).
        Column column = new Column();
        column.setName(path.getSerializer().toByteBuffer(columnName));
        column.setValue(ThriftUtils.EMPTY_BYTE_BUFFER);
        column.setTimestamp(timestamp);
        if (ttl != null)
            column.setTtl(ttl);
        else if (defaultTtl != null)
            column.setTtl(defaultTtl);
        addMutation(column);
        return this;
    }

    @Override
    public ColumnListMutation<C> delete() {
        // Delete the entire super column.
        Deletion d = new Deletion();
        d.setSuper_column(path.get(0));
        d.setTimestamp(timestamp);
        mutationList.add(new Mutation().setDeletion(d));
        // Bump the timestamp so columns written after this delete carry a newer
        // timestamp and are not masked by the deletion.
        // NOTE(review): only delete() advances the timestamp - confirm this
        // asymmetry with deleteColumn() is intentional.
        timestamp++;
        return this;
    }

    @Override
    public <SC> ColumnListMutation<SC> withSuperColumn(ColumnPath<SC> superColumnPath) {
        // Nested super columns are not supported.
        throw new UnsupportedOperationException();
    }

    @Override
    public ColumnListMutation<C> incrementCounterColumn(C columnName, long amount) {
        // Counters are not supported inside super columns here.
        throw new UnsupportedOperationException();
    }

    @Override
    public ColumnListMutation<C> deleteColumn(C columnName) {
        // Lazily create one Deletion carrying a shared slice predicate; every
        // subsequent deleteColumn() call just appends another column name to it.
        if (deletionPredicate == null) {
            deletionPredicate = new SlicePredicate();
            Deletion d = new Deletion();
            d.setTimestamp(timestamp);
            d.setSuper_column(path.get(0));
            d.setPredicate(deletionPredicate);
            mutationList.add(new Mutation().setDeletion(d));
        }
        deletionPredicate.addToColumn_names(path.getSerializer().toByteBuffer(columnName));
        return this;
    }

    @Override
    public ColumnListMutation<C> putCompressedColumn(C columnName, String value, Integer ttl) {
        // Compressed columns are not supported for super columns.
        throw new UnsupportedOperationException();
    }

    @Override
    public ColumnListMutation<C> putCompressedColumn(C columnName, String value) {
        throw new UnsupportedOperationException();
    }

    @Override
    public ColumnListMutation<C> putCompressedColumnIfNotNull(C columnName, String value, Integer ttl) {
        throw new UnsupportedOperationException();
    }

    @Override
    public ColumnListMutation<C> putCompressedColumnIfNotNull(C columnName, String value) {
        throw new UnsupportedOperationException();
    }
}
| 7,989 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractThriftCqlPreparedStatement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.UUID;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.CqlPreparedStatement;
/**
 * Skeletal {@link CqlPreparedStatement} for the thrift driver.
 *
 * NOTE(review): every with*Value method below is an auto-generated stub that
 * ignores its argument and returns {@code null} instead of {@code this}, so
 * fluent chaining on a subclass that does not override them will throw a
 * NullPointerException. Subclasses are expected to override the methods they
 * actually support.
 */
public abstract class AbstractThriftCqlPreparedStatement implements CqlPreparedStatement {
    @Override
    public <V> CqlPreparedStatement withByteBufferValue(V value, Serializer<V> serializer) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withValue(ByteBuffer value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withValues(List<ByteBuffer> value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withStringValue(String value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withIntegerValue(Integer value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withBooleanValue(Boolean value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withDoubleValue(Double value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withLongValue(Long value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withFloatValue(Float value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withShortValue(Short value) {
        // Unimplemented stub.
        return null;
    }

    @Override
    public CqlPreparedStatement withUUIDValue(UUID value) {
        // Unimplemented stub.
        return null;
    }
}
| 7,990 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftColumnFamilyQueryImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.CounterSuperColumn;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.SuperColumn;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.RowCopier;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.ConnectionPool;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.query.AllRowsQuery;
import com.netflix.astyanax.query.ColumnCountQuery;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.query.ColumnQuery;
import com.netflix.astyanax.query.CqlQuery;
import com.netflix.astyanax.query.IndexQuery;
import com.netflix.astyanax.query.RowQuery;
import com.netflix.astyanax.query.RowSliceColumnCountQuery;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.shallows.EmptyColumnList;
import com.netflix.astyanax.shallows.EmptyRowsImpl;
import com.netflix.astyanax.thrift.model.ThriftColumnImpl;
import com.netflix.astyanax.thrift.model.ThriftColumnOrSuperColumnListImpl;
import com.netflix.astyanax.thrift.model.ThriftCounterColumnImpl;
import com.netflix.astyanax.thrift.model.ThriftCounterSuperColumnImpl;
import com.netflix.astyanax.thrift.model.ThriftRowsListImpl;
import com.netflix.astyanax.thrift.model.ThriftRowsSliceImpl;
import com.netflix.astyanax.thrift.model.ThriftSuperColumnImpl;
/**
* Implementation of all column family queries using the thrift API.
*
* @author elandau
*
* @NotThreadSafe
* This class creates objects that keep track of internal state for pagination, pinned host, keyspace tracers etc
* Hence it is NOT recommended to use the same instance with multiple threads.
* Each thread should get it's own ThriftColumnFamilyQueryImpl<K,C> instance from keyspace object by calling keyspace.prepareQuery()
*
* @param <K>
* @param <C>
*/
public class ThriftColumnFamilyQueryImpl<K, C> implements ColumnFamilyQuery<K, C> {
private final static Logger LOG = LoggerFactory.getLogger(ThriftColumnFamilyQueryImpl.class);
final ConnectionPool<Cassandra.Client> connectionPool;
final ColumnFamily<K, C> columnFamily;
final KeyspaceTracerFactory tracerFactory;
final ThriftKeyspaceImpl keyspace;
ConsistencyLevel consistencyLevel;
final ListeningExecutorService executor;
Host pinnedHost;
RetryPolicy retry;
public ThriftColumnFamilyQueryImpl(ExecutorService executor, KeyspaceTracerFactory tracerFactory,
        ThriftKeyspaceImpl keyspace, ConnectionPool<Cassandra.Client> cp, ColumnFamily<K, C> columnFamily,
        ConsistencyLevel consistencyLevel, RetryPolicy retry) {
    // Wrap the caller-supplied executor so async query methods can return
    // ListenableFutures.
    this.executor = MoreExecutors.listeningDecorator(executor);
    this.tracerFactory = tracerFactory;
    this.keyspace = keyspace;
    this.connectionPool = cp;
    this.columnFamily = columnFamily;
    this.consistencyLevel = consistencyLevel;
    this.retry = retry;
}
// Single ROW query
@Override
public RowQuery<K, C> getKey(final K rowKey) {
    // Builds a single-row query. The returned object is stateful (pagination
    // cursor, firstPage flag) and must not be shared between threads.
    return new AbstractRowQueryImpl<K, C>(columnFamily.getColumnSerializer()) {
        // True until the first paginated execute(); the first page bumps the
        // requested count by one so later pages can use the trailing column
        // as the next-page cursor (which is then dropped).
        private boolean firstPage = true;

        @Override
        public ColumnQuery<C> getColumn(final C column) {
            // Fetch a single column of the row.
            return new ColumnQuery<C>() {
                @Override
                public OperationResult<Column<C>> execute() throws ConnectionException {
                    return connectionPool.executeWithFailover(new AbstractKeyspaceOperationImpl<Column<C>>(
                            tracerFactory.newTracer(CassandraOperationType.GET_COLUMN, columnFamily), pinnedHost,
                            keyspace.getKeyspaceName()) {
                        @Override
                        public Column<C> internalExecute(Client client, ConnectionContext context) throws Exception {
                            ColumnOrSuperColumn cosc = client.get(
                                    columnFamily.getKeySerializer().toByteBuffer(rowKey),
                                    new org.apache.cassandra.thrift.ColumnPath().setColumn_family(
                                            columnFamily.getName()).setColumn(
                                            columnFamily.getColumnSerializer().toByteBuffer(column)),
                                    ThriftConverter.ToThriftConsistencyLevel(consistencyLevel));
                            // Unwrap whichever of the four thrift union variants is set.
                            if (cosc.isSetColumn()) {
                                org.apache.cassandra.thrift.Column c = cosc.getColumn();
                                return new ThriftColumnImpl<C>(columnFamily.getColumnSerializer().fromBytes(
                                        c.getName()), c);
                            }
                            else if (cosc.isSetSuper_column()) {
                                // TODO: Super columns should be deprecated
                                SuperColumn sc = cosc.getSuper_column();
                                return new ThriftSuperColumnImpl<C>(columnFamily.getColumnSerializer().fromBytes(
                                        sc.getName()), sc);
                            }
                            else if (cosc.isSetCounter_column()) {
                                org.apache.cassandra.thrift.CounterColumn c = cosc.getCounter_column();
                                return new ThriftCounterColumnImpl<C>(columnFamily.getColumnSerializer().fromBytes(
                                        c.getName()), c);
                            }
                            else if (cosc.isSetCounter_super_column()) {
                                // TODO: Super columns should be deprecated
                                CounterSuperColumn sc = cosc.getCounter_super_column();
                                return new ThriftCounterSuperColumnImpl<C>(columnFamily.getColumnSerializer()
                                        .fromBytes(sc.getName()), sc);
                            }
                            else {
                                throw new RuntimeException("Unknown column type in response");
                            }
                        }

                        @Override
                        public ByteBuffer getRowKey() {
                            return columnFamily.getKeySerializer().toByteBuffer(rowKey);
                        }
                    }, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Column<C>>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Column<C>>>() {
                        @Override
                        public OperationResult<Column<C>> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }

        @Override
        public OperationResult<ColumnList<C>> execute() throws ConnectionException {
            // Fetch the row's columns (one page at a time when paginating).
            return connectionPool.executeWithFailover(
                    new AbstractKeyspaceOperationImpl<ColumnList<C>>(tracerFactory.newTracer(
                            CassandraOperationType.GET_ROW, columnFamily), pinnedHost, keyspace.getKeyspaceName()) {
                        @Override
                        public ColumnList<C> execute(Client client, ConnectionContext context) throws ConnectionException {
                            // Pagination exhausted: short-circuit without a server call.
                            if (isPaginating && paginateNoMore) {
                                return new EmptyColumnList<C>();
                            }
                            return super.execute(client, context);
                        }

                        @Override
                        public ColumnList<C> internalExecute(Client client, ConnectionContext context) throws Exception {
                            List<ColumnOrSuperColumn> columnList = client.get_slice(columnFamily.getKeySerializer()
                                    .toByteBuffer(rowKey), new ColumnParent().setColumn_family(columnFamily
                                    .getName()), predicate, ThriftConverter
                                    .ToThriftConsistencyLevel(consistencyLevel));
                            // Special handling for pagination
                            if (isPaginating && predicate.isSetSlice_range()) {
                                // Did we reach the end of the query.
                                // (a short page means no further pages exist)
                                if (columnList.size() != predicate.getSlice_range().getCount()) {
                                    paginateNoMore = true;
                                }
                                // If this is the first page then adjust the
                                // count so we fetch one extra column
                                // that will later be dropped
                                if (firstPage) {
                                    firstPage = false;
                                    if (predicate.getSlice_range().getCount() != Integer.MAX_VALUE)
                                        predicate.getSlice_range().setCount(predicate.getSlice_range().getCount() + 1);
                                }
                                else {
                                    // Non-first pages start at the previous page's last
                                    // column, so drop that duplicate cursor column.
                                    if (!columnList.isEmpty())
                                        columnList.remove(0);
                                }
                                // Set the start column for the next page to
                                // the last column of this page.
                                // We will discard this column later.
                                if (!columnList.isEmpty()) {
                                    ColumnOrSuperColumn last = Iterables.getLast(columnList);
                                    if (last.isSetColumn()) {
                                        predicate.getSlice_range().setStart(last.getColumn().getName());
                                    } else if (last.isSetCounter_column()) {
                                        predicate.getSlice_range().setStart(last.getCounter_column().getName());
                                    } else if (last.isSetSuper_column()) {
                                        // TODO: Super columns should be deprecated
                                        predicate.getSlice_range().setStart(last.getSuper_column().getName());
                                    } else if (last.isSetCounter_super_column()) {
                                        // TODO: Super columns should be deprecated
                                        predicate.getSlice_range().setStart(last.getCounter_super_column().getName());
                                    }
                                }
                            }
                            ColumnList<C> result = new ThriftColumnOrSuperColumnListImpl<C>(columnList,
                                    columnFamily.getColumnSerializer());
                            return result;
                        }

                        @Override
                        public ByteBuffer getRowKey() {
                            return columnFamily.getKeySerializer().toByteBuffer(rowKey);
                        }
                    }, retry);
        }

        @Override
        public ColumnCountQuery getCount() {
            // Count the row's columns matching the current predicate.
            return new ColumnCountQuery() {
                @Override
                public OperationResult<Integer> execute() throws ConnectionException {
                    return connectionPool.executeWithFailover(new AbstractKeyspaceOperationImpl<Integer>(
                            tracerFactory.newTracer(CassandraOperationType.GET_COLUMN_COUNT, columnFamily),
                            pinnedHost, keyspace.getKeyspaceName()) {
                        @Override
                        public Integer internalExecute(Client client, ConnectionContext context) throws Exception {
                            return client.get_count(columnFamily.getKeySerializer().toByteBuffer(rowKey),
                                    new ColumnParent().setColumn_family(columnFamily.getName()), predicate,
                                    ThriftConverter.ToThriftConsistencyLevel(consistencyLevel));
                        }

                        @Override
                        public ByteBuffer getRowKey() {
                            return columnFamily.getKeySerializer().toByteBuffer(rowKey);
                        }
                    }, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Integer>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Integer>>() {
                        @Override
                        public OperationResult<Integer> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }

        @Override
        public ListenableFuture<OperationResult<ColumnList<C>>> executeAsync() throws ConnectionException {
            return executor.submit(new Callable<OperationResult<ColumnList<C>>>() {
                @Override
                public OperationResult<ColumnList<C>> call() throws Exception {
                    return execute();
                }
            });
        }

        @Override
        public RowCopier<K, C> copyTo(final ColumnFamily<K, C> otherColumnFamily, final K otherRowKey) {
            // Read this row and write its columns into another row/column family
            // in a single batch_mutate.
            return new RowCopier<K, C>() {
                // When true (default), copied columns keep their original
                // timestamps; otherwise they are re-stamped with "now".
                private boolean useOriginalTimestamp = true;

                @Override
                public OperationResult<Void> execute() throws ConnectionException {
                    return connectionPool.executeWithFailover(
                            new AbstractKeyspaceOperationImpl<Void>(tracerFactory.newTracer(
                                    CassandraOperationType.COPY_TO, columnFamily), pinnedHost, keyspace
                                    .getKeyspaceName()) {
                                @Override
                                public Void internalExecute(Client client, ConnectionContext context) throws Exception {
                                    long currentTime = keyspace.getConfig().getClock().getCurrentTime();
                                    List<ColumnOrSuperColumn> columnList = client.get_slice(columnFamily
                                            .getKeySerializer().toByteBuffer(rowKey), new ColumnParent()
                                            .setColumn_family(columnFamily.getName()), predicate, ThriftConverter
                                            .ToThriftConsistencyLevel(consistencyLevel));
                                    // Create mutation list from columns in
                                    // the response
                                    List<Mutation> mutationList = new ArrayList<Mutation>();
                                    for (ColumnOrSuperColumn sosc : columnList) {
                                        ColumnOrSuperColumn cosc;
                                        if (sosc.isSetColumn()) {
                                            cosc = new ColumnOrSuperColumn().setColumn(sosc.getColumn());
                                            if (!useOriginalTimestamp)
                                                cosc.getColumn().setTimestamp(currentTime);
                                        }
                                        else if (sosc.isSetSuper_column()) {
                                            cosc = new ColumnOrSuperColumn().setSuper_column(sosc.getSuper_column());
                                            if (!useOriginalTimestamp) {
                                                for (org.apache.cassandra.thrift.Column subColumn : sosc.getSuper_column().getColumns()) {
                                                    subColumn.setTimestamp(currentTime);
                                                    // NOTE(review): duplicated statement below is redundant
                                                    // (idempotent) and can be removed.
                                                    subColumn.setTimestamp(currentTime);
                                                }
                                            }
                                        }
                                        else if (sosc.isSetCounter_column()) {
                                            cosc = new ColumnOrSuperColumn().setCounter_column(sosc.getCounter_column());
                                        }
                                        else if (sosc.isSetCounter_super_column()) {
                                            cosc = new ColumnOrSuperColumn().setCounter_super_column(sosc.getCounter_super_column());
                                        }
                                        else {
                                            continue;
                                        }
                                        mutationList.add(new Mutation().setColumn_or_supercolumn(cosc));
                                    }
                                    // Create mutation map
                                    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
                                    HashMap<String, List<Mutation>> cfmap = new HashMap<String, List<Mutation>>();
                                    cfmap.put(otherColumnFamily.getName(), mutationList);
                                    mutationMap.put(columnFamily.getKeySerializer().toByteBuffer(otherRowKey),
                                            cfmap);
                                    // Execute the mutation
                                    client.batch_mutate(mutationMap,
                                            ThriftConverter.ToThriftConsistencyLevel(consistencyLevel));
                                    return null;
                                }
                            }, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Void>>() {
                        @Override
                        public OperationResult<Void> call() throws Exception {
                            return execute();
                        }
                    });
                }

                @Override
                public RowCopier<K, C> withOriginalTimestamp(boolean useOriginalTimestamp) {
                    this.useOriginalTimestamp = useOriginalTimestamp;
                    return this;
                }
            };
        }
    };
}
@Override
public RowSliceQuery<K, C> getKeyRange(final K startKey, final K endKey, final String startToken,
        final String endToken, final int count) {
    // Query a contiguous range of rows by key and/or token.
    return new AbstractRowSliceQueryImpl<K, C>(columnFamily.getColumnSerializer()) {
        @Override
        public OperationResult<Rows<K, C>> execute() throws ConnectionException {
            AbstractKeyspaceOperationImpl<Rows<K, C>> op = new AbstractKeyspaceOperationImpl<Rows<K, C>>(
                    tracerFactory.newTracer(CassandraOperationType.GET_ROWS_RANGE, columnFamily),
                    pinnedHost, keyspace.getKeyspaceName()) {
                @Override
                public Rows<K, C> internalExecute(Client client, ConnectionContext context) throws Exception {
                    // Build the key/token range. The same get_range_slices call
                    // covers standard and super columns via the ColumnParent.
                    final KeyRange range = new KeyRange();
                    if (startKey != null) {
                        range.setStart_key(columnFamily.getKeySerializer().toByteBuffer(startKey));
                    }
                    if (endKey != null) {
                        range.setEnd_key(columnFamily.getKeySerializer().toByteBuffer(endKey));
                    }
                    range.setCount(count);
                    range.setStart_token(startToken);
                    range.setEnd_token(endToken);
                    List<org.apache.cassandra.thrift.KeySlice> keySlices = client.get_range_slices(
                            new ColumnParent().setColumn_family(columnFamily.getName()), predicate, range,
                            ThriftConverter.ToThriftConsistencyLevel(consistencyLevel));
                    if (keySlices == null || keySlices.isEmpty()) {
                        return new EmptyRowsImpl<K, C>();
                    }
                    return new ThriftRowsSliceImpl<K, C>(keySlices, columnFamily.getKeySerializer(),
                            columnFamily.getColumnSerializer());
                }

                @Override
                public ByteBuffer getRowKey() {
                    return (startKey == null) ? null : columnFamily.getKeySerializer().toByteBuffer(startKey);
                }
            };
            return connectionPool.executeWithFailover(op, retry);
        }

        @Override
        public ListenableFuture<OperationResult<Rows<K, C>>> executeAsync() throws ConnectionException {
            return executor.submit(new Callable<OperationResult<Rows<K, C>>>() {
                @Override
                public OperationResult<Rows<K, C>> call() throws Exception {
                    return execute();
                }
            });
        }

        @Override
        public RowSliceColumnCountQuery<K> getColumnCounts() {
            // Column counts are not implemented for key-range queries.
            throw new RuntimeException("Not supported yet");
        }
    };
}
@Override
public RowSliceQuery<K, C> getKeySlice(final Iterable<K> keys) {
    // Query a discrete (non-contiguous) set of rows by key.
    return new AbstractRowSliceQueryImpl<K, C>(columnFamily.getColumnSerializer()) {
        @Override
        public OperationResult<Rows<K, C>> execute() throws ConnectionException {
            AbstractKeyspaceOperationImpl<Rows<K, C>> op = new AbstractKeyspaceOperationImpl<Rows<K, C>>(
                    tracerFactory.newTracer(CassandraOperationType.GET_ROWS_SLICE, columnFamily),
                    pinnedHost, keyspace.getKeyspaceName()) {
                @Override
                public Rows<K, C> internalExecute(Client client, ConnectionContext context) throws Exception {
                    Map<ByteBuffer, List<ColumnOrSuperColumn>> rowMap = client.multiget_slice(
                            columnFamily.getKeySerializer().toBytesList(keys),
                            new ColumnParent().setColumn_family(columnFamily.getName()),
                            predicate,
                            ThriftConverter.ToThriftConsistencyLevel(consistencyLevel));
                    if (rowMap == null || rowMap.isEmpty()) {
                        return new EmptyRowsImpl<K, C>();
                    }
                    return new ThriftRowsListImpl<K, C>(rowMap, columnFamily.getKeySerializer(),
                            columnFamily.getColumnSerializer());
                }
            };
            return connectionPool.executeWithFailover(op, retry);
        }

        @Override
        public ListenableFuture<OperationResult<Rows<K, C>>> executeAsync() throws ConnectionException {
            return executor.submit(new Callable<OperationResult<Rows<K, C>>>() {
                @Override
                public OperationResult<Rows<K, C>> call() throws Exception {
                    return execute();
                }
            });
        }

        @Override
        public RowSliceColumnCountQuery<K> getColumnCounts() {
            // Per-row column counts for the same key set and predicate.
            return new RowSliceColumnCountQuery<K>() {
                @Override
                public OperationResult<Map<K, Integer>> execute() throws ConnectionException {
                    AbstractKeyspaceOperationImpl<Map<K, Integer>> countOp = new AbstractKeyspaceOperationImpl<Map<K, Integer>>(
                            tracerFactory.newTracer(CassandraOperationType.GET_ROWS_SLICE, columnFamily),
                            pinnedHost, keyspace.getKeyspaceName()) {
                        @Override
                        public Map<K, Integer> internalExecute(Client client, ConnectionContext context) throws Exception {
                            Map<ByteBuffer, Integer> counts = client.multiget_count(
                                    columnFamily.getKeySerializer().toBytesList(keys),
                                    new ColumnParent().setColumn_family(columnFamily.getName()),
                                    predicate,
                                    ThriftConverter.ToThriftConsistencyLevel(consistencyLevel));
                            if (counts == null || counts.isEmpty()) {
                                return Maps.newHashMap();
                            }
                            return columnFamily.getKeySerializer().fromBytesMap(counts);
                        }
                    };
                    return connectionPool.executeWithFailover(countOp, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Map<K, Integer>>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Map<K, Integer>>>() {
                        @Override
                        public OperationResult<Map<K, Integer>> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }
    };
}
@Override
public RowSliceQuery<K, C> getKeySlice(final K keys[]) {
    // Delegate to the Collection overload via a fixed-size list view of the array.
    final List<K> keyList = Arrays.asList(keys);
    return getKeySlice(keyList);
}
@Override
public RowSliceQuery<K, C> getKeySlice(final Collection<K> keys) {
    // Builds a multi-row query over an explicit set of keys.  The returned
    // object is lazy: nothing is sent to Cassandra until execute()/executeAsync().
    return new AbstractRowSliceQueryImpl<K, C>(columnFamily.getColumnSerializer()) {
        @Override
        public OperationResult<Rows<K, C>> execute() throws ConnectionException {
            // Runs thrift multiget_slice through the pool's failover logic,
            // honoring any pinned host and the query's retry policy.
            return connectionPool.executeWithFailover(
                    new AbstractKeyspaceOperationImpl<Rows<K, C>>(tracerFactory.newTracer(
                            CassandraOperationType.GET_ROWS_SLICE, columnFamily), pinnedHost, keyspace
                            .getKeyspaceName()) {
                        @Override
                        public Rows<K, C> internalExecute(Client client, ConnectionContext context) throws Exception {
                            Map<ByteBuffer, List<ColumnOrSuperColumn>> cfmap = client.multiget_slice(columnFamily
                                    .getKeySerializer().toBytesList(keys), new ColumnParent()
                                    .setColumn_family(columnFamily.getName()), predicate, ThriftConverter
                                    .ToThriftConsistencyLevel(consistencyLevel));
                            // Normalize a null/empty thrift response to an empty Rows
                            // so callers never see null.
                            if (cfmap == null || cfmap.isEmpty()) {
                                return new EmptyRowsImpl<K, C>();
                            }
                            else {
                                return new ThriftRowsListImpl<K, C>(cfmap, columnFamily.getKeySerializer(),
                                        columnFamily.getColumnSerializer());
                            }
                        }
                        @Override
                        public ByteBuffer getRowKey() {
                            // Token-aware routing is disabled for multi-key queries:
                            // the keys may map to different tokens, so no single row
                            // key can be used for host selection.
                            // / return
                            // partitioner.getToken(columnFamily.getKeySerializer().toByteBuffer(keys.iterator().next())).token;
                            return null;
                        }
                    }, retry);
        }
        @Override
        public ListenableFuture<OperationResult<Rows<K, C>>> executeAsync() throws ConnectionException {
            // Same operation, submitted to the keyspace's executor.
            return executor.submit(new Callable<OperationResult<Rows<K, C>>>() {
                @Override
                public OperationResult<Rows<K, C>> call() throws Exception {
                    return execute();
                }
            });
        }
        @Override
        public RowSliceColumnCountQuery<K> getColumnCounts() {
            // Variant that returns per-row column counts (thrift multiget_count)
            // instead of the column data itself.  The count respects the same
            // slice predicate configured on this query.
            return new RowSliceColumnCountQuery<K>() {
                @Override
                public OperationResult<Map<K, Integer>> execute() throws ConnectionException {
                    return connectionPool.executeWithFailover(
                            new AbstractKeyspaceOperationImpl<Map<K, Integer>>(tracerFactory.newTracer(
                                    CassandraOperationType.GET_ROWS_SLICE, columnFamily), pinnedHost, keyspace
                                    .getKeyspaceName()) {
                                @Override
                                public Map<K, Integer> internalExecute(Client client, ConnectionContext context) throws Exception {
                                    Map<ByteBuffer, Integer> cfmap = client.multiget_count(columnFamily
                                            .getKeySerializer().toBytesList(keys), new ColumnParent()
                                            .setColumn_family(columnFamily.getName()), predicate, ThriftConverter
                                            .ToThriftConsistencyLevel(consistencyLevel));
                                    if (cfmap == null || cfmap.isEmpty()) {
                                        return Maps.newHashMap();
                                    }
                                    else {
                                        // Deserialize the ByteBuffer keys back to K.
                                        return columnFamily.getKeySerializer().fromBytesMap(cfmap);
                                    }
                                }
                                @Override
                                public ByteBuffer getRowKey() {
                                    // See above: no single row key for a multi-key operation.
                                    // / return
                                    // partitioner.getToken(columnFamily.getKeySerializer().toByteBuffer(keys.iterator().next())).token;
                                    return null;
                                }
                            }, retry);
                }
                @Override
                public ListenableFuture<OperationResult<Map<K, Integer>>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Map<K, Integer>>>() {
                        @Override
                        public OperationResult<Map<K, Integer>> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }
    };
}
@Override
public ColumnFamilyQuery<K, C> setConsistencyLevel(ConsistencyLevel consistencyLevel) {
    // Overrides the consistency level for all operations built from this query.
    this.consistencyLevel = consistencyLevel;
    return this;
}
@Override
public IndexQuery<K, C> searchWithIndex() {
    // Builds a secondary-index query (thrift get_indexed_slices) with optional
    // client-driven pagination: each page re-fetches from the last key seen,
    // and the duplicated boundary row is dropped on subsequent pages.
    return new AbstractIndexQueryImpl<K, C>(columnFamily) {
        @Override
        public OperationResult<Rows<K, C>> execute() throws ConnectionException {
            return connectionPool.executeWithFailover(
                    new AbstractKeyspaceOperationImpl<Rows<K, C>>(tracerFactory.newTracer(
                            CassandraOperationType.GET_ROWS_BY_INDEX, columnFamily), pinnedHost, keyspace
                            .getKeyspaceName()) {
                        @Override
                        public Rows<K, C> execute(Client client, ConnectionContext context) throws ConnectionException {
                            // Short-circuit once pagination has been exhausted so
                            // repeated execute() calls return empty without a round trip.
                            if (isPaginating && paginateNoMore) {
                                return new EmptyRowsImpl<K, C>();
                            }
                            return super.execute(client, context);
                        }
                        @Override
                        public Rows<K, C> internalExecute(Client client, ConnectionContext context) throws Exception {
                            List<org.apache.cassandra.thrift.KeySlice> cfmap;
                            cfmap = client.get_indexed_slices(
                                    new ColumnParent().setColumn_family(columnFamily.getName()), indexClause,
                                    predicate, ThriftConverter.ToThriftConsistencyLevel(consistencyLevel));
                            if (cfmap == null) {
                                return new EmptyRowsImpl<K, C>();
                            }
                            else {
                                if (isPaginating) {
                                    // On every page after the first, the first row is the
                                    // start key of this page (already returned on the
                                    // previous page) — drop it.
                                    if (!firstPage && !cfmap.isEmpty() &&
                                        cfmap.get(0).bufferForKey().equals(indexClause.bufferForStart_key())) {
                                        cfmap.remove(0);
                                    }
                                    try {
                                        if (!cfmap.isEmpty()) {
                                            // Remember the last key as the start of the next page.
                                            setNextStartKey(ByteBuffer.wrap(Iterables.getLast(cfmap).getKey()));
                                        }
                                        else {
                                            paginateNoMore = true;
                                        }
                                    }
                                    catch (ArithmeticException e) {
                                        // NOTE(review): unclear what throws ArithmeticException
                                        // here (emptiness is already checked above) — treated as
                                        // "no more pages"; confirm before changing.
                                        paginateNoMore = true;
                                    }
                                }
                                return new ThriftRowsSliceImpl<K, C>(cfmap, columnFamily.getKeySerializer(),
                                        columnFamily.getColumnSerializer());
                            }
                        }
                    }, retry);
        }
        @Override
        public ListenableFuture<OperationResult<Rows<K, C>>> executeAsync() throws ConnectionException {
            // Same operation, submitted to the keyspace's executor.
            return executor.submit(new Callable<OperationResult<Rows<K, C>>>() {
                @Override
                public OperationResult<Rows<K, C>> call() throws Exception {
                    return execute();
                }
            });
        }
    };
}
@Override
public CqlQuery<K, C> withCql(final String cql) {
    // Delegates to the keyspace's CQL factory (selected per target Cassandra
    // version, CQL2 vs CQL3).
    return keyspace.cqlStatementFactory.createCqlQuery(this, cql);
}
@Override
public AllRowsQuery<K, C> getAllRows() {
    // Full-column-family scan; iteration details live in ThriftAllRowsQueryImpl.
    return new ThriftAllRowsQueryImpl<K, C>(this);
}
@Override
public ColumnFamilyQuery<K, C> pinToHost(Host host) {
    // Forces all operations built from this query to execute on one host,
    // bypassing normal host selection.
    this.pinnedHost = host;
    return this;
}
@Override
public ColumnFamilyQuery<K, C> withRetryPolicy(RetryPolicy retry) {
    // Overrides the retry policy used by executeWithFailover for this query.
    this.retry = retry;
    return this;
}
@Override
public RowQuery<K, C> getRow(K rowKey) {
    // Alias for getKey(rowKey).
    return getKey(rowKey);
}
@Override
public RowSliceQuery<K, C> getRowRange(K startKey, K endKey, String startToken, String endToken, int count) {
    // Alias for getKeyRange with identical arguments.
    return getKeyRange(startKey, endKey, startToken, endToken, count);
}
@Override
public RowSliceQuery<K, C> getRowSlice(K... keys) {
    // Alias for getKeySlice(K[]).
    return getKeySlice(keys);
}
@Override
public RowSliceQuery<K, C> getRowSlice(Collection<K> keys) {
    // Alias for getKeySlice(Collection).
    return getKeySlice(keys);
}
@Override
public RowSliceQuery<K, C> getRowSlice(Iterable<K> keys) {
    // Alias for getKeySlice(Iterable).
    return getKeySlice(keys);
}
@Override
public ColumnFamilyQuery<K, C> withCaching(boolean condition) {
    // Intentional no-op: this thrift implementation does not support query
    // caching, so the flag is ignored.
    return this;
}
}
| 7,991 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractKeyspaceOperationImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import com.netflix.astyanax.CassandraOperationTracer;
import com.netflix.astyanax.connectionpool.Host;
/**
 * Base class for thrift operations executed in the context of a specific
 * keyspace. Extends {@link AbstractOperationImpl} with the keyspace name and
 * defaults the row key to {@code null}.
 */
public abstract class AbstractKeyspaceOperationImpl<R> extends AbstractOperationImpl<R> {
    /** Keyspace this operation targets; set once at construction. */
    private final String keyspaceName;

    /**
     * @param tracer       operation tracer for instrumentation
     * @param pinnedHost   specific host to run on, or null for normal selection
     * @param keyspaceName keyspace the operation executes against
     */
    public AbstractKeyspaceOperationImpl(CassandraOperationTracer tracer, Host pinnedHost, String keyspaceName) {
        super(tracer, pinnedHost);
        this.keyspaceName = keyspaceName;
    }

    public AbstractKeyspaceOperationImpl(CassandraOperationTracer tracer, String keyspaceName) {
        super(tracer);
        this.keyspaceName = keyspaceName;
    }

    @Override
    public String getKeyspace() {
        return this.keyspaceName;
    }

    /**
     * @return null — keyspace-level operations carry no row key
     *         (presumably used by token-aware routing; confirm at the caller).
     */
    @Override
    public ByteBuffer getRowKey() {
        return null;
    }
}
| 7,992 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCqlFactoryResolver.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.netflix.astyanax.AstyanaxConfiguration;
/**
 * Selects the CQL adapter implementation based on the configured target
 * Cassandra version: CQL3 for 1.2 and later, CQL2 otherwise (including when
 * no version is configured or it cannot be parsed).
 */
public class ThriftCqlFactoryResolver {
    // Matches "<major>.<minor>" with an arbitrary suffix (e.g. "1.2.5").
    // The quantifiers must be INSIDE the capturing groups: "([0-9])+" would
    // capture only the LAST digit of each component, misparsing multi-digit
    // versions such as "10.0" or "1.10".
    private static final Pattern VERSION_REGEX = Pattern.compile("^([0-9]+)\\.([0-9]+)(.*)");

    /**
     * @param config configuration providing the target Cassandra version string
     * @return a {@link ThriftCql3Factory} when the version is >= 1.2,
     *         otherwise a {@link ThriftCql2Factory}
     */
    public static ThriftCqlFactory createFactory(AstyanaxConfiguration config) {
        if (config.getTargetCassandraVersion() != null) {
            Matcher m = VERSION_REGEX.matcher(config.getTargetCassandraVersion());
            if (m.matches()) {
                int major = Integer.parseInt(m.group(1));
                int minor = Integer.parseInt(m.group(2));
                if (major > 1 || (major == 1 && minor >= 2)) {
                    return new ThriftCql3Factory();
                }
            }
        }
        return new ThriftCql2Factory();
    }
}
| 7,993 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractRowSliceQueryImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.query.RowSliceQuery;
/**
 * Base implementation of {@link RowSliceQuery} that accumulates a thrift
 * {@link SlicePredicate} describing which columns to fetch from each row.
 * Subclasses supply the actual execution.
 */
public abstract class AbstractRowSliceQueryImpl<K, C> implements RowSliceQuery<K, C> {
    /**
     * Column filter sent with the query; starts as an all-inclusive range.
     * Final (it is mutated in place, never reassigned) for consistency with
     * AbstractRowQueryImpl.
     */
    protected final SlicePredicate predicate = new SlicePredicate().setSlice_range(ThriftUtils.createAllInclusiveSliceRange());

    /** Serializer for column names; set once at construction. */
    private final Serializer<C> serializer;

    public AbstractRowSliceQueryImpl(Serializer<C> serializer) {
        this.serializer = serializer;
    }

    /** Selects an explicit, non-contiguous set of columns; null is a no-op. */
    @Override
    public RowSliceQuery<K, C> withColumnSlice(C... columns) {
        if (columns != null) {
            predicate.setColumn_names(serializer.toBytesList(Arrays.asList(columns))).setSlice_rangeIsSet(false);
        }
        return this;
    }

    /** Selects an explicit, non-contiguous set of columns; null is a no-op. */
    @Override
    public RowSliceQuery<K, C> withColumnSlice(Collection<C> columns) {
        if (columns != null)
            predicate.setColumn_names(serializer.toBytesList(columns)).setSlice_rangeIsSet(false);
        return this;
    }

    /** Selects a contiguous range of columns, optionally reversed and limited. */
    @Override
    public RowSliceQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
        predicate.setSlice_range(ThriftUtils.createSliceRange(serializer, startColumn, endColumn, reversed, count));
        return this;
    }

    /** Range variant taking pre-serialized column-name bounds. */
    @Override
    public RowSliceQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count) {
        predicate.setSlice_range(new SliceRange(startColumn, endColumn, reversed, count));
        return this;
    }

    /**
     * Applies a cached {@link ColumnSlice}: either an explicit column list or
     * a start/end range, whichever the slice carries.
     */
    @Override
    public RowSliceQuery<K, C> withColumnSlice(ColumnSlice<C> slice) {
        if (slice.getColumns() != null) {
            predicate.setColumn_names(serializer.toBytesList(slice.getColumns())).setSlice_rangeIsSet(false);
        }
        else {
            predicate.setSlice_range(ThriftUtils.createSliceRange(serializer, slice.getStartColumn(),
                    slice.getEndColumn(), slice.getReversed(), slice.getLimit()));
        }
        return this;
    }

    /** Range variant taking a generic {@link ByteBufferRange}. */
    @Override
    public RowSliceQuery<K, C> withColumnRange(ByteBufferRange range) {
        predicate.setSlice_range(new SliceRange().setStart(range.getStart()).setFinish(range.getEnd())
                .setCount(range.getLimit()).setReversed(range.isReversed()));
        return this;
    }
}
| 7,994 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCql2Factory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.query.CqlQuery;
/**
 * {@link ThriftCqlFactory} producing CQL2 statement/query adapters; selected
 * by ThriftCqlFactoryResolver when the target Cassandra version is below 1.2
 * or unknown.
 */
public class ThriftCql2Factory implements ThriftCqlFactory {
    @Override
    public CqlStatement createCqlStatement(ThriftKeyspaceImpl keyspace) {
        return new ThriftCqlStatement(keyspace);
    }

    @Override
    public <K, C> CqlQuery<K, C> createCqlQuery(ThriftColumnFamilyQueryImpl<K, C> cfQuery, String cql) {
        return new ThriftCqlQuery<K, C>(cfQuery, cql);
    }
}
| 7,995 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCqlFactory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.query.CqlQuery;
/**
 * Abstraction over the CQL protocol version (CQL2 vs CQL3) used by the thrift
 * driver.  Implementations create the statement and query adapters for a
 * keyspace; see ThriftCqlFactoryResolver for how one is chosen.
 */
public interface ThriftCqlFactory {
    /** Creates a standalone CQL statement bound to the given keyspace. */
    public CqlStatement createCqlStatement(ThriftKeyspaceImpl keyspace);

    /** Creates a CQL query executed in the context of a column family query. */
    public <K, C> CqlQuery<K, C> createCqlQuery(ThriftColumnFamilyQueryImpl<K, C> cfQuery, String cql);
}
| 7,996 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractIndexQueryImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.UUID;
import org.apache.cassandra.thrift.IndexOperator;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.query.IndexColumnExpression;
import com.netflix.astyanax.query.IndexOperationExpression;
import com.netflix.astyanax.query.IndexQuery;
import com.netflix.astyanax.query.IndexValueExpression;
import com.netflix.astyanax.query.PreparedIndexExpression;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.BytesArraySerializer;
import com.netflix.astyanax.serializers.DateSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
/**
 * Base implementation of {@link IndexQuery} for secondary-index searches.
 * Accumulates a thrift IndexClause (index expressions, start key, row count)
 * and a {@link SlicePredicate} (which columns of each matching row to return).
 * Subclasses supply execute()/executeAsync().
 */
public abstract class AbstractIndexQueryImpl<K, C> implements IndexQuery<K, C> {
    // Index clause sent to get_indexed_slices; start key defaults to empty.
    protected final org.apache.cassandra.thrift.IndexClause indexClause = new org.apache.cassandra.thrift.IndexClause();
    // Column filter for each matching row; starts as an all-inclusive range.
    protected SlicePredicate predicate = new SlicePredicate().setSlice_range(ThriftUtils.createAllInclusiveSliceRange());
    // Pagination state, driven by autoPaginateRows() and setNextStartKey().
    protected boolean isPaginating = false;
    protected boolean paginateNoMore = false;
    protected boolean firstPage = true;
    protected ColumnFamily<K, C> columnFamily;

    public AbstractIndexQueryImpl(ColumnFamily<K, C> columnFamily) {
        this.columnFamily = columnFamily;
        indexClause.setStart_key(ByteBuffer.allocate(0));
    }

    /** Selects an explicit set of columns to return; null is a no-op. */
    @Override
    public IndexQuery<K, C> withColumnSlice(C... columns) {
        if (columns != null) {
            predicate.setColumn_names(columnFamily.getColumnSerializer().toBytesList(Arrays.asList(columns)))
                    .setSlice_rangeIsSet(false);
        }
        return this;
    }

    /** Selects an explicit set of columns to return; null is a no-op. */
    @Override
    public IndexQuery<K, C> withColumnSlice(Collection<C> columns) {
        if (columns != null)
            predicate.setColumn_names(columnFamily.getColumnSerializer().toBytesList(columns)).setSlice_rangeIsSet(
                    false);
        return this;
    }

    /**
     * Applies a cached ColumnSlice: either an explicit column list or a
     * start/end range, whichever the slice carries.
     */
    @Override
    public IndexQuery<K, C> withColumnSlice(ColumnSlice<C> slice) {
        if (slice.getColumns() != null) {
            predicate.setColumn_names(columnFamily.getColumnSerializer().toBytesList(slice.getColumns()))
                    .setSlice_rangeIsSet(false);
        }
        else {
            predicate.setSlice_range(ThriftUtils.createSliceRange(columnFamily.getColumnSerializer(),
                    slice.getStartColumn(), slice.getEndColumn(), slice.getReversed(), slice.getLimit()));
        }
        return this;
    }

    /** Selects a contiguous range of columns, optionally reversed and limited. */
    @Override
    public IndexQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
        predicate.setSlice_range(ThriftUtils.createSliceRange(columnFamily.getColumnSerializer(), startColumn,
                endColumn, reversed, count));
        return this;
    }

    /** Range variant taking a generic ByteBufferRange. */
    @Override
    public IndexQuery<K, C> withColumnRange(ByteBufferRange range) {
        predicate.setSlice_range(new SliceRange().setStart(range.getStart()).setFinish(range.getEnd())
                .setCount(range.getLimit()).setReversed(range.isReversed()));
        return this;
    }

    /** Range variant taking pre-serialized column-name bounds. */
    @Override
    public IndexQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count) {
        predicate.setSlice_range(new SliceRange(startColumn, endColumn, reversed, count));
        return this;
    }

    /** Alias for {@link #setRowLimit(int)}. */
    @Override
    public IndexQuery<K, C> setLimit(int count) {
        return setRowLimit(count);
    }

    /** Sets the maximum number of rows returned per request/page. */
    @Override
    public IndexQuery<K, C> setRowLimit(int count) {
        indexClause.setCount(count);
        return this;
    }

    /** Sets the key at which the (token-ordered) search starts. */
    @Override
    public IndexQuery<K, C> setStartKey(K key) {
        indexClause.setStart_key(columnFamily.getKeySerializer().toByteBuffer(key));
        return this;
    }

    /**
     * Advances pagination to the given key.  After the first page the row
     * count is bumped by one — presumably to compensate for the boundary row
     * that is re-fetched and dropped by the caller; confirm against
     * searchWithIndex in the query implementation.
     */
    protected void setNextStartKey(ByteBuffer byteBuffer) {
        indexClause.setStart_key(byteBuffer);
        if (firstPage) {
            firstPage = false;
            if (indexClause.getCount() != Integer.MAX_VALUE)
                indexClause.setCount(indexClause.getCount() + 1);
        }
    }

    // Lets the anonymous expression builder return the enclosing query.
    private IndexQuery<K, C> getThisQuery() {
        return this;
    }

    // Unifies the three fluent-builder interfaces so one anonymous object can
    // play all roles in the whereColumn()...value() chain.
    static interface IndexExpression<K, C> extends IndexColumnExpression<K, C>, IndexOperationExpression<K, C>,
            IndexValueExpression<K, C> {
    }

    /**
     * Adds pre-built expressions (column/operator/value already serialized),
     * translating the operator enum into the thrift IndexOperator.
     */
    public IndexQuery<K, C> addPreparedExpressions(Collection<PreparedIndexExpression<K, C>> expressions) {
        for (PreparedIndexExpression<K, C> expression : expressions) {
            // Buffers are duplicated so reading them here does not disturb the
            // position of the caller's shared buffers.
            org.apache.cassandra.thrift.IndexExpression expr = new org.apache.cassandra.thrift.IndexExpression()
                    .setColumn_name(expression.getColumn().duplicate()).setValue(expression.getValue().duplicate());
            switch (expression.getOperator()) {
            case EQ:
                expr.setOp(IndexOperator.EQ);
                break;
            case LT:
                expr.setOp(IndexOperator.LT);
                break;
            case GT:
                expr.setOp(IndexOperator.GT);
                break;
            case GTE:
                expr.setOp(IndexOperator.GTE);
                break;
            case LTE:
                expr.setOp(IndexOperator.LTE);
                break;
            default:
                throw new RuntimeException("Invalid operator type: " + expression.getOperator().name());
            }
            indexClause.addToExpressions(expr);
        }
        return this;
    }

    /**
     * Starts a fluent expression: whereColumn(...).&lt;operator&gt;().value(...).
     * The expression is added to the index clause only when value(...) is
     * called, which also returns control to the enclosing query.
     */
    @Override
    public IndexColumnExpression<K, C> addExpression() {
        return new IndexExpression<K, C>() {
            private final org.apache.cassandra.thrift.IndexExpression internalExpression = new org.apache.cassandra.thrift.IndexExpression();

            @Override
            public IndexOperationExpression<K, C> whereColumn(C columnName) {
                internalExpression.setColumn_name(columnFamily.getColumnSerializer().toBytes(columnName));
                return this;
            }

            // NOTE: this is the no-arg IndexOperationExpression.equals()
            // contract method (EQ operator), not Object.equals(Object).
            @Override
            public IndexValueExpression<K, C> equals() {
                internalExpression.setOp(IndexOperator.EQ);
                return this;
            }

            @Override
            public IndexValueExpression<K, C> greaterThan() {
                internalExpression.setOp(IndexOperator.GT);
                return this;
            }

            @Override
            public IndexValueExpression<K, C> lessThan() {
                internalExpression.setOp(IndexOperator.LT);
                return this;
            }

            @Override
            public IndexValueExpression<K, C> greaterThanEquals() {
                internalExpression.setOp(IndexOperator.GTE);
                return this;
            }

            @Override
            public IndexValueExpression<K, C> lessThanEquals() {
                internalExpression.setOp(IndexOperator.LTE);
                return this;
            }

            // Each value(...) overload serializes with the matching built-in
            // serializer, commits the expression, and returns the query.
            @Override
            public IndexQuery<K, C> value(String value) {
                internalExpression.setValue(StringSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(long value) {
                internalExpression.setValue(LongSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(int value) {
                internalExpression.setValue(IntegerSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(boolean value) {
                internalExpression.setValue(BooleanSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(Date value) {
                internalExpression.setValue(DateSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(byte[] value) {
                internalExpression.setValue(BytesArraySerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(ByteBuffer value) {
                internalExpression.setValue(ByteBufferSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(double value) {
                internalExpression.setValue(DoubleSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public IndexQuery<K, C> value(UUID value) {
                internalExpression.setValue(UUIDSerializer.get().toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }

            @Override
            public <V> IndexQuery<K, C> value(V value, Serializer<V> valueSerializer) {
                internalExpression.setValue(valueSerializer.toBytes(value));
                indexClause.addToExpressions(internalExpression);
                return getThisQuery();
            }
        };
    }

    /** Alias for {@code autoPaginateRows(true)}. */
    @Override
    public IndexQuery<K, C> setIsPaginating() {
        return autoPaginateRows(true);
    }

    /** Enables or disables automatic pagination across repeated executions. */
    @Override
    public IndexQuery<K, C> autoPaginateRows(boolean autoPaginate) {
        this.isPaginating = autoPaginate;
        return this;
    }
}
| 7,997 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractRowQueryImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.query.RowQuery;
public abstract class AbstractRowQueryImpl<K, C> implements RowQuery<K, C> {
protected final SlicePredicate predicate = new SlicePredicate().setSlice_range(ThriftUtils.createAllInclusiveSliceRange());
protected final Serializer<C> serializer;
protected boolean isPaginating = false;
protected boolean paginateNoMore = false;
public AbstractRowQueryImpl(Serializer<C> serializer) {
this.serializer = serializer;
}
@Override
public RowQuery<K, C> withColumnSlice(C... columns) {
if (columns != null)
predicate.setColumn_names(serializer.toBytesList(Arrays.asList(columns))).setSlice_rangeIsSet(false);
return this;
}
@Override
public RowQuery<K, C> withColumnSlice(Collection<C> columns) {
if (columns != null)
predicate.setColumn_names(serializer.toBytesList(columns)).setSlice_rangeIsSet(false);
return this;
}
@Override
public RowQuery<K, C> withColumnSlice(ColumnSlice<C> slice) {
if (slice.getColumns() != null) {
predicate.setColumn_names(serializer.toBytesList(slice.getColumns())).setSlice_rangeIsSet(false);
}
else {
predicate.setSlice_range(ThriftUtils.createSliceRange(serializer, slice.getStartColumn(),
slice.getEndColumn(), slice.getReversed(), slice.getLimit()));
}
return this;
}
@Override
public RowQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
predicate.setSlice_range(ThriftUtils.createSliceRange(serializer, startColumn, endColumn, reversed, count));
return this;
}
@Override
public RowQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count) {
predicate.setSlice_range(new SliceRange(startColumn, endColumn, reversed, count));
return this;
}
@Override
public RowQuery<K, C> setIsPaginating() {
return autoPaginate(true);
}
@Override
public RowQuery<K, C> autoPaginate(boolean enabled) {
this.isPaginating = enabled;
return this;
}
@Override
public RowQuery<K, C> withColumnRange(ByteBufferRange range) {
predicate.setSlice_range(new SliceRange().setStart(range.getStart()).setFinish(range.getEnd())
.setCount(range.getLimit()).setReversed(range.isReversed()));
return this;
}
}
| 7,998 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCqlStatementResult.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import org.apache.cassandra.thrift.CqlResult;
import com.netflix.astyanax.cql.CqlSchema;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.thrift.model.ThriftCqlRowsImpl;
/**
 * {@link CqlStatementResult} backed by a raw thrift {@link CqlResult}.
 */
public class ThriftCqlStatementResult implements CqlStatementResult {
    /** Raw thrift result; set once at construction. */
    private final CqlResult result;

    public ThriftCqlStatementResult(CqlResult result) {
        this.result = result;
    }

    /**
     * Not implemented by the thrift driver.
     *
     * @throws UnsupportedOperationException always (subclass of the
     *         RuntimeException previously thrown, so callers are unaffected)
     */
    @Override
    public long asCount() {
        throw new UnsupportedOperationException("asCount is not supported by the thrift CQL driver");
    }

    /**
     * Interprets the result as rows of the given column family.
     *
     * @throws RuntimeException if the thrift result carries no rows
     *         (e.g. it was a count or schema-change response)
     */
    @Override
    public <K, C> Rows<K, C> getRows(ColumnFamily<K, C> columnFamily) {
        if (!result.isSetRows())
            throw new RuntimeException("CQL response doesn't contain rows");
        return new ThriftCqlRowsImpl<K, C>(result.getRows(), columnFamily.getKeySerializer(), columnFamily.getColumnSerializer());
    }

    /** @return the schema metadata attached to the thrift result */
    @Override
    public CqlSchema getSchema() {
        return new ThriftCqlSchema(result.getSchema());
    }
}
| 7,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.